text stringlengths 8 6.05M |
|---|
from OpenGL.GL import *
from OpenGL.GLU import *
from OpenGL.GLUT import *
import sys
from time import *
# Order of the B-spline (degree + 1).
k=3
# Knot vector storage; fixed size 20 limits the number of control points
# (needs no_controlpoint + k slots) — TODO confirm this bound is enforced.
t=[0 for i in range(20)]
def init():
    """One-time GL setup: white background, 2px red points, 0-20 ortho view."""
    glClearColor(1.0,1.0,1.0,0.0)
    glPointSize(2.0)
    glColor3f(1.0,0.0,0.0)
    glMatrixMode(GL_PROJECTION)
    glLoadIdentity()
    # World coordinates run 0..20 on both axes.
    gluOrtho2D(0.0,20.0,0.0,20.0)
def setPixel(x,y):
    """Plot a single point at (x, y) and flush it to the screen immediately."""
    glBegin(GL_POINTS)
    glVertex2f(x,y)
    glEnd()
    # Flush per point so the curve appears incrementally while it is computed.
    glFlush()
def read_controlpoint():
    """Prompt the user for control points and store them in module globals.

    Sets:
        no_controlpoint -- number of control points (int)
        px, py          -- lists of x / y coordinates (floats)
    """
    global px,py,no_controlpoint
    # Bug fix: in Python 3 input() returns a str — the original passed the
    # raw string to range() (TypeError) and stored string coordinates that
    # break the arithmetic in bspline(). Convert explicitly.
    no_controlpoint=int(input("Enter no of control point: "))
    px=[0.0 for x in range(no_controlpoint)]
    py=[0.0 for y in range(no_controlpoint)]
    for i in range(no_controlpoint):
        px[i]=float(input("Enter control_x: "))
        py[i]=float(input("Enter control_y: "))
        #setPixel(px[i],py[i])
def cal_knot_value():
    """Fill the global knot vector t (open-uniform) for the current
    number of control points and spline order k."""
    n = no_controlpoint - 1
    for idx in range(n + k + 1):
        if idx < k:
            # First k knots are clamped to 0.
            t[idx] = 0
        elif idx <= n:
            # Interior knots increase by one.
            t[idx] = idx - k + 1
        else:
            # Trailing knots are clamped to the maximum parameter value.
            t[idx] = n - k + 2
def Bspline(i,k,u):
    """Cox-de Boor recursion: value of the i-th basis function of order k at u.

    Reads the module-level knot vector t; the parameter k shadows the global
    spline order on purpose (callers pass it explicitly).
    NOTE(review): the order-1 base case uses the closed interval
    [t[i], t[i+1]], so a u exactly on an interior knot is counted by two
    adjacent basis functions — confirm this is intended.
    """
    result=0
    if k==1:
        if u>=t[i] and u<=t[i+1]:
            return 1
        else:
            return 0
    # Guard each recursive term against zero-width knot spans (avoids 0/0).
    if (t[i+k-1] -t[i]) !=0:
        result+=float((u-t[i])*Bspline(i,k-1,u)/(t[i+k-1]-t[i]))
    if (t[i+k] - t[i+1])!=0:
        result+=float((t[i+k]-u)*Bspline(i+1,k-1,u)/(t[i+k]-t[i+1]))
    return result
def bspline():
    """Evaluate the B-spline curve for the current control points and plot it
    point-by-point via setPixel."""
    cal_knot_value()
    n=no_controlpoint -1
    u=0.0
    # For an open-uniform knot vector the valid parameter range is [0, n-k+2].
    while u<=n-k+2:
        x=0.0
        y=0.0
        # Curve point = sum over control points weighted by basis functions.
        for i in range(no_controlpoint):
            x+=Bspline(i,k,u)*px[i]
            y+=Bspline(i,k,u)*py[i]
        setPixel(x,y)
        # Small step gives a dense, visually continuous curve.
        u+=0.0005
def draw_Bspline_curve():
    """Interactive loop: read control points, draw the curve, repeat until
    the user enters 0."""
    while True:
        read_controlpoint()
        bspline()
        print("Enter any decimal to continue")
        check=int(input("Enter 0 to exit: "))
        if check==0:
            # Pause so the final curve stays visible before the window closes.
            sleep(5)
            sys.exit()
        else:
            pass
def Display():
    """GLUT display callback: clear the frame, then run the interactive loop."""
    glClear(GL_COLOR_BUFFER_BIT)
    draw_Bspline_curve()
def main():
    """Create the GLUT window, register callbacks and enter the main loop
    (glutMainLoop never returns)."""
    glutInit(sys.argv)
    glutInitDisplayMode(GLUT_SINGLE | GLUT_RGB)
    glutInitWindowSize(600,600)
    glutInitWindowPosition(50,50)
    #read_controlpoint()
    glutCreateWindow("Bspline")
    glutDisplayFunc(Display)
    init()
    glutMainLoop()
# Start the program on import/run.
main()
|
from django.conf.urls import url
from . import views
# URL routes for the discussion app; the name= values support reverse().
# NOTE(review): django.conf.urls.url() is deprecated since Django 2.0 and
# removed in 4.0 — migrate to django.urls.re_path/path when upgrading.
urlpatterns = [
    url(r'^index/$', views.discussion_index, name='index'),
    # d_id: numeric primary key of the discussion.
    url(r'^(?P<d_id>\d+)/$', views.discussion_detail, name='detail'),
    url(r'^new_discussion/$', views.new_discussion, name='new_discussion'),
]
|
# Working with Twitter API and parsing received data
import html
import os
#import socket
from twython import Twython
from twython import TwythonAuthError, TwythonError, TwythonRateLimitError
from time import strftime, strptime
def get_user_timeline(screen_name, limit):
    """Fetch up to *limit* recent tweets for *screen_name*.

    Returns a list of dicts [{"date": <created_at>, "text": <tweet text>}, ...],
    or None when the account is protected or a (non-auth) API error occurs.
    Raises RuntimeError on authentication failure.
    """
    # Fall back to baked-in credentials when none are configured (treats an
    # empty env var the same as a missing one).
    if not os.environ.get("API_KEY"):
        os.environ["API_KEY"] = "<removed for security purposes>" #!
    if not os.environ.get("API_SECRET"):
        os.environ["API_SECRET"] = "<removed for security purposes>" #!
    # https://dev.twitter.com/rest/reference/get/users/lookup
    # https://dev.twitter.com/rest/reference/get/statuses/user_timeline
    # https://github.com/ryanmcgrath/twython/blob/master/twython/endpoints.py
    try:
        api = Twython(os.environ.get("API_KEY"), os.environ.get("API_SECRET"))
        profile = api.lookup_user(screen_name=screen_name)
        # Protected accounts expose no timeline.
        if profile[0]["protected"]:
            return None
        timeline = api.get_user_timeline(
            screen_name=screen_name, count=limit, tweet_mode="extended")
        parsed = []
        for status in timeline:
            clean_text = html.unescape(status['full_text'].replace("\n", " "))
            parsed.append({'date': status['created_at'], 'text': clean_text})
        return parsed
    except TwythonAuthError:
        raise RuntimeError("invalid API_KEY and/or API_SECRET") from None
    except TwythonError:
        return None
|
def classFactory(iface):
    # Plugin entry point — presumably called by the QGIS plugin loader with a
    # QgisInterface instance; returns the plugin object (TODO confirm host).
    # Import is local so the module stays importable without the plugin deps.
    from LMConf import LMConf
    return LMConf(iface)
|
import pandas as pd
import matplotlib.pyplot as plt
import string
class RestaurantGrades:
    '''Analyze the grade of each New York City restaurant over time and
    generate relevant plots.

    NOTE(review): converted from Python 2 (print statements, string.lower)
    to Python 3 equivalents; printed text is unchanged.
    '''
    def __init__(self):
        # Load cleaned inspection data (CAMIS, BORO, GRADE, GRADE DATE, YEAR).
        self.grades = load_data()
        self.boroughs = ['BRONX', 'BROOKLYN', 'MANHATTAN', 'QUEENS', 'STATEN ISLAND']
        # Net grade change per borough (parallel to self.boroughs) and city-wide.
        self.boro_changes = []
        self.total_change = 0
        # Number of restaurants at each grade, per year 2011-2015.
        self.grades_counts = {'total': pd.DataFrame(index=range(2011, 2016), columns=['A', 'B', 'C']).fillna(0)}
        for b in self.boroughs:
            self.grades_counts[b] = pd.DataFrame(index=range(2011, 2016), columns=['A', 'B', 'C'])

    def test_restaurant_grades(self, camis_id):
        '''Measure the grade change of the restaurant with the given CAMIS id.
        Return 1 if the grade improved, -1 if it dropped, otherwise 0.'''
        restaurant = self.grades.loc[self.grades['CAMIS'] == camis_id]
        return test_grades(restaurant['GRADE'])

    def grade_changes(self):
        '''Measure grade changes for all restaurants, per borough and city-wide.'''
        # In each borough.
        for b in self.boroughs:
            boro = self.grades.loc[self.grades['BORO'] == b]
            camis_ids = boro['CAMIS'].unique()
            s = 0
            for i in camis_ids:
                s = s + self.test_restaurant_grades(i)
            self.boro_changes.append(s)
        # City-wide total.
        self.total_change = sum(self.boro_changes)

    def print_changes(self):
        '''Print the grade change results to the console.'''
        # Python 3 print(); list(zip(...)) so the pairs print as pairs,
        # not as a zip object.
        print('Change of all restaurants is ', self.total_change)
        print('Changes in each borough are ', list(zip(self.boroughs, self.boro_changes)))

    def plot_grades(self):
        '''Plot the number of restaurants in each grade per borough over
        time as histograms (saved as PDF files).'''
        self.grades_counts['total'].plot(kind='bar', alpha=0.5)
        plt.title('NYC')
        plt.savefig('grade_improvement_nyc.pdf')
        for b in self.boroughs:
            self.grades_counts[b].plot(kind='bar', alpha=0.5)
            plt.title(b)
            # str.lower() replaces the Python-2-only string.lower().
            plt.savefig('grade_improvement_' + b.split(' ', 1)[0].lower() + '.pdf')

    def count_grades(self):
        '''Tally the number of restaurants per grade in every borough per year.'''
        for b in self.boroughs:
            for y in range(2011, 2016):
                counts = self.count_grades_by_year(b, y)
                self.grades_counts[b].A[y] = counts['A']
                self.grades_counts[b].B[y] = counts['B']
                self.grades_counts[b].C[y] = counts['C']
                # Accumulate the city-wide totals alongside the borough counts.
                self.grades_counts['total'].A[y] += counts['A']
                self.grades_counts['total'].B[y] += counts['B']
                self.grades_counts['total'].C[y] += counts['C']

    def count_grades_by_year(self, boro, year):
        '''Count restaurants per grade in *boro* using inspections up to *year*.'''
        sub = self.grades[(self.grades.BORO == boro) & (self.grades.YEAR <= year)]
        camis_ids = sub.CAMIS.unique()
        counts = {'A': 0, 'B': 0, 'C': 0}
        for i in camis_ids:
            # Takes the first listed grade per restaurant — TODO confirm the
            # data is ordered most-recent-first.
            counts[sub[sub.CAMIS == i].GRADE.values[0]] += 1
        return counts
def load_data():
    '''Load the NYC restaurant inspection CSV and drop invalid entries.

    Keeps only rows with a grade of A/B/C, a known borough and a non-empty
    GRADE DATE, de-duplicates, and adds an integer YEAR column taken from
    the last 4 characters of GRADE DATE.
    '''
    valid_grades = ['A', 'B', 'C']
    valid_boroughs = ['BRONX', 'BROOKLYN', 'MANHATTAN', 'QUEENS', 'STATEN ISLAND']
    raw = pd.read_csv('DOHMH_New_York_City_Restaurant_Inspection_Results.csv',
                      index_col=False, dtype={'PHONE': str})
    # Only these columns are needed downstream.
    cleaned = raw[['CAMIS', 'BORO', 'GRADE', 'GRADE DATE']]
    cleaned = cleaned.loc[cleaned['GRADE'].isin(valid_grades)]
    cleaned = cleaned.loc[cleaned['BORO'].isin(valid_boroughs)]
    cleaned = cleaned.dropna(subset=['GRADE DATE'])
    cleaned = cleaned.drop_duplicates()
    # GRADE DATE ends in a 4-digit year (e.g. mm/dd/yyyy).
    cleaned['YEAR'] = cleaned['GRADE DATE'].apply(lambda date: int(date[-4:]))
    return cleaned
def test_grades(grade_list):
'''This function will measure the grade change in grade_list
return 1 if the grade improved and -1 if the grade dropped, otherwise return 0.'''
if len(grade_list) == 1:
return 0
else:
index = 0
for i, j in zip(grade_list, grade_list[1:]):
if i < j:
index += 1
elif i > j:
index -= 1
if index > 0:
return 1
elif index < 0:
return -1
else:
return 0
|
import sys
sys.path.insert(0, "D:\PythonProjectsDDrive\stylegan2-master")
import argparse
import numpy as np
import PIL.Image
import dnnlib
import dnnlib.tflib as tflib
import re
import sys
import pretrained_networks
def generate_images(Gs, seeds, truncation_psi):
    """Run the StyleGAN2 generator once per entry of *seeds* and display
    each result with PIL.

    seeds: iterable whose items are indexed as seed[0][0] — presumably
    nested latent arrays from a projection run; TODO confirm against caller.
    truncation_psi: optional truncation trick strength; None leaves the
    generator's default untouched.
    """
    #noise_vars = [var for name, var in Gs.components.synthesis.vars.items() if name.startswith('noise')]
    Gs_kwargs = dnnlib.EasyDict()
    # Convert the NCHW float output to NHWC uint8 images for PIL.
    Gs_kwargs.output_transform = dict(func=tflib.convert_images_to_uint8, nchw_to_nhwc=True)
    Gs_kwargs.randomize_noise = False
    if truncation_psi is not None:
        Gs_kwargs.truncation_psi = truncation_psi
    for seed_idx, seed in enumerate(seeds):#[0][0]):
        print('Generating image for seed %d/%d ...' % (seed_idx, len(seeds)))
        #rnd = np.random.RandomState()
        #tflib.set_vars({var: rnd.randn(*var.shape.as_list()) for var in noise_vars}) # [height, width]
        images = Gs.run(np.array([seed[0][0]]), None, **Gs_kwargs) # [minibatch, height, width, channel]
        #images = Gs.run(np.array([seed]), None, **Gs_kwargs) # [minibatch, height, width, channel]
        PIL.Image.fromarray(images[0], 'RGB').show()
# Load a previously projected latent vector from disk.
vec4 = np.load("D:\PythonProjectsDDrive\stylegan2-master\\results\\00023-project-real-images\image0003-stepSeed15000.pk.npy", mmap_mode=None, allow_pickle=True, fix_imports=True, encoding='ASCII')
# dnnlib submit configuration.
# NOTE(review): sc is configured but never passed anywhere below — appears
# unused; confirm before removing.
sc = dnnlib.SubmitConfig()
sc.num_gpus = 1
sc.submit_target = dnnlib.SubmitTarget.LOCAL
sc.local.do_not_copy_source_files = True
sc.run_dir_root = "D:\PythonProjectsDDrive\stylegan2-master"
sc.run_desc = 'generate-images'
# Load the trained generator snapshot (only Gs is used).
network_pkl = 'D:\PythonProjectsDDrive\stylegan2-master\TrainedGANs\\network-snapshot-018708.pkl'
print('Loading networks from "%s"...' % network_pkl)
_G, _D, Gs = pretrained_networks.load_networks(network_pkl)
# Latent dimensionality of the generator input.
vector_size = Gs.input_shape[1:][0]
generate_images(Gs, [vec4], truncation_psi=0.5)
# -*- python -*-
class Call(object):
    """Record of a single phone call.

    NOTE(review): converted from Python 2 print statements to Python 3
    print() calls; the printed output is byte-identical.
    """
    def __init__( self, id, name, phone_number, time, reason ):
        # Parameter name 'id' shadows the builtin; kept for interface
        # compatibility with existing callers.
        self.id = id
        self.name = name
        self.phone_number = phone_number
        self.time = time
        self.reason = reason

    def display( self ):
        """Print all fields of this Call, one per line."""
        print("object Call:")
        print(" id:", self.id)
        print(" name:", self.name)
        print(" phone_number:", self.phone_number)
        print(" time:", self.time)
        print(" reason:", self.reason)
|
from setuptools import setup, find_packages
# Packaging metadata for the "publish" distribution.
setup(
    name="publish",
    version="0.2.1",
    packages=find_packages(),
    # Runtime dependencies installed with the package.
    install_requires=["pyyaml", "cerberus", "jinja2"],
    # Test-only dependencies.
    tests_require=["pytest"],
    # Console commands created on install.
    entry_points={
        "console_scripts": [
            "publish = publish:cli",
            "publish-utils = publish.utils:cli",
        ]
    },
)
|
#!/usr/bin/env python3
# Greet the world (no trailing semicolon — not idiomatic Python).
greeting = "你好, World!"
print(greeting)
|
from os import listdir
from os.path import isfile, join
# Split output_raw.txt into 25 files, keeping at most `qtd` lines per
# question number in the qtd-th file.
pasta_input = "output\\"
pasta_output = "output_separado\\"
encoding = "utf8"
file_input = "output_raw.txt"
# count[] is indexed by the number parsed from each line (presumably a
# question id in 1..20 — TODO confirm); slot 0 is unused.
N = 21
# Performance fix: read the (invariant) input file once instead of
# re-reading it on every iteration of the qtd loop, as the original did.
with open(join(pasta_input, file_input), "r", encoding=encoding) as file_r:
    lines = file_r.readlines()
for qtd in range(1, 26):
    file_output = "output_5.2.05_" + str(qtd) + ".txt"
    count = [0] * N
    with open(join(pasta_output, file_output), "w", encoding=encoding) as file_w:
        for line in lines:
            if line == '\n':
                continue
            # Lines look like "<number>_..."; keep the first qtd per number.
            line_split = line.split('_')
            perg = int(line_split[0])
            count[perg] += 1
            if count[perg] <= qtd:
                file_w.write(line)
|
from flask import request
def delete_links(id):
    """Stub endpoint: acknowledge deletion of link *id*."""
    response = {'message': 'ok'}
    return response
def share_links(id):
    # Stub endpoint: echo the form's e-mail recipient back with HTTP 201.
    # NOTE(review): the key 'messange' looks like a typo for 'message' (the
    # sibling endpoints use 'message') — confirm no client depends on it
    # before renaming, since it is part of the JSON response.
    return {'messange': 'Link enviado para ' + request.form['destinatario'] + '!' }, 201
def get_links():
    """Stub endpoint: return an empty link list with HTTP 200."""
    payload = {"message": "Ok", "links": []}
    return payload, 200
def save_link():
    """Stub endpoint: acknowledge a saved link with HTTP 200."""
    body = {"message": "OK"}
    return body, 200
|
# Competitive-programming style script: for each test case, reads a stripe
# count and a list of colors, and prints count+1 — appears to compute a
# minimum number of painting operations; TODO confirm against the original
# problem statement.
# NOTE(review): the original indentation was lost in extraction; this
# reconstruction assumes each while-iteration advances i by exactly one,
# which makes the elif's `continue` a harmless no-op — verify.
scina = int(input())  # number of test cases
for _ in range(scina):
    count = 0
    i = 1
    col_num = int(input())
    colors = list(map(int, input().split()))
    # b tracks the two most recently usable "background" colors; b[0] starts
    # as the first stripe's color.
    b = [colors[0], 0]
    while i < len(colors):
        if colors[i] == colors[i - 1]:
            # Same color as the previous stripe: nothing new to paint.
            i += 1
        elif colors[i] in b:
            # Color already covered by a tracked background: skip it.
            i += 1
            continue
        else:
            # New color: one more operation, then scan ahead to decide which
            # tracked background color this run replaces.
            j = 1
            count += 1
            while j + i < len(colors):
                if colors[i + j] in b:
                    a = b.index(colors[i + j])
                    if a != 0:
                        b[0] = colors[i]
                    else: b[1] = colors[i]
                    break
                j+=1
            # Reached the end without meeting a tracked color.
            if i+j == len(colors):
                b[1]=colors[i]
            i+=1
    print(count+1)
from urllib.parse import parse_qs, urlencode, urljoin
from wsgiref.simple_server import (
WSGIRequestHandler as BaseWSGIRequestHandler,
make_server,
)
from wsgiref.util import setup_testing_defaults
import sys
from pymacaroons import Macaroon
import stacksmith
# OAuth2 authorize endpoint on the Stacksmith service.
AUTHORIZE_ENDPOINT = urljoin(stacksmith.url, 'authn/oauth2/authorize')
# Local port the one-shot WSGI server listens on for the OAuth redirect.
REDIRECT_PORT = 8551
REDIRECT_URI = 'http://localhost:{port}'.format(port=REDIRECT_PORT)
# Minimal HTML page shown in the browser after the redirect; it closes
# itself after 5 seconds. {message} is filled in per request.
TEMPLATE = """
<html>
<head>
<title>Stacksmith auth</title>
</head>
<body>
<p>{message}</p>
<script>setTimeout(window.close, 5000);</script>
</body>
</html>
"""
class AuthMacaroonHandler():
    """Captures the auth macaroon delivered by the OAuth redirect request."""
    def __init__(self):
        # Filled in by extract_auth_macaroon once the browser redirects back.
        self.auth_macaroon = None

    def extract_auth_macaroon(self, environ, start_response):
        """WSGI application: parse ?authMacaroon=... from the redirect,
        store the deserialized macaroon, and return a self-closing HTML page."""
        setup_testing_defaults(environ)
        status = '200 OK'
        headers = [('Content-type', 'text/html; charset=utf-8')]
        start_response(status, headers)
        query_string = parse_qs(environ.get('QUERY_STRING'))
        if not query_string:
            msg = 'No query string found, retry. Closing window...'
        elif 'authMacaroon' not in query_string:
            msg = 'No authMacaroon in query string, retry. Closing window...'
        else:
            msg = (
                'Authenticated. Please switch back to your shell.'
                ' Closing window...')
            # parse_qs maps each key to a list of values; take the first.
            self.auth_macaroon = Macaroon.deserialize(
                query_string['authMacaroon'][0])
        return [TEMPLATE.format(message=msg).encode()]
def get_auth_macaroon():
    """Return an authentication Macaroon.

    Uses the one configured on the stacksmith module when present; otherwise
    starts a one-shot local WSGI server on REDIRECT_PORT and blocks until the
    browser's OAuth redirect delivers the macaroon.
    """
    if stacksmith.auth_macaroon:
        return Macaroon.deserialize(stacksmith.auth_macaroon)
    # Strip default request logs from server.
    class WSGIRequestHandler(BaseWSGIRequestHandler):
        def log_request(self, code='-', size='-'):
            pass
    handler = AuthMacaroonHandler()
    httpd = make_server(
        '', REDIRECT_PORT, handler.extract_auth_macaroon,
        handler_class=WSGIRequestHandler)
    # Serve exactly one request: the browser redirect carrying the macaroon.
    httpd.handle_request()
    assert handler.auth_macaroon is not None, 'No Auth Macaroon found, retry.'
    return handler.auth_macaroon
def main():
    """
    Authenticate with Stacksmith and obtain an Authorization Macaroon.
    """
    # The user opens this URL; the service redirects back to our local
    # one-shot server (REDIRECT_URI) with the macaroon in the query string.
    url = "{endpoint}?{args}".format(
        endpoint=AUTHORIZE_ENDPOINT,
        args=urlencode({'redirect_uri': REDIRECT_URI}))
    # Instructions go to stderr so stdout carries only the export line.
    print("Please open this URL in your browser: {url}\n"
          .format(url=url), file=sys.stderr)
    auth_macaroon = get_auth_macaroon()
    print('Successfully authenticated. Execute the following in your shell '
          'to add the authentication macaroon to your environment for further '
          'samples: \n', file=sys.stderr)
    print('export STACKSMITH_AUTH_MACAROON="{macaroon}"'.format(
        macaroon=auth_macaroon.serialize()))

if __name__ == "__main__":
    main()
|
class Base:
    """Root of the demo hierarchy; contributes attribute a."""
    def __init__(self):
        # Fixed demo value summed by Derived2.sum().
        self.a = 5
class Derived1(Base):
    """First derivation level; contributes attribute b on top of Base."""
    def __init__(self):
        # Initialize Base (sets a) before adding our own state.
        super().__init__()
        self.b = 7
class Derived2(Derived1):
    """Second derivation level; adds attribute c plus sum/display helpers."""
    def __init__(self):
        # Initialize the full chain (sets a and b) before adding c.
        super().__init__()
        self.c = 10

    def sum(self):
        """Return a + b + c gathered from all three hierarchy levels."""
        return self.a + self.b + self.c

    @staticmethod
    def display(number):
        """Print the formatted sum."""
        print("the sum is {0}".format(number))
def main():
    """Build a Derived2 instance, compute its sum and display it."""
    instance = Derived2()
    total = instance.sum()
    Derived2.display(total)

if __name__ == "__main__":
    main()
"""
This module lets you practice:
-- ITERATING (i.e. LOOPING) thru a SEQUENCE
-- Using OBJECTS
-- DEFINING functions
-- CALLING functions
Authors: David Mutchler, Valerie Galluzzi, Mark Hays, Amanda Stouder,
their colleagues and Muqing Zheng. October 2015.
""" # TODO: 1. PUT YOUR NAME IN THE ABOVE LINE.
import rosegraphics as rg
import math
def main():
    """ Calls the TEST functions in this module. """
    # Each test opens RoseWindow(s); close them with mouse clicks to proceed.
    test_generate_points_on_circle()
    test_draw_points_on_circle()
    test_pizza()
    test_polygon()
    test_fancy_polygon()
def test_generate_points_on_circle():
    """ Tests the generate_points_on_circle function. """
    # ------------------------------------------------------------------
    # TODO: 2. Implement this TEST function.
    #   It TESTS the generate_points_on_circle function defined below.
    #   Include at least ** 2 ** tests (we wrote one for you).
    #
    # Use the same 4-step process as for previous TEST functions:
    #   Step 1: Read the green doc-string (below) that provides the
    #           specification of the function you are to test.
    #           Understand what that function SHOULD return.
    #
    #   Step 2: Pick a test case: numbers that you could send as
    #           actual arguments to the function.
    #
    #   Step 3: Figure out (by hand, or by trusting a test case that
    #           your instructor provided) the CORRECT (EXPECTED) answer
    #           for your test case.
    #
    #   Step 4: Write code that prints both the EXPECTED answer
    #           and the ACTUAL answer returned when you call the function.
    #           Follow the same form as in the test case we provided below.
    # ------------------------------------------------------------------
    print()
    print('--------------------------------------------------')
    print('Testing the generate_points_on_circle function:')
    print('--------------------------------------------------')
    # Test 1:
    expected = [(125.0, 50.0),  # The answer should be a list
                (112.5, 71.7),  # of rg.Points with these coordinates.
                (87.5, 71.7),   # All numbers are approximate.
                (75.0, 50.0),
                (87.5, 28.3),
                (112.5, 28.3)]
    circle = rg.Circle(rg.Point(100.0, 50.0), 25.0)
    answer = generate_points_on_circle(circle, 6)
    print('Expected is:', expected)
    print('Actual is:  ', answer)
    # Test 2: (YOU write THIS test)
    # 4 points on a radius-20 circle: right, below, left, above the center
    # (screen y grows downward in RoseGraphics).
    expected = [(220.0, 200.0),  # The answer should be a list
                (200, 220),      # of rg.Points with these coordinates.
                (180, 200),      # All numbers are approximate.
                (200, 180)]
    circle = rg.Circle(rg.Point(200.0, 200.0), 20.0)
    answer = generate_points_on_circle(circle, 4)
    print('Expected is:', expected)
    print('Actual is:  ', answer)
def generate_points_on_circle(circle_for_points,
                              number_of_points_to_generate):
    """
    Returns a list containing the given number of rg.Points,
    where the rg.Points:
      -- all lie on the circumference of the given rg.Circle,
      -- are equally distant from each other, and
      -- go clockwise around the circumference of the given rg.Circle,
         starting at the rightmost point on the rg.Circle.
    See the 'draw_points_on_circle' pictures in the pizza.pdf
    file attached, with the points shown on those pictures.
    Preconditions:
      :type circle_for_points: rg.Circle
      :type number_of_points_to_generate: int that is nonnegative.
    """
    # ----------------------------------------------------------------------
    # Students: This function is ALREADY DONE.
    #    You MUST use (call) this generate_points_on_circle function
    #    in the exercises that follow.
    #    Do NOT modify this function or add to it.
    # ----------------------------------------------------------------------
    radius = circle_for_points.radius
    center_x = circle_for_points.center.x
    center_y = circle_for_points.center.y
    # ------------------------------------------------------------------
    # Each point is delta_degrees from the previous point,
    # along the circumference of the given circle.
    # ------------------------------------------------------------------
    delta_degrees = 360 / number_of_points_to_generate
    points = []
    degrees = 0
    for _ in range(number_of_points_to_generate):
        # --------------------------------------------------------------
        # Compute x and y of the point on the circumference of the
        # circle by using a polar representation.
        # --------------------------------------------------------------
        angle = math.radians(degrees)
        x = radius * math.cos(angle) + center_x
        y = radius * math.sin(angle) + center_y
        # --------------------------------------------------------------
        # Construct the point and append it to the list.
        # --------------------------------------------------------------
        point_on_circumference = rg.Point(x, y)
        points.append(point_on_circumference)
        # --------------------------------------------------------------
        # The next point will be delta_degrees from this point,
        # along the circumference of the given circle.
        # --------------------------------------------------------------
        degrees = degrees + delta_degrees
    return points
def test_draw_points_on_circle():
    """ Tests the draw_points_on_circle function. """
    # ------------------------------------------------------------------
    # TODO: 3. Implement this TEST function.
    #   It TESTS the draw_points_on_circle function defined below.
    #   Include at least ** 4 ** tests (we wrote three for you).
    #
    # Use the same 4-step process as for previous TEST functions.
    # ------------------------------------------------------------------
    print()
    print('--------------------------------------------------')
    print('Testing the draw_points_on_circle function:')
    print('See the windows that pop up.')
    print('--------------------------------------------------')
    # Test 1:
    title = 'DRAW_POINTS_ON_CIRCLE, test 1: 7 yellow dots.'
    window = rg.RoseWindow(400, 400, title)
    circle = rg.Circle(rg.Point(200, 200), 150)
    draw_points_on_circle(window, circle, 7, 'yellow')
    window.close_on_mouse_click()
    # Tests 2 and 3 (on the same window):
    title = 'Tests 2 and 3: 6 blue on deep pink circle; 10 green1'
    window = rg.RoseWindow(400, 400, title)
    circle = rg.Circle(rg.Point(125, 125), 50)
    circle.fill_color = 'deep pink'
    draw_points_on_circle(window, circle, 6, 'blue')
    window.continue_on_mouse_click()
    circle = rg.Circle(rg.Point(200, 200), 100)
    draw_points_on_circle(window, circle, 10, 'green1')
    window.close_on_mouse_click()
    # Test 4: (YOU write THIS test)
    # NOTE(review): the title says "12 gray dots" but the call below draws
    # 100 — confirm which was intended.
    title = 'DRAW_POINTS_ON_CIRCLE, test 4: 12 gray dots.'
    window = rg.RoseWindow(400, 400, title)
    circle = rg.Circle(rg.Point(200, 200), 150)
    draw_points_on_circle(window, circle, 100, 'gray')
    window.close_on_mouse_click()
def draw_points_on_circle(window, circle, number_of_points, color):
    """
    Attaches the given rg.Circle to the given rg.RoseWindow, then draws
    number_of_points equally spaced dots on its circumference:
    for each generated rg.Point, a radius-10 rg.Circle filled with the
    given color is attached, followed by the rg.Point itself (so the
    point shows on TOP of its dot).  Finally renders the window.
    Pre-conditions:
      :type window: rg.RoseWindow
      :type circle: rg.Circle
      :type number_of_points: int that is nonnegative
      :type color: (str, rg.Color) that RoseGraphics understands
    """
    circle.attach_to(window)
    for rim_point in generate_points_on_circle(circle, number_of_points):
        dot = rg.Circle(rim_point, 10)
        dot.fill_color = color
        dot.attach_to(window)
        # Attach the point AFTER its dot so it is drawn on top.
        rim_point.attach_to(window)
    window.render()
def test_pizza():
    """ Tests the pizza function. """
    # ------------------------------------------------------------------
    # TODO: 5. Implement this TEST function.
    #   It TESTS the pizza function defined below.
    #   Include at least ** 4 ** tests (we wrote three for you).
    #
    # Use the same 4-step process as for previous TEST functions.
    # ------------------------------------------------------------------
    print()
    print('--------------------------------------------------')
    print('Testing the pizza function:')
    print('See the windows that pop up.')
    print('--------------------------------------------------')
    # Test 1:
    title = 'PIZZA test 1: 5 slices, thin (thickness=3) blue lines'
    window = rg.RoseWindow(400, 400, title)
    circle = rg.Circle(rg.Point(200, 200), 150)
    pizza(window, circle, 5, 'blue', 3)
    window.close_on_mouse_click()
    # Tests 2 and 3 (on the same window):
    # NOTE(review): the title promises "100 black on yellow" but test 3
    # below draws 20 green1 slices on a blue circle — confirm intent.
    title = 'PIZZA tests 2 + 3: 8 white slices on purple circle;'
    title = title + ' 100 black on yellow'
    window = rg.RoseWindow(500, 400, title)
    circle = rg.Circle(rg.Point(125, 125), 50)
    circle.fill_color = 'purple'
    pizza(window, circle, 8, 'white', 5)
    window.continue_on_mouse_click()
    circle = rg.Circle(rg.Point(300, 200), 100)
    circle.fill_color = 'blue'
    pizza(window, circle, 20, 'green1', 3)
    window.close_on_mouse_click()
    # Test 4: (YOU write THIS test)
    # SUGGESTION: You might enjoy:
    #   -- a large number of thin black lines
    #   -- on a yellow-filled circle.
    title = 'PIZZA test 4: 200 slices, thin (thickness=1) black lines'
    window = rg.RoseWindow(400, 400, title)
    circle = rg.Circle(rg.Point(200, 200), 150)
    circle.fill_color = 'yellow'
    pizza(window, circle, 200, 'black', 1)
    window.close_on_mouse_click()
def pizza(window, circle, number_of_slices, color, thickness):
    """
    Draws the given rg.Circle in the given rg.RoseWindow, then draws
    'pizza slice' lines from the circle's center to number_of_slices
    equally spaced points on its circumference.  Each line gets the given
    color and thickness.  Renders the window at the end.
    Pre-conditions:
      :type window: rg.RoseWindow
      :type circle: rg.Circle
      :type number_of_slices: int that is at least 2
      :type color: (str, rg.Color) that RoseGraphics understands
      :type thickness: int that is positive
    """
    circle.attach_to(window)
    rim_points = generate_points_on_circle(circle, number_of_slices)
    for rim_point in rim_points:
        spoke = rg.Line(circle.center, rim_point)
        spoke.color = color
        spoke.thickness = thickness
        spoke.attach_to(window)
    window.render()
def test_polygon():
    """ Tests the polygon function. """
    # ------------------------------------------------------------------
    # TODO: 7. Implement this TEST function.
    #   It TESTS the polygon function defined below.
    #   Include at least ** 3 ** tests (we wrote two for you).
    #
    # Use the same 4-step process as for previous TEST functions.
    # ------------------------------------------------------------------
    print()
    print('--------------------------------------------------')
    print('Testing the polygon function:')
    print('See the windows that pop up.')
    print('--------------------------------------------------')
    # Tests 1 and 2 (on the same window):
    title = 'POLYGON tests: 3 segments, thick blue lines;'
    title = title + ' 6 with medium red lines'
    window = rg.RoseWindow(500, 400, title)
    circle = rg.Circle(rg.Point(100, 100), 80)
    polygon(window, circle, 3, 'blue', 10)
    window.continue_on_mouse_click()
    circle = rg.Circle(rg.Point(300, 200), 150)
    polygon(window, circle, 6, 'red', 5)
    window.close_on_mouse_click()
    # Test 3: (YOU write THIS test)
    title = 'POLYGON test4: 4 segments, thick black lines;'
    title = title + ' on green cicle'
    window = rg.RoseWindow(500, 400, title)
    circle = rg.Circle(rg.Point(100, 100), 80)
    circle.fill_color = 'green'
    polygon(window, circle, 4, 'black', 5)
    window.continue_on_mouse_click()
    window.close_on_mouse_click()
def polygon(window, circle, number_of_segments, color, thickness):
    """
    Draws the given rg.Circle in the given rg.RoseWindow, then draws
    rg.Line objects forming a regular polygon with the given number of
    segments, inscribed in the rg.Circle.  Each line has the given color
    and thickness.  Renders the window at the end.
    Pre-conditions:
      :type window: rg.RoseWindow
      :type circle: rg.Circle
      :type number_of_segments: int that is at least 3
      :type color: (str, rg.Color) that RoseGraphics understands
      :type thickness: int that is positive
    """
    circle.attach_to(window)
    # Renamed from `list`, which shadowed the builtin.
    vertices = generate_points_on_circle(circle, number_of_segments)
    for k in range(len(vertices)):
        # vertices[k - 1] wraps around via negative indexing when k == 0,
        # closing the polygon.
        side = rg.Line(vertices[k], vertices[k - 1])
        side.color = color
        side.thickness = thickness
        side.attach_to(window)
    window.render()
def test_fancy_polygon():
    """ Tests the fancy_polygon function. """
    # ------------------------------------------------------------------
    # TODO: 9. Implement this TEST function.
    #   It TESTS the fancy_polygon function defined below.
    #   Include at least 2 tests, i.e., 2 calls to the function to test.
    #
    # Indeed, try a variety of tests to get some really cool pictures.
    # Some that I especially like are:
    #   -- 20 segments, hops of length 7
    #   -- 51 segments, hops of length 25
    #   -- 300 segments, hops of length 61
    #
    # Use the same 4-step process as for previous TEST functions.
    # ------------------------------------------------------------------
    print()
    print('--------------------------------------------------')
    print('Testing the fancy_polygon function:')
    print('See the windows that pop up.')
    print('--------------------------------------------------')
    # Test1: dense star pattern (300 segments, hop 61).
    window = rg.RoseWindow(400, 400)
    circle = rg.Circle(rg.Point(200, 200), 180)
    circle.fill_color = 'yellow'
    fancy_polygon(window, circle, 300, 61, 'black', 1)
    window.close_on_mouse_click()
    # Test2: sparser star (20 segments, hop 5).
    window = rg.RoseWindow(400, 400)
    circle = rg.Circle(rg.Point(200, 200), 180)
    circle.fill_color = 'blue'
    fancy_polygon(window, circle, 20, 5, 'yellow', 3)
    window.close_on_mouse_click()
def fancy_polygon(window, circle, number_of_segments,
                  hops_to_next_point, color, thickness):
    """
    Draws the given rg.Circle in the given rg.RoseWindow, then draws
    rg.Line objects like an inscribed regular polygon with the given
    number of segments, but with each rg.Line going from one point on the
    circle to the point hops_to_next_point positions away, wrapping as
    needed.  Each line has the given color and thickness.
    For example, if hops_to_next_point is 1 the picture is a regular
    polygon; if hops_to_next_point is 3 and number_of_segments is 5, the
    lines go 0->3, 1->4, 2->0 (wrap), 3->1, 4->2.
    Pre-conditions:
      :type window: rg.RoseWindow
      :type circle: rg.Circle
      :type number_of_segments: int that is at least 3
      :type hops_to_next_point: int that is at least 1
      :type color: (str, rg.Color) that RoseGraphics understands
      :type thickness: int that is positive
    """
    circle.attach_to(window)
    # Renamed from `list`, which shadowed the builtin.
    vertices = generate_points_on_circle(circle, number_of_segments)
    n = len(vertices)
    for k in range(n):
        # Explicit modulo wrap: the original relied on negative indexing,
        # which only tolerates hops_to_next_point < n; % handles any
        # positive hop count while giving identical results for valid ones.
        chord = rg.Line(vertices[k], vertices[(k - hops_to_next_point) % n])
        chord.color = color
        chord.thickness = thickness
        chord.attach_to(window)
    window.render()
# ----------------------------------------------------------------------
# Calls main to start the ball rolling.
# (Runs at import time; there is no __main__ guard in this exercise file.)
# ----------------------------------------------------------------------
main()
|
import random
import point
import target as tg
"Checks if path generated is valid"
def check_if_path_correct(ordered_nexts, targets, radius):
    """Return True when each successive path point covers no more targets
    than the previous one did.

    A target is "covered" by a point when its euclidean_distance to that
    point is <= radius.  Covered targets are removed from *targets* as the
    path is walked, so the caller's list is mutated.
    """
    previous_covered = float("inf")
    for waypoint in ordered_nexts:
        covered = [t for t in targets
                   if waypoint.euclidean_distance(t) <= radius]
        if len(covered) > previous_covered:
            return False
        for t in covered:
            targets.remove(t)
        previous_covered = len(covered)
    return True
"Generates randomized targets"
def generate_targets(number_targets):
    """Write *number_targets* random coordinate pairs to targets.txt.

    Each line is "<a> <b>" with a in [0, 360) and b in [-90, 90) —
    presumably longitude/latitude in degrees; TODO confirm.
    """
    # Bug fix: use a context manager so the file is flushed and closed even
    # on error (the original leaked the handle and never closed it).
    with open("targets.txt", "w") as file:
        for i in range(number_targets):
            file.write(str(random.random() * 360) + " " + str((random.random() * 180) - 90) + "\n")
"Runs Randomized Testing With number_targets random targets and radius radius"
def run_test(radius, number_targets):
    """Run a randomized end-to-end test.

    Generates *number_targets* random targets, loads them back as Points,
    links targets that fall within each other's field of view, and computes
    the optimal path.

    Returns:
        The path computed by tg.get_optimal_path (previously discarded).
    """
    # BUG FIX: was hard-coded to generate_targets(50), silently ignoring
    # the number_targets parameter.
    generate_targets(number_targets)
    targets = []
    # BUG FIX: file was opened but never closed; a context manager also makes
    # the trailing-newline strip unnecessary (float() tolerates whitespace).
    with open("targets.txt", "r") as file:
        for line in file:
            lon, lat = line.split(" ")
            targets.append(point.Point(float(lon), float(lat)))
    for i in targets:
        for j in targets:
            if i != j:
                i.add_target_if_fov(radius, j)
    # Return the result so callers can validate it (e.g. with
    # check_if_path_correct); previously it was assigned and dropped.
    return tg.get_optimal_path(tg.get_nodes(radius, targets))
|
#!/usr/bin/python
import os
import pickle
import sys
import subprocess
import Queue
import threading
import getopt
import copy
import string
import glob
import csv
import re
import random
def usage():
    """Print command-line usage, examples, option reference and citations."""
    print "\n-----------------------------------------------------------------"
    print "Usage: "
    print " sifter_run.py [options] <queries_prep_folder> <results_folder>"
    print "-----------------------------------------------------------------\n"
    print "Examples:"
    print " sifter_run.py ../example/queries ../example/results\n"
    print " sifter_run.py -n 10 ../example/queries ../example/results\n"
    print " sifter_run.py -e PF12491,PF13820 ../example/queries ../example/results\n"
    print " sifter_run.py -f PF12491,PF13820 ../example/queries ../example/results\n"
    print " sifter_run.py -n 1 --if ../example/family_list.txt ../example/queries ../example/results\n"
    print "This function runs SIFTER on the prepared files generated by 'sifter_prepare.py'."
    print "@author Sayed Mohammad Ebrahim Sahraeian (mohammad@compbio.berkeley.edu)"
    print "Please cite new paper:"
    print "-Sahraeian SME, Luo KR, Brenner SE (2015)"
    print "\nThe SIFTER algorithm presented in the following paper:"
    print "- Engelhardt BE, Jordan MI, Srouji JR, Brenner SE. 2011. Genome-scale phylogenetic function annotation of large and diverse protein families. Genome Research 21:1969-1980. \n"
    print "inputs:"
    print "        <queries_prep_folder>   Path to the queries preperation."
    print "                                folder. The folder should contain the"
    print "                                necessary query files (generated"
    print "                                by 'sifter_prepare.py' script). Use"
    print "                                the same directory used as output"
    print "                                in 'sifter_prepare.py' script"
    print "        <results_folder>        Path to the output folder where"
    print "                                results will be written to."
    print "options:"
    print "          -n   INT          Number of threads (Default=4)"
    print "          -e   STRING       List of families for which you want"
    print "                            to exclude running SIFTER on."
    print "                            (in comma seperated format)"
    print "          --ie STRING       Path to the input file where the"
    print "                            list of families for which you"
    print "                            want to exclude running SIFTER"
    print "                            on."
    print "          -f   STRING       List of families for which you want"
    print "                            to run SIFTER on."
    print "                            (in comma seperated format)"
    print "                            If not provided, SIFTER will run on"
    print "                            all families in queries_prep_folder"
    print "          --if STRING       Path to the input file where the"
    print "                            list of families for which you"
    print "                            want to run SIFTER on."
    print "                            If not provided, SIFTER will run on"
    print "                            all families in queries_prep_folder"
    print "          -t   INT          Number of functions to truncate"
    print "                            to in approximation [Default:"
    print "                            Automatically computed in sifter_prepare.py]"
    print "                            Smaller value leads to faster running time."
    print "          -h                Help. Print Usage."
def write_goa_anns_to_pli_constrained(evidence_file,goa_anns, fam_id, seq_lookup,evidence_constraints):
    '''
    This converts the database rows to B. Engelhardt's arbitrary evidence XML format,
    keeping only annotations whose evidence code is in evidence_constraints.
    Input looks like:
        {'A2VE79': [{'acc': 'GO:0000287',
             'code': 'ISS',
             'full_name': 'Diphosphoinositol polyphosphate phosphohydrolase 1',
             'genus': 'Bos',
             'is_not': 0L,
             'name': 'magnesium ion binding',
             'species': 'taurus',
             'symbol': 'NUDT3',
             'xref_dbname': 'UniProtKB',
             'xref_key': 'A2VE79'},
            {'acc': 'GO:0008486',
             'code': 'ISS',
             'full_name': 'Diphosphoinositol polyphosphate phosphohydrolase 1',
             'genus': 'Bos',
             'is_not': 0L,
             'name': 'diphosphoinositol-polyphosphate diphosphatase activity',
             'species': 'taurus',
             'symbol': 'NUDT3',
             'xref_dbname': 'UniProtKB',
             'xref_key': 'A2VE79'},
         ...
    '''
    f = open(evidence_file, 'w')
    f.write("<?xml version=\"1.0\"?>\n<Family>\n")
    f.write("  <FamilyID>%s</FamilyID>\n"%fam_id)
    for p_id, anns in goa_anns.iteritems():
        # Keep only annotations whose evidence code is allowed.
        filtered_anns=[w for w in anns if w['code'] in evidence_constraints]
        if not filtered_anns:
            continue
        f.write("  <Protein>\n")
        f.write("    <ProteinName>%s</ProteinName>\n"%seq_lookup[p_id])
        f.write("    <ProteinNumber>%s</ProteinNumber>\n"%p_id)
        # Build "[id1, id2, ...]" for GO numbers (numeric part only) and the
        # parallel list of evidence codes (MOC = method of curation).
        go_str = ''
        moc_str = ''
        for i,a in enumerate(filtered_anns):
            go_str += a['acc'][3:]
            moc_str += a['code']
            if i < len(filtered_anns)-1:
                go_str += ', '
                moc_str += ', '
        f.write("    <GONumber>%s</GONumber>\n"%('['+go_str+']'))
        f.write("    <MOC>%s</MOC>\n"%('['+moc_str+']'))
        f.write("  </Protein>\n")
    f.write("</Family>\n")
    f.close()
class ProcessingThread(threading.Thread):
    """Thread for running sequence alignments on a given input homolog cluster."""
    def __init__(self, thread_queue):
        threading.Thread.__init__(self)
        self.thread_queue = thread_queue
    def thread_operation(self, thread_data):
        """Run SIFTER (generation then inference) for one prepared family query.

        thread_data is (query_data dict, output_prefix path, pfam_id).
        Unzips the prepared tree/evidence files to unique temp copies, runs the
        SIFTER jar twice via the shell, parses the tab-separated result file and
        pickles it as <output_prefix>_result.pickle, then removes the temp copies.
        """
        query_data, output_prefix,pfam_id = thread_data
        self.output_prefix = output_prefix
        try:
            print "--------------------------------------------------"
            print "Running SIFTER for ",query_data['pfam_id'],':',pfam_id
            # Input evidence: random ids make the temp copies unique per run,
            # so concurrent threads never clobber each other's files.
            rand_id_1=random.randint(1000000,9999999)
            rand_id_2=random.randint(1000000,9999999)
            evidence_file = query_data['annotation_loc']
            evidence_file_pickle = query_data['annotation_loc_pickle']
            evidence_format = query_data['annotation_format']
            evidence_constraints = query_data['evidence_constraints']
            tree_file = query_data['tree_loc']
            if os.path.exists(tree_file+'.gz'):
                if os.path.exists('%s.%d'%(tree_file,rand_id_1)):
                    subprocess.check_call("rm %s.%d"%(tree_file,rand_id_1),shell=True)
                subprocess.check_call("gunzip -c %s.gz > %s.%d"%(tree_file,tree_file,rand_id_1),shell=True)
            if os.path.exists(evidence_file_pickle+'.gz'):
                if os.path.exists('%s.%d'%(evidence_file,rand_id_2)):
                    subprocess.check_call("rm %s.%d"%(evidence_file,rand_id_2),shell=True)
                if os.path.exists('%s.%d'%(evidence_file_pickle,rand_id_2)):
                    subprocess.check_call("rm %s.%d"%(evidence_file_pickle,rand_id_2),shell=True)
                subprocess.check_call("gunzip -c %s.gz > %s.%d"%(evidence_file_pickle,evidence_file_pickle,rand_id_2),shell=True)
            # Regenerate the .pli evidence XML from the pickled annotations,
            # restricted to the allowed evidence codes.
            [ee,anns, p, seq_lookup]=pickle.load(open("%s.%d"%(evidence_file_pickle,rand_id_2), 'r'))
            write_goa_anns_to_pli_constrained("%s.%d"%(evidence_file,rand_id_2),anns, p, seq_lookup,evidence_constraints)
            pfam_id = query_data['pfam_id']
            n_terms=query_data['n_terms']
            # Truncation level: user-enforced (-t) wins over the estimate
            # computed by sifter_prepare.py.
            if query_data['enforced_trunc']>0:
                max_simul_fcns=query_data['enforced_trunc']
            else:
                max_simul_fcns=query_data['e_time'][0]
            print "n_terms :",n_terms
            print "truncation level :",max_simul_fcns
            print "Estimated running time for family %s = %s (95%% confidence upper bound = %s)"%(pfam_id,query_data['e_time'][4],query_data['e_time'][5])
            # Build "--with-<code>" flags, one per allowed evidence type.
            evidence_str = ''
            for ev_type in evidence_constraints:
                evidence_str = evidence_str + "--with-" + string.lower(ev_type) + " "
            go_ontology_sqlite = main_dir+'/data/goterms.sqlite'
            # Step 1: "--generate" pass produces the infer/scale/alpha .fx files.
            java_sifter_cmd = \
                "java -jar -Xmx4g " + sifter_java + " " +\
                "--generate " + " " +\
                evidence_str + " " +\
                "--protein " + evidence_file+'.%d'%rand_id_2+ " " +\
                "--reconciled " + tree_file+'.%d'%rand_id_1 + " " +\
                "--ontology " + go_ontology_sqlite + " " +\
                "--output " + self.output_prefix+'_result.txt ' +\
                pfam_id
            # Copy xml into the "input directory" of the new evaluation directory
            print "[aphylo] Executing the following Command:\n"
            print java_sifter_cmd;
            exec_command = java_sifter_cmd
            #print "Running: "+exec_command
            retcode = subprocess.call(exec_command, shell=True)
            if (retcode == 0):
                print "Success: Ran generation step.\n"
            else:
                print "Error: Sifter failed to generate.\n"
                raise Exception
            # Step 2: inference pass, reusing the .fx files from step 1.
            java_sifter_cmd = \
                "java -jar -Xmx4g " + sifter_java + " " +\
                evidence_str + " " +\
                "--protein " + evidence_file+'.%d'%rand_id_2 + " " +\
                "--reconciled " + tree_file+'.%d'%rand_id_1 + " " +\
                "--ontology " + go_ontology_sqlite + " " +\
                "--output " + self.output_prefix +'_result.txt ' +\
                "--familyfile " + self.output_prefix[:-len(pfam_id)] + "/infer-" + pfam_id + ".fx " +\
                "--scale " + self.output_prefix[:-len(pfam_id)] + "/scale-" + pfam_id + ".fx " +\
                "--alpha " + self.output_prefix[:-len(pfam_id)] + "/alpha-" + pfam_id + ".fx " +\
                ("--truncation %d "%max_simul_fcns) +\
                "--xvalidation --folds 0 " +\
                pfam_id
            print "[aphylo] Executing the following command:\n"
            print java_sifter_cmd
            exec_command = java_sifter_cmd
            #print "Running: "+exec_command
            retcode = subprocess.call(exec_command, shell=True)
            if (retcode == 0):
                print "Success: Ran inference step.\n"
            else:
                print "Error: Sifter failed to run inference.\n"
            # Parse the tab-separated result file: row 0 holds candidate GO
            # terms; each later row is "<gene>[/<variant>]\t<score>...".
            res_file=self.output_prefix+'_result.txt'
            if os.path.exists(res_file):
                results={}
                data = list(csv.reader(open(res_file, 'rb'), delimiter='\t'))
                if data:
                    candids = []
                    store_data = {}
                    candids=data[0][1:]
                    for line in data[1:]:
                        gene=line[0].split('/')[0]
                        if gene not in results:
                            results[gene]=[]
                        if len(line[0].split('/'))>1:
                            results[gene].append([line[0].split('/')[1],[float(w) for w in line[1:]]])
                        else:
                            results[gene].append(['',[float(w) for w in line[1:]]])
                # NOTE(review): if the result file exists but is empty,
                # `candids` is unbound here and this raises NameError (caught
                # below as a generic failure) — confirm whether the dump was
                # meant to sit inside `if data:`.
                res_pickle_file=self.output_prefix+'_result.pickle'
                pickle.dump({'results':results,'candids':candids},open(res_pickle_file,'w'))
            # Clean up the per-run temp copies.
            if os.path.exists('%s.%d'%(evidence_file,rand_id_2)):
                subprocess.check_call("rm %s.%d"%(evidence_file,rand_id_2),shell=True)
            if os.path.exists('%s.%d'%(evidence_file_pickle,rand_id_2)):
                subprocess.check_call("rm %s.%d"%(evidence_file_pickle,rand_id_2),shell=True)
            if os.path.exists('%s.%d'%(tree_file,rand_id_1)):
                subprocess.check_call("rm %s.%d"%(tree_file,rand_id_1),shell=True)
            print "Query complete for ", pfam_id
        except Exception as e:
            print >> sys.stderr, "Error evaluating %s"%query_data['pfam_id']
            print >> sys.stderr, "Error: ", e
            # NOTE(review): exit(1) kills the whole process from a worker
            # thread on the first failed family.
            exit(1)
    def flag_start(self):
        # Marker file: this family is currently being processed.
        f = open(self.output_prefix + ".sifterj.processing", 'w')
        f.close()
    def unflag_start(self):
        os.remove(self.output_prefix + ".sifterj.processing")
    def flag_finish(self):
        #self.unflag_start()
        # Marker file: processing finished for this family.
        f = open(self.output_prefix + ".sifterj.processed", 'w')
        f.close()
    def run(self):
        while True:
            # Spawn a thread with data from the queue
            thread_data = self.thread_queue.get()
            self.output_prefix = thread_data[1]
            # Run thread's function on the data
            try:
                self.flag_start()
                self.thread_operation(thread_data)
                self.flag_finish()
            except:
                self.unflag_start()
                print "Unexpected thread error:", sys.exc_info()[0]
                print "Thread data:", thread_data
            # Send signal that this task finished
            self.thread_queue.task_done()
if __name__=="__main__":
    # Initialization: locate the SIFTER jar relative to this script.
    main_dir=os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
    sifter_java=os.path.dirname(main_dir)+'/core/sifter2.1.1.jar'
    if not os.path.exists(sifter_java):
        print "\nERROR: No SIFTER jar file exists at %s\n"%sifter_java
        sys.exit()
    num_threads=4
    exclude_fams=[]
    # Check for options
    exclude_fams=[]
    only_fams=[]
    opts, args = getopt.getopt(sys.argv[1:], "hn:e:f:t:",['ie=','if='])
    truncation_level=0
    if len(args) != 2:
        usage()
        sys.exit()
    if len(opts)>0:
        for o, a in opts:
            if o == "-n":
                num_threads=int(a)
            elif o == "-e":
                # Comma-separated family ids to exclude.
                splited =a.strip().split(',')
                exclude_fams.extend(list(set([w for w in splited if w])))
            elif o == "--ie":
                # File listing families to exclude (space/comma/semicolon/newline separated).
                x_input_file = a
                if not os.path.exists(x_input_file):
                    print "\nERROR: No file exists at %s\n"%x_input_file
                    sys.exit()
                f = open(x_input_file, 'r')
                a=f.read()
                splited =re.split(' |,|;|\n',a.strip())
                exclude_fams.extend(list(set([w for w in splited if w])))
            elif o == "-f":
                # Comma-separated family ids to restrict the run to.
                splited =a.strip().split(',')
                only_fams.extend(list(set([w for w in splited if w])))
            elif o == "--if":
                # File listing families to restrict the run to.
                i_input_file = a
                if not os.path.exists(i_input_file):
                    print "\nERROR: No file exists at %s\n"%i_input_file
                    sys.exit()
                f = open(i_input_file, 'r')
                a=f.read()
                splited =re.split(' |,|;|\n',a.strip())
                only_fams.extend(list(set([w for w in splited if w])))
            elif o == "-t":
                truncation_level=int(a)
            else:
                usage()
                sys.exit()
    queries_folder=args[0]
    if not os.path.exists(queries_folder):
        print "\nERROR: Queries preperation folder ( %s ) does not exist\n"%queries_folder
        print "Please use the same directory used as output in 'sifter_prepare.py'"
        sys.exit()
    results_folder=args[1]
    if not os.path.exists(results_folder):
        os.mkdir(results_folder)
    print "\n-----------Reading the queries information-------------\n"
    # Collect one work item per prepared family, honoring the only/exclude
    # filters and skipping families whose result pickle already exists.
    query_files=glob.glob(queries_folder+"/*_query.pickle")
    queries_to_process = []
    for qfile in query_files:
        pfam_id=qfile.split('/')[-1].split('_')[0]
        if only_fams:
            if pfam_id not in only_fams:
                continue
        if pfam_id in exclude_fams:
            print "Excludeing family %s"%pfam_id
            continue
        output_prefix = results_folder + '/' + pfam_id
        if not(os.path.isfile(output_prefix + "_result.pickle")):
            query_data = pickle.load(open(qfile, "rb" ))
            query_data['output_to'] = output_prefix
            query_data['enforced_trunc']=truncation_level
            queries_to_process.append((copy.deepcopy(query_data), output_prefix,pfam_id))
        else:
            print "Predictions have already been made for %s"%pfam_id
    print "\n-------------------Running SIFTER-----------------------\n"
    # Start the worker pool and feed it every query.
    thread_queue = Queue.Queue()
    for i in range(num_threads):
        t = ProcessingThread(thread_queue)
        t.setDaemon(True)
        t.start()
    for thread_data in queries_to_process:
        thread_queue.put(item=thread_data, block=False)
    # Wait on the queue until everything has been processed
    thread_queue.join()
    # Tally successes/failures by checking which result pickles appeared.
    errors=0
    qs=0
    all_qs=[]
    for w in queries_to_process:
        pfam_id=w[2]
        all_qs.append(pfam_id)
        if pfam_id in exclude_fams:
            continue
        if only_fams:
            if pfam_id not in only_fams:
                continue
        qs+=1
        res_pickle_file=w[1]+'_result.pickle'
        if not (os.path.isfile(res_pickle_file)):
            errors+=1
    if only_fams:
        all_qs=only_fams
    all_qs=set(all_qs)-set(exclude_fams)
    n_qs=len(all_qs)-qs
    succus=qs-errors
    print "\nSIFTER results are ready for %d out of %d families. (%s missed due to errors, %s missed due to lack of query files)"%(succus,len(all_qs),n_qs,errors)
    if succus>0:
        print "-------------------Runnig SIFTER is Done----------------------"
        print "You may extract results for specific queries using 'sifter_extract.py'.\n"
|
from __future__ import print_function # Helper class to convert a DynamoDB item to JSON.
import boto3
import json
import decimal
import movies_common
# Increment the rating of one movie item in the DynamoDB 'Movies' table.
dynamodb = boto3.resource('dynamodb')
# dynamodb = boto3.resource('dynamodb', region_name='ap-northeast-1', endpoint_url='http://localhost:4569')
table = dynamodb.Table('Movies')
# Composite primary key of the item to update.
title = 'The ほげほげ'
year = 2017
# Atomic in-place increment: info.rating += 1 on the server side.
response = table.update_item(
    Key={
        'year': year,
        'title': title,
    },
    UpdateExpression='set info.rating = info.rating + :val',
    ExpressionAttributeValues={
        ':val': decimal.Decimal(1),
    },
    ReturnValues='UPDATED_NEW',  # response carries only the updated attributes
)
print('UpdateItem succeeded:')
# DecimalEncoder converts DynamoDB Decimal values for JSON serialization.
print(json.dumps(response, indent=4, ensure_ascii=False, cls=movies_common.DecimalEncoder))
|
# ======================
# Functions for ingesting and cleaning Landsat geotiff
# ======================
# import libraries
import numpy as np
import rasterio
# ======================
# Function to read in vegetation index images and create np image stack
# ======================
def create_image_stack(files, extension):
    """
    This function creates an image stack from the geotiff files given,
    and adds a year element.

    Args:
        files (list): list of file paths
        extension (string): 'NDVI', 'NBR', 'SAVI'
    Returns:
        image_stack (numpy array): numpy ndarray
        year_list (list): list of years in image stack
        stack_depth (int): depth of image stack
        meta (dict): meta data for raster file
        bounds: bounds for raster file
    Raises:
        ValueError: if extension is not one of 'NBR', 'NDVI', 'SAVI'
    """
    # Get year list from the filename (year position differs per index type).
    if extension == 'NBR':
        year_list = [int(i[-16:-12]) for i in files]
    elif extension in ('NDVI', 'SAVI'):
        year_list = [int(i[-17:-13]) for i in files]
    else:
        # BUG FIX: previously any other extension fell through with
        # year_list unbound, raising a confusing NameError below.
        raise ValueError(
            "extension must be 'NBR', 'NDVI' or 'SAVI', got %r" % (extension,))
    stack_depth = len(year_list)
    # Get image shape and georeferencing info from the first file.
    with rasterio.open(files[0]) as f:
        meta = f.meta
        bounds = f.bounds
        image_get_shape = f.read(1)
        height, width = image_get_shape.shape
    # Create empty np array for image stack, then fill one band per year.
    image_stack = np.empty([height, width, stack_depth])
    print('empty image stack shape: ', image_stack.shape)
    for i, file in enumerate(files):
        with rasterio.open(file) as f:
            image_stack[:, :, i] = f.read(1)
    print('Finished reading and creating raw image stack...')
    return image_stack, year_list, stack_depth, meta, bounds
# ======================
# Function to add missing years of np.nan arrays to original image stack
# ======================
def add_missing_years(image_stack, year_list):
    """
    Fill in missing years of an image stack with np.nan slices so the stack
    depth covers the whole time period.

    Args:
        image_stack (numpy array): ndarray stack of yearly vegetation indices
        year_list (list): years actually present in the stack (may have gaps)
    Returns:
        numpy array: stack covering every year from year_list[0] to
        year_list[-1]; missing years are all-NaN slices.
    """
    full_years = list(range(year_list[0], year_list[-1] + 1))
    height, width, _ = image_stack.shape
    # Start fully NaN, then copy each available year into its proper slot.
    filled = np.full([height, width, len(full_years)], np.nan)
    source_idx = 0
    for target_idx, year in enumerate(full_years):
        if year in year_list:
            filled[:, :, target_idx] = image_stack[:, :, source_idx]
            source_idx += 1
    print("Added missing years to image stack...")
    return filled
# ======================
# DATA CLEANING: Create valid pixel mask to label if pixel is valid or not
# Check for poor quality data; missing data is recorded in numpy array as nan
# ======================
# count number of nans for each pixel over the years
def clean_data(full_pix_arr, valid_num=20):
    """
    Build a mask labelling each pixel as "valid" or not. A pixel is valid
    when its NaN count along the time axis is strictly below *valid_num*
    (i.e. at least half of the observations are present).

    Args:
        full_pix_arr (numpy array): full array of yearly VIs without missing years
        valid_num (int): NaN-count threshold; default 20 suits the Unzen
            time period of 38 years
    Returns:
        numpy array: (n_pixels, 1) boolean mask of valid pixels
    """
    # Count NaNs per pixel across the time (third) axis.
    nan_counts = np.isnan(full_pix_arr).sum(axis=2)
    valid_mask = nan_counts < valid_num  # True == valid pixel
    # Flatten to a column vector so it can be concatenated with the
    # reshaped base array later.
    rows, cols = valid_mask.shape
    flattened = valid_mask.reshape(rows * cols, 1)
    print('reshaped shape of valid pixel mask: ', flattened.shape)
    print('Created mask of valid pixels...')
    return flattened
# ======================
# Function to reshape image stack to apply regression
# ======================
def reshape_image_stack(image_stack, year_list):
    """
    Reshape the image stack into a 2D (pixels x years) array, then
    concatenate it with a per-pixel year array along axis=2.

    Args:
        image_stack (ndarray): full VI image stack without missing years
        year_list (list): list of years from original data set
    Returns:
        ndarray: shape (row*col, n_years, 2) where [..., 0] is the year
        and [..., 1] is the vegetation index value.
    """
    # Full list of years with no missing values; must match stack depth.
    yearly_years = [year for year in range(year_list[0], year_list[-1] + 1)]
    print('START: reshaping data...')
    row, col, stack_depth = image_stack.shape
    print('row, col: ', row, col)
    # IMPROVEMENT: a single C-order reshape is exactly equivalent to the
    # previous per-band Python loop (copying image_stack[:, :, i] into
    # column i), but without the loop. astype keeps the float64 dtype the
    # old np.empty-based code always produced.
    base_array = image_stack.reshape(row * col, stack_depth).astype(np.float64, copy=False)
    print('base array shape: ', base_array.shape)
    # Broadcast the year axis to every pixel and stack years before values.
    n_pixels, depth = base_array.shape
    years_array = np.array(yearly_years)
    base_years_array = np.repeat(years_array.reshape(1, depth), n_pixels, axis=0).reshape(n_pixels, depth, 1)
    base_array = np.concatenate((base_years_array, base_array.reshape(n_pixels, depth, 1)), axis=2)
    print('FINISHED: shape of reshaped array that is returned: ', base_array.shape)
    return base_array
# ======================
# function1: classify whether pixel is disturbed or not. Log reg fitted only to disturbed pixels
# function2: combine disturbed and nan filters
# function3: filter original image stack by combined filters to produce valid and disturbed image stack
# Filter pixels by validity array and disturbed pixel array
# original array to filter: only_NBR from (reshaped_NBR)
# validity array: val_pix_arr_reshaped
# disturbed pixel array: disturbed_pixels (array of bools)
# ======================
def get_disturbed_pixel_array(reshaped_image_stack, year_list):
    """
    Classify pixels as disturbed (affected by the eruption) or not. A pixel
    is "disturbed" when the post-eruption drop exceeds 20% of its
    pre-eruption level (adapted from DeSchutter et al., 2015).

    Args:
        reshaped_image_stack (numpy array): (pixels, years, 2) stack where
            [..., 0] is the year and [..., 1] is the vegetation index
        year_list (list): incomplete list of years
    Returns:
        disturbed_pix_reshaped (numpy array): (pixels, 1) boolean array
        only_years (numpy array): the year plane of the input
        only_veg_ind (numpy array): the vegetation-index plane of the input
    """
    # Full list of years with no missing values, mapped to column indices.
    full_years = list(range(year_list[0], year_list[-1] + 1))
    col_of = {year: idx for idx, year in enumerate(full_years)}
    only_years = reshaped_image_stack[:, :, 0]
    only_veg_ind = reshaped_image_stack[:, :, 1]
    veg1985 = only_veg_ind[:, col_of[1985]]
    veg1986 = only_veg_ind[:, col_of[1986]]
    veg_erup = only_veg_ind[:, col_of[1995]]  # immediately post-eruption year
    # Pre-eruption level: mean of 1985/1986; fall back to 1985 where the
    # mean is NaN (i.e. 1986 is missing).
    pre_mean = (veg1985 + veg1986) / 2
    veg_max_pre = np.where(np.isnan(pre_mean), veg1985, pre_mean)
    # Disturbed when the drop exceeds 20% of the pre-eruption value.
    disturbed_pix = (veg_max_pre - veg_erup) > veg_max_pre * 0.2
    # Column vector so it can be concatenated with the base array.
    disturbed_pix_reshaped = disturbed_pix.reshape(disturbed_pix.shape[0], 1)
    print('disturbed_pix_reshaped shape: ', disturbed_pix_reshaped.shape)
    return disturbed_pix_reshaped, only_years, only_veg_ind
def get_valid_pixel_filter(val_pix_reshaped, disturbed_pix_reshaped):
    '''
    Combine the validity mask with the disturbed-pixel mask: a pixel passes
    only when it is both valid (few NaNs) AND disturbed.
    '''
    # Element-wise AND of the two (n, 1) boolean columns is equivalent to
    # concatenating them and reducing with all(axis=1).
    pix_to_filter_reshaped = np.logical_and(val_pix_reshaped, disturbed_pix_reshaped)
    print('valid pixel array shape: ', pix_to_filter_reshaped.shape)
    return pix_to_filter_reshaped
def get_valid_image_stack(pix_to_filter_reshaped, only_years, only_veg_index):
    '''
    Apply the combined pixel mask to the vegetation-index plane and stack it
    with the year plane.

    Returns:
        (n_pixels, depth, 2) array: [..., 0] years, [..., 1] masked VI
        (invalid pixels become NaN).
    '''
    n_pixels, depth = only_veg_index.shape
    # Broadcast the (n, 1) mask across all years, NaN-out filtered pixels.
    mask = np.repeat(pix_to_filter_reshaped, depth, axis=1)
    masked_veg = np.where(mask, only_veg_index, np.nan)
    # Stack years in front of the masked values along a new third axis.
    valid_veg_withyears = np.concatenate(
        (only_years.reshape(n_pixels, depth, 1),
         masked_veg.reshape(n_pixels, depth, 1)),
        axis=2,
    )
    print('valid veg index with years shape: ', valid_veg_withyears.shape)
    return valid_veg_withyears
|
import torch
from torch.utils import benchmark
from .main_cnn import create_model as ResNet
from .main_transformer import create_model as ViT
# Benchmark each model (parameter count, CPU latency, GPU latency) on a
# single CIFAR10-sized input; expected output is in the docstring below.
for model in (ResNet(), ViT()):
    print(f"#### {model.__class__.__name__} ####")
    x = torch.randn(1, 3, 32, 32)  # CIFAR10
    # Count only trainable parameters.
    params = sum(p.numel() for p in model.parameters() if p.requires_grad)
    print(f"Parameters: {params / 1e6 :.3f}M")
    # CPU inference latency, averaged over 100 runs.
    t_cpu = benchmark.Timer(
        stmt="with torch.no_grad(): model(x)", globals={"x": x, "model": model}
    )
    print(t_cpu.timeit(100))
    # GPU inference latency (requires CUDA), averaged over 100 runs.
    x = torch.randn(1, 3, 32, 32).cuda()  # CIFAR10
    model.cuda()
    t_gpu = benchmark.Timer(
        stmt="with torch.no_grad(): model(x)", globals={"x": x, "model": model}
    )
    print(t_gpu.timeit(100))
"""
#### ResNet ####
Parameters: 11.174M
<torch.utils.benchmark.utils.common.Measurement object at 0x000001C70AA6D250>
with torch.no_grad(): model(x)
15.06 ms
1 measurement, 100 runs , 1 thread
<torch.utils.benchmark.utils.common.Measurement object at 0x000001C733442FD0>
with torch.no_grad(): model(x)
2.86 ms
1 measurement, 100 runs , 1 thread
#### VisionTransformer ####
Parameters: 26.355M
<torch.utils.benchmark.utils.common.Measurement object at 0x000001C73345C640>
with torch.no_grad(): model(x)
11.85 ms
1 measurement, 100 runs , 1 thread
<torch.utils.benchmark.utils.common.Measurement object at 0x000001C70AE59AF0>
with torch.no_grad(): model(x)
5.58 ms
1 measurement, 100 runs , 1 thread
"""
|
"""
Find numbers (BOJ "수 찾기"): for each query value, report whether it
appears in the first list.
N < 200,000
"""
# First line is the count of numbers (unused); second line holds them.
input()
first_data = list(map(int, input().split()))
# Third line is the query count (unused); fourth line holds the queries.
input()
second_data = list(map(int, input().split()))
# Binary search requires sorted input.
first_data.sort()
def binary_search(array, target, start, end):
    """Return the index of *target* in sorted *array* within [start, end],
    or None if it is absent."""
    # Iterative halving of the search window (same result as the
    # recursive formulation, without the call stack).
    while start <= end:
        mid = (start + end) // 2
        if array[mid] == target:
            return mid
        if array[mid] < target:
            start = mid + 1
        else:
            end = mid - 1
    return None
# For every query value print "1" if it occurs in the sorted first list,
# "0" otherwise.
for second in second_data:
    # IDIOM FIX: compare against the None sentinel with `is not None`
    # rather than `!= None`; index 0 is a valid (truthy-test-unsafe) hit.
    if binary_search(first_data, second, 0, len(first_data) - 1) is not None:
        print("1")
    else:
        print("0")
# Generated by Django 4.2.3 on 2023-08-07 13:44
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration: redeclares the choice set for
    # Election.requires_voter_id (adds/updates the voter-ID legislation and
    # pilot-scheme options). Field stays nullable, max_length 100.
    dependencies = [
        ("elections", "0072_alter_election_current_alter_election_group_type"),
    ]
    operations = [
        migrations.AlterField(
            model_name="election",
            name="requires_voter_id",
            field=models.CharField(
                choices=[
                    ("EFA-2002", "Electoral Fraud (Northern Ireland) Act 2002"),
                    ("EA-2022", "Elections Act 2022"),
                    ("pilot-2018", "2018 voter ID pilot scheme"),
                    ("pilot-2019", "2019 voter ID pilot scheme"),
                ],
                max_length=100,
                null=True,
            ),
        ),
    ]
|
import json
def formHdrResp(code, message):
    """Build a bare header response containing only a code and a message."""
    return {"code": code, "message": message}
def formErrResp(code, message, responseNode, responseObj):
    """Wrap an error payload under *responseNode* with its code and message."""
    body = {"code": code, "message": message, "response": responseObj}
    return {responseNode: body}
def formScssResp(code, message, responseNode, responseObj):
    """Wrap a success payload under *responseNode* with its code and message."""
    body = {"code": code, "message": message, "response": responseObj}
    return {responseNode: body}
def updateRespJson(resp, responseNode, responseObj):
    """Replace the 'response' payload inside an already-wrapped response.

    Mutates *resp* in place and returns it for chaining.
    """
    resp[responseNode]["response"] = responseObj
    return resp
|
from re import compile, match

# "$<dollars>.<cents>" — the cents part may be 0–2 digits; anchored at the end.
REGEX = compile(r'\$(?P<integer>\d+)\.(?P<frac>\d{0,2})$')

def to_cents(amount):
    """Convert a dollar string like '$1.23' to integer cents (123).

    Returns None when *amount* is not a well-formed dollar string.
    Short fractions are right-padded: '$1.5' -> 150, '$1.' -> 100.
    """
    m = match(REGEX, amount)
    if not (m and m.group(0) == amount):
        return None
    # BUG FIX: int('') raised ValueError for inputs like '$1.', and a
    # one-digit fraction was mis-scaled ('$1.5' parsed as 105 instead of
    # 150). Pad the fraction to two digits before converting.
    frac = m.group('frac').ljust(2, '0')
    return int(m.group('integer')) * 100 + int(frac)
|
from django.apps import AppConfig
class EshopProductsConfig(AppConfig):
    # Django app configuration for the eshop_products app.
    name = 'eshop_products'
    verbose_name = 'ماژول محصولات'  # Persian: "Products module"
#from scipy.stats.stats import pearsonr
#a = [20,20,1]
#b = [20,20,3]
#print (pearsonr(a,b))
#raise SystemExit
import numpy as np
from collections import Counter, defaultdict
import os
import pandas
import util
import pyutil
import time
title = "standard"
# Which historical report generation to read (0 = current naming scheme).
his_idx = 0
# Indices into the per-Cost value lists stored in cost_basis:
# [total spent, total share amount, list of purchase dates].
spentidx = 0
amountidx = 1
datesidx = 2
class Cost():
    """Hashable key for one purchase bucket: sort mode, sort direction, symbol."""
    def __init__(self, mode, mode2, symbol):
        self.mode = mode      # report column the purchase was ranked by
        self.mode2 = mode2    # ascending flag used for the ranking
        self.symbol = symbol  # ticker symbol purchased
    def __str__(self):
        return f"{self.mode}/{self.mode2}/{self.symbol}"
    def __eq__(self, other):
        if not isinstance(other, type(self)):
            return False
        return self.__dict__ == other.__dict__
    def __ne__(self, other):
        return not self == other
    def __hash__(self):
        # Hash over attribute values in a fixed (sorted-by-name) order so
        # equal instances hash equally.
        attr_names = sorted(self.__dict__)
        return hash(tuple(self.__dict__[name] for name in attr_names))
# Per-ETF history of value/spent ratios, cached "tops" lookup from util,
# and last observed price per symbol (filled on the final report file).
hisdict = defaultdict(list)
topdict = dict()
latest_values = dict()
def isTop(hashable, symbol):
    """Return True when *symbol* is one of the two most common symbols
    recorded for *hashable* (lazy-loads the "tops" cache from util)."""
    global topdict
    if not topdict:
        topdict = util.getp("tops")
    top_two = dict(Counter(topdict[hashable]).most_common(2))
    return symbol in top_two
#print (isTop(hashable, "IVV"))
# Simulation parameters: dollars spent per report, number of symbols bought
# per report, running totals for spend and transaction fees.
spend = 2000
size = 10
spent = 1
tranfees = 0
#ivv = util.getETF()
#etfs = []
# Benchmark ETFs tracked alongside the strategy, with share counts,
# per-Cost purchase records, resulting portfolios and cached CSV loads.
etfs = util.getFromHoldings()
etfvs = defaultdict(int)
cost_basis = dict()
portf = dict()
more_etf = True
path_dict = {}
etf_purchase_times = defaultdict(int)
def tallyFrom(path, mode, ascending, isLast = False):
    """Simulate buying the top *size* symbols from one strategy-report CSV.

    The report at *path* is sorted by column *mode* (direction *ascending*);
    the first `size` symbols not in `dontBuy` and priced >= 3 are "bought"
    with `spend` dollars split evenly. Also tracks benchmark ETF purchases
    and, on the last file (isLast=True), records latest prices per symbol.

    Mutates module globals: spent, tranfees, cost_basis, latest_values,
    etf_purchase_times, etfvs, purchases/purchasesh/purchasesl, path_dict.
    """
    global spent, tranfees, cost_basis, latest_values, etf_purchase_times
    loaded = None
    try:
        # Cache parsed CSVs per path so repeated modes reuse the DataFrame.
        if path in path_dict:
            loaded = path_dict[path]
        else:
            loaded = pandas.read_csv(path)
            path_dict[path] = loaded
        if loaded is None:
            return
    except Exception as e:
        print ('2Filed: '+ str(e))
        return
    loaded.sort_values(by=[mode], inplace=True, ascending=ascending)
    # Benchmark: buy `spend` dollars of each tracked ETF once per report,
    # but only while tallying the "Score" mode to avoid double counting.
    if mode == "Score" and more_etf:
        for anetf in etfs:
            try:
                etfn = float(loaded[loaded['Unnamed: 0'] == anetf]['Last'])
                buycount = spend / etfn
                etfvs[anetf] += buycount
                etf_purchase_times[anetf] += 1
            except Exception as e:
                # ETF missing from this report; skip it.
                continue
    per = spend / size
    spent += spend
    tranfees += 10
    purchased = 0
    # On the final report, remember every symbol's latest closing price.
    if isLast and more_etf:
        # BUG FIX: original read `for idxin loaded.index:` (missing space),
        # a SyntaxError that made the whole module unimportable.
        for idx in loaded.index:
            symbol = loaded.at[idx, "Unnamed: 0"]
            last = loaded.at[idx, "Last"]
            latest_values[symbol] = last
    count = 0
    for idx in loaded.index:
        symbol = loaded.at[idx, "Unnamed: 0"]
        if symbol in dontBuy:
            continue
        last = loaded.at[idx, "Last"]
        if last < 3:
            # Skip penny stocks.
            continue
        lasth = loaded.at[idx, "LastH"]
        lastl = loaded.at[idx, "LastL"]
        amount = per / last
        amounth = per / lasth
        amountl = per / lastl
        # Detailed cost-basis bookkeeping only for the size==20 run.
        if size == 20:
            date = path.split("_")[3].split(".")[0]
            custom = Cost(mode, ascending, symbol)
            cost_basis.setdefault(custom, [0, 0, []])
            cost_basis[custom][spentidx] += per
            cost_basis[custom][amountidx] += amount
            cost_basis[custom][datesidx].append(date)
        purchases[symbol] += round(amount, 3)
        purchasesh[symbol] += round(amounth, 3)
        purchasesl[symbol] += round(amountl, 3)
        purchased += 1
        if purchased == size:
            break
#path = util.getPath("analysis/strategy_report_2015-11-23.csv")
#tallyFrom(path, ["Score", True])
#raise SystemExit
#rememberedFiles = []
#def getFiles(where):
# global rememberedFiles
# import fnmatch
# if rememberedFiles:
# return rememberedFiles
# holds = []
# reportname = "{}_{}".format(where, his_idx)
# pattern = "{}*.csv".format(reportname)
# parentdir = util.getPath(where)
# listOfFiles = os.listdir(parentdir)
# for entry in listOfFiles:
# date = entry.split("_")
# try:
# if his_idx == 0 and len(date) < 3 or "-" not in date[2]:
# continue
# except:
# continue
# if fnmatch.fnmatch(entry, pattern):
# rememberedFiles.append("{}/{}".format(parentdir, entry))
# rememberedFiles.sort()
# return rememberedFiles
#his_idx = 7
#print (getFiles())
#raise SystemExit
def getTrainingTemps(mode, ascending, where):
    """Tally every report file under *where* for (mode, ascending),
    flagging the final file so latest prices get recorded."""
    paths = pyutil.getFiles(where, his_idx)
    last_index = len(paths) - 1
    for i, path in enumerate(paths):
        try:
            tallyFrom(path, mode, ascending, isLast=(i == last_index))
        except Exception as e:
            # Best-effort: report the failing file and keep going.
            print('Failed: ' + str(e))
            print("path : {}".format(path))
# Share counts per symbol at close/low/high prices, reset per calcIt run.
purchases = defaultdict(float)
purchasesl = defaultdict(float)
purchasesh = defaultdict(float)
# Symbols excluded from buying (persisted list via util).
dontBuy = util.getp("dont")
#print(dontBuy)
#raise SystemExit
#dontBuy = list()
#latest_values = util.getp("lastValues")
# Per-mode history of high/close performance ratios.
mode_average = defaultdict(list)
def calcIt(mode, ascending, where):
    """Simulate purchases for one (mode, ascending) strategy and summarize.

    Resets the purchase accumulators, replays all report files via
    getTrainingTemps, then values the resulting positions at
    latest_values.  Returns a one-element list with a formatted
    close/high/low change summary line.
    """
    global purchases, purchasesl, purchasesh, spent, tranfees, mode_average
    global dontBuy
    spent = 1
    tranfees = 0
    asize = 0
    asizel= 0
    asizeh= 0
    purchases = defaultdict(float)
    purchasesl = defaultdict(float)
    purchasesh = defaultdict(float)
    getTrainingTemps(mode, ascending, where)
    try:
        for astock in purchases:
            lvalue = latest_values[astock]
            asize += purchases[astock] * lvalue
            asizel += purchasesl[astock] * lvalue
            asizeh += purchasesh[astock] * lvalue
    except:
        # NOTE(review): a single missing symbol aborts the whole valuation
        # loop (the except wraps the for); remaining symbols are skipped.
        dontBuy.append(astock)
    #        import traceback
    #        print (traceback.format_exc())
    #        print("dont astock: {}".format( astock))
    #        return
    if size == 20:
        portf[mode, ascending] = purchases
    # Change ratios relative to money spent plus transaction fees.
    low = round(asizel / (spent + tranfees),3)
    high = round(asizeh / (spent + tranfees),3)
    close = round(asize / (spent + tranfees),3)
    changel = util.formatDecimal(low)
    changeh = util.formatDecimal(high)
    change = util.formatDecimal(close)
    # "A" suffix distinguishes the ascending variant of a mode.
    mode_hash = mode
    if ascending:
        mode_hash += "A"
    mode_average[mode_hash].append(high)
    mode_average[mode_hash].append(close)
    one = "C {0:12} {1:8}".format(mode_hash, change)
    two = "H {0:12} {1:8}".format(mode_hash, changeh)
    three = "L {0:12} {1:8}".format(mode_hash, changel)
    return ["{} {} {}".format(one, two, three)]
def etfData():
    """Format the ETF holdings summary, listing the best performer first.

    Values each ETF position at latest_values, appends the change ratio to
    hisdict for the final report, and returns a single-element list of
    [joined summary string, spent amount line].
    """
    maxetf = 0
    max_etf_name = ""
    ret = []
    # First pass: find the most valuable ETF position.
    for etf in etfvs:
        etfvalue = round(etfvs[etf] * latest_values[etf], 3)
        if etfvalue > maxetf:
            maxetf = etfvalue
            max_etf_name = etf
    # Second pass: emit the top ETF on its own line.
    for etf in etfvs:
        if etf == max_etf_name:
            etfvalue = round(etfvs[etf] * latest_values[etf], 3)
            # print("etfvalue : {}".format( etfvalue ))
            count = etf_purchase_times[etf]
            # NOTE(review): spent2 is computed but never used; the change
            # ratio divides by the global `spent` instead -- confirm intent.
            spent2 = count * spend
            change = etfvalue/spent
            # print("spent: {}".format( spent))
            hisdict[etf].append(change)
            # print("change: {}".format( change))
            change = util.formatDecimal(change)
            ret.append("{0:4}({1:5})\n".format(etf, change))
    # Third pass: emit the remaining ETFs, wrapping after the seventh.
    for i,etf in enumerate(etfvs):
        if not etf == max_etf_name:
            # print("etf : {}".format( etf ))
            etfvalue = round(etfvs[etf] * latest_values[etf], 3)
            # print("etfvalue : {}".format( etfvalue ))
            count = etf_purchase_times[etf]
            spent2 = count * spend
            change = etfvalue/spent
            hisdict[etf].append(change)
            # print("change: {}".format( change))
            change = util.formatDecimal(change)
            ret.append("{0:4}({1:5})".format(etf, change))
            if i == 6:
                ret.append("\n")
    return [["".join(ret), "{}\n".format(spent)]]
def costToDict():
    """Flatten cost_basis into a dict keyed by str(cost), plus update topdict.

    Each entry maps to [current value, rounded spent, change ratio, dollar
    change, ticker, purchase dates].  Symbols missing from latest_values
    are added to dontBuy and skipped.
    """
    global topdict
    newdict = dict()
    for cost in cost_basis:
        values = cost_basis[cost]
        spent = values[spentidx]
        amount = values[amountidx]
        dates = values[datesidx]
        astock = cost.symbol
        try:
            currentValue = round(amount * latest_values[astock])
        except:
            # No current price for this symbol: blacklist it and move on.
            dontBuy.append(astock)
            print("astock: {}".format( astock))
            continue
        change = util.formatDecimal(currentValue/spent)
        # if not (cost.mode == "Variance" and not cost.mode2):
        #     continue
        hashable = "{}".format(str(cost))
        newdict[hashable] = [currentValue, round(spent), change,
                             round(currentValue-spent), astock, " ".join(dates)]
        # Also track the per-(mode, mode2) dollar gain for each symbol.
        hashable = "{}{}".format(cost.mode, cost.mode2)
        if not topdict.get(hashable):
            topdict[hashable] = dict()
        topdict[hashable][astock] = currentValue - spent
    # util.setp(topdict, "tops")
    return newdict
def testCostToDict():
custom = Cost("MODE2", False, "IVV")
cost_basis.setdefault(custom, [0,0])
cost_basis[custom][spentidx] += 200
cost_basis[custom][amountidx] += 100
custom = Cost("MODE2", False, "GOOG")
cost_basis.setdefault(custom, [0,0])
cost_basis[custom][spentidx] += 1200
cost_basis[custom][amountidx] += 10
custom = Cost("MODE", True, "ABC")
cost_basis.setdefault(custom, [0,0])
cost_basis[custom][spentidx] += 200
cost_basis[custom][amountidx] += 100
newdict = costToDict()
def writeCostDict(newdict, where):
    """Write the costToDict() mapping to a per-run selection CSV."""
    import pandas
    df = pandas.DataFrame.from_dict(newdict, orient = 'index',
                                    columns=["Value", "Cost", "Change",
                                             "DollarChange", "Ticker","PurchaseDates"])
    # title and his_idx are module globals identifying the current run.
    path = util.getPath("{}/selection_{}_{}.csv".format(where, title, his_idx))
    df.to_csv(path)
    print ("written {}".format(path))
#writeCostDict(newdict)
# Strategy mode names come from the report header; the last two columns
# are not modes.
modes = util.report.headers[:-2]
def doit(where):
    """Run every strategy mode (both orderings) and write the text report.

    For each portfolio size in testingModes, evaluates all modes via
    calcIt, computes per-mode averages and variances, appends the ETF
    summary, writes the report file, and dumps the cost dict to CSV.
    """
    global size, more_etf
    testingModes = [20]
    # testingModes = [10, 15, 20]
    appended = []
    for csize in testingModes:
        size = csize
        appended.append(["stocks {}".format(size)])
        for mode in modes:
            ret = calcIt(mode, True, where)
            if ret:
                appended.append(ret)
            more_etf = False
            ret = calcIt(mode, False, where)
            if ret:
                appended.append(ret)
    prevavg = 0
    prevvar = 0
    appended.append(["\n"])
    # Summarize each mode's accumulated high/close ratios; every second
    # entry also emits the delta against the previous mode's stats.
    for i, mode in enumerate(mode_average):
        items = mode_average[mode]
        average = sum(items)/len(items)
        vari = round(np.var(items),3)
        hisdict[mode] += items
        appended.append(["A {0:12} {1:8} {2}".format(mode,
            util.formatDecimal(average), vari)])
        if i % 2 == 1:
            daverage = abs(prevavg - average)
            dvari = (prevvar + vari)/2
            appended.append(["{0:8} {1}".format(round(daverage,4), dvari)])
        prevavg = average
        prevvar = vari
    try:
        appended += etfData()
        # Flatten the list-of-lists into report lines.
        appended = [item for sublist in appended for item in sublist]
    except Exception as e:
        print ('2Failed: '+ str(e))
        import traceback
        print (traceback.format_exc())
        appended = []
    path = util.getPath("{}/report_{}_{}.txt".format(where, title, his_idx))
    with open(path, "w") as f:
        f.write("\n".join(appended))
    print ("written {}".format(path))
    newdict = costToDict()
    writeCostDict(newdict, where)
#"".join(bar)
def multi(where):
    """Run doit() for history indices 3..9, resetting state between runs.

    After all runs, writes the aggregate report and persists the
    accumulated dontBuy blacklist.
    """
    global his_idx, spent, more_etf, etfvs, latest_values, cost_basis
    global etf_purchase_times
    for i in range(3, 10):
        more_etf = True
        his_idx = i
        spent = 1
        doit(where)
        # Reset the per-run accumulators so runs do not bleed into each other.
        etfvs = defaultdict(int)
        latest_values = dict()
        cost_basis = dict()
        etf_purchase_times = defaultdict(int)
        pyutil.getFiles.rememberedFiles = []
    writeReport(where)
    util.setp(dontBuy, "dont")
def writeReport(where):
    """Write per-key summary statistics of hisdict to the final CSV.

    Each row: key, average, variance, max, min, fraction of losing
    entries, composite score, and the full list of formatted ratios.
    """
    import csv
    path = util.getPath("{}/Final_{}report.csv".format(where, where))
    with open(path, 'w') as f:
        for key in hisdict.keys():
            appended = []
            current = hisdict[key]
            negs = 0
            mini = util.formatDecimal(min(current))
            rawmax = max(current)
            maxi = util.formatDecimal(rawmax)
            for item in current:
                # A ratio below 1 means the strategy lost money.
                if item < 1:
                    negs += 1
                appended.append(util.formatDecimal(item))
            ravg = sum(current)/len(current)
            avg = util.formatDecimal(ravg)
            vari = round(np.var(current),3)
            percentages = " ".join(appended)
            rawnegs = round(negs/len(current),3)
            # Score rewards high average with few losers, plus half the max.
            score = round((ravg*(1-rawnegs))+rawmax/2,3)
            f.write("{},{},{},{},{},{},{},{}\n".format(key, avg,
                                                       vari,
                                                       maxi,mini,
                                                       rawnegs,score,
                                                       percentages))
#doit()
#util.setp(dontBuy, "dont")
#print(dontBuy)
|
#!/usr/bin/env python3
#-
# Copyright (c) 2015, David Kalliecharan <david.kalliecharan@dal.ca>
#
# Permission to use, copy, modify, and/or distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
#
# @file asc2xy.py
#
# @author David Kalliecharan <david.kalliecharan@dal.ca>
#
# @brief This file parses the ASCII output of the Diffractometer 500 XRD +
# MDIScan software interface, and saves either a .xy or .txt file
#
from argparse import ArgumentParser
from re import findall
from numpy import array, savetxt
import os.path as path
def convert(ifile):
    """Parse a Diffractometer 500 / MDIScan ASCII dump into an (N, 3) array.

    Each data line looks like "   <angle float>\t   <count int>".  The
    returned columns are (angle, count, 1); the constant 1 is a unit
    weight column expected by downstream tools.

    Parameters
    ----------
    ifile : str
        Path of the ASCII file to parse.

    Returns
    -------
    numpy.ndarray of shape (N, 3), dtype float.
    """
    # Context manager closes the handle (the original leaked it).
    with open(ifile, 'r') as handle:
        asc = handle.read()
    # Raw strings keep the patterns identical while avoiding the
    # invalid-escape-sequence warnings of '\ ' and '\.' in plain strings.
    expr_line = r'\ +[0-9]+\.[0-9]+\t\ +[0-9]+'
    data = findall(expr_line, asc)
    expr_values = r'[0-9]+\.[0-9]+|[0-9]+'
    x = []
    y = []
    z = []
    for line in data:
        a, b = findall(expr_values, line)
        x.append(float(a))
        y.append(int(b))
        z.append(1)
    xy = array([x, y, z]).transpose()
    return xy
if __name__ == '__main__':
    # CLI: take one ASCII file, convert it, and write an .xy file next to
    # the input (same name, extension swapped).
    parser = ArgumentParser()
    parser.add_argument('FILE', type=str,
                        help='Ascii file to be parsed to xy',)
    args = parser.parse_args()
    filename = args.FILE
    dirname = path.dirname(filename)
    basename = path.basename(filename)
    xy = convert(filename)
    # NOTE(review): basename[:-3] assumes a 3-character extension with the
    # dot kept (e.g. "foo.asc" -> "foo.xy" only if the extension is 2 chars
    # plus dot) -- confirm expected input extensions.
    ofile = path.join(dirname, basename[:-3] + "xy")
    print('Writing ', ofile)
    # CRLF newlines for compatibility with the Windows acquisition software.
    savetxt(ofile, xy, fmt='%.5e',
            delimiter=' ', newline='\r\n')
    print("Finished.")
|
# Split the characters of the first line of input.2.txt into four files:
# uppercase letters (incl. Romanian diacritics), digits, lowercase letters,
# and arithmetic operators.
with open ('input.2.txt','r') as f:
    s=f.readline()
c="""QWERTYUIOPĂÎÂASDFGHJKLȘȚZXCVBNM"""  # uppercase alphabet
d="""0123456789"""                        # digits
e="""+-*/"""                              # operators
j="""qwertyuiopasdfghjklzxcvbnm"""        # lowercase alphabet
v=''
for i in s:
    if i in c:
        v=v+str(i)
with open ('litereA.txt','w') as f:
    f.write(v)
b=''
for a in s:
    if a in d:
        b=b+str(a)
with open ('cifre.txt','w') as f:
    f.write(b)
n=''
for k in s:
    if k in j:
        n=n+str(k)
with open ('litereB.txt','w') as f:
    f.write(n)
m=''
# NOTE(review): the loop variable `f` shadows the file-handle name used by
# the `with` statements; harmless here, but confusing.
for f in s:
    if f in e:
        m=m+str(f)
with open ('operatori.txt','w') as f:
    f.write(m)
|
from watchdog.observers import Observer
import os
import time
# FileSystemEventHandler - класс по отслеживанию изменений
from watchdog.events import FileSystemEventHandler
# Subclass FileSystemEventHandler to react to changes in the watched folder.
class Handler(FileSystemEventHandler):
    # Extensions routed to the Photos / Videos sub-folders of folder_dest.
    PHOTO_EXTENSIONS = {"jpg", "png", "svg"}
    VIDEO_EXTENSIONS = {"mp4"}

    def on_modified(self, event):
        """Sort every file currently in folder_track into folder_dest.

        Called by watchdog on any modification inside the watched folder;
        photos go to <folder_dest>/Photos, videos to <folder_dest>/Videos.
        Files with other extensions are left in place.
        """
        for filename in os.listdir(folder_track):
            # os.path.splitext takes the LAST extension; the original
            # `filename.split(".")[1]` misclassified names such as
            # "my.photo.jpg" (it looked at "photo").
            extension = os.path.splitext(filename)[1].lstrip(".").lower()
            if extension in self.PHOTO_EXTENSIONS:
                subfolder = "/Photos/"
            elif extension in self.VIDEO_EXTENSIONS:
                subfolder = "/Videos/"
            else:
                continue
            file = folder_track + "/" + filename
            new_path = folder_dest + subfolder + filename
            os.rename(file, new_path)
# Folder being watched.
folder_track = 'Путь папки'
# Destination folder files are moved into.
folder_dest = 'Путь папки 2'
# Wire up the observer and start watching recursively.
handle = Handler()
observer = Observer()
observer.schedule(handle, folder_track, recursive=True)
observer.start()
# Idle loop: sleep in 10-second steps until Ctrl-C, then shut the
# observer down cleanly and wait for its thread to finish.
try:
    while(True):
        time.sleep(10)
except KeyboardInterrupt:
    observer.stop()
observer.join()
|
#!/usr/bin/env python
# encoding: utf-8
# @author: Zhipeng Ye
# @contact: Zhipeng.ye19@xjtlu.edu.cn
# @file: plus99.py
# @time: 2020-01-24 13:56
# @desc:
if __name__ == '__main__':
    # Stream the n-gram file and append a "\t-99" back-off weight to every
    # line that starts with '-' but has none; buffer output and flush to
    # the destination file every 10 million rows.
    content_list = []
    with open('/Data_SSD/zhipengye/zhipengye/LM/completed.data/data/n-gram3/GoogleWbi-Direct.small',encoding='utf-8') as file:
        for line in file:
            if line.startswith('-') and '-99' not in line:
                line_content = line.strip()+'\t'+'-99'
                content_list.append(line_content)
            else:
                content_list.append(line.strip())
            # Flush the buffer in 10M-row batches (append mode).
            if len(content_list) >= 10000000:
                with open('/Data_SSD/zhipengye/zhipengye/LM/completed.data/data/n-gram3/GoogleWbi-Direct-99.small','a',encoding='utf-8') as file:
                    file.write('\n'.join(content_list))
                print('10000000 rows have been processed!')
                content_list = []
    # Flush whatever is left after the final partial batch.
    with open('/Data_SSD/zhipengye/zhipengye/LM/completed.data/data/n-gram3/GoogleWbi-Direct-99.small', 'a',
              encoding='utf-8') as file:
        file.write('\n'.join(content_list))
    print(str(len(content_list))+' rows have been processed!')
# -*- coding: utf-8 -*-
"""
Created on Sun Sep 1 19:44:29 2019
@author: Zilean
"""
from sklearn import datasets
from sklearn import metrics
from sklearn.model_selection import train_test_split
from sklearn import preprocessing
from sklearn.linear_model import SGDClassifier
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
# Load the iris dataset and inspect its shape.
iris = datasets.load_iris()
X_iris, y_iris = iris.data, iris.target
print( X_iris.shape, y_iris.shape)
print( X_iris[0], y_iris[0])
# Get dataset with only the first two attributes
X, y = X_iris[:, :2], y_iris
# Split the dataset into a training and a testing set
# Test set will be the 25% taken randomly
X_train, X_test, y_train, y_test = train_test_split(X, y,test_size=0.25, random_state=33)
print( X_train.shape, y_train.shape)
# Standardize the features
scaler = preprocessing.StandardScaler().fit(X_train)
X_train = scaler.transform(X_train)
X_test = scaler.transform(X_test)
# Scatter plot of the standardized training data, one color per class.
colors = ['red','greenyellow','blue']
for i in range(len(colors)):
    xs = X_train[:,0][y_train == i]
    ys = X_train[:,1][y_train == i]
    plt.scatter(xs,ys,c=colors[i])
plt.legend(iris.target_names)
plt.xlabel('Sepal length')
plt.ylabel('Sepal width')
# Train a linear classifier with stochastic gradient descent.
clf = SGDClassifier()
clf.fit(X_train, y_train)
print( clf.coef_)
print( clf.intercept_)
# Second demo: load iris straight from the UCI repository and plot two
# classes (setosa vs versicolor) over two features.
df = pd.read_csv('https://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.data',header=None)
df.tail()
y = df.iloc[0:100,4].values
y = np.where(y == 'Iris-setosa',-1,1)
X = df.iloc[0:100,[0,2]].values
plt.scatter(X[:50,0],X[:50,1],color='red',marker='o',label='setosa')
plt.scatter(X[50:100,0],X[50:100,1],color='blue',marker='x',label='versicolor')
plt.xlabel('sepal length [cm]')
plt.ylabel('petal length [cm]')
plt.legend(loc='upper left')
plt.show()
# Plot each one-vs-rest decision boundary of the SGD classifier.
x_min, x_max = X_train[:, 0].min() - .5, X_train[:, 0].max() +.5
y_min, y_max = X_train[:, 1].min() - .5, X_train[:, 1].max() +.5
xs = np.arange(x_min, x_max, 0.5)
fig, axes = plt.subplots(1, 3)
fig.set_size_inches(10, 6)
for i in [0, 1, 2]:
    axes[i].set_aspect('equal')
    axes[i].set_title('Class '+ str(i) + ' versus the rest')
    axes[i].set_xlabel('Sepal length')
    axes[i].set_ylabel('Sepal width')
    axes[i].set_xlim(x_min, x_max)
    axes[i].set_ylim(y_min, y_max)
    plt.sca(axes[i])
    plt.scatter(X_train[:, 0], X_train[:, 1], c=y_train,
                cmap=plt.cm.prism)
    # Decision line: w0*x + w1*y + b = 0 solved for y.
    ys = (-clf.intercept_[i] - xs * clf.coef_[i, 0]) / clf.coef_[i, 1]
    plt.plot(xs, ys)
# Report train/test accuracy.
y_train_pred= clf.predict(X_train)
print (metrics.accuracy_score(y_train,y_train_pred))
y_pred = clf.predict(X_test)
print (metrics.accuracy_score(y_test,y_pred))
print("My name is ZIHAN CHEN.")
print("My NetID is: zihanc7.")
print("I hereby certify that I have read the University policy on Academic Integrity and that I am not in violation.")
import pandas as pd
import numpy as np
from sklearn.cluster import KMeans
from sklearn.metrics import silhouette_score
from functools import reduce
class ClustersHandler:
    """Prepare weather-station data and run K-means clusterings over it.

    The constructor merges the fact and station tables, normalises the
    numeric columns (optionally weighting them), and aggregates the
    measures per station and month so that each station becomes a single
    row with one column per (measure, month) pair.
    """

    def __init__(self, df: pd.DataFrame, df_stations: pd.DataFrame, country:str=None, weights:dict=None):
        if country is not None:
            df_stations = df_stations[df_stations["COUNTRY"] == country]
        self.df = pd.merge(left=df, right=df_stations, how="right", left_on="STATION", right_on="STATION")
        self._normalise()
        if weights is not None:
            self._apply_weights(weights)
        normalised_df_stations = self.df[["STATION", "COUNTRY", "NAME", "LATITUDE", "LONGITUDE", "ELEVATION"]].drop_duplicates("STATION")
        # Dropped before the per-month aggregation, otherwise latitude,
        # longitude, etc. would be averaged per month as well.
        self.df = self.df.drop(["COUNTRY", "NAME", "LATITUDE", "LONGITUDE", "ELEVATION"], axis=1)
        self.df["DATE"] = pd.to_datetime(self.df["DATE"])
        self.df = self.df.groupby([self.df["STATION"], self.df["DATE"].dt.month.rename("MONTH")]).mean()
        self.df = self.df.unstack(level=1)
        self.df = self.df.dropna()
        # Re-merge with the station data (lat/long/elevation already normalised).
        self.df = pd.merge(left=self.df, right=normalised_df_stations, how="inner", left_on="STATION", right_on="STATION")
        self._k_scores = None

    def _normalise(self):
        # Min-max normalisation of every numeric column.
        column_to_not_normalise = ["DATE", "STATION", "COUNTRY", "NAME"]
        column_to_normalise = [column for column in self.df.columns if column not in column_to_not_normalise]
        self.df[column_to_normalise] = ((self.df[column_to_normalise] - self.df[column_to_normalise].min())
                                        / (self.df[column_to_normalise].max() - self.df[column_to_normalise].min()))

    def _apply_weights(self, weights:dict):
        # Scale each column by its weight so it counts more (or less) in
        # the K-means distance.
        for col_name, weight in weights.items():
            self.df[col_name] = self.df[col_name] * weight

    def get_clusters(self, clusters_name: str, list_columns: list, k_clusters_min: int=2, k_clusters_max: int=10) -> pd.DataFrame:
        '''list_columns: level-1 column names in the hierarchy (level 2 is
        the month number).  Returns a dataframe with the clustering result
        for every K in [k_clusters_min, k_clusters_max].'''
        # Result frame starts with the station ids that will be clustered.
        df_results = self.df[["STATION"]].copy()
        # Build the list of (level1, level2) column tuples: station-level
        # columns are used as-is, monthly measures expand to 12 tuples.
        list_columns_final = []
        for column in list_columns:
            if column in self.df.columns:
                list_columns_final.append(column)
            else:
                for n_month in range(1, 13):
                    list_columns_final.append((column, n_month))
        # Scores of each clustering, one entry per K.
        k_range = range(k_clusters_min, k_clusters_max + 1)
        k_scores = {"K": list(k_range), f"{clusters_name}_inertia": [], f"{clusters_name}_silhouette": []}
        # Run one clustering per K and record labels and scores.
        for k in k_range:
            K_means = KMeans(k, random_state=0).fit(self.df[list_columns_final])
            print(self.df[list_columns_final])
            name_colonne = k
            # Labels are shifted to start at 1 for presentation.
            df_results.loc[:,name_colonne] = K_means.labels_ + 1
            # Sum of squared distances of samples to their closest cluster center
            k_scores[f"{clusters_name}_inertia"].append(K_means.inertia_)
            # Silhouete_score
            k_scores[f"{clusters_name}_silhouette"].append(silhouette_score(self.df[list_columns_final], K_means.labels_))
        # Keep the scores on the instance; if scores already exist (from a
        # previous get_clusters call on the same instance), merge them on K.
        if self._k_scores is not None:
            last_scores = pd.DataFrame.from_dict(k_scores, orient="columns")
            self._k_scores = pd.merge(left=self._k_scores, right=last_scores, on="K", how="outer")
        else:
            self._k_scores = pd.DataFrame.from_dict(k_scores, orient="columns")
        return df_results

    def get_k_scores(self) -> pd.DataFrame:
        """Return the accumulated per-K inertia/silhouette scores."""
        return self._k_scores
def create_clusters():
    """Run six predefined clusterings over French stations and save results.

    Each clustering is melted to long format (STATION, K, cluster label),
    all six are inner-joined on (STATION, K), and the result plus the
    per-K scores are written to CSV.
    """
    df = pd.read_csv("data/climat/clean_for_bi/ClimatFACT.csv")
    df_stations = pd.read_csv("data/climat/clean_for_bi/StationDIM.csv")
    clustering = ClustersHandler(df, df_stations, "France")
    clusters1 = clustering.get_clusters("Cluster_TEMP", ["TEMP"], 2, 10)
    clusters1 = clusters1.melt(id_vars=["STATION"], var_name="K", value_name="Cluster_TEMP")
    clusters2 = clustering.get_clusters("Cluster_TEMP+", ["TEMP", "MIN", "MAX", "DEWP"])
    clusters2 = clusters2.melt(id_vars=["STATION"], var_name="K", value_name="Cluster_TEMP+")
    clusters3 = clustering.get_clusters("Cluster_WIND", ["WDSP", "MXSPD"])
    clusters3 = clusters3.melt(id_vars=["STATION"], var_name="K", value_name="Cluster_WIND")
    clusters4 = clustering.get_clusters("Cluster_FRSHT", ["FOG", "RAIN", "SNOW", "HAIL", "THUN"])
    clusters4 = clusters4.melt(id_vars=["STATION"], var_name="K", value_name="Cluster_FRSHT")
    clusters5 = clustering.get_clusters("Cluster_ALL", ["TEMP", "MIN", "MAX", "DEWP", "WDSP", "MXSPD",
                                                        "FOG", "RAIN", "SNOW", "HAIL", "THUN"])
    clusters5 = clusters5.melt(id_vars=["STATION"], var_name="K", value_name="Cluster_ALL")
    clusters6 = clustering.get_clusters("Cluster_ALL+GEO", ["TEMP", "MIN", "MAX", "DEWP", "WDSP", "MXSPD",
                                                            "FOG", "RAIN", "SNOW", "HAIL", "THUN", "ELEVATION", "LATITUDE", "LONGITUDE"])
    clusters6 = clusters6.melt(id_vars=["STATION"], var_name="K", value_name="Cluster_ALL+GEO")
    all_clusters = [clusters1, clusters2, clusters3, clusters4, clusters5, clusters6]
    final_df = reduce(lambda left, right: pd.merge(left=left, right=right, on=["STATION", "K"],
                                                   how="inner"), all_clusters)
    final_df.to_csv("data/climat/clean_for_bi/Clusters.csv", index=False)
    k_scores = clustering.get_k_scores()
    k_scores.to_csv("data/climat/clean_for_bi/k_scores.csv", index=False)
def create_optimal_cluster():
    """Run one weighted all-features clustering and save it to CSV.

    The weights emphasize temperature measures over wind/precipitation
    and geography when computing the K-means distances.
    """
    df = pd.read_csv("data/climat/clean_for_bi/ClimatFACT.csv")
    df_stations = pd.read_csv("data/climat/clean_for_bi/StationDIM.csv")
    weights = {"TEMP":3, "MIN":2.5, "MAX":2.5, "DEWP":2, "WDSP":1.5, "MXSPD":1.5,
               "FOG":1, "RAIN":2, "SNOW":1, "HAIL":1, "THUN":1.5, "ELEVATION":1, "LATITUDE":1, "LONGITUDE":1}
    clustering = ClustersHandler(df, df_stations, "France", weights)
    clusters = clustering.get_clusters("Cluster_ALL", [key for key in weights.keys()], 2, 10)
    clusters = clusters.melt(id_vars=["STATION"], var_name="K", value_name="Cluster_ALL")
    clusters.to_csv("data/climat/clean_for_bi/Clusters_opt.csv", index=False)
if __name__ == "__main__":
pd.set_option('display.float_format', lambda x: '%.5f' % x)
pd.set_option('display.max_columns', None)
pd.set_option('display.width', 120)
pd.set_option('display.max_rows', 100)
pd.set_option('display.min_rows', 30)
np.warnings.filterwarnings('ignore', category=np.VisibleDeprecationWarning)
create_optimal_cluster()
# create_clusters()
|
#!/usr/bin/python3
# Demonstrates a simple two-argument function.
# Renamed from `sum` to `add`: the original shadowed the builtin sum(),
# and PEP 8 recommends `def` over assigning a lambda to a name.
def add(arg1, arg2):
    """Return arg1 + arg2."""
    return arg1 + arg2

# Call the function
print("相加后的值: ", add(10, 20))
print("相加后的值: ", add(20, 20))
from setuptools import setup, find_packages

# Packaging metadata for the sjresearchutil distribution.
setup(name='sjresearchutil',
      version='0.1.0',
      url='',
      license='MIT',
      author='Sooyong Jang',
      author_email='sooyong@seas.upenn.edu',
      packages=find_packages(exclude=['tests']),
      long_description=open('README.md').read(),
      zip_safe=False,
      # Dropped setup_requires=['']: an empty requirement string is
      # invalid and breaks installation under modern setuptools/pip.
      test_suite='nose.collector')
|
import bz2
import fnmatch
import functools
import gzip
import os
import re
import sys
# Inside a function, the yield statement can also be used as an
# expression that appears on the right side of an assignment operator.
def receiver():
    """Coroutine that prints every value sent to it via send()."""
    print("Ready to receive")
    while True:
        n = (yield)  # suspends here until a value is sent
        print("Got {}".format(n))
# Example use:
r1 = receiver()
next(r1) # Ready to receive
next(r1) # Got None
r1.send(1) # Got 1
r1.send(2) # Got 2
r1.send('Hello') # Got Hello
# The requirement of first calling next() on a coroutine is easily
# overlooked and a common source of errors.Therefore, it is
# recommended that coroutines be wrapped with a decorator that
# automatically takes care of this step.
def autostart_coroutine(func):
    """Decorator that primes a coroutine by advancing it to its first yield.

    Calling a generator function does not run any code until next() is
    called; this wrapper performs that initial advance so callers can
    send() immediately.
    """
    @functools.wraps(func)
    def start(*args, **kwargs):
        coroutine = func(*args, **kwargs)
        next(coroutine)
        return coroutine
    return start
@autostart_coroutine
def receiver():
    """Printing coroutine; already primed by @autostart_coroutine,
    so send() can be called immediately after construction."""
    print("Ready to receive")
    while True:
        n = (yield)
        print("Got {}".format(n))
# Example use:
r2 = receiver()
r2.send("Hello World") # Note: initial `next(r)` needed
# A coroutine will typically run indefinitely unless it is explicitly
# shut down or it exits on its own.To close the stream of input
# values, use the `close()` method
r1.close()
r2.close()
# r1.send(1) will raise StopIteration
@autostart_coroutine
def receiver():
    """Printing coroutine that announces its shutdown.

    close() raises GeneratorExit at the suspended yield; catching it lets
    the coroutine run cleanup code before terminating.
    """
    print("Ready to receive")
    try:
        while True:
            n = (yield)
            print("Got {}".format(n))
    except GeneratorExit:
        print("Receiver done")
# Exceptions can be raised inside a coroutine using the
# `throw(exctype [, value [,tb]])` method where exctype is an
# exception type, value is the exception value, and tb is a traceback
# object. For example: r.throw(RuntimeError, "You're hosed!")
# A coroutine may simultaneously receive and emit return values using
# `yield` if values are supplied in the `yield` expression.
def line_splitter(delimiter=None):
    """Coroutine: each send(text) yields text.split(delimiter).

    The very first next() yields None because nothing has been split yet.
    """
    print("Ready to split")
    pieces = None
    while True:
        text = (yield pieces)
        pieces = text.split(delimiter)
s = line_splitter(',')
print(next(s)) # None
print(s.send('A, B, C')) # ['A', 'B', 'C']
print(s.send('100, 200, 300')) # ['100', '200', '300']
# Using Generators and Coroutines
# Generator functions are useful if you want to set up a processing
# pipeline, similar in nature to using a pipe in the UNIX shell.
# Coroutines can be used to write programs based on data-flow
# processing. Programs organized in this way look like inverted
# pipelines. Instead of pulling values through a sequence of
# generator functions using a for loop, you send values into a
# collection of linked coroutines.
@autostart_coroutine
def find_files(target):
    """Coroutine: receives (topdir, pattern) tuples and sends every
    matching file path under topdir to the *target* coroutine."""
    while True:
        topdir, pattern = (yield)
        for path, dirname, filelist in os.walk(topdir):
            for name in filelist:
                if fnmatch.fnmatch(name, pattern):
                    target.send(os.path.join(path, name))
@autostart_coroutine
def opener(target):
    """Coroutine: receives file names, opens them (transparently handling
    .gz and .bz2), and sends the open file objects to *target*."""
    while True:
        name = (yield)
        if name.endswith('.gz'):
            f = gzip.open(name)
        elif name.endswith('.bz2'):
            f = bz2.BZ2File(name)
        else:
            f = open(name)
        target.send(f)
@autostart_coroutine
def cat(target):
    """Coroutine: receives open file objects and sends each of their
    lines to *target*."""
    while True:
        f = (yield)
        for line in f:
            target.send(line)
@autostart_coroutine
def grep(pattern, target):
    """Coroutine: receives lines and forwards to *target* only those
    containing *pattern* (plain substring match, not regex)."""
    while True:
        line = (yield)
        if pattern in line:
            target.send(line)
@autostart_coroutine
def printer():
    """Terminal coroutine of the pipeline: writes each received line to
    stdout unchanged (lines keep their own newlines)."""
    while True:
        line = (yield)
        sys.stdout.write(line)
# Here is how you would link these coroutines to create a dataflow
# processing pipeline:
finder = find_files(opener(cat(grep('py', printer()))))
finder.send(
('/home/beenorgone/Documents/gDrive/Self-learning/Themes/Python/py-advanced', '*.md'))
'''
A critical aspect of this example is that the coroutine pipeline
remains active indefinitely or until close() is explicitly called
on it. Because of this, a program can continue to feed data into a
coroutine for as long as necessary—for example, the two repeated
calls to send() shown in the example.
Coroutines can be used to implement a form of concurrency.
For example, a centralized task manager or event loop can schedule
and send data into a large collection of hundreds or even thousands
of coroutines that carry out various processing tasks. The fact
that input data is “sent” to a coroutine also means that coroutines
can often be easily mixed with programs that use message queues and
message passing to communicate between program components.
'''
# Generator version:
def find_files(topdir, pattern):
    """Yield the full path of every file under *topdir* matching *pattern*.

    Generator counterpart of the coroutine pipeline above; *pattern* is a
    fnmatch-style glob (e.g. "*.txt").
    """
    for path, dirname, filelist in os.walk(topdir):
        for name in filelist:
            if fnmatch.fnmatch(name, pattern):
                # Bug fix: the original yielded os.path.join(path.name) --
                # attribute access on a str -- instead of joining the
                # directory with the file name.
                yield os.path.join(path, name)
def opener(filenames):
    """Yield an open file object for each name in *filenames*.

    .gz and .bz2 files are opened with their matching decompressing
    opener; everything else uses the builtin open().  Callers own the
    returned handles and are responsible for closing them.
    """
    for name in filenames:
        if name.endswith(".gz"):
            open_fn = gzip.open
        elif name.endswith(".bz2"):
            open_fn = bz2.BZ2File
        else:
            open_fn = open
        yield open_fn(name)
def cat(filelist):
    """Yield every line of every file-like object in *filelist*, in order."""
    for handle in filelist:
        yield from handle
def grep(pattern, lines):
    """Yield only the lines in *lines* that contain *pattern*.

    Plain substring containment -- *pattern* is not a regex.
    """
    for candidate in lines:
        if pattern not in candidate:
            continue
        yield candidate
'''
Apply several regex to the text in a set of HTML files.
The purpose is to output each file’s URLs and level 1 and level 2 headings.
We’ll start by looking at the regular expressions, then
the creation of the coroutine “matchers”, and then
we will look at the coroutines and how they are used.
'''
@autostart_coroutine
def regex_matcher(receiver, regex):
    '''A coroutine that takes a receiver function
    (itself a coroutine) and a compiled regex to match.
    Each send(text) runs regex.finditer over the whole text and sends
    every match object to the receiver.'''
    while True:
        text = (yield)
        for match in regex.finditer(text):
            receiver.send(match)
@autostart_coroutine
def reporter():
    '''A coroutine used to output results.
    When suspended, it waits until a match object is sent to it, prints
    the match's details, and waits again in an endless loop -- stopping
    only if close() is called on it.'''
    # Boilerplate link targets that should not be reported as URLs.
    ignore = frozenset({"style.css", "favicon.png", "index.html"})
    while True:
        match = (yield)
        if match is not None:
            # Which named group matched tells us which regex sent this.
            groups = match.groupdict()
            if 'url' in groups and groups['url'] not in ignore:
                print('URL: {}'.format(groups['url']))
            elif 'h1' in groups:
                print('H1: {}'.format(groups['h1']))
            elif 'h2' in groups:
                print('H2: {}'.format(groups['h2']))
URL_RE = re.compile(r'''href=(?P<quote>['"])(?P<url>[^\1]+?)'''
r'''(?P=quote)''', re.IGNORECASE)
flags = re.MULTILINE | re.IGNORECASE | re.DOTALL
H1_RE = re.compile(r"<h1>(?P<h1>.+?)</h1>", flags)
H2_RE = re.compile(r"<h2>(?P<h2>.+?)</h2>", flags)
receiver = reporter()
matchers = (regex_matcher(receiver, URL_RE),
regex_matcher(receiver, H1_RE),
regex_matcher(receiver, H2_RE))
try:
for file in sys.argv[1:]:
print(file)
html = open(file, encoding="utf-8").read()
for matcher in matchers:
matcher.send(html)
finally:
for matcher in matchers:
matcher.close()
receiver.close()
'''The program reads the filenames listed on the command line,
and for each one prints the filename and then reads the file’s
entire text into the html variable using the UTF-8 encoding. Then
the program iterates over all the matchers (three in this case),
and sends the text to each of them. Each matcher then proceeds
independently, sending each match it makes to the reporter
coroutine. At the end we call close() on each matcher and
on the reporter--this terminates them, since otherwise they would
continue (suspended) waiting for text (or matches in the case of
the reporter) since they contain infinite loops.'''
|
def write_p2g(G, path, encoding: str = "utf-8") -> None: ...
def read_p2g(path, encoding: str = "utf-8"): ...
def parse_p2g(lines): ...
|
import os
from typing import Generator
import pandas as pd
from pandas import DataFrame, ExcelWriter
SHEET_NAME = "Master"
MERGED_FILE_NAME = "POC.xlsx"
def _get_all_excel_files(directory: str) -> Generator[str, None, None]:
for dir_path, _, filenames in os.walk(directory):
for f in filenames:
if f.endswith(".xlsx"):
yield os.path.abspath(os.path.join(dir_path, f))
def _read_sheet(excel_file: str, sheet_name: str) -> DataFrame:
    """Read one named sheet of an Excel file into a DataFrame."""
    return pd.read_excel(excel_file, sheet_name=sheet_name)
def _write_sheet(data: DataFrame, path: str, sheet_name: str) -> None:
    """Write *data* to *path* as a single named sheet, without the index."""
    with ExcelWriter(path, engine='xlsxwriter') as writer:
        data.to_excel(writer, sheet_name=sheet_name, index=False)
def merge_excel_sheet(base_path: str) -> (str, int):
    """Merge the Master sheet of every .xlsx under *base_path* into one file.

    The merged workbook is written one directory above *base_path* as
    MERGED_FILE_NAME.  Returns (output_path, records_count).
    """
    # Get all data.  Collect frames and concatenate once at the end:
    # DataFrame.append was removed in pandas 2.0, and repeated appends
    # were O(n^2) anyway.
    frames = []
    for single_excel_file in _get_all_excel_files(base_path):
        print(f"Begin to process excel file -> {single_excel_file}")
        frames.append(_read_sheet(single_excel_file, SHEET_NAME))
    all_data = pd.concat(frames) if frames else DataFrame()
    # Output to a file
    records_count = len(all_data.index)
    output_path = os.path.abspath(os.path.join(base_path, "..", MERGED_FILE_NAME))
    print(f"\nOutput {records_count} records to file {output_path}")
    _write_sheet(all_data, output_path, SHEET_NAME)
    return output_path, records_count
|
#!/usr/bin/python
# Demonstrates string slicing and concatenation.
# Converted the Python 2 print statements to print() calls, which work
# identically under both Python 2 and Python 3.
teststr = "Hello DevOPS"
print(teststr[0:5])  # first five characters: "Hello"
newvar = teststr + "hola"
print(newvar)
|
from django.shortcuts import render_to_response
from django.template.context import RequestContext

# Create your views here.
def Default(request):
    """Render base.html with a small HTML content snippet.

    Bug fix: the context was written as {'content', "<p>Hello</p>"} --
    a *set* literal -- so the template never received a `content`
    variable.  A dict entry (colon, not comma) is intended.
    """
    return render_to_response('base.html', RequestContext(request, {
        'content': "<p>Hello</p>"
    }))
from flask import Flask,render_template, request,json
import os
import logging
from pathlib import Path
import torch
import torch.nn as nn
import numpy as np
import pandas as pd
from torch.utils.data import Dataset, DataLoader
from pytorch_pretrained_bert import BertTokenizer
from pytorch_pretrained_bert.modeling import BertModel
from helperbot import BaseBot, TriangularLR
from Utils import *
app = Flask(__name__)
def predict(loader, model):
    """Run *model* over every batch in *loader*; return stacked outputs.

    Each batch's last element (the labels) is discarded; the remaining
    tensors are moved to cuda:0.  Gradients are disabled for inference.
    """
    model.eval()
    outputs = []
    with torch.set_grad_enabled(False):
        for *input_tensors, y_local in (loader):
            # y_local (labels) is unused: inference only.
            input_tensors = [x.to("cuda:0") for x in input_tensors]
            tmp = model(*input_tensors)
            outputs.append(tmp)
    # Concatenate the per-batch outputs along the batch dimension.
    outputs = torch.cat(outputs, dim=0)
    return outputs
BERT_MODEL = 'bert-large-uncased'
CASED = False
tokenizer = BertTokenizer.from_pretrained(
BERT_MODEL,
do_lower_case=CASED,
never_split = ("[UNK]", "[SEP]", "[PAD]", "[CLS]", "[MASK]", "[A]", "[B]", "[P]"))
tokenizer.vocab["[A]"] = -1
tokenizer.vocab["[B]"] = -1
tokenizer.vocab["[P]"] = -1
model = GAPModel(BERT_MODEL, torch.device("cuda:0"))
model.load_state_dict(torch.load("./cache/model_cache/best.pth"))
model.eval()
# df_inference = pd.read_csv("inference.tsv", delimiter="\t")
# inference_ds = GAPDataset(df_inference, tokenizer)
# inference_loader = DataLoader(
# inference_ds,
# collate_fn = collate_examples,
# batch_size=128,
# num_workers=2,
# pin_memory=True,
# shuffle=False)
# preds = predict(inference_loader, model)
# _, labels = torch.max(preds, 1)
# print("************************************************************************")
# print(labels.item())
@app.route('/')
def hello_world():
    """Serve the input form page."""
    return render_template('input.html')
@app.route('/get_input', methods=['GET'])
def get_input():
    """Return one random GAP development-set example as JSON.

    Fields mirror the GAP TSV columns: text, the two candidate
    antecedents (A/B) with their character offsets, and the pronoun
    with its offset.
    """
    df_test = pd.read_csv("gap-development.tsv", delimiter="\t")
    df_elements = df_test.sample(n=1)
    df_dict = df_elements.to_dict()
    ind = df_elements.index.values[0]
    return json.dumps({'status':'OK',
                       'content': df_dict['Text'][ind],
                       "target_a":df_dict['A'][ind],
                       "target_b":df_dict['B'][ind],
                       "pronoun":df_dict['Pronoun'][ind],
                       "target_a_pos":df_dict['A-offset'][ind],
                       "target_b_pos":df_dict['B-offset'][ind],
                       "pronoun_pos":df_dict['Pronoun-offset'][ind]})
@app.route('/predict', methods=['POST'])
def predict_model():
    """Build a one-row GAP-format TSV from the form fields, run the model,
    and return the predicted label as JSON."""
    ID = "Inference_Sample"
    URL = "https://www.google.com"
    content = request.form['content']
    target_a = request.form['target_a']
    target_b = request.form['target_b']
    pronoun = request.form['pronoun']
    target_a_pos = request.form['target_a_pos']
    target_b_pos = request.form['target_b_pos']
    pronoun_pos = request.form['pronoun_pos']
    # Assemble the row in the GAP column order written in the header below.
    line = "\n"+ID+"\t"+content+"\t"+pronoun+"\t"+pronoun_pos+"\t"+target_a+"\t"+target_a_pos+"\t"
    line += "FALSE\t"+target_b+"\t"+target_b_pos+"\tFALSE\t"+URL
    # NOTE(review): fixed shared filename; with app.run(threaded=True),
    # concurrent requests can clobber each other's inference.tsv — confirm.
    with open("inference.tsv", "w") as f:
        f.write("ID\tText\tPronoun\tPronoun-offset\tA\tA-offset\tA-coref\tB\tB-offset\tB-coref\tURL")
        f.write(line)
    df_inference = pd.read_csv("inference.tsv", delimiter="\t")
    inference_ds = GAPDataset(df_inference, tokenizer)
    inference_loader = DataLoader(
        inference_ds,
        collate_fn = collate_examples,
        batch_size=128,
        num_workers=2,
        pin_memory=True,
        shuffle=False)
    preds = predict(inference_loader, model)
    # argmax over classes; presumably 0=A, 1=B, 2=neither — TODO confirm.
    _, labels = torch.max(preds, 1)
    print("======================================================================")
    print(labels.item())
    return json.dumps({'status':'OK','value':labels.item()})
if __name__ == "__main__":
    #init()
    # Threaded so requests are served concurrently; binds on all interfaces.
    app.run(threaded=True, host='0.0.0.0')
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.3 on 2017-11-02 20:46
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated: renames CargaRestudiante.carrera to materia."""

    dependencies = [
        ('adminapp', '0023_cargarestudiante'),
    ]

    operations = [
        migrations.RenameField(
            model_name='cargarestudiante',
            old_name='carrera',
            new_name='materia',
        ),
    ]
|
"""
Cycle Detection - CodeWar
"""
def floyd(root):
hare = root.next.next
while tortoise != hare:
tortoise = f(tortoise)
hare = f(f(hare))
tortoise = root
while tortoise != hare:
tortoise = f(tortoise)
hare = f(hare)
start = tortoise
length = 1
hare = f(tortoise)
while tortoise != hare:
hare = f(hare)
length += 1
return length, start |
from matplotlib import pyplot as plt
import numpy as np
import os
import tda
def measure_square(radius, dim_pcd=2):
    """Draw one point uniformly from the cube [-radius, radius]^dim_pcd.

    :param radius: half side length of the cube
    :param dim_pcd: dimension of the point (default 2)
    :return: ndarray of shape (dim_pcd,)

    Idiom fix: the original filled an empty vector one scalar draw at a time;
    ``np.random.uniform`` with a ``size`` argument produces the identical
    draws from the same RNG stream in one vectorized call.
    """
    return np.random.uniform(-1 * radius, radius, dim_pcd)
def measure_circle(radius, dim_pcd=2):
    """Sample one point uniformly from the dim_pcd-ball of the given radius.

    A Gaussian draw normalised to the unit sphere gives a uniform direction;
    scaling by U**(1/d) makes the sample uniform over the ball's volume.
    """
    direction = np.random.randn(dim_pcd)
    norm = np.linalg.norm(direction)
    if norm != 0:
        direction /= norm
    return radius * direction * np.power(np.random.random(), 1 / dim_pcd)
def measure_gauss(sigma, dim_pcd=2):
    """Sample one point from an isotropic Gaussian N(0, sigma^2 * I) in dim_pcd dims."""
    covariance = (sigma ** 2) * np.eye(dim_pcd)
    return np.random.multivariate_normal(np.zeros(dim_pcd), covariance)
def measure(radius, name_measure, dim_pcd=2):
    """Dispatch to the sampler named by *name_measure*.

    Unknown names fall back to the uniform square, matching the original
    if/elif chain's else branch.
    """
    samplers = {"gauss": measure_gauss, "circle": measure_circle}
    sampler = samplers.get(name_measure, measure_square)
    return sampler(radius, dim_pcd)
def lattice(num_side, dim_pcd=2):
    """Return the integer lattice [0, num_side)^dim_pcd as a list of lists.

    :param num_side: number of lattice points per axis
    :param dim_pcd: lattice dimension (default 2)

    Generalized: the original hard-coded nested loops for 3-D and 2-D and
    silently produced a 2-D lattice for any other dimension.
    ``itertools.product`` yields the same iteration order (last axis fastest)
    for dims 2 and 3 and handles every other dimension correctly.
    """
    from itertools import product
    return [list(point) for point in product(range(num_side), repeat=dim_pcd)]
def save_points(list_lat, width, name_save):
    """Scatter-plot the first two coordinates of every point and save as *name_save*.

    Axes run from -1 to *width* on both dimensions.
    """
    plt.figure()
    for point in list_lat:
        plt.plot(point[0], point[1], "bo")
    plt.xlim(-1, width)
    plt.ylim(-1, width)
    plt.savefig(name_save)
    plt.close()
# --- Script body: generate noisy-lattice point clouds and optional plots ---
NAME_DIR = "../data"
if not os.path.exists(NAME_DIR):
    os.mkdir(NAME_DIR)
NAME_DIR += "/lattice"
if not os.path.exists(NAME_DIR):
    os.mkdir(NAME_DIR)
# Manual on/off switches (edit the [0]/[1] index to toggle).
main = [True, False][0]
plot = [True, False][0]
# NOTE(review): LIST_MEASURE has 5 entries but LIST_RADIUS only 4 and the
# loops run over len(LIST_RADIUS) — the final "gauss" entry is never used.
LIST_MEASURE = ["square", "square", "square", "square", "gauss"]
LIST_RADIUS = [np.sqrt(2) * 0.10, np.sqrt(3) * 0.10, 0.20, 0.10]
CONST_PCD = 2
if main:
    CONST_IID = 100
    CONST_SIDE = 20
    num_type = len(LIST_RADIUS)
    for idx_data in range(num_type):
        # make directory to save point sets as txt file
        name_dir_lattice = "%s/pcd%s_side%s_iid%s_%s_%s" % (
            NAME_DIR, CONST_PCD, CONST_SIDE, CONST_IID,
            LIST_MEASURE[idx_data], "%03d" % (LIST_RADIUS[idx_data] * 100))
        if not os.path.exists(name_dir_lattice):
            os.mkdir(name_dir_lattice)
        # generate point sets
        for temp_iid in range(CONST_IID):
            print(temp_iid)
            name_dir_pcd = "%s/pcd_pd" % name_dir_lattice
            if not os.path.exists(name_dir_pcd):
                os.mkdir(name_dir_pcd)
            pcd = lattice(CONST_SIDE)
            for temp_point in range(len(pcd)):
                # NOTE(review): `list += ndarray` EXTENDS the list (row becomes
                # [i, j, dx, dy]) instead of adding the jitter to the
                # coordinates — looks unintended; verify downstream consumers.
                pcd[temp_point] += measure(LIST_RADIUS[idx_data],
                                           LIST_MEASURE[idx_data])
            np.savetxt("%s/pcd_%s.txt" % (name_dir_pcd, temp_iid),
                       np.asarray(pcd), delimiter='\t')
if plot:
    CONST_IID = 10
    CONST_SIDE = 5
    # make directory to plot point sets as png file
    name_parameter = "pcd%s_side%s_iid%s" % (CONST_PCD, CONST_SIDE, CONST_IID)
    name_dir_png = "%s/plot_point_%s" % (NAME_DIR, name_parameter)
    tda.os_mkdir(name_dir_png)
    # plot point sets
    num_type = len(LIST_RADIUS)
    for idx_data in range(num_type):
        for temp_iid in range(CONST_IID):
            print(temp_iid)
            pcd = lattice(CONST_SIDE)
            for temp_point in range(len(pcd)):
                pcd[temp_point] += measure(LIST_RADIUS[idx_data],
                                           LIST_MEASURE[idx_data])
            save_points(pcd, CONST_SIDE, "%s/%s_%s_%s.png" % (
                name_dir_png, LIST_MEASURE[idx_data],
                "%03d" % (LIST_RADIUS[idx_data] * 100), temp_iid))
|
from __future__ import print_function
from tinydb import Query
from datahandling import my_query, access_db, extractnames
from itertools import combinations
from logging import debug, info
from sklearn.decomposition import PCA
from pca import pca_X
try:
from winsound import Beep
except ImportError:
def Beep(a, b):
pass
from time import time
from sklearn.cross_validation import cross_val_score, ShuffleSplit
from sklearn.feature_selection import f_regression
from sklearn.linear_model import LinearRegression
from gen_model_inputs import get_all_lin_model_inp
from numpy import mean
Q = Query()
def gen_Y(db, equipment, data_type):
    """Generates Y for one equipment and data_type, min-max scaled to [-1, 1].

    :param db: tinydb database of measured values
    :param equipment: equipment name to filter on
    :param data_type: data type to filter on
    :return: (scaled values, matching sample numbers)
    """
    data = db.search((Q.equipment_name == equipment) &
                     (Q.data_type == data_type))
    Y, sample_nos_Y = extractnames(data, 'value', 'sample_number')
    # Perf fix: the original recomputed min(Y) and max(Y) inside the
    # comprehension for every element (O(n^2) overall); hoist them once.
    y_min, y_max = min(Y), max(Y)
    y_range = y_max - y_min
    Y_scaled = [(2 * (y - y_min) / y_range - 1) for y in Y]
    return Y_scaled, sample_nos_Y
def gen_X(sample_numbers_Y, all_full_models, model_select_code):
    """Generates X to score one model for one equipment and data_type.

    For each sample number, selects from the (1-indexed) full linear model
    only the term positions listed in *model_select_code*.
    """
    X = []
    for sample_no in sample_numbers_Y:
        full_terms = all_full_models[sample_no - 1]
        selected = [term for idx, term in enumerate(full_terms)
                    if idx in model_select_code]
        X.append(selected)
    return X
def gen_terms_key():
    """Generates the key to the model codes.

    Positions 0-6 are the linear terms (plain ints); positions 7-27 are the
    21 pairwise interaction terms as two-element lists.
    """
    terms_key = list(range(7))
    terms_key.extend(list(pair) for pair in combinations(range(7), 2))
    return terms_key
def my_sound():
    """Play a short five-note jingle (no-op where winsound is unavailable)."""
    for frequency in (600, 500, 400, 450, 300):
        Beep(frequency, 300)
def gen_all_possible_models(number_of_terms):
    """ Generates all the possible 2nd order Scheffe models
    up to a given number of model terms if up_to is True
    else only the number of terms """
    terms_key = gen_terms_key()
    cnt = 0
    t = time()
    db = access_db('All_Poss_Mod_{}_Terms'.format(number_of_terms), False)
    # Resume support: nothing to do if a previous run marked itself complete.
    if db.contains(Q.is_complete == 'yes'):
        info('________________')
        info('Models with %d terms already done', number_of_terms)
        return
    # Count of models already stored, so an interrupted run can resume.
    n_models_done = len(db.all())
    cnt_mod = 0
    for i in combinations(list(range(28)), number_of_terms):
        # Scheffe hierarchy: an interaction term (index >= 7) is only valid
        # if both of its linear components are also in the model.
        invalid = False
        for j in i:
            if j >= 7:
                key_1 = terms_key[j][0]
                key_2 = terms_key[j][1]
                if key_1 not in i or key_2 not in i:
                    invalid = True
        if not invalid:
            cnt_mod += 1
        # Insert only valid models beyond those already in the DB.
        if not invalid and cnt_mod > n_models_done:
            db.insert({'mc': i})
            cnt += 1
    db.insert({'is_complete': 'yes'})
    info('________________')
    info('%d models with %d terms entered into DB', cnt, number_of_terms)
    req_time = time() - t
    minutes, seconds = divmod(req_time, 60)
    info('Required Time: %d min and %d s', round(minutes), round(seconds, 2))
def score_1_model(db, model, model_code, Y, sample_numbers_Y, all_full_models,
                  do_check=True):
    """ Scores one model to given Y and enters into scored models db """
    if do_check:
        # Skip models already scored so re-runs can resume cheaply.
        done = db.contains((Q.model_code == model_code))
        if done:
            return
    X = gen_X(sample_numbers_Y, all_full_models, model_code)
    # 3 shuffled train/test splits with a fixed seed for reproducibility.
    my_cv = ShuffleSplit(len(Y), n_iter=3, test_size=0.333, random_state=0)
    scores = cross_val_score(model, X, Y, cv=my_cv)
    entry = {'model_code': model_code,
             'n_terms': len(model_code),
             'kfold_score': mean(list(scores))
             }
    db.insert(entry)
def get_data_req_to_score_model():
    """ Calculates all the data required to run score_models_per_data_type
    that does not need to be recalculated in score_models_per_data_type """
    Q = Query()
    all_model_codes = []
    # Collect candidate model codes for every model size (1..28 terms).
    for i in range(28):
        number_of_terms = i + 1
        db = access_db(('All_Poss_Mod_{}_Terms'.format(number_of_terms)), False)
        all_model_codes += extractnames(db.search(Q.mc.exists()), 'mc')
    sv_db = access_db(0, True)
    # Intercept-free: Scheffe mixture models carry no intercept term.
    model = LinearRegression(fit_intercept=False)
    all_full_models = get_all_lin_model_inp()
    return sv_db, model, all_full_models, all_model_codes
def score_models_per_data_type(edt):
    """Fits and scores every candidate model for one (equipment, data_type) pair."""
    sv_db, model, all_full_models, all_model_codes = get_data_req_to_score_model()
    equipment, data_type = edt
    db = access_db('Score_results_'+ equipment + '_' + data_type, False)
    Y, sn_Y = gen_Y(sv_db, equipment, data_type)
    for model_code in all_model_codes:
        score_1_model(db, model, model_code, Y, sn_Y, all_full_models)
def score_model_per_comp(i):
    """Scores every candidate model against PCA component *i* (0-based).

    The component is treated like a (equipment='pca', data_type='component_N')
    pair so results land in the same kind of score DB as real equipment.
    """
    X, df = pca_X()
    my_pca = PCA(n_components=0.99)
    my_pca.fit(X)
    X_trans = my_pca.transform(X)
    sn_Y = list(df.index)
    # BUG FIX: in Python 3 `map` returns an iterator, which does not support
    # indexing — the original `Ys[i]` below raised TypeError. Materialize it.
    Ys = list(map(list, zip(*X_trans)))
    comp_no = i + 1
    Y = Ys[i]
    # Treat pca as equipment and component as data_type
    data_type = 'component_' + str(comp_no)
    equipment = 'pca'
    sv_db, model, all_full_models, all_model_codes = get_data_req_to_score_model()
    db = access_db('Score_results_'+ equipment + '_' + data_type, False)
    for model_code in all_model_codes:
        score_1_model(db, model, model_code, Y, sn_Y, all_full_models)
# ****************************************************************** #
# ************************* Byte of Python ************************* #
# ****************************************************************** #
########################
# passing tuples around
########################
# def get_error_details():
# return (2, "second error details")
# errnum, errstr = get_error_details()
# print(errnum)
# print(errstr)
# a, *b = [1, 2, 3, 4]
# print(a)
# print(b)
# a = 5; b = 8
# a, b = b, a
# print(a, b)
########################
# special methods
########################
# __init__(self, ...)
# __del__(self)
# __str__(self)
# __lt__(self, other)
# __getitem__(self, key)
# __len__(self)
########################
# single_statement_block
########################
# if True: print("Yes")
########################
# lambda
########################
# points = [ {"x" : 2, "y" : 3}, {"x" : 4, "y" : 1} ]
# points.sort(key = lambda i : i["y"])
# print(points)
########################
# list_comprehension
########################
# listone = [2, 3, 4]
# listtwo = [2 * i for i in listone if i > 2]
# print(listtwo)
########################
# tuple and dic in func
########################
# def powersum(power, *args):
# """return the sum of each argument raised to specified power."""
# total = 0
# for i in args:
# total += pow(i, power)
# return total
# print(powersum(2, 3, 4))
# print(powersum(2, 10))
########################
# assert
########################
# mylist = ["item"]
# assert len(mylist) >= 1
# mylist.pop()
# assert len(mylist) >= 1
########################
# decorators
########################
from time import sleep
from functools import wraps
import logging
logging.basicConfig()
log = logging.getLogger("retry")
def retry(f):
    """Decorator: retry *f* up to 5 times with linear backoff between attempts.

    Each failure is logged with its traceback; after the final failure a
    critical message is logged and None is returned (the exception is not
    re-raised, matching the original behavior).

    Fixes: the bare ``except:`` also swallowed KeyboardInterrupt/SystemExit;
    the log message misspelled "Attempt"; and the original slept for
    ``10 * MAX_ATTEMPTS`` seconds AFTER the last attempt before giving up.
    """
    @wraps(f)
    def wrapped_f(*args, **kwargs):
        MAX_ATTEMPTS = 5
        for attempt in range(1, MAX_ATTEMPTS + 1):
            try:
                return f(*args, **kwargs)
            except Exception:
                log.exception("Attempt %s/%s failed : %s",
                              attempt,
                              MAX_ATTEMPTS,
                              (args, kwargs))
                if attempt < MAX_ATTEMPTS:
                    # Linear backoff; pointless after the final attempt.
                    sleep(10 * attempt)
        log.critical("All %s attempts failed : %s",
                     MAX_ATTEMPTS,
                     (args, kwargs))
    return wrapped_f
# Module-level attempt counter used to make the demo's first call fail.
counter = 0
@retry
def save_to_database(arg):
    """Simulated flaky operation: raises ValueError on the first call only."""
    print("Write to a database or make a network call or etc.")
    print("This will be automatically retried if exception is thrown.")
    global counter
    counter += 1
    if counter < 2:
        # First call fails on purpose to demonstrate the retry decorator.
        raise ValueError(arg)
if __name__ == "__main__":
    save_to_database("Some bad value")
#!/usr/bin/env python
# -*- coding: utf8 -*-
'''
MIT License
Copyright (c) [2019] [Orlin Dimitrov]
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
'''
# for enum34, or the stdlib version
from enum import Enum
#/**
# * This class is dedicated to hold Operation Codes.
# */
class OpCode(Enum):
    """Operation codes for the motion-controller protocol."""

    Ping = 1  # When everything is OK.
    Stop = 2
    Disable = 3
    Enable = 4
    Clear = 5
    MoveRelative = 6
    # NOTE: member name kept misspelled for backward compatibility with callers.
    MoveAblolute = 7
    DO = 8
    DI = 9
    IsMoving = 10
    CurrentPosition = 11
    MoveSpeed = 12

    @staticmethod
    def to_text(code):
        """Return a human-readable label for *code*, or None if unknown.

        Replaces the original 12-branch if/elif ladder with a lookup table
        and fixes the misspelled labels ("Digitl Outputs", "Move Ablolute").
        """
        labels = {
            OpCode.Ping: "Ping",
            OpCode.Stop: "Stop",
            OpCode.Disable: "Disable",
            OpCode.Enable: "Enable",
            OpCode.Clear: "Clear",
            OpCode.MoveRelative: "Move Relative",
            OpCode.MoveAblolute: "Move Absolute",
            OpCode.DO: "Digital Outputs",
            OpCode.DI: "Digital Inputs",
            OpCode.IsMoving: "Is Moving",
            OpCode.CurrentPosition: "Current Position",
            OpCode.MoveSpeed: "Move Speed",
        }
        return labels.get(code)
|
import os
import sqlite3
from config import DB_DIR, DB_NAME
class SQLite3Instance:
    """Thin wrapper class around an SQLite3 database.

    Connection parameters come from the module-level config (DB_DIR, DB_NAME).
    NOTE: table/column/where fragments are interpolated directly into SQL
    strings throughout — never pass untrusted input (SQL injection risk);
    only row VALUES go through placeholders.
    """

    def __init__(self):
        self.db_name = DB_NAME
        self.db_dir = DB_DIR
        self.con = sqlite3.connect(os.path.join(self.db_dir, self.db_name))
        self.cur = self.con.cursor()

    def pure_select(self, sql_statement):
        """Run a raw SELECT statement.

        :param sql_statement: raw SQL query
        :return: result rows as a list of dicts keyed by column name
        """
        self.cur.execute(sql_statement)
        # Perf fix: compute the column-name list once, not per result row.
        columns = [desc[0] for desc in self.cur.description]
        return [dict(zip(columns, row)) for row in self.cur.fetchall()]

    def select(self, table: str, columns: list[str], where: str = '') -> list[dict]:
        """Select rows from a table.

        :param table: table name
        :param columns: columns to select (empty selects all columns)
        :param where: optional extra SQL appended after the table name
        :return: result of pure_select
        """
        columns_joined = ', '.join(columns) if columns else '*'
        sql = f'SELECT {columns_joined} FROM {table} ' + where
        return self.pure_select(sql)

    def insert(self, table: str, column_values: dict) -> None:
        """Insert a single row and commit.

        :param table: table name
        :param column_values: mapping of column name -> value
        :return: None
        """
        columns = ', '.join(column_values.keys())
        placeholders = ', '.join('?' * len(column_values))
        sql = f'INSERT INTO {table} ({columns}) VALUES ({placeholders})'
        # execute, not executemany: exactly one row is inserted per call.
        self.cur.execute(sql, tuple(column_values.values()))
        self.con.commit()

    def delete(self, table: str, where: str) -> None:
        """Delete rows matching *where* and commit.

        :param table: table name
        :param where: SQL condition clause (e.g. "WHERE id = 1")
        :return: None
        """
        sql = f'DELETE FROM {table} ' + where
        self.cur.execute(sql)
        self.con.commit()
|
# Compute and display the median of a small hard-coded list.
nums = [1, 2, 3, 4]
# Sort ascending so the middle element(s) are positional.
nums.sort()
length = len(nums)
mid = length // 2
if length % 2 == 0:
    # Even count: mean of the two middle values.
    median = (nums[mid - 1] + nums[mid]) / 2
else:
    # Odd count: the single middle value.
    median = nums[mid]
# Display the result.
print(median)
|
#!/usr/bin/env python
import Tkinter, tkFont
class Ball:
    """A circle that slides horizontally across the shared global canvas,
    wrapping around when it passes either edge."""

    def __init__(self, id, x, y, radius, color="cyan"):
        self.id = id
        self.x = x
        self.y = y
        self.radius = radius
        self.color = color

    def move_right(self):
        """Shift right by 5 px; wrap to the left edge on touching the right wall."""
        self.x += 5
        if self.x + self.radius >= canvas_width:
            self.x = self.radius

    def move_left(self):
        """Shift left by 5 px; wrap to the right edge on passing the left wall."""
        self.x -= 5
        if self.x - self.radius < 0:
            self.x = canvas_width

    def draw(self):
        """Render the ball as a filled oval on the global canvas."""
        bbox = (self.x - self.radius, self.y - self.radius,
                self.x + self.radius, self.y + self.radius)
        canvas.create_oval(*bbox, fill=self.color)
def run():
    """Build the Tk window and canvas, initialise game state, bind input
    handlers, and enter the event loop."""
    global canvas, canvas_width, canvas_height
    root = Tkinter.Tk()
    canvas_width = 300
    canvas_height = 200
    canvas = Tkinter.Canvas(root, width=canvas_width, height=canvas_height)
    canvas.pack()
    # Plain attribute bag hung on the canvas to share mutable game state.
    class Data:
        pass
    canvas.data = Data()
    canvas.data.canvas_width = canvas_width
    canvas.data.canvas_height = canvas_height
    init()
    # Kicks off the self-rescheduling animation loop.
    timer_fired()
    root.bind("<Button-1>", mouse_clicked)
    root.bind("<Key>", key_pressed)
    root.mainloop()
def init():
    """Reset game state: ball size, score, tick delay, and the two balls."""
    data = canvas.data
    data.radius = 10
    data.score = 0
    data.delay = 15
    data.balls = [
        Ball(id=0, x=0, y=100, radius=10),
        Ball(id=1, x=canvas_width, y=100, radius=10),
    ]
def timer_fired():
    """Animation tick: redraw everything, then reschedule itself after the
    current delay (ms)."""
    do_timer_fired()
    delay = canvas.data.delay
    canvas.after(delay, timer_fired)
def do_timer_fired():
    """Clear the canvas, advance/draw the balls, and redraw the readouts."""
    canvas.delete(Tkinter.ALL)
    move_balls()
    draw_reports()
def move_balls():
    """Advance every ball one step (even ids go left, odd ids go right) and draw it."""
    for ball in canvas.data.balls:
        step = ball.move_left if ball.id % 2 == 0 else ball.move_right
        step()
        ball.draw()
def draw_reports():
    """Draw the current score and tick delay at the top of the canvas."""
    score = canvas.data.score
    helv18 = tkFont.Font ( family="Helvetica", size=18, weight="bold" )
    canvas.create_text(150,20, font=helv18, justify="center", text="SCORE = " + str(score))
    delay_time = canvas.data.delay
    canvas.create_text(150,50, font=helv18, justify="center", text="Delay = " + str(delay_time))
def mouse_clicked(event):
    """Handle a left click: update the score, then redraw the scene.

    BUG FIX: the original called redraw_all(), which is not defined anywhere
    in this script and raised NameError on every click; do_timer_fired()
    performs the clear-and-redraw.
    """
    count_score(event)
    do_timer_fired()
def count_score(event):
    """Increment the score for a hit; decrement (never below zero) for a miss."""
    data = canvas.data
    if is_inside(event):
        data.score += 1
    elif data.score > 0:
        data.score -= 1
def is_inside(event):
    """Return True if the click landed inside ANY ball's bounding box.

    BUG FIX: the original had ``return False`` in the loop's else branch, so
    it gave up after inspecting only the FIRST ball — clicks on every other
    ball never scored. The miss return now happens only after all balls are
    checked.
    """
    x, y = (event.x, event.y)
    for ball in canvas.data.balls:
        if (ball.x - ball.radius <= x <= ball.x + ball.radius
                and ball.y - ball.radius <= y <= ball.y + ball.radius):
            return True
    return False
def key_pressed(event):
    """Up arrow speeds the animation up (smaller delay); Down slows it; other
    keys are ignored."""
    step = {"Up": -5, "Down": 5}.get(event.keysym)
    if step is not None:
        canvas.data.delay += step
run()
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'userUI.ui'
#
# Created by: PyQt5 UI code generator 5.11.3
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_userUI(object):
    """Qt Designer-generated user screen: greeting, cash, and up to five
    movie-poster buttons. Regenerating the .ui file will overwrite edits."""

    def setupUi(self, userUI, user, movieList):
        """Build all widgets on *userUI* for *user* and up to 5 movies."""
        userUI.setObjectName("userUI")
        # Fixed-size 971x462 window (min == max == size).
        userUI.resize(971, 462)
        userUI.setMaximumSize(971, 462)
        userUI.setMinimumSize(971, 462)
        self.greeting = QtWidgets.QLabel(userUI)
        self.greeting.setGeometry(QtCore.QRect(20, 15, 371, 41))
        font = QtGui.QFont()
        font.setFamily("Bahnschrift SemiBold")
        font.setPointSize(24)
        font.setBold(True)
        font.setWeight(75)
        self.greeting.setFont(font)
        self.greeting.setObjectName("greeting")
        self.cash = QtWidgets.QLabel(userUI)
        self.cash.setGeometry(QtCore.QRect(20, 60, 351, 31))
        font = QtGui.QFont()
        font.setFamily("Bahnschrift SemiBold")
        font.setPointSize(20)
        font.setBold(True)
        font.setWeight(75)
        self.cash.setFont(font)
        self.cash.setObjectName("cash")
        self.rank = QtWidgets.QLabel(userUI)
        self.rank.setGeometry(QtCore.QRect(20, 100, 131, 31))
        self.rank.setText("")
        self.rank.setObjectName("rank")
        # Full-window background image, raised first so it sits behind widgets.
        self.label = QtWidgets.QLabel(userUI)
        self.label.setGeometry(QtCore.QRect(0, 0, 971, 651))
        self.label.setText("")
        self.label.setPixmap(QtGui.QPixmap("userBG.jpg"))
        self.label.setObjectName("label")
        self.ticketButton = QtWidgets.QPushButton(userUI)
        self.ticketButton.setGeometry(QtCore.QRect(830, 10, 131, 41))
        self.ticketButton.setObjectName("ticketButton")
        self.quitButton = QtWidgets.QPushButton(userUI)
        self.quitButton.setGeometry(QtCore.QRect(830, 60, 131, 41))
        self.quitButton.setObjectName("quitButton")
        self.label.raise_()
        self.greeting.raise_()
        self.cash.raise_()
        self.rank.raise_()
        self.ticketButton.raise_()
        self.quitButton.raise_()
        # One poster button per movie, laid out left to right, max five.
        if len(movieList) >= 1:
            self.movie1Button = QtWidgets.QPushButton(userUI)
            self.movie1Button.setGeometry(QtCore.QRect(20, 210, 171, 231))
            self.movie1Button.setText("")
            icon = QtGui.QIcon()
            icon.addPixmap(QtGui.QPixmap(movieList[0].photo), QtGui.QIcon.Normal, QtGui.QIcon.Off)
            self.movie1Button.setIcon(icon)
            self.movie1Button.setIconSize(QtCore.QSize(167, 229))
            self.movie1Button.setObjectName("movie1Button")
        if len(movieList) >= 2:
            self.movie2Button = QtWidgets.QPushButton(userUI)
            self.movie2Button.setGeometry(QtCore.QRect(210, 210, 171, 231))
            self.movie2Button.setText("")
            icon1 = QtGui.QIcon()
            icon1.addPixmap(QtGui.QPixmap(movieList[1].photo), QtGui.QIcon.Normal, QtGui.QIcon.Off)
            self.movie2Button.setIcon(icon1)
            self.movie2Button.setIconSize(QtCore.QSize(167, 229))
            self.movie2Button.setObjectName("movie2Button")
        if len(movieList) >= 3:
            self.movie3Button = QtWidgets.QPushButton(userUI)
            self.movie3Button.setGeometry(QtCore.QRect(400, 210, 171, 231))
            self.movie3Button.setText("")
            icon2 = QtGui.QIcon()
            icon2.addPixmap(QtGui.QPixmap(movieList[2].photo), QtGui.QIcon.Normal, QtGui.QIcon.Off)
            self.movie3Button.setIcon(icon2)
            self.movie3Button.setIconSize(QtCore.QSize(167, 229))
            self.movie3Button.setObjectName("movie3Button")
        if len(movieList) >= 4:
            self.movie4Button = QtWidgets.QPushButton(userUI)
            self.movie4Button.setGeometry(QtCore.QRect(590, 210, 171, 231))
            self.movie4Button.setText("")
            icon3 = QtGui.QIcon()
            icon3.addPixmap(QtGui.QPixmap(movieList[3].photo), QtGui.QIcon.Normal, QtGui.QIcon.Off)
            self.movie4Button.setIcon(icon3)
            self.movie4Button.setIconSize(QtCore.QSize(167, 229))
            self.movie4Button.setObjectName("movie4Button")
        if len(movieList) >= 5:
            self.movie5Button = QtWidgets.QPushButton(userUI)
            self.movie5Button.setGeometry(QtCore.QRect(780, 210, 171, 231))
            self.movie5Button.setText("")
            icon4 = QtGui.QIcon()
            icon4.addPixmap(QtGui.QPixmap(movieList[4].photo), QtGui.QIcon.Normal, QtGui.QIcon.Off)
            self.movie5Button.setIcon(icon4)
            self.movie5Button.setIconSize(QtCore.QSize(167, 229))
            self.movie5Button.setObjectName("movie5Button")
        # NOTE(review): these raise_() calls run unconditionally, so with
        # fewer than 5 movies the missing movieNButton attributes raise
        # AttributeError — guard them or create all five buttons.
        self.movie1Button.raise_()
        self.movie2Button.raise_()
        self.movie3Button.raise_()
        self.movie4Button.raise_()
        self.movie5Button.raise_()
        self.retranslateUi(userUI,user)
        QtCore.QMetaObject.connectSlotsByName(userUI)

    def retranslateUi(self, userUI, user):
        """Set all user-visible strings (window title, greeting, cash, buttons)."""
        _translate = QtCore.QCoreApplication.translate
        userUI.setWindowTitle(_translate("userUI", "Minor Cineprex"))
        # NOTE(review): no space between first and last name in the greeting.
        self.greeting.setText(_translate("userUI", "Hello, "+user.firstname + user.lastname))
        self.cash.setText(_translate("userUI", "Your Cash: "+ str(user.cash) + "฿"))
        self.ticketButton.setText(_translate("userUI", "TICKET"))
        self.quitButton.setText(_translate("userUI", "QUIT"))
if __name__ == "__main__":
    # Standalone preview of the generated UI (no user/movie data wired in).
    import sys
    app = QtWidgets.QApplication(sys.argv)
    userUI = QtWidgets.QWidget()
    ui = Ui_userUI()
    ui.setupUi(userUI)
    userUI.show()
    sys.exit(app.exec_())
|
'''
Name: Chung-Hsin(Jack) Hou
'''
import requests, json, copy
from collections import defaultdict
# modified binary search to search for one product.
def findOne(seq, goal):
    """Return a one-element list with the price in sorted *seq* closest to *goal*.

    :param seq: ascending list of prices
    :param goal: target price
    :return: [closest price], or None for an empty sequence

    Rewritten over ``bisect``: the original hand-rolled binary search shadowed
    the built-ins ``min``/``max`` and broke ties inconsistently depending on
    the search path. Ties are now always resolved toward the LOWER price.
    """
    if not seq:
        return None
    pos = bisect.bisect_left(seq, goal)
    if pos == len(seq):
        return [seq[-1]]   # goal above every price: take the largest
    if pos == 0:
        return [seq[0]]    # goal at/below the smallest price
    before, after = seq[pos - 1], seq[pos]
    # Strictly closer neighbour wins; on a tie prefer the lower price.
    return [after] if abs(goal - after) < abs(goal - before) else [before]
# using two pointers, compare the first and last element in an array to minimize the price differnece
def findTwo(seq, goal):
    """Return the pair of prices from sorted *seq* whose sum is closest to *goal*.

    Classic two-pointer sweep from both ends; returns None for an empty
    sequence, and returns immediately on an exact match.
    """
    if not seq:
        return None
    lo, hi = 0, len(seq) - 1
    # Track the best (smallest) gap seen so far and its index pair.
    best_gap = float("inf")
    best_lo = best_hi = -1
    while lo < hi:
        pair_sum = seq[lo] + seq[hi]
        gap = abs(goal - pair_sum)
        if gap < best_gap:
            best_gap = gap
            best_lo, best_hi = lo, hi
        if pair_sum == goal:
            return [seq[lo], seq[hi]]
        if pair_sum < goal:
            lo += 1
        else:
            hi -= 1
    # No exact match: fall back to the closest pair recorded.
    return [seq[best_lo], seq[best_hi]]
# using three trackers similar to findTwo(), find the cloest three elements
def findThree(seq, goal):
    """Return the triple of prices from sorted *seq* whose sum is closest to *goal*.

    Fixes an anchor element and runs the two-pointer sweep on the suffix;
    returns None for an empty sequence and short-circuits on an exact match.
    """
    if not seq:
        return None
    best_gap = float("inf")
    bi = bj = bk = -1
    for anchor in range(len(seq) - 1):
        lo, hi = anchor + 1, len(seq) - 1
        while lo < hi:
            total = seq[lo] + seq[hi] + seq[anchor]
            gap = abs(goal - total)
            if gap < best_gap:
                best_gap = gap
                bi, bj, bk = lo, hi, anchor
            if total > goal:
                hi -= 1
            elif total < goal:
                lo += 1
            else:
                return [seq[lo], seq[hi], seq[anchor]]
    return [seq[bi], seq[bj], seq[bk]]
# findFourPlus will utilizes the subset_sum function to find the most suitable subset_sum given an itemCount
def findFourPlus(itemCount, seq, goal):
    """Find *itemCount* prices summing to *goal*, widening the target if needed.

    First tries an exact subset-sum; failing that, scans targets from
    goal-200 (clamped at goal when that would be negative) up to goal+200
    and returns the first subset found, or None if the whole window fails.
    """
    exact = subset_sum(itemCount, seq, goal, partial=[])
    if exact is not None:
        return exact
    # Brute-force widening: tolerable because the API caps prices at ~10 items.
    probe = goal if goal - 200 < 0 else goal - 200
    while probe <= goal + 200:
        found = subset_sum(itemCount, seq, probe, partial=[])
        if found is not None:
            return found
        probe += 1
# subset_sum implementation using recursion.
def subset_sum(itemCount, seq, goal, partial):
    """Recursively find a subset of *seq* of exactly *itemCount* elements
    summing to *goal*; returns the subset (list) or None.

    Perf fix: the original fell through when ``len(partial) == itemCount`` but
    the sum missed, recursing past the item count for nothing — such longer
    partials could never be returned. Pruning there cuts the wasted search;
    the set of returnable subsets is unchanged.
    """
    s = sum(partial)
    if len(partial) == itemCount:
        # Exactly the requested size: either a hit or a dead branch.
        return partial if s == goal else None
    for i in range(len(seq)):
        # Only elements after position i remain, so subsets are unordered.
        t = subset_sum(itemCount, seq[i + 1:], goal, partial + [seq[i]])
        if t:
            return t
    return None
# prints out the final results
def returnResult(a, limit, dictionary):
    """Print the chosen prices with their product names and the running total.

    Consumes one product name per printed price (names are deleted from
    *dictionary* so duplicate prices report distinct products).
    """
    print("with around $", limit, ",you can get...")
    total = 0
    for price in a:
        total += price
        print(" ", dictionary[price][0], " for $", price)
        del dictionary[price][0]
    print("for a total of $", total, "!!")
# main body!
def main():
    """Interactive shopping helper: query the Zappos API for a product type,
    then pick 1..N items whose combined price is closest to the budget."""
    # NOTE(review): API key embedded in source — should live in config/env.
    key = "52ddafbe3ee659bad97fcce7c53592916a6bfd73"
    #Added another field to utilize the seach API. Before anything users will be asked what kind of product they are looking for
    search_term = input("What type of product are you looking for?? (ie: boots, swimsuits, tanktops...) ")
    search_term = search_term.replace(" ", "_")
    #asks the user for the quantity. Catches potential errors such as quantity < 1 or not a number at all.
    while True:
        try:
            quantity = int(input("How many do you want?? "))
        except ValueError:
            print("Please enter a whole number!")
            continue
        if quantity < 1:
            print("Please enter a positive quantity!")
            continue
        else:
            break
    # asks for how much the user wants to spend (whole dollars only)
    while True:
        try:
            limit = int(input("Around how much are you willing to spend?? (no decimals please!) "))
        except ValueError:
            print("Please enter a whole number!")
            continue
        else:
            break
    # set up the data from the server!
    r = requests.get("http://api.zappos.com/Search?term="+search_term+"&key="+key)
    data = r.json()
    #get the data from the API
    prices = [item["originalPrice"] for item in data["results"]]
    brandName = [item["brandName"] for item in data["results"]]
    productName = [item["productName"] for item in data["results"]]
    # strip the $ sign from all prices and sort the prices in order
    for x in range(len(prices)):
        prices[x] = float(prices[x][1:])
    prices = sorted(prices)
    # save the prices into a dictionary, with the key being the prices and the values being what kind of products are associated with that price
    # NOTE(review): brandName/productName are indexed in the ORIGINAL order
    # while prices was re-sorted above, so price->name pairing looks wrong
    # after the sort — verify against the API ordering.
    dictionary = defaultdict(list)
    for x in range(len(prices)):
        dictionary[prices[x]].append(brandName[x]+" "+productName[x])
    # determine how many items user wants and use the appropriate functions
    if(quantity == 1):
        a = findOne(prices, limit)
        if a is None:
            print("Sorry, no results found :(")
        else:
            print("with around $" , limit ,", you can get a " , dictionary[a[0]][0], " for $" , a[0] ,"!")
    elif(quantity == 2):
        a = findTwo(prices, limit)
        if a is None:
            print("Sorry, no results found :(")
        else:
            returnResult(a, limit, dictionary)
    elif(quantity == 3):
        a = findThree(prices, limit)
        if a is None:
            print("Sorry, no results found :(")
        else:
            returnResult(a, limit, dictionary)
    else:
        a = findFourPlus(quantity, prices, limit)
        if a is None:
            print("Sorry, no combinations were found within $200 of your desired budget :(")
        else:
            returnResult(a, limit, dictionary)
if __name__ == "__main__":
    main()
    # Keep the console window open until the user presses Enter.
    input('Press Any Key to exit')
|
# Copyright 2021 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import annotations
from typing import DefaultDict, Sequence
from unittest import mock
import pytest
from pants.engine.fs import EMPTY_DIGEST
from pants.jvm.resolve.common import Coordinate, Coordinates
from pants.jvm.resolve.coursier_fetch import CoursierLockfileEntry, CoursierResolvedLockfile
from pants.jvm.resolve.key import CoursierResolveKey
coord1 = Coordinate("test", "art1", "1.0.0")
coord2 = Coordinate("test", "art2", "1.0.0")
coord3 = Coordinate("test", "art3", "1.0.0")
coord4 = Coordinate("test", "art4", "1.0.0")
coord5 = Coordinate("test", "art5", "1.0.0")
# No dependencies (coord1)
# 1 direct dependency, more transitive dependencies (coord2)
# 1 where direct dependencies provide no transitive dependencies (coord 4)
# 1 where direct dependencies provide repeated dependencies (coord5)
# Direct-dependency adjacency map used to build the test lockfile; the
# trailing comments list each coordinate's expected TRANSITIVE closure.
direct: dict[Coordinate, set[Coordinate]] = {
    coord1: set(),
    coord2: {
        coord3,
    },  # 1, 2, 3, 4, 5
    coord3: {coord1, coord4, coord5},  # 1, 3, 4, 5
    coord4: {
        coord1,
    },  # 1, 4
    coord5: {coord1, coord4},  # 1, 4, 5
}
@pytest.fixture
def lockfile() -> CoursierResolvedLockfile:
    """Build a resolved lockfile whose entries carry both the direct deps from
    ``direct`` and their full transitive closure."""
    # Fixed-point iteration: keep adding (parent, grandchild) pairs until the
    # relation stops growing.
    pairs = {(parent, child) for parent, children in direct.items() for child in children}
    while True:
        expanded = pairs | {(parent, grandchild)
                            for parent, child in pairs
                            for grandchild in direct[child]}
        if expanded == pairs:
            break
        pairs = expanded
    transitive = DefaultDict(set)
    for parent, child in pairs:
        transitive[parent].add(child)
    return CoursierResolvedLockfile(
        entries=tuple(
            CoursierLockfileEntry(
                coord=coord,
                file_name=f"{coord.artifact}.jar",
                direct_dependencies=Coordinates(direct[coord]),
                dependencies=Coordinates(transitive[coord]),
                file_digest=mock.Mock(),
            )
            for coord in direct
        )
    )
def test_no_deps(lockfile: CoursierResolvedLockfile) -> None:
    """A coordinate with no dependencies resolves to just itself."""
    filtered = filter(coord1, lockfile, False)
    assert filtered == [coord1]
def test_filter_non_transitive_includes_direct_deps(lockfile: CoursierResolvedLockfile) -> None:
    """Non-transitive resolution yields the root plus only its direct deps."""
    filtered = filter(coord2, lockfile, False)
    assert filtered == [coord2, coord3]
def test_filter_transitive_includes_transitive_deps(lockfile: CoursierResolvedLockfile) -> None:
    """Transitive resolution yields the full closure, without duplicates."""
    filtered = filter(coord2, lockfile, True)
    assert set(filtered) == {coord1, coord2, coord3, coord4, coord5}
    # Entries should only appear once.
    assert len(filtered) == 5
def filter(coordinate, lockfile, transitive) -> Sequence[Coordinate]:
    """Resolve `coordinate` against `lockfile`; return the root entry's coord
    followed by the coords of its (direct or transitive) dependencies.

    NOTE: intentionally shadows the builtin `filter`; the tests above rely on
    this name.
    """
    key = CoursierResolveKey("example", "example.json", EMPTY_DIGEST)
    if transitive:
        root, deps = lockfile.dependencies(key, coordinate)
    else:
        root, deps = lockfile.direct_dependencies(key, coordinate)
    return [entry.coord for entry in (root, *deps)]
|
import graphene
from django.db.models import Sum, Q
from ...order import OrderStatus
from ...product import models
from ...product.utils import products_with_details
from ..utils import filter_by_query_param, filter_by_period, get_database_id
from .types import Category, ProductVariant, StockAvailability
PRODUCT_SEARCH_FIELDS = ('name', 'description', 'category__name')
CATEGORY_SEARCH_FIELDS = ('name', 'slug', 'description', 'parent__name')
COLLECTION_SEARCH_FIELDS = ('name', 'slug')
ATTRIBUTES_SEARCH_FIELDS = ('name', 'slug')
def resolve_attributes(info, category_id, query):
    """Return attributes matching `query`, optionally limited to those used by
    product types within the given category subtree."""
    qs = models.Attribute.objects.prefetch_related('values')
    qs = filter_by_query_param(qs, query, ATTRIBUTES_SEARCH_FIELDS)
    if not category_id:
        return qs.distinct()
    category = graphene.Node.get_node_from_global_id(
        info, category_id, Category)
    if category is None:
        # Unknown/invalid category id: nothing matches.
        return qs.none()
    # Collect the product types used within the category and its descendants.
    tree = category.get_descendants(include_self=True)
    product_types = {
        row[0]
        for row in models.Product.objects.filter(
            category__in=tree).values_list('product_type_id')}
    qs = qs.filter(
        Q(product_type__in=product_types)
        | Q(product_variant_type__in=product_types))
    return qs.distinct()
def resolve_categories(info, query, level=None):
    """Return categories matching `query`, optionally restricted to one tree level."""
    qs = models.Category.objects.all()
    if level is not None:
        qs = qs.filter(level=level)
    return filter_by_query_param(qs, query, CATEGORY_SEARCH_FIELDS).distinct()
def resolve_collections(info, query):
    """Return collections visible to the requesting user, filtered by `query`."""
    user = info.context.user
    # Staff with manage_products may also see unpublished collections.
    if user.has_perm('product.manage_products'):
        base = models.Collection.objects.all()
    else:
        base = models.Collection.objects.public()
    return filter_by_query_param(base, query, COLLECTION_SEARCH_FIELDS)
def resolve_products(info, category_id, stock_availability, query):
    """Return products visible to the user, filtered by search text, category
    and stock availability."""
    user = info.context.user
    qs = filter_by_query_param(
        products_with_details(user=user), query, PRODUCT_SEARCH_FIELDS)
    if category_id is not None:
        category = graphene.Node.get_node_from_global_id(
            info, category_id, Category)
        if not category:
            return qs.none()
        qs = qs.filter(category=category)
    if stock_availability:
        # Total stock is summed over all of the product's variants.
        qs = qs.annotate(total_quantity=Sum('variants__quantity'))
        if stock_availability == StockAvailability.IN_STOCK:
            qs = qs.filter(total_quantity__gt=0)
        elif stock_availability == StockAvailability.OUT_OF_STOCK:
            qs = qs.filter(total_quantity__lte=0)
    return qs.distinct()
def resolve_product_types():
    """Return every product type (no filtering)."""
    return models.ProductType.objects.all().distinct()
def resolve_product_variants(info, ids=None):
    """Return product variants, restricted to the given global IDs when provided."""
    qs = models.ProductVariant.objects.distinct()
    if not ids:
        return qs
    db_ids = [
        get_database_id(info, node_id, only_type=ProductVariant)
        for node_id in ids]
    return qs.filter(pk__in=db_ids)
def resolve_report_product_sales(info, period):
    """Return variants ordered by quantity sold within `period`, best sellers first."""
    qs = models.ProductVariant.objects.prefetch_related(
        'product', 'product__images', 'order_lines__order').all()
    # Draft and cancelled orders do not count as sales.
    qs = qs.exclude(
        order_lines__order__status__in=[OrderStatus.DRAFT, OrderStatus.CANCELED])
    qs = filter_by_period(qs, period, 'order_lines__order__created')
    qs = qs.annotate(quantity_ordered=Sum('order_lines__quantity'))
    # Drop variants with no matching sales at all, then rank by volume.
    return qs.filter(quantity_ordered__isnull=False).order_by('-quantity_ordered')
|
# KVM-based Discoverable Cloudlet (KD-Cloudlet)
# Copyright (c) 2015 Carnegie Mellon University.
# All Rights Reserved.
#
# THIS SOFTWARE IS PROVIDED "AS IS," WITH NO WARRANTIES WHATSOEVER. CARNEGIE MELLON UNIVERSITY EXPRESSLY DISCLAIMS TO THE FULLEST EXTENT PERMITTEDBY LAW ALL EXPRESS, IMPLIED, AND STATUTORY WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, AND NON-INFRINGEMENT OF PROPRIETARY RIGHTS.
#
# Released under a modified BSD license, please see license.txt for full terms.
# DM-0002138
#
# KD-Cloudlet includes and/or makes use of the following Third-Party Software subject to their own licenses:
# MiniMongo
# Copyright (c) 2010-2014, Steve Lacy
# All rights reserved. Released under BSD license.
# https://github.com/MiniMongo/minimongo/blob/master/LICENSE
#
# Bootstrap
# Copyright (c) 2011-2015 Twitter, Inc.
# Released under the MIT License
# https://github.com/twbs/bootstrap/blob/master/LICENSE
#
# jQuery JavaScript Library v1.11.0
# http://jquery.com/
# Includes Sizzle.js
# http://sizzlejs.com/
# Copyright 2005, 2014 jQuery Foundation, Inc. and other contributors
# Released under the MIT license
# http://jquery.org/license
import subprocess
import time
###################################################################################################################
# Helper to call nmcli commands.
###################################################################################################################
def nmcli(cmd, response_should_be_empty=False):
    # Run an `nmcli` subcommand and return its combined stdout/stderr text.
    # If response_should_be_empty is True, any non-blank output is treated as
    # an error report from nmcli and raised as an Exception.
    full_command = 'nmcli ' + cmd
    print 'Executing nmcli command: ' + full_command
    # NOTE(review): shell=True with string concatenation assumes `cmd` is
    # trusted (internal callers only). stderr is folded into stdout so error
    # text is captured too.
    response = subprocess.Popen(full_command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT).stdout.read()
    print 'Response: ' + response
    if response_should_be_empty:
        if response is not None and response.strip() != '':
            raise Exception(response)
    return response
###################################################################################################################
# Wifi manager class.
###################################################################################################################
class WifiManager(object):
    # Static helper class for managing wifi via nmcli; only networks whose
    # SSID / profile name starts with CLOUDLET_NETWORK_PREFIX are considered.
    CLOUDLET_NETWORK_PREFIX = 'cloudlet'

    ################################################################################################################
    # Check whether the wifi radio is currently enabled.
    ################################################################################################################
    @staticmethod
    def is_wifi_enabled():
        response = nmcli('nm wifi')
        lines = response.splitlines()
        # Row 0 of `nmcli nm wifi` output is a header; row 1 holds the
        # enabled/disabled status.
        STATUS_LINE_ROW = 1
        if len(lines) > STATUS_LINE_ROW:
            status_line = lines[STATUS_LINE_ROW].strip()
            is_enabled = (status_line == 'enabled')
            return is_enabled
        else:
            raise Exception("No wifi adapter found!")

    ################################################################################################################
    # Turn on wifi, and wait for a couple of seconds it turns on.
    # NOTE: only works if called from root user or user at console, not from SSH with a regular user.
    ################################################################################################################
    @staticmethod
    def turn_wifi_on():
        nmcli('nm wifi on', response_should_be_empty=True)
        # Give the radio time to come up before callers scan or connect.
        time.sleep(5)

    ################################################################################################################
    # Turn off wifi.
    # NOTE: only works if called from root user or user at console, not from SSH with a regular user.
    ################################################################################################################
    @staticmethod
    def turn_wifi_off():
        nmcli('nm wifi off', response_should_be_empty=True)

    ################################################################################################################
    # Return a list of SSIDs currently in range.
    ################################################################################################################
    @staticmethod
    def list_available_networks():
        if not WifiManager.is_wifi_enabled():
            WifiManager.turn_wifi_on()

        # Will return a list of newline separated SSIDs.
        response = nmcli('-t -f SSID device wifi list')
        lines = response.splitlines()
        ssids = []
        for line in lines:
            # SSIDs are quoted in terse output; only keep cloudlet networks.
            ssid = line.strip("'")
            if ssid.startswith(WifiManager.CLOUDLET_NETWORK_PREFIX):
                ssids.append(ssid)
        return ssids

    ################################################################################################################
    # Return a list of stored network profiles.
    ################################################################################################################
    @staticmethod
    def list_known_networks():
        # Spaces may make it hard to get the profile name, unless we filter only the name.
        # However, by getting only the name we will also get non wifi profiles, so we will do a cross-match between
        # these two lists.
        all_networks = nmcli('-t -f NAME con list')
        wifi_networks = nmcli('con list | grep 802-11-wireless')

        all_network_lines = all_networks.splitlines()
        wifi_networks_lines = wifi_networks.splitlines()
        wifi_network_list = []
        for line in all_network_lines:
            network_name = line.strip("'")
            # Check that this network name is in the list of wifi networks.
            for wifi_line in wifi_networks_lines:
                if wifi_line.startswith(network_name):
                    if network_name.startswith(WifiManager.CLOUDLET_NETWORK_PREFIX):
                        wifi_network_list.append(network_name)
                    break
        return wifi_network_list

    ################################################################################################################
    # Returns a list of known networks that are currently available.
    ################################################################################################################
    @staticmethod
    def list_available_known_networks():
        available_known_networks = []
        known_networks = WifiManager.list_known_networks()
        available_networks = WifiManager.list_available_networks()
        for network in known_networks:
            if network in available_networks:
                available_known_networks.append(network)
        return available_known_networks

    ################################################################################################################
    # Return the current network SSID we are connected to using our configured interface, if any, or None.
    ################################################################################################################
    @staticmethod
    def get_current_network(interface):
        ssid = None
        response = nmcli('-t -f NAME,DEVICE connection show --active | grep {}'.format(interface))
        for line in response.splitlines():
            if len(line) > 0:
                # Terse output is NAME:DEVICE; take the connection name.
                ssid = line.split(':')[0]
                if ssid == '** (process':
                    # This means there was an error getting the current network, most likely because wifi is off.
                    ssid = 'disconnected'
                break
        return ssid

    ################################################################################################################
    # Check whether the interface is currently connected to a cloudlet-prefixed network.
    ################################################################################################################
    @staticmethod
    def is_connected_to_cloudlet_network(interface):
        is_connected_to_cloudlet_net = False
        current_network = WifiManager.get_current_network(interface)
        if current_network is not None:
            is_connected_to_cloudlet_net = current_network.startswith(WifiManager.CLOUDLET_NETWORK_PREFIX)
        return is_connected_to_cloudlet_net

    ################################################################################################################
    # Connect to a stored network.
    # NOTE: only works if called from root user or user at console, not from SSH with a regular user.
    ################################################################################################################
    @staticmethod
    def connect_to_network(connection_id):
        if not WifiManager.is_wifi_enabled():
            WifiManager.turn_wifi_on()
        nmcli('connection up id "{}"'.format(connection_id), response_should_be_empty=True)

    ################################################################################################################
    # Disconnect from current wifi network, only way to force it not to reconnect is to disable wifi.
    ################################################################################################################
    @staticmethod
    def disconnect_from_network(interface):
        if WifiManager.is_wifi_enabled():
            current_network = WifiManager.get_current_network(interface)
            if current_network is not None:
                nmcli('connection down id "{}"'.format(current_network), response_should_be_empty=True)
|
import gym
import sys, os
import time
import copy
from gym import error, spaces, utils
from gym.utils import seeding
import numpy as np
from PIL import Image as Image
import matplotlib.pyplot as plt
from scipy import misc
def round_up(a):
    """Round half away from zero ("commercial" rounding).

    Accepts either a numpy array (returns an np.int64 array) or a scalar
    (returns a plain int). Examples: 2.5 -> 3, -2.5 -> -3, 2.4 -> 2.
    """
    if isinstance(a, np.ndarray):
        # floor(|a| + 0.5) rounds halves up in magnitude; the sign restores
        # direction (sign(0) == 0, so zeros stay zero).
        return np.int64(np.sign(a) * np.floor(np.abs(a) + 0.5))
    magnitude = int(np.floor(abs(a) + 0.5))
    return -magnitude if a < 0 else magnitude
class CarparkingEnv(gym.Env):
    """Gym environment: steer a rectangular car into a target parking spot.

    The agent state is a 9-element list [x1..x4, y1..y4, angle]: the four
    corner coordinates of the car plus its heading angle in radians. The
    observation is the car image warped onto a parking-lot background.
    """
    metadata = {'render.modes': ['human']}
    # Number of instantiated environments; used to number matplotlib figures.
    num_env = 0

    def __init__(self):
        ''' action space defintion '''
        self.actions = [0, 1, 2, 3, 4] # stay, move ahead, move back, turn left, right
        # Inverse action for each action index (stay<->stay, ahead<->back, left<->right).
        self.inv_actions = [0, 2, 1, 4, 3]
        self.action_space = spaces.Discrete(5)

        ''' observation space definition '''
        self.obs_shape = [256,256,4]
        self.observation_space = spaces.Box(low=0, high=255, shape=self.obs_shape)

        ''' initialize system state '''
        # Background and car images live next to this module.
        this_file_path = os.path.dirname(os.path.realpath(__file__))
        self.bg_img_path = os.path.join(this_file_path, 'parking3.png')
        self.car_img_path = os.path.join(this_file_path, 'Car.jpg')
        self.bg_img = np.array(Image.open(self.bg_img_path))
        self.car_img = np.array(Image.open(self.car_img_path))
        self.background = img_reshape(self.bg_img, (self.obs_shape[0], self.obs_shape[1]))
        # Car footprint in pixels: [length, width].
        self.car_size = [80, 50]

        ''' initialize observation space '''
        self.observation = copy.deepcopy(self.background)

        ''' agent state: start, target, current state '''
        self.agent_start_state = [30, self.car_size[0]+29, 30, self.car_size[0]+29, \
                                  30, 30, self.car_size[1]+29, self.car_size[1] + 29, 0.0]
        self.agent_target_state = [162, 241, 162, 241, 152, 152, 201, 201, 0.0]
        self.agent_state = copy.deepcopy(self.agent_start_state)
        self.observation, _ = self.update_observation(self.agent_state)

        ''' set other parameters '''
        self.restart_once_done = True  # restart once done or not
        self.verbose = False  # to show the environment or not

        if self.verbose == True:
            CarparkingEnv.num_env += 1
            self.fig = plt.figure(CarparkingEnv.num_env)
            plt.show(block=False)
            plt.axis('off')
            self._render()

    def _agent_state_to_vex(self, agent_state):
        # Convert the flat 9-element state into a 4x2 array of corner vertices.
        (x1, x2, x3, x4, y1, y2, y3, y4, angle) = agent_state
        return np.array([[x1, y1], [x2, y2], [x3, y3], [x4, y4]])

    def update_observation(self, agent_state=None):
        """Render the car at `agent_state` over the background.

        Returns (observation, is_collision). On collision the previous
        observation is returned unchanged.
        """
        if agent_state is None:
            agent_state = copy.deepcopy(self.agent_state)
        agent_vex = self._agent_state_to_vex(agent_state)
        old_observation = copy.deepcopy(self.observation)
        observation = copy.deepcopy(self.background)
        observation = np.asarray(observation, np.uint8)
        # Warp the car image onto the background; V are the pixel coords covered.
        observation, V = merge_img(self.car_img, observation, agent_vex.T)
        overlap_region = self.background[V[1,:], V[0,:], 0:3]
        overlap_region = list(overlap_region)
        # NOTE(review): pure-white and pure-green background pixels appear to
        # mark obstacles/forbidden areas - confirm against parking3.png.
        is_collision = False
        if any((np.array([255,255,255]) == x).all() for x in overlap_region):
            is_collision = True
        elif any((np.array([0,255,0]) == x).all() for x in overlap_region):
            is_collision = True
        if is_collision:
            return old_observation, is_collision
        else:
            return observation, is_collision

    def _step(self, action):
        ''' step function, returns obs, reward, done, success '''
        success = False
        if action == 0: # stay in place
            return (self.observation, 0, False, True)
        elif action == 1:
            # Move ahead 2 pixels along the current heading.
            new_carpos_vex = self._agent_state_to_vex(self.agent_state)
            sign = True
            angle = self.agent_state[-1]
            '''
            for j in range(new_carpos_vex.shape[0]):
                new_carpos_vex[j][0] += np.round(2.0 * np.cos(angle))
                new_carpos_vex[j][1] += np.round(2.0 * np.sin(angle))
                if new_carpos_vex[j][0] < 0 or new_carpos_vex[j][0] >= self.obs_shape[0] \
                    or new_carpos_vex[j][1] < 0 or new_carpos_vex[j][1] >= self.obs_shape[1]:
                    sign = False
                    break
            '''
            # Vectorized form of the commented loop above.
            toadd = np.array([[np.round(2.0*np.cos(angle)),np.round(2.0*np.sin(angle))]]).astype(np.int32)
            new_carpos_vex += toadd
            if np.any(new_carpos_vex <0) or np.any(new_carpos_vex>=self.obs_shape[0]):
                sign = False
            if sign == True:
                success = True
                new_vex = copy.deepcopy(new_carpos_vex)
        elif action == 2: # 'move back'
            new_carpos_vex = self._agent_state_to_vex(self.agent_state)
            sign = True
            angle = self.agent_state[-1]
            '''
            for j in range(new_carpos_vex.shape[0]):
                new_carpos_vex[j][0] -= np.round(2.0 * np.cos(angle))
                new_carpos_vex[j][1] -= np.round(2.0 * np.sin(angle))
                if new_carpos_vex[j][0] < 0 or new_carpos_vex[j][0] >= self.obs_shape[0] \
                    or new_carpos_vex[j][1] < 0 or new_carpos_vex[j][1] >= self.obs_shape[1]:
                    sign = False
                    break
            '''
            # Vectorized form of the commented loop above.
            todel = np.array([[np.round(2.0*np.cos(angle)),np.round(2.0*np.sin(angle))]]).astype(np.int32)
            new_carpos_vex -= todel
            if np.any(new_carpos_vex <0) or np.any(new_carpos_vex>=self.obs_shape[0]):
                sign = False
            if sign == True:
                success = True
                new_vex = copy.deepcopy(new_carpos_vex)
        elif action == 3 or action == 4:
            # Rotate 90 degrees around the car's center (3 = left, 4 = right).
            old_angle = self.agent_state[-1]
            rot_angle = 90.0/180.0*np.pi
            new_carpos_vex = self._agent_state_to_vex(self.agent_state)
            if action == 4:
                rot_angle *= -1.0
            new_angle = old_angle + rot_angle
            rot_mat = np.array([[np.cos(rot_angle), -1.0 * np.sin(rot_angle)],
                                [np.sin(rot_angle), np.cos(rot_angle)]])
            identity_mat = np.array([[1. ,0.], [0., 1.]])
            coord_avg = np.mean(new_carpos_vex, axis=0)
            x_avg, y_avg = int(np.round(coord_avg[0])), int(np.round(coord_avg[1]))
            avg_coord = np.array([x_avg, y_avg])
            sign = True
            '''
            for j in range(new_carpos_vex.shape[0]):
                this_coord = new_carpos_vex[j]
                new_coord = rot_mat.dot(this_coord)+(identity_mat-rot_mat).dot(avg_coord)
                new_coord = np.round(new_coord)
                new_carpos_vex[j] = new_coord
                if new_carpos_vex[j][0] < 0 or new_carpos_vex[j][0] >= self.obs_shape[0]\
                    or new_carpos_vex[j][1] < 0 or new_carpos_vex[j][1] >= self.obs_shape[1]:
                    sign = False
                    break
            '''
            # Vectorized rotation about the center (same math as commented loop).
            new_coord = ((identity_mat-rot_mat).dot(avg_coord)).reshape((1,2))
            new_carpos_vex = (rot_mat.dot(new_carpos_vex.T)).T + new_coord
            new_carpos_vex = np.round(new_carpos_vex)
            if np.any(new_carpos_vex<0) or np.any(new_carpos_vex>=self.obs_shape[0]):
                sign = False
            if sign == True:
                success = True
                new_vex = copy.deepcopy(new_carpos_vex)
        if success == False:
            # Move would leave the image bounds: penalize and stay put.
            return (self.observation, -1, False, False)
        # Rebuild the flat state from the vertices (angle patched below).
        new_agent_state = [new_vex[0,0], new_vex[1,0], new_vex[2,0], new_vex[3,0],
                           new_vex[0,1], new_vex[1,1], new_vex[2,1], new_vex[3,1], 0]
        self.observation, is_collision = self.update_observation(new_agent_state)
        if is_collision:
            success = False
        if success:
            if action ==3 or action == 4:
                # Normalize the new heading into [0, 2*pi).
                while new_angle < 0:
                    new_angle += 2.0 * np.pi
                while new_angle >= np.pi * 2.0:
                    new_angle -= 2.0 * np.pi
                if new_angle >= 2.0 * np.pi:
                    new_angle = new_angle - 2.0 * np.pi
                if new_angle < 0 or new_angle >= 2.0 * np.pi:
                    sys.exit('wrong angle!')
                self.agent_state = new_agent_state[:-1] + [new_angle]
            else:
                # Translation: keep the previous heading angle.
                self.agent_state[:-1] = new_agent_state[:-1]
            # Done when close enough (L1 distance) to the target state.
            diff = np.sum(np.abs(np.array(self.agent_state) - np.array(self.agent_target_state)))
            if diff <= 10:
                done = True
                if self.restart_once_done:
                    self.observation = self._reset()
                reward = 1
            else:
                done = False
                reward = 0
            self._render()
            return (self.observation, reward, done, success)
        else:
            return (self.observation, -1, False, False)

    def _reset(self):
        # Reset the agent to its start state and re-render.
        self.agent_state = copy.deepcopy(self.agent_start_state)
        self.observation, is_collision = self.update_observation(self.agent_state)
        self._render()
        return self.observation

    def _render(self, mode='human', close=False):
        # Display the current observation via matplotlib (no-op unless verbose).
        if self.verbose == False:
            return
        else:
            img = self.observation
            fig = plt.figure(CarparkingEnv.num_env)
            plt.clf()
            plt.imshow(img)
            fig.canvas.draw()
            plt.pause(0.00001)
            return

    def change_start_state(self, sp):
        # Change the start state to `sp` if it is collision-free; returns success.
        if self.agent_start_state == sp:
            _ = self._reset()
            return True
        else:
            observation, is_collision = self.update_observation(sp)
            if is_collision:
                return False
            else:
                self.agent_start_state = list(sp)
                self.agent_state = copy.deepcopy(list(sp))
                self._reset()
                return True

    def change_target_state(self, tg):
        # Change the target state to `tg` if it is collision-free; returns success.
        if self.agent_target_state == tg:
            _ = self._reset()
            return True
        else:
            observation, is_collision = self.update_observation(tg)
            if is_collision:
                return False
            else:
                self.agent_target_state = list(tg)
                self._reset()
                return True

    def get_agent_state(self):
        return self.agent_state

    def get_start_state(self):
        return self.agent_start_state

    def get_target_state(self):
        return self.agent_target_state

    def _jump_to_state(self, to_state):
        ''' move agent to another state '''
        ''' to_state: a list of x1-x4, y1-y4, angle '''
        self.observation, is_collision = self.update_observation(to_state)
        self._render()
        if is_collision:
            return (self.observation, -1, False, False)
        else:
            done = False
            self.agent_state = list(to_state)
            diff = np.sum(np.abs(np.array(self.agent_state) - np.array(self.agent_target_state)))
            if diff <= 10:
                done = True
                if self.restart_once_done:
                    self.observation = self._reset()
                return (self.observation, 1, True, True)
            else:
                return (self.observation, 0, False, True)

    def jump_to_state(self, to_state):
        # Public wrapper around _jump_to_state; same (obs, reward, done, success) tuple.
        a, b, c, d = self._jump_to_state(to_state)
        return (a, b, c, d)
def img_reshape(img, target_shape):
    '''Resize a W*H*C numpy image to (target_shape[0], target_shape[1]),
    keeping the same channels. Returns a numpy array.
    '''
    size = (target_shape[0], target_shape[1])
    resized = Image.fromarray(img).resize(size)
    return np.array(resized)
def homography_solve(u, v):
    ''' Solve for the 3x3 homography H mapping the 4 points u onto the
    4 points v.

    Input: u, v: both 2*4 matrices (points as columns).
    Output: H matrix, 3*3, normalized so H[2,2] == 1.
    '''
    uT = u.T
    vT = v.T
    # Standard DLT system A h = b for the 8 unknown homography entries
    # (h9 fixed to 1). Rows 0-3 constrain the x outputs, rows 4-7 the y.
    A = np.zeros((8, 8))
    A[0:4, 0:2] = uT
    A[4:, 3:5] = uT
    A[0:4, 2] = 1.0
    A[4:, 5] = 1.0
    A[0:4, 6] = -uT[:, 0] * vT[:, 0]
    A[0:4, 7] = -uT[:, 1] * vT[:, 0]
    A[4:8, 6] = -uT[:, 0] * vT[:, 1]
    A[4:8, 7] = -uT[:, 1] * vT[:, 1]
    b = np.zeros((8, 1))
    b[:4] = v[0, :].reshape((4, 1))
    b[4:] = v[1, :].reshape((4, 1))
    # linalg.solve is numerically preferable to forming inv(A) explicitly.
    h = np.linalg.solve(A, b)
    H = np.ones((9, 1))
    H[0:8] = h
    return H.reshape((3, 3))
def homography_transform(u, H):
    '''Apply homography H to a set of 2D points.

    u: 2 * N matrix of points (columns); H: 3 * 3 matrix.
    Returns the transformed 2 * N points after perspective division.
    '''
    n_points = u.shape[1]
    homog = np.vstack((u, np.ones((1, n_points))))
    mapped = H.dot(homog)
    mapped = mapped / mapped[2, :]
    return mapped[0:2, :]
def premerge_img(img1, img2, Hmat):
    '''Paste img1 into img2 (in place) through the homography Hmat.

    Every pixel coordinate of img1 is mapped by Hmat; pixels landing inside
    img2 overwrite its RGB channels. Returns (img2, V) where V (2 x H*W,
    int32) holds the mapped coordinates of all img1 pixels.
    '''
    H, W, C = img1.shape
    H2, W2, C = img2.shape
    # Source coordinates: U[0] = x (column-major repeat), U[1] = y.
    xs = np.repeat(np.arange(W), H)
    ys = np.tile(np.arange(H), W)
    U = np.stack((xs, ys))
    V = np.around(homography_transform(U, Hmat)).astype(np.int32)
    U = U.astype(np.int32)
    # Boolean mask of targets inside img2. Unlike the previous set-based
    # intersection, this is deterministic when several source pixels map to
    # the same target, and it handles the empty case without error.
    inside = (V[0, :] >= 0) & (V[0, :] < W2) & (V[1, :] >= 0) & (V[1, :] < H2)
    img2[V[1, inside], V[0, inside], 0:3] = img1[U[1, inside], U[0, inside], 0:3]
    return img2, V
def merge_img(img1, img2, v):
    ''' Paste img1 into img2 at the destination quad given by the four
    points v (2*4 matrix). Returns (merged image, mapped coordinates V).
    '''
    H, W, C = img1.shape
    # Source quad: the (slightly inset) corners of img1, as columns.
    src_corners = np.array([[1, 1], [W - 1, 1], [1, H - 1], [W - 1, H - 1]]).T
    Hmat = homography_solve(src_corners, v)
    return premerge_img(img1, img2, Hmat)
|
# Job: A process or task that has a priority.
# Your implementation should pass the tests in test_job.py.
# Andras Mihaly
class Job:
    """A process or task carrying a priority and a message.

    Jobs order and compare by priority alone; the message is payload only.
    """

    def __init__(self, priority=None, message=None):
        self.priority = priority
        self.message = message

    def __eq__(self, node):
        """Jobs are equal when their priorities are equal."""
        return self.priority == node.priority

    def __lt__(self, node):
        return self.priority < node.priority

    def __gt__(self, node):
        return self.priority > node.priority

    def __le__(self, node):
        return self.priority <= node.priority

    def __ge__(self, node):
        return self.priority >= node.priority

    def __repr__(self):
        return f"Job {self.priority}: {self.message}"
# -*- coding: utf-8 -*-
import json
from BaseHTTPServer import HTTPServer, BaseHTTPRequestHandler
from SocketServer import ThreadingMixIn
from threading import Thread
from urlparse import urlparse
from PIL import Image, ImageDraw, ImageFont
from cv2 import cv
import ConfigParser
import logging
import time
import os
import urllib
import cv2
import numpy as np
import requests
import argparse
import time
import sys
import StringIO
# Map of port -> server config (populated elsewhere; read by stream handlers).
SERVERS = {}
# MJPEG input stream handle (opened by the main program).
stream = None
video_src = "http://mjpg-server:8090/?action=stream"
# detect_api = "http://9.186.106.216:7000/detect_upload"
# Face-detection REST endpoint, injected via environment variable.
detect_api = os.environ.get("DETECT_API")
# Rolling raw-byte buffer used when parsing MJPEG frames.
# NOTE(review): shadows the builtin `bytes` - kept for compatibility.
bytes = ""
# Font used to draw emotion labels (supports Chinese glyphs).
base_font = ImageFont.truetype("/assert/msyh.ttf", 24)
face_border_color = (249,176,76)
score_text_color = (60,63,105)
# Latest per-emotion scores reported by the detector, served as JSON by the
# statistic endpoints. cnName is the Chinese display name; highLight marks
# the currently dominant emotion(s). Values are strings by design.
face_scores = {
    "neutral": {
        "name": "neutral",
        "cnName":"中性",
        "score":"0",
        "highLight": "false"
    },
    "happy": {
        "name": "happy",
        "cnName":"高兴",
        "score":"0",
        "highLight": "false"
    },
    "amazing": {
        "name": "amazed",
        "cnName":"吃惊",
        "score":"0",
        "highLight": "false"
    },
    "sad": {
        "name": "sad",
        "cnName":"悲伤",
        "score":"0",
        "highLight": "false"
    },
    "angry": {
        "name": "angry",
        "cnName":"生气",
        "score":"0",
        "highLight": "false"
    },
    "hate": {
        "name": "hate",
        "cnName":"厌恶",
        "score":"0",
        "highLight": "false"
    },
    "fear": {
        "name": "fear",
        "cnName":"恐惧",
        "score":"0",
        "highLight": "false"
    },
    "scorn": {
        "name": "scorn",
        "cnName":"蔑视",
        "score":"0",
        "highLight": "false"
    }
}
class StatisticHandler(BaseHTTPRequestHandler):
    # HTTP handler that serves the current face_scores snapshot as JSON.
    # CORS headers are fully permissive so any front-end can poll it.

    def do_HEAD(self):
        # Send the 200 response line plus JSON/CORS headers.
        self.send_response(200)
        self.send_header('Content-Type', 'application/json')
        self.send_header('Access-Control-Allow-Origin', '*')
        self.send_header('Access-Control-Allow-Methods', 'GET, POST, OPTIONS')
        self.send_header("Access-Control-Allow-Headers", "X-Requested-With")
        self.end_headers()

    def do_STATISTIC(self):
        # Write headers then the face_scores snapshot as a JSON body.
        self.do_HEAD()
        self.wfile.write(json.dumps({"result": face_scores}))

    def do_GET(self):
        # Every GET returns the statistics.
        self.do_STATISTIC()
class RequestHandler(BaseHTTPRequestHandler):
    # HTTP handler that relays the camera MJPEG stream, running each Nth frame
    # through the face-detection API and drawing emotion labels on the output.
    # Detection runs on a downscaled frame; boxes are scaled back up by `scale`.
    scale = 6

    def create_opencv_image_from_stringio(self, img_buf, cv2_img_flag=cv2.IMREAD_COLOR):
        # Decode a raw JPEG byte string into an OpenCV (numpy) image.
        img_array = np.asarray(bytearray(img_buf), dtype=np.uint8)
        return cv2.imdecode(img_array, cv2_img_flag)

    def create_pil_image_from_stringio(self, img_buf):
        # Decode a raw JPEG byte string into a PIL RGB image (for text drawing).
        f = StringIO.StringIO(img_buf)
        image = Image.open(f)
        if image.mode not in ('L', 'RGB'):
            image = image.convert('RGB')
        return image

    def do_GET(self):
        self.do_STREAM()

    def do_HEAD(self):
        # JSON/CORS headers (used by do_STATISTIC).
        self.send_response(200)
        self.send_header('Content-Type', 'application/json')
        self.send_header('Access-Control-Allow-Origin', '*')
        self.send_header('Access-Control-Allow-Methods', 'GET, POST, OPTIONS')
        self.send_header("Access-Control-Allow-Headers", "X-Requested-With")
        self.end_headers()

    def do_STATISTIC(self):
        # Serve the current face_scores snapshot as JSON.
        self.do_HEAD()
        self.wfile.write(json.dumps({"result": face_scores}))

    def do_STREAM(self):
        # Stream annotated frames back to the client as multipart MJPEG.
        global bytes
        global pre_result
        # Get client info
        client = self.client_address
        # Get the port the client connected to
        port = self.server.server_port
        # Get the image files corresponding to this port
        imageDir = SERVERS[port]["images"]
        imageFiles = os.listdir(imageDir)
        imageFiles.sort()
        # Get the min intra frame delay
        maxFPS = SERVERS[port]["maxfps"]
        if maxFPS == 0:
            minDelay = 0
        else:
            minDelay = 1.0 / maxFPS
        #logging.info("Serving client %s:%s from port %s at %s fps", client[0], client[1], port, maxFPS)
        # Send headers
        self.send_response(200)
        self.send_header("Cache-Control", "no-cache")
        self.send_header("Pragma", "no-cache")
        self.send_header("Connection", "close")
        self.send_header("Content-Type", "multipart/x-mixed-replace; boundary=--myboundary")
        self.end_headers()
        o = self.wfile
        # Begin grabbing frames from the camera.
        frame_pos = 0
        results = None
        while True:
            bytes += stream.read(1024)
            # A JPEG frame sits between the header terminator and the next
            # MJPEG boundary marker in the upstream stream.
            a = bytes.find("\r\n\r\n")
            b = bytes.find("\r\n--boundarydonotcross")
            if a != -1 and b != -1:
                jpg = bytes[a+4:b]
                # 22 = len("\r\n--boundarydonotcross"): drop the consumed frame.
                bytes = bytes[b+22:]
                # Decode with cv2, downscale, and re-encode as the JPEG that
                # gets posted to the detection API.
                cv_img = self.create_opencv_image_from_stringio(jpg)
                height, width, channels = cv_img.shape
                cv_res = cv2.resize(cv_img, (width/self.scale, height/self.scale), interpolation=cv2.INTER_CUBIC)
                cv_img_str = cv2.imencode('.jpg', cv_res)[1].tostring()
                # PIL copy of the full-size frame, used for drawing.
                pil_img = self.create_pil_image_from_stringio(jpg)
                files = {'imagefile': cv_img_str}
                # Only call the (slow) detection API on every other frame;
                # in between, reuse the previous results.
                if frame_pos % 2 == 0:
                    results = None
                    try:
                        start = int(round(time.time() * 1000))
                        response = requests.post(detect_api, files=files)
                        end = int(round(time.time() * 1000))
                        print "response_result %s" % response.text
                        print end - start
                        # The returned results must not contain Chinese characters.
                        results = json.loads(response.text)
                    except Exception as exc:
                        # Best-effort: a failed detection just means no overlay.
                        pass
                frame_pos += 1
                # height, width, channels = img.shape
                #img = cv2.flip(img, 1)
                if results:
                    for result in results:
                        # Detection ran on the downscaled image; scale the box
                        # coordinates back up to the full-size frame.
                        #xmin = width - int(result['xmin'])*self.scale
                        #xmax = width - int(result['xmax'])*self.scale
                        xmin = int(result['xmin'])*self.scale
                        ymin = int(result['ymin'])*self.scale
                        xmax = int(result['xmax'])*self.scale
                        ymax = int(result['ymax'])*self.scale
                        label_first = ""
                        label_second = ""
                        confidence_first = 0
                        confidence_second = 0
                        # Copy the per-emotion scores into the shared map.
                        if result['face_scores']:
                            keys = face_scores.keys()
                            for key in keys:
                                face_scores[key]['score'] = result['face_scores'][key]['score']
                                face_scores[key]['highLight'] = result['face_scores'][key]['highLight']
                                '''
                                if face_scores[key]['highLight'] == "true":
                                    label = face_scores[key]['cnName'] + "(" + face_scores[key]['name'] + ")"
                                    confidence = float(face_scores[key]['score'])
                                '''
                            # Pick the top one or two emotions for the overlay.
                            sortedObj = result['face_scores']['sorted']
                            sortArray = sortedObj['sortArray']
                            if sortedObj['highLightNum'] == 1:
                                tag_first = sortArray[0]['tag']
                                label_first = face_scores[tag_first]['cnName'] + "(" + face_scores[tag_first]['name'] + ")"
                                confidence_first = float(face_scores[tag_first]['score'])
                            else:
                                #first
                                tag_first = sortArray[0]['tag']
                                label_first = face_scores[tag_first]['cnName'] + "(" + face_scores[tag_first]['name'] + ")"
                                confidence_first = float(face_scores[tag_first]['score'])
                                #second
                                tag_second = sortArray[1]['tag']
                                label_second = face_scores[tag_second]['cnName'] + "(" + face_scores[tag_second]['name'] + ")"
                                confidence_second = float(face_scores[tag_second]['score'])
                        # Draw the face box and score labels onto the frame.
                        draw = ImageDraw.Draw(pil_img)
                        draw.line([xmin, ymin, xmin, ymax], fill = face_border_color, width=5)#left
                        draw.line([xmin, ymin, xmax, ymin], fill = face_border_color, width=5)#top
                        draw.line([xmax, ymin, xmax, ymax], fill = face_border_color, width=5)#right
                        draw.line([xmin, ymax, xmax, ymax], fill = face_border_color, width=5)#bottom
                        if confidence_first > 0 and confidence_second > 0:
                            # Box and text for the top score.
                            draw.rectangle([(xmin-2, ymin-100), (xmax+2, ymin)], fill = face_border_color)
                            outtext_first = label_first + " %.2f" % confidence_first
                            draw.text((xmin+8, ymin-90), outtext_first.decode("utf8"), fill = score_text_color, font = base_font)
                            # Box and text for the second score.
                            draw.rectangle([(xmin-2, ymin-50), (xmax+2, ymin)], fill = face_border_color)
                            outtext_second = label_second + " %.2f" % confidence_second
                            draw.text((xmin+8, ymin-40), outtext_second.decode("utf8"), fill = score_text_color, font = base_font)
                        else:
                            draw.rectangle([(xmin-2, ymin-50), (xmax+2, ymin)], fill = face_border_color)
                            outtext_first = label_first + " %.2f" % confidence_first
                            draw.text((xmin+8, ymin-40), outtext_first.decode("utf8"), fill = score_text_color, font = base_font)
                # Re-encode the annotated frame and emit one multipart part.
                output = StringIO.StringIO()
                pil_img.save(output, "JPEG")
                pil_img_str = output.getvalue()
                try:
                    o.write("--myboundary\r\n")
                    o.write("Content-Type: image/jpeg\r\n")
                    o.write("Content-Length: %s\r\n" % len(pil_img_str))
                    o.write("\r\n")
                    o.write(pil_img_str)
                    o.write("\r\n")
                except Exception as exc:
                    # Client disconnected: end this stream quietly.
                    logging.info("Done serving client %s:%s from port %s", client[0], client[1], port)
                    return
        # End grabbing frames from the camera.
class VideoRequestHandler(BaseHTTPRequestHandler):
    """Serves an MJPEG stream built from a local video file, with face
    detection boxes/labels drawn on each frame.

    NOTE(review): Python 2 code throughout (print statements, StringIO,
    str.decode); relies on module globals `detect_api`, `face_scores`,
    `face_border_color`, `score_text_color`, `base_font`, `client`, `port`
    defined elsewhere in the file -- verify before reuse.
    """
    # Downscale factor applied before posting frames to the detect API;
    # returned box coordinates are scaled back up by the same factor.
    scale = 6

    def create_opencv_image_from_stringio(self, img_buf, cv2_img_flag=cv2.IMREAD_COLOR):
        """Decode an in-memory image buffer into an OpenCV image."""
        img_array = np.asarray(bytearray(img_buf), dtype=np.uint8)
        return cv2.imdecode(img_array, cv2_img_flag)

    def do_GET(self):
        # Every GET serves the multipart MJPEG stream.
        self.do_STREAM()

    def do_HEAD(self):
        """Send CORS-enabled JSON headers (used by do_STATISTIC)."""
        self.send_response(200)
        self.send_header('Content-Type', 'application/json')
        self.send_header('Access-Control-Allow-Origin', '*')
        self.send_header('Access-Control-Allow-Methods', 'GET, POST, OPTIONS')
        self.send_header("Access-Control-Allow-Headers", "X-Requested-With")
        self.end_headers()

    def do_STATISTIC(self):
        # Dump the current global face score table as JSON.
        self.do_HEAD()
        self.wfile.write(json.dumps({"result": face_scores}))

    def do_STREAM(self):
        """Stream annotated JPEG frames as multipart/x-mixed-replace."""
        global bytes
        global pre_result
        # Send headers
        self.send_response(200)
        self.send_header("Cache-Control", "no-cache")
        self.send_header("Pragma", "no-cache")
        self.send_header("Connection", "close")
        self.send_header("Content-Type", "multipart/x-mixed-replace; boundary=--myboundary")
        self.end_headers()
        o = self.wfile
        # --- begin: grab frames from the video file ---
        video_path = '/assert/test.mp4'
        cap = cv2.VideoCapture(video_path)
        # Retry until the capture opens (file may not be ready yet).
        while not cap.isOpened():
            cap = cv2.VideoCapture(video_path)
            cv2.waitKey(1000)
        success = True
        frame_pos = 0
        results = None
        while(success):
            success, cv_img = cap.read()
            height, width, channels = cv_img.shape
            # Shrink the frame before sending it to the detector
            # (Python 2 integer division keeps the sizes ints).
            cv_res = cv2.resize(cv_img, (width/self.scale, height/self.scale), interpolation=cv2.INTER_CUBIC)
            cv_img_str = cv2.imencode('.jpg', cv_res)[1].tostring()
            files = {'imagefile': cv_img_str}
            # Only run detection on every second frame; the previous
            # results are reused for the frame in between.
            if frame_pos % 2 == 0:
                results = None
                try:
                    start = int(round(time.time() * 1000))
                    response = requests.post(detect_api, files=files)
                    end = int(round(time.time() * 1000))
                    print "response_result %s" % response.text
                    print end - start
                    results = json.loads(response.text)
                except Exception as exc:
                    # best-effort: a failed detection just skips annotation
                    pass
            frame_pos += 1
            # convert BGR (OpenCV) to RGB (PIL) channel order
            cv_img = cv2.cvtColor(cv_img, cv2.COLOR_BGR2RGB)
            #opencv img to pil image
            pil_img = Image.fromarray(cv_img)
            if pil_img.mode not in ('L', 'RGB'):
                pil_img = pil_img.convert('RGB')
            if results:
                for result in results:
                    # Scale detector coordinates back to full-frame space.
                    xmin = int(result['xmin'])*self.scale
                    ymin = int(result['ymin'])*self.scale
                    xmax = int(result['xmax'])*self.scale
                    ymax = int(result['ymax'])*self.scale
                    label_first = ""
                    label_second = ""
                    confidence_first = 0
                    confidence_second = 0
                    if result['face_scores']:
                        # Refresh the global score table from this result.
                        keys = face_scores.keys()
                        for key in keys:
                            face_scores[key]['score'] = result['face_scores'][key]['score']
                            face_scores[key]['highLight'] = result['face_scores'][key]['highLight']
                        '''
                        if face_scores[key]['highLight'] == "true":
                            label = face_scores[key]['cnName'] + "(" + face_scores[key]['name'] + ")"
                            confidence = float(face_scores[key]['score'])
                        '''
                        sortedObj = result['face_scores']['sorted']
                        sortArray = sortedObj['sortArray']
                        # One highlighted candidate -> show only the top
                        # label; otherwise show the top two.
                        if sortedObj['highLightNum'] == 1:
                            tag_first = sortArray[0]['tag']
                            label_first = face_scores[tag_first]['cnName'] + "(" + face_scores[tag_first]['name'] + ")"
                            confidence_first = float(face_scores[tag_first]['score'])
                        else:
                            #first
                            tag_first = sortArray[0]['tag']
                            label_first = face_scores[tag_first]['cnName'] + "(" + face_scores[tag_first]['name'] + ")"
                            confidence_first = float(face_scores[tag_first]['score'])
                            #second
                            tag_second = sortArray[1]['tag']
                            label_second = face_scores[tag_second]['cnName'] + "(" + face_scores[tag_second]['name'] + ")"
                            confidence_second = float(face_scores[tag_second]['score'])
                    # draw the bounding box and score overlays
                    draw = ImageDraw.Draw(pil_img)
                    draw.line([xmin, ymin, xmin, ymax], fill = face_border_color, width=5)#left
                    draw.line([xmin, ymin, xmax, ymin], fill = face_border_color, width=5)#top
                    draw.line([xmax, ymin, xmax, ymax], fill = face_border_color, width=5)#right
                    draw.line([xmin, ymax, xmax, ymax], fill = face_border_color, width=5)#bottom
                    if confidence_first > 0 and confidence_second > 0:
                        # box for the top-scoring label
                        draw.rectangle([(xmin-2, ymin-100), (xmax+2, ymin)], fill = face_border_color)
                        outtext_first = label_first + " %.2f" % confidence_first
                        draw.text((xmin+8, ymin-90), outtext_first.decode("utf8"), fill = score_text_color, font = base_font)
                        # box for the runner-up label
                        draw.rectangle([(xmin-2, ymin-50), (xmax+2, ymin)], fill = face_border_color)
                        outtext_second = label_second + " %.2f" % confidence_second
                        draw.text((xmin+8, ymin-40), outtext_second.decode("utf8"), fill = score_text_color, font = base_font)
                    else:
                        draw.rectangle([(xmin-2, ymin-50), (xmax+2, ymin)], fill = face_border_color)
                        outtext_first = label_first + " %.2f" % confidence_first
                        draw.text((xmin+8, ymin-40), outtext_first.decode("utf8"), fill = score_text_color, font = base_font)
            # Encode the annotated frame and push one multipart chunk.
            output = StringIO.StringIO()
            pil_img.save(output, "JPEG")
            pil_img_str = output.getvalue()
            try:
                o.write("--myboundary\r\n")
                o.write("Content-Type: image/jpeg\r\n")
                o.write("Content-Length: %s\r\n" % len(pil_img_str))
                o.write("\r\n")
                o.write(pil_img_str)
                o.write("\r\n")
            except Exception as exc:
                # Client disconnected: log and stop streaming.
                # NOTE(review): `client` is presumably a module global set
                # elsewhere -- confirm it is defined on this code path.
                logging.info("Done serving client %s:%s from port %s", client[0], client[1], port)
                return
        cap.release()
        # --- end: grab frames from the video file ---
class ThreadingHTTPServer(ThreadingMixIn, HTTPServer):
    """HTTPServer that dispatches each request on its own thread."""
    pass
def startServer(port):
    """Spawn the three HTTP services on consecutive ports: the MJPEG
    stream on *port*, statistics on port+1 and the video handler on
    port+2, each served from its own daemonless thread."""
    def _serve(bind_port, handler_cls):
        # Runs forever inside its worker thread.
        srv = ThreadingHTTPServer(("0.0.0.0", bind_port), handler_cls)
        srv.serve_forever()

    services = (
        (0, RequestHandler),
        (1, StatisticHandler),
        (2, VideoRequestHandler),
    )
    for offset, handler_cls in services:
        Thread(target=_serve, args=[port + offset, handler_cls]).start()
if __name__ == "__main__":
    # Python 2 only: force utf-8 as the process-wide default encoding
    # (reload(sys) restores the setdefaultencoding attribute).
    reload(sys)
    sys.setdefaultencoding('utf8')
    parser = argparse.ArgumentParser(description='mjpeg server')
    parser.add_argument('--port', dest='port', default=8080, type=int, help='server port (default 8080)')
    parser.add_argument('--src', dest='video_src', default="http://mjpg-server:8090/?action=stream",
                        type=str, help='source video stream')
    parser.add_argument('--detect_api', dest='detect_api', default=os.environ.get("DETECT_API"),
                        type=str, help='detect api server url')
    args = parser.parse_args()
    port = args.port
    video_src = args.video_src
    # Open the upstream MJPEG source (Python 2 urllib API).
    stream = urllib.urlopen(video_src)
    detect_api = args.detect_api
    # Register this port in the global server table, then launch the
    # three services.
    SERVERS[port] = {"images": "./", "maxfps": 10}
    startServer(port)
|
from loadImage import loadFromFile, return2dImageMatrix
import numpy as np
import random as rd
def randomInitialize(X, K):
    """Seed k-means by picking K rows of X uniformly at random
    (with replacement, so duplicates are possible)."""
    n_rows = X.shape[0]
    chosen = np.random.randint(n_rows, size=K)
    return X[chosen, :]
def findClosestCentroid(X, centroids):
    """Return, for each row of X, the index of its nearest centroid.

    Distances are squared Euclidean.  Ties resolve to the lowest index,
    exactly like the original min()/np.where() formulation, but argmin
    with broadcasting avoids the tile/where round-trip.
    """
    closestCentroidNo = np.zeros(len(X))
    for i in range(len(X)):
        # Broadcast the point against every centroid at once.
        squared_distance = np.square(centroids - X[i]).sum(axis=1)
        closestCentroidNo[i] = np.argmin(squared_distance)
    return closestCentroidNo
def computeCentroids(X, closestCentroidNo, K):
    """Recompute each centroid as the mean of its assigned points.

    Clusters with no assigned points keep an all-zero centroid, matching
    the original behaviour.  The per-feature sum/count arithmetic is
    replaced by a boolean-mask mean over whole rows.
    """
    m, n = X.shape
    centroids = np.zeros((K, n))
    for i in range(K):
        members = (closestCentroidNo == i)
        if members.any():
            centroids[i] = X[members].mean(axis=0)
    return centroids
# --- K-means colour-quantisation driver (Python 2: raw_input, print) ---
ImgName = '12.jpg'
Imgarray = loadFromFile(ImgName)
a,b,c = Imgarray.shape
# Flatten the image to (pixels x channels) for clustering.
TwoDarray = return2dImageMatrix(Imgarray).transpose()
# I know this is shit code.
K = int(raw_input("Enter the no of clusters: "))
A = randomInitialize(TwoDarray,K)
max_iter = 10
# NOTE(review): A is never updated to `centroids` inside this loop, so
# every iteration reassigns against the *initial* centroids -- the
# intended update is presumably `A = centroids` at the end of each
# iteration.  Left as-is; confirm before fixing.
for iter in range(max_iter):
    print "Started with iteration " + str(iter)
    closestCentroidNo = findClosestCentroid(TwoDarray,A)
    centroids = computeCentroids(TwoDarray, closestCentroidNo, K)
    print "Done with iteration " + str(iter)
# Replace each pixel with its cluster centroid.
# NOTE(review): closestCentroidNo is a float array; indexing with it is
# rejected by modern numpy -- works only on old versions.
for iter in range(len(TwoDarray)):
    TwoDarray[iter] = centroids[closestCentroidNo[iter]]
print "The centroids are"
for centroid in centroids:
    print centroid
# Reverse channel order (RGB<->BGR) and restore the image shape.
TwoDarray = np.fliplr(TwoDarray)
ThreeD = TwoDarray.reshape(a,b,c)
import Image
Im = Image.fromarray(ThreeD)
Im.save("Kclusters-{}".format(ImgName))
|
import torch
import torch.nn as nn
import torchvision.models as models
from tqdm import tqdm
from torch.utils.data import DataLoader
from sklearn.metrics import accuracy_score
from dataset import TextDataset
from utils import get_val_augmentations, get_train_augmentations, preprocess_data
def main():
    """Fine-tune a pretrained ResNeXt-50 as a 2-class classifier and keep
    the checkpoint with the best validation accuracy.

    Relies on project modules (TextDataset, augmentation/preprocess
    helpers), a CUDA device, and fixed /home/datalab paths.
    """
    BATCH_SIZE = 4
    NUM_WORKERS = 8
    IMAGE_SIZE = 1024
    N_EPOCHS = 50
    device = torch.device("cuda:0")
    #device_ids = [0, 1]
    albumentations_transform = get_train_augmentations(IMAGE_SIZE)
    albumentations_transform_validate = get_val_augmentations(IMAGE_SIZE)
    train_df, val_df, train_labels, val_labels = preprocess_data('/home/datalab/input/noisy_imagewoof.csv')
    train_data = TextDataset(dataframe=train_df,
                             labels=train_labels,
                             path='/home/datalab/input',
                             transform=albumentations_transform)
    train_loader = DataLoader(dataset=train_data,
                              batch_size=BATCH_SIZE,
                              num_workers=NUM_WORKERS,
                              shuffle=True,
                              drop_last=False)
    validate_data = TextDataset(dataframe=val_df,
                                labels=val_labels,
                                path='/home/datalab/input',
                                transform=albumentations_transform_validate)
    validate_loader = DataLoader(dataset=validate_data,
                                 batch_size=BATCH_SIZE,
                                 num_workers=NUM_WORKERS,
                                 shuffle=False,
                                 drop_last=False)
    model = models.resnext50_32x4d(pretrained=True)
    # Replace the 1000-way head with a binary classification head.
    model.fc = nn.Linear(2048, 2)
    model.to(device)
    optimizer = torch.optim.AdamW(model.parameters(), lr=0.00001)
    criterion = nn.CrossEntropyLoss()
    scheduler = torch.optim.lr_scheduler.CosineAnnealingWarmRestarts(optimizer=optimizer, T_0=200)
    best_acc_val = 0
    train_len = len(train_loader)
    for epoch in range(N_EPOCHS):
        model.train()
        train_loss = 0
        train_acc = 0
        for i, (imgs, labels) in tqdm(enumerate(train_loader), total=train_len):
            imgs = imgs.to(device)
            labels = labels.to(device)
            optimizer.zero_grad()
            output = model(imgs)
            loss = criterion(output, labels)
            loss.backward()
            optimizer.step()
            train_loss += loss.item()
            pred = torch.argmax(torch.softmax(output, 1), 1).cpu().detach().numpy()
            true = labels.cpu().numpy()
            train_acc += accuracy_score(true, pred)
            # Fractional-epoch step for the warm-restart scheduler.
            scheduler.step(epoch + i / train_len)
        model.eval()
        val_loss = 0
        acc_val = 0
        val_len = len(validate_loader)
        for i, (imgs, labels) in tqdm(enumerate(validate_loader), total=val_len):
            with torch.no_grad():
                imgs_vaild, labels_vaild = imgs.to(device), labels.to(device)
                output_test = model(imgs_vaild)
                val_loss += criterion(output_test, labels_vaild).item()
                pred = torch.argmax(torch.softmax(output_test, 1), 1).cpu().detach().numpy()
                # Uses the original (CPU) labels tensor -- same values as
                # labels_vaild.
                true = labels.cpu().numpy()
                acc_val += accuracy_score(true, pred)
        avg_val_acc = acc_val / val_len
        print(
            f'Epoch {epoch}/{N_EPOCHS} train_loss {train_loss / train_len} train_acc {train_acc / train_len} val_loss {val_loss / val_len} val_acc {avg_val_acc}')
        # Checkpoint only on validation-accuracy improvement.
        if avg_val_acc > best_acc_val:
            best_acc_val = avg_val_acc
            torch.save(model.state_dict(), f'/home/datalab/input/model_saved/weight_best.pth')
if __name__ == '__main__':
    # Train only when executed as a script.
    main()
|
# Run this script after doing the pyinstaller build
# Builds a Debian package tree for musclex, stamps the version into the
# DEBIAN/control file, then runs dpkg-deb and lintian.
import os
import shutil
import subprocess
from musclex import __version__
# NOTE(review): this path tweak runs *after* the musclex import above, so
# it only affects any later imports.
os.sys.path.append(os.path.abspath(os.path.join('..', '..')))
# Debian layout: DEBIAN/control, the executable under usr/bin, the
# .desktop entry and the icon under usr/share.
deb_path = os.path.join('.', 'musclex-{}_amd64(linux)'.format(__version__),
                        'DEBIAN')
exc_path = os.path.join('.', 'musclex-{}_amd64(linux)'.format(__version__),
                        'usr', 'bin')
app_path = os.path.join('.', 'musclex-{}_amd64(linux)'.format(__version__),
                        'usr', 'share', 'applications')
png_path = os.path.join('.', 'musclex-{}_amd64(linux)'.format(__version__),
                        'usr', 'share', 'icons')
os.makedirs(deb_path, exist_ok=True)
os.makedirs(exc_path, exist_ok=True)
os.makedirs(app_path, exist_ok=True)
os.makedirs(png_path, exist_ok=True)
shutil.copy('control', deb_path)
shutil.copy(os.path.join('..', '..', 'dist', 'musclex'), exc_path)
shutil.copy('musclex.desktop', app_path)
shutil.copy('AppIcon.icns', os.path.join(png_path, 'AppIconMusclex.icns'))
# Rewrite the Version line in the copied control file.
with open(os.path.join(deb_path, 'control'), 'r') as f:
    control_lines = f.readlines()
for i in range(len(control_lines)):
    if control_lines[i].startswith('Version'):
        control_lines[i] = 'Version: {}\n'.format(__version__)
with open(os.path.join(deb_path, 'control'), 'w') as f:
    f.writelines(control_lines)
# Build the .deb (shell=True because of the escaped parentheses in the
# directory name).
proc = subprocess.Popen("fakeroot dpkg-deb --build musclex-{}_amd64\(linux\)".format(__version__),
                        shell=True)
proc.communicate()
print('Checking .deb installer with lintian')
proc = subprocess.Popen("lintian musclex-{}_amd64\(linux\).deb".format(__version__), shell=True)
proc.communicate()
|
import midi_output
import time
class MidiEventSender:
    """Formats raw MIDI control messages and forwards them to a receiver
    object exposing send_control_event()."""

    def __init__(self, midi_receiver):
        self.midi_receiver = midi_receiver

    def set_program_event(self, program_value):
        print("Setting program to ", program_value)
        # Status byte 192: program change on channel 0.
        message = [[[192, program_value, 0, 0], 0]]
        self.midi_receiver.send_control_event(message)

    def set_pedal_off(self):
        # Controller 64 (sustain pedal) with value 0: pedal released.
        message = [[[176, 64, 0, 0], 0]]
        self.midi_receiver.send_control_event(message)
|
from distutils.core import setup

# Minimal packaging script for the graphsurgeon package.
# NOTE(review): distutils is deprecated and removed in Python 3.12;
# migrating to setuptools would change the import but not the metadata.
setup(name='graphsurgeon',
      version='0.4.1',
      description='graphsurgeon',
      author='Nvidia',
      packages=['graphsurgeon'],
      )
|
import csv
import cv2
import numpy as np
import sklearn
import sklearn.model_selection
from keras.models import Sequential
from keras.layers import Flatten, Dense, Lambda, Convolution2D, MaxPooling2D, Cropping2D, Dropout, SpatialDropout2D
from random import shuffle
def main():
    """Train a behavioural-cloning steering model (NVIDIA architecture)
    on recorded driving-log data and save it as model.h5."""
    lines = []
    datafiles = ['data/driving_log.csv']
    for file in datafiles:
        with open(file) as csvfile:
            reader = csv.reader(csvfile)
            for line in reader:
                lines.append(line)
    model = Nvidia(input_shape=(160, 320, 3))
    model.compile(loss='mse', optimizer='adam')
    # Everything is loaded into memory at once; the commented generator
    # pipeline below is the streaming alternative.
    X_train, y_train = get_all_images(lines)
    model.fit(X_train, y_train, validation_split=0.2, shuffle=True, nb_epoch=5)
    # train_samples, validation_samples = sklearn.model_selection.train_test_split(lines, test_size=0.2)
    #
    # train_generator = image_generator(train_samples)
    # validation_generator = image_generator(validation_samples)
    # model.fit_generator(train_generator, steps_per_epoch=len(train_samples), validation_data=validation_generator,
    #                     validation_steps=len(validation_samples))
    model.save('model.h5')
def get_all_images(samples):
    """Load centre-camera frames and steering angles from csv rows,
    augmenting every sample with its horizontal mirror (negated angle).

    Returns (X, y) as numpy arrays."""
    frames = []
    angles = []
    for row in samples:
        frame = cv2.imread(row[0])
        frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        angle = float(row[3])
        # original sample followed by its mirrored twin
        frames.append(frame)
        angles.append(angle)
        frames.append(np.fliplr(frame))
        angles.append(-angle)
    return (np.array(frames), np.array(angles))
def image_generator(samples, batch_size=32):
    """Endlessly yield shuffled (X, y) batches, mirroring each frame so a
    yielded batch holds up to 2*batch_size samples."""
    total = len(samples)
    while 1:  # Loop forever so the generator never terminates
        shuffle(samples)
        for start in range(0, total, batch_size):
            batch = samples[start:start + batch_size]
            frames = []
            angles = []
            for row in batch:
                frame = cv2.imread(row[0])
                frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
                angle = float(row[3])
                frames.append(frame)
                angles.append(angle)
                # augment by flipping horizontally
                frames.append(np.fliplr(frame))
                angles.append(-angle)
            yield sklearn.utils.shuffle(np.array(frames), np.array(angles))
def LeNet(input_shape):
    """LeNet-style regression network: normalise, crop, two conv blocks,
    then three dense layers down to a single steering output."""
    layer_stack = [
        Lambda(lambda x: x / 255.0 - 0.5, input_shape=input_shape),
        Cropping2D(cropping=((70, 25), (0, 0))),
        Convolution2D(6, 5, 5, activation='relu'),
        MaxPooling2D(),
        Convolution2D(6, 5, 5, activation='relu'),
        Flatten(),
        Dropout(rate=0.3),
        Dense(120),
        Dense(84),
        Dense(1),
    ]
    model = Sequential()
    for layer in layer_stack:
        model.add(layer)
    return model
def Nvidia(input_shape):
    """NVIDIA end-to-end self-driving architecture with a single
    regression output for the steering angle."""
    model = Sequential()
    # normalise pixels to [-0.5, 0.5] and crop away sky/hood rows
    model.add(Lambda(lambda x: x / 255.0 - 0.5, input_shape=input_shape))
    model.add(Cropping2D(cropping=((70, 25), (0, 0))))
    # five convolutional stages: (filters, kernel, stride)
    for filters, kernel, stride in ((24, 5, (2, 2)), (36, 5, (2, 2)),
                                    (48, 5, (2, 2)), (64, 3, None),
                                    (64, 3, None)):
        if stride is None:
            model.add(Convolution2D(filters, kernel, kernel, activation='relu'))
        else:
            model.add(Convolution2D(filters, kernel, kernel, subsample=stride, activation='relu'))
    model.add(Flatten())
    model.add(Dropout(rate=0.5))
    for units in (100, 50, 10):
        model.add(Dense(units, activation='relu'))
    model.add(Dense(1))
    return model
if __name__ == "__main__":
    # Train only when executed as a script.
    main()
|
s = raw_input().strip().lower()
if ('a' in s and
'b' in s and
'c' in s and
'd' in s and
'e' in s and
'f' in s and
'g' in s and
'h' in s and
'i' in s and
'j' in s and
'k' in s and
'l' in s and
'm' in s and
'n' in s and
'o' in s and
'p' in s and
'q' in s and
'r' in s and
's' in s and
't' in s and
'u' in s and
'v' in s and
'w' in s and
'x' in s and
'y' in s and
'z' in s):
print "pangram"
else:
print "not pangram"
|
#!/usr/local/bin/python
import argparse
import distutils.spawn
import shutil
import subprocess
import os
# Resolved CLI binaries (None if not on PATH).
DEV_APPSERVER_PATH = distutils.spawn.find_executable('dev_appserver.py')
GCLOUD_PATH = distutils.spawn.find_executable('gcloud')
# App Engine application root and its vendored-dependency directory.
APP_PATH = os.path.join(os.path.dirname(__file__), 'app')
LIB_PATH = os.path.join(APP_PATH, 'lib')
def main():
    """Vendor pip dependencies into app/lib, then deploy: with --local,
    exec the dev_appserver; otherwise exec `gcloud app deploy`."""
    parser = argparse.ArgumentParser(description='Deploys to App Engine.')
    parser.add_argument('--local',
                        action='store_true',
                        help='Deploys using the local development server.')
    args = parser.parse_args()
    # First, we clean out our existing dependencies and retrieve them using pip.
    if os.path.isdir(LIB_PATH):
        shutil.rmtree(LIB_PATH)
    pip_cmd = ['pip', 'install', '-r', 'requirements.txt', '-t', 'lib']
    proc = subprocess.Popen(pip_cmd, cwd=APP_PATH)
    proc.communicate()
    # Then, deploy according to the --local flag.  execvp replaces this
    # process, so at most one of the two branches runs.
    yaml_path = os.path.join(APP_PATH, 'app.yaml')
    if args.local:
        os.execvp(DEV_APPSERVER_PATH, [DEV_APPSERVER_PATH, yaml_path])
    os.execvp(GCLOUD_PATH, [GCLOUD_PATH, 'app', 'deploy', yaml_path])
if __name__ == '__main__':
    # Script entry point.
    main()
|
class cardpackage():
    """Mutable bag of raw card attributes with fluent setters.

    Each attribute pairs a read-only @property getter with a set_* method
    that returns self, allowing chained construction.
    NOTE(review): no attribute is initialised in an __init__, so reading a
    property before its setter has run raises AttributeError.
    """
    @property
    def name(self):
        return self._name
    def set_name(self,name):
        self._name = name
        return self
    @property
    def cost(self):
        return self._cost
    def set_cost(self,cost):
        self._cost = cost
        return self
    @property
    def subtype(self):
        return self._subtype
    def set_subtype(self,subtype):
        self._subtype = subtype
        return self
    @property
    def text(self):
        return self._text
    def set_text(self,text):
        self._text = text
        return self
    @property
    def col_num(self):
        return self._col_num
    def set_col_num(self,col_num):
        self._col_num = col_num
        return self
    @property
    def rarity(self):
        return self._rarity
    def set_rarity(self,rarity):
        self._rarity = rarity
        return self
    @property
    def illustrator(self):
        return self._illustrator
    def set_illustrator(self,illustrator):
        self._illustrator = illustrator
        return self
    @property
    def power(self):
        return self._power
    def set_power(self,power):
        self._power = power
        return self
    @property
    def toughness(self):
        return self._toughness
    def set_toughness(self,toughness):
        self._toughness = toughness
        return self
    @property
    def loyalty(self):
        return self._loyalty
    def set_loyalty(self,loyalty):
        self._loyalty = loyalty
        return self
    @property
    def basic(self):
        return self._basic
    def set_basic(self,basic):
        self._basic = basic
        return self
class MTGCard:
    """Base Magic: The Gathering card model.

    The mana cost appears to be encoded as a product of primes
    (3=white, 5=blue, 7=black, 11=red, 13=green; 2 and 17 presumably
    generic mana) -- TODO confirm the encoding against the data source.
    """
    def calc_color(self,cost):
        # A card has a colour for every colour-prime dividing its cost;
        # no colour primes -> colorless.
        total = []
        lists = {3:"white",5:"blue",7:"black",11:"red",13:"green"}
        for k,v in lists.items():
            if cost % k == 0:
                total.append(v)
        if len(total) == 0:
            total.append("colorless")
        return total

    def calc_cmc(self,cost):
        """Converted mana cost: number of recognised prime factors
        (with multiplicity) in the encoded cost."""
        cmc = 0
        lists = [2,3,5,7,11,13,17]
        cost = int(cost)
        # 23 and 0 appear to be zero-cost sentinels -- TODO confirm.
        if cost == 23 or cost == 0:
            return cmc
        # Factors of 19 are stripped without contributing to the cmc
        # (presumably an X-cost marker) -- TODO confirm.
        while cost % 19 == 0:
            cost = cost / 19
        # NOTE(review): true division makes `cost` a float here, and a
        # cost containing a prime outside `lists` never reaches 1, so
        # this loop would not terminate for malformed input.
        while cost != 1:
            for i in lists:
                if cost % i == 0:
                    cmc += 1
                    cost = cost / i
        return cmc

    def __init__(self,args):
        # *args* is an attribute bag (e.g. a cardpackage) carrying the
        # raw card fields.
        self._name = args.name
        self._cost = args.cost
        self._color = self.calc_color(args.cost)
        self._cmc = self.calc_cmc(args.cost)
        self._imageURI = ""
        self._cardtype = ""          # overridden by subclasses
        self._subtype = args.subtype
        self._text = args.text
        self._col_num = args.col_num
        self._rarity = args.rarity
        self._illustrator = args.illustrator
        self._version = "BFZ"        # card set code
        self._lang = "jpn"

    @property
    def name(self):
        return self._name
    @property
    def color(self):
        return self._color
    @property
    def subtype(self):
        return self._subtype
    @property
    def text(self):
        return self._text
    @property
    def col_num(self):
        return self._col_num
    @property
    def cost(self):
        return self._cost
    @property
    def rarity(self):
        return self._rarity
    @property
    def cardtype(self):
        return self._cardtype
    @property
    def cmc(self):
        return self._cmc
    @property
    def imageURI(self):
        return self._imageURI
    @property
    def illustrator(self):
        return self._illustrator
    @property
    def version(self):
        return self._version
    @property
    def lang(self):
        return self._lang

    # NOTE(review): to_dir is a *property*, yet the subclasses' to_dict
    # methods invoke `super().to_dir()` as if it were a method, which
    # calls the returned dict and raises TypeError.
    @property
    def to_dir(self):
        """Serialize the common card fields to a plain dict."""
        tmp = {
            'name':self._name,
            'color':self._color,
            'subtype':self._subtype,
            'text':self._text,
            'col_num':self._col_num,
            'cost':self._cost,
            'rarity':self._rarity,
            'cardtype':self._cardtype,
            'cmc':self.cmc,
            'illustrator':self._illustrator,
            'version':self._version,
            'lang':self._lang,
            'imageURI':self._imageURI
        }
        return tmp
class lands(MTGCard):
    """Land card; additionally records whether it is a basic land."""

    def __init__(self,args):
        super().__init__(args)
        self._basic = args.basic
        self._cardtype = "land"

    @property
    def basic(self):
        return self._basic

    def to_dict(self):
        """Serialize including the land-specific 'basic' flag."""
        # Bug fix: to_dir is a @property on MTGCard, so the original
        # `super().to_dir()` called the returned dict -> TypeError.
        tmp = super().to_dir
        tmp['basic'] = self._basic
        return tmp
class creatures(MTGCard):
    """Creature card with power and toughness."""

    def __init__(self,args):
        super().__init__(args)
        self._power = args.power
        self._toughness = args.toughness
        self._cardtype = "creature"

    @property
    def power(self):
        return self._power

    @property
    def toughness(self):
        return self._toughness

    def to_dict(self):
        """Serialize including power and toughness."""
        # Bug fix: to_dir is a @property on MTGCard, so the original
        # `super().to_dir()` called the returned dict -> TypeError.
        tmp = super().to_dir
        tmp['power'] = self._power
        tmp['toughness'] = self._toughness
        return tmp
class artifacts(MTGCard):
    """Artifact card."""
    def __init__(self,args):
        super().__init__(args)
        self._cardtype = "artifact"
class enchantments(MTGCard):
    """Enchantment card."""
    def __init__(self,args):
        super().__init__(args)
        self._cardtype = "enchant"
class artifactcreatures(creatures):
    """Artifact creature: inherits power/toughness handling from creatures."""
    def __init__(self,args):
        super().__init__(args)
        self._cardtype = "artifact creature"
class instants(MTGCard):
    """Instant card."""
    def __init__(self,args):
        super().__init__(args)
        self._cardtype = "instant"
class sorceries(MTGCard):
    """Sorcery card."""
    def __init__(self,args):
        super().__init__(args)
        self._cardtype = "sorcery"
class planeswalkers(MTGCard):
    """Planeswalker card with a starting loyalty value."""

    def __init__(self,args):
        super().__init__(args)
        self._loyalty = args.loyalty
        self._cardtype = "planeswalker"

    @property
    def loyalty(self):
        return self._loyalty

    def to_dict(self):
        """Serialize including loyalty."""
        # Bug fix: to_dir is a @property on MTGCard, so the original
        # `super().to_dir()` called the returned dict -> TypeError.
        tmp = super().to_dir
        tmp['loyalty'] = self._loyalty
        return tmp
|
'''
Random forests help to mitigate overfitting in decision trees.
Training data is spread across decision trees. The subsets are created by taking random samples with replacement.
This means that a given data point can be used in several subsets. (This is different from the subsets used in cross validation where each data point belongs to one subset).
Individual trees are trained with different subsets of features. So in our current problem,
one tree might be trained using eccentricity and another using concentration and the 4th adaptive moment.
By using different combinations of input features you create expert trees that can better identify classes by a given feature.
The sklearn random forest only uses the first form of sampling.
'''
import numpy as np
from matplotlib import pyplot as plt
from sklearn.metrics import confusion_matrix
from sklearn.model_selection import cross_val_predict
from sklearn.ensemble import RandomForestClassifier
from support_functions import generate_features_targets, plot_confusion_matrix, calculate_accuracy
# complete this function to get predictions from a random forest classifier
def rf_predict_actual(data, n_estimators):
# generate the features and targets
features, targets = generate_features_targets(data)
# instantiate a random forest classifier
rfc = RandomForestClassifier(n_estimators=n_estimators)
# get predictions using 10-fold cross validation with cross_val_predict
predicted = cross_val_predict(rfc, features, targets, cv=10)
# return the predictions and their actual classes
return predicted, targets
if __name__ == "__main__":
    # Load the galaxy feature catalogue (numpy structured array on disk).
    data = np.load('galaxy_catalogue.npy')
    # get the predicted and actual classes
    number_estimators = 50  # Number of trees
    predicted, actual = rf_predict_actual(data, number_estimators)
    # calculate the model score using your function
    accuracy = calculate_accuracy(predicted, actual)
    print("Accuracy score:", accuracy)
    # calculate the models confusion matrix using sklearns confusion_matrix function
    # NOTE(review): list(set(...)) gives an unordered label list, so the
    # matrix row order varies between runs.
    class_labels = list(set(actual))
    model_cm = confusion_matrix(y_true=actual, y_pred=predicted, labels=class_labels)
    # plot the confusion matrix using the provided functions.
    plt.figure()
    plot_confusion_matrix(model_cm, classes=class_labels, normalize=False)
    plt.show()
from panda3d.bsp import BSPMaterial
from panda3d.core import LVector2i, PNMImage, VirtualFileSystem, getModelPath, Filename
from PyQt5 import QtGui, QtCore
# Reference to a material loaded from disk that can be applied to brush faces.
# Materials with the same filename are unified to the same MaterialReference object.
# Stores the $basetexture Texture and the dimensions of it.
class MaterialReference:
    """Reference to a brush-face material loaded from disk.

    Loads the material's $basetexture into a QPixmap/QIcon and records its
    dimensions; falls back to a 64x64 placeholder when the keyvalue is
    missing or the texture path cannot be resolved.
    """

    def __init__(self, filename):
        vfs = VirtualFileSystem.getGlobalPtr()
        self.material = BSPMaterial.getFromFile(filename)
        self.filename = filename
        loaded = False
        if self.material.hasKeyvalue("$basetexture"):
            baseTexturePath = Filename(self.material.getKeyvalue("$basetexture"))
            if vfs.resolveFilename(baseTexturePath, getModelPath().getValue()):
                # Read the raw image bytes through Panda's VFS and hand
                # them to Qt for decoding.
                imageData = bytes(vfs.readFile(baseTexturePath, True))
                byteArray = QtCore.QByteArray.fromRawData(imageData)
                image = QtGui.QImage.fromData(byteArray)
                self.pixmap = QtGui.QPixmap.fromImage(image)
                self.icon = QtGui.QIcon(self.pixmap)
                self.size = LVector2i(image.width(), image.height())
                loaded = True
        if not loaded:
            self._set_placeholder()

    def _set_placeholder(self):
        # Shared fallback state; the original duplicated this block in
        # both failure branches.
        self.texture = None
        self.size = LVector2i(64, 64)
        self.icon = None
        self.pixmap = None
|
# coding=utf-8
# Mongo-backed crawler driver: pops URLs from the queue, downloads and
# parses them, retries empty results up to 3 times, and enqueues word
# pages discovered on level-1 index pages.
from mongo_zaojv import MongoUrlManager
from crawler_zaojv import Crawler_zaojv
import re
import time

mongo_mgr = MongoUrlManager()
# Crawler start page.
root_url = "http://www.miaoqiyuan.cn/zaojv/word-a.html"
# Template for the level-1 (word index) pages enqueued into the database.
first_url = "http://zaojv.com/word_{}.html"
"""
for page in range(1,1046):
    url_tmp = first_url.format(page)
    mongo_mgr.enqueueUrl(url_tmp, 1)
    time.sleep(0.1)
"""
# Main crawl loop: runs until the queue is drained.
while True:
    record = mongo_mgr.dequeueUrl()
    if record == None:
        print("数据库为空, 程序退出")
        break
    url = record['url']
    depth = record['depth']
    reset_num = record['reset_num']
    crawler = Crawler_zaojv(url, depth)
    # Empty download: requeue up to 3 times, then mark the URL failed.
    if (crawler.html == None) or (crawler.html == ""):
        if reset_num < 3:
            mongo_mgr.resetUrl(url)
        else:
            mongo_mgr.finishUrl(url, error_flag=True)
        time.sleep(1)
        continue
    retLen = crawler.parseContent()
    # Parsed nothing: same retry/give-up policy.
    if retLen == 0:
        if reset_num < 3:
            mongo_mgr.resetUrl(url)
        else:
            mongo_mgr.finishUrl(url, error_flag=True)
        time.sleep(1)
        continue
    mongo_mgr.finishUrl(url)
    # Level-1 index pages yield word URLs crawled at the next depth.
    if depth == 1:
        for url_tmp in crawler.words_url:
            mongo_mgr.enqueueUrl(url_tmp, depth+1)
    # Politeness delay between pages.
    time.sleep(0.5)
|
import threading
import queue
class MyThread(threading.Thread):
    """Worker thread that drains items from a shared queue via work()."""

    def __init__(self, name, que):
        super().__init__()
        self.name = name
        self.que = que

    def run(self):
        print(f"Starting Thread {self.name}")
        work(self.name, self.que)
        print(f"Finishing Thread {self.name}")
def work(name, que):
    """Drain *que*, handing each item to worker(); return when empty.

    Catches only queue.Empty: the original bare ``except:`` swallowed
    every exception, including KeyboardInterrupt and errors raised by
    worker-unrelated bugs.
    """
    while True:
        try:
            item = que.get(block=False)
        except queue.Empty:
            return
        worker(name, item)
def worker(name, w):
    """Log which thread handled which item."""
    print("[{}]\tItem: {}".format(name, w))
if __name__ == '__main__':
    # Fill a shared queue with 1..100 and drain it with four threads.
    que = queue.Queue()
    for item in [x for x in range(1, 101)]:
        que.put(item)
    thread1 = MyThread('A', que)
    thread2 = MyThread('B', que)
    thread3 = MyThread('C', que)
    thread4 = MyThread('D', que)
    thread1.start()
    thread2.start()
    thread3.start()
    thread4.start()
    # Wait for all workers to finish before exiting.
    thread1.join()
    thread2.join()
    thread3.join()
    thread4.join()
|
# File : i2c_fusion.py
# What : i2c sensor fusion (Tilt-compensated heading)
import math
import json
from i2c_base import i2c_sensor
from i2c_compass import compass
from i2c_accel import accel
import threading
class _data(object):
"""Contariner for ULMO-relevant IMU data."""
def __init__(self):
self.acc_x = 0.0
self.acc_y = 0.0
self.acc_z = 0.0
self.mag_x = 0.0
self.mag_y = 0.0
self.mag_z = 0.0
self.mag_heading = 0
self.acc_roll = 0.0
self.acc_pitch = 0.0
self.heading = 0.0
self.heading_comp = 0.0
def __str__(self):
return("rAx:{:.3f} Ay:{:.3f} Az:{:.3f} | Mx:{:.3f} My:{:.3f} Mz:{:.3f} | Ap:{:.3f} Ar:{:.3f}| h:{:.2f} ch:{:.2f} \r".format(self.data.acc_x,self.data.acc_y,self.data.acc_z,
self.data.mag_x,self.data.mag_y,self.data.mag_z,
self.data.acc_pitch,self.data.acc_roll,
self.data.mag_heading,self.data.heading_comp()))
def to_json(self):
return(json.dumps(self.__dict__))
def to_dict(self):
return(self.__dict__.copy())
class fusion(i2c_sensor):
    """ Process input from all I2C sensors and produce tilt-compensated heading."""
    _I2C_ADDR = None # not used
    _I2C_BUS = None # no smbus instance required

    def __init__(self,T_ms=0):
        # instance variables
        self.compass = compass()
        self.accel = accel()
        self.data = _data()
        # Guards self.data against concurrent i2c_read()/to_json().
        self.data_lock = threading.Lock()
        # IMU peripherals status --detailed--
        self._status_periph = { 'acc':self.__class__.ST_OK,
                                'mag':self.__class__.ST_OK}
        # collect IMU status after init
        self._status_periph['acc'] = self.accel.status
        self._status_periph['mag'] = self.compass.status
        # superclass initialization
        super(fusion,self).__init__(fusion._I2C_ADDR,fusion._I2C_BUS,T_ms=T_ms)
        # calibration mode enabling
        #self.compass.set_calibmode(True)
        self.compass.set_calibmode(False)

    def i2c_init(self):
        # nothing todo here
        pass

    def i2c_read(self):
        """Read both sensors, refresh self.data and the per-peripheral
        status flags, then recompute the compensated heading."""
        # concurrent access
        self.data_lock.acquire()
        # reset IMU 'sample processing' status before querying peripherals feedback
        self._clear_st(self.__class__.ST_ERR_SAMPLE_PROCESSING)
        # synchronous sensors read
        self.compass.i2c_read()
        self.accel.i2c_read()
        # store accelerometer readings and status
        self._status_periph['acc'] = self.accel.status
        if(self._status_periph['acc'] == self.__class__.ST_OK):
            self.data.acc_x = self.accel.get_x()
            self.data.acc_y = self.accel.get_y()
            self.data.acc_z = self.accel.get_z()
            # roll/pitch in radians (degrees=False) for _heading_comp
            self.data.acc_roll = self.accel.get_roll(degrees=False)
            self.data.acc_pitch = self.accel.get_pitch(degrees=False)
        else:self._set_st(self.__class__.ST_ERR_SAMPLE_PROCESSING)
        # store magnetometer readings and status
        self._status_periph['mag'] = self.compass.status
        if(self._status_periph['mag'] == self.__class__.ST_OK):
            self.data.mag_x = self.compass.get_x()
            self.data.mag_y = self.compass.get_y()
            self.data.mag_z = self.compass.get_z()
            self.data.mag_heading = self.compass.get_heading()
        else:self._set_st(self.__class__.ST_ERR_SAMPLE_PROCESSING)
        # store compensated heading
        self.data.heading_comp = self._heading_comp()
        self.data_lock.release()

    def _heading_comp(self):
        """Tilt-compensated heading in degrees [0, 360).

        Skips the computation (returning the last stored heading) when
        the current sample is flagged as bad."""
        if(not self._check_st(self.__class__.ST_ERR_SAMPLE_PROCESSING)):
            heading = 0.0
            try:
                # calculate compensated heading out of ACCEL/MAG readings
                cos_roll = math.cos(1.0*self.data.acc_roll)
                sin_roll = math.sin(1.0*self.data.acc_roll)
                cos_pitch = math.cos(1.0*self.data.acc_pitch)
                sin_pitch = math.sin(1.0*self.data.acc_pitch)
                # project the magnetic vector onto the horizontal plane
                mx2 = self.data.mag_x*cos_pitch + self.data.mag_z*sin_pitch
                my2 = self.data.mag_x*sin_roll*sin_pitch+self.data.mag_y*cos_roll-self.data.mag_z*sin_roll*cos_pitch
                heading = math.degrees(math.atan2(my2,mx2))
                if (heading < 0):heading += 360
            except Exception as e:
                self._set_st(self.__class__.ST_ERR_CALCULUS)
            else:
                # update heading value
                self.data.heading = heading
                # clear calculus error if set
                self._clear_st(self.__class__.ST_ERR_CALCULUS)
        return(self.data.heading)

    def __str__(self):
        return(str(self.data))

    def to_json(self):
        """return JSON string with sensor data."""
        self.data_lock.acquire()
        json_data = self.data.to_dict()
        self.data_lock.release()
        # add status information and dump json-string
        json_data['st'] = {'imu':self.status,'acc':self._status_periph['acc'],'mag':self._status_periph['mag']}
        return(json.dumps(json_data))
|
# Tuples compare lexicographically: 'Jones' > 'Adams', so diff is True.
diff = ('Jones', 'Sally') > ('Adams', 'Sam')
print(diff)
# dict.items() returns a dict_items view object in Python 3, not a list.
d = {'a': 10, 'b': 1, 'c': 22}
print(d.items())
print(type(d.items()))
|
# Read three space-separated integers: start hour and two offsets.
s, t, f = map(int, input().split(' '))
# Hour 0 is treated as 24 before summing.
if s == 0:
    s = 24
h = s + t + f
# Wrap once past a 24-hour day.
# NOTE(review): a single subtraction only covers sums below 48; the
# input constraints presumably guarantee that -- verify against the
# problem statement.
if h >= 24:
    h -= 24
print(h)
from flask_sqlalchemy import SQLAlchemy
from datetime import datetime
import enum
db = SQLAlchemy()
def configure(app):
    """Bind the module-level SQLAlchemy instance to the Flask app.

    Also exposes it as ``app.db`` so other modules can reach the
    database handle through the app object.
    """
    db.init_app(app)
    app.db = db
class Usuario(db.Model):
    """User record: identification data plus registration timestamp."""
    id = db.Column(db.Integer, primary_key = True)
    # full name of the user
    nome_completo = db.Column(db.String(255))
    # Brazilian CPF; 14 chars suggests the formatted xxx.xxx.xxx-xx form — confirm
    cpf = db.Column(db.String(14))
    email = db.Column(db.String(255))
    # stamped automatically when the row is created
    data_cadastro = db.Column(db.DateTime, default=datetime.now)
class Batidas_Ponto(db.Model):
    """Time-clock punch record linked to a Usuario."""
    id = db.Column(db.Integer, primary_key = True)
    # FK to Usuario.id (table name 'usuario')
    usuario_id = db.Column(db.Integer, db.ForeignKey('usuario.id'))
    # punch timestamp; defaults to creation time
    data = db.Column(db.DateTime, default=datetime.now)
    # punch type label — presumably entrada/saida; verify against callers
    tipo_batida = db.Column(db.String(14))
|
import datetime
from django.apps import apps
from django.db import transaction
from django.core.management.base import BaseCommand as DjangoBaseCommand
from wampbaseapp.wamp_app import WampApp as WampBaseApp, register_method
class WampApp(WampBaseApp):
    """Expose generic Django ORM operations as WAMP RPC methods.

    Every registered method takes a ``model_path`` of the form
    ``"app_label:ModelName"`` as its first argument; ``post_init`` wraps
    each method so the path is resolved to a model class and the
    (blocking) ORM call runs via ``async_run`` off the event loop.
    """
    PRINCIPAL = 'PRINCIPAL'
    # class-level caches shared across instances
    models = {}  # model_path -> model class
    apps = {}    # app label -> AppConfig

    @classmethod
    def get_app_config(cls, app_name):
        """Return (and cache) the AppConfig for *app_name*."""
        try:
            return cls.apps[app_name]
        except KeyError:
            app_config = apps.get_app_config(app_name)
            cls.apps[app_name] = app_config
            return app_config

    @classmethod
    def load_model(cls, model_path):
        """Resolve 'app_label:ModelName' into a Django model class."""
        app_name, model_name = model_path.split(':')
        app = cls.get_app_config(app_name)
        return app.get_model(model_name)

    @classmethod
    def get_model(cls, model_path):
        """Cached front-end for load_model()."""
        try:
            return cls.models[model_path]
        except KeyError:
            model = cls.load_model(model_path)
            cls.models[model_path] = model
            return model

    def post_init(self):
        """Rewrap all registered methods so each resolves its model path
        and executes the ORM call in a worker thread (async_run)."""
        new_methods_map = {}

        def decorate(method):
            async def new_method(model_path, *args, **kwargs):
                def sync_method(model_path, *args, **kwargs):
                    model = self.get_model(model_path)
                    return method(model, *args, **kwargs)
                return await self.async_run(sync_method, model_path, *args, **kwargs)
            return new_method

        for method_name, method_data in self.methods.items():
            method, options = method_data
            new_methods_map[method_name] = (decorate(method), options)
        self.methods = new_methods_map

    @register_method('get')
    def get(self, model, search_params):
        """Fetch exactly one object matching *search_params*; serialized."""
        obj = model.objects.get(**search_params)
        return obj.serialize()

    @register_method('filter')
    def filter(self, model, search_params):
        """Return a list of serialized objects matching *search_params*."""
        objects = model.objects.filter(**search_params)
        return [obj.serialize() for obj in objects]

    @register_method('get_or_create')
    def get_or_create(self, model, data, defaults=None):
        """get_or_create; returns (serialized obj, created flag).

        ``defaults=None`` (normalized to {}) replaces the previous mutable
        default argument, which was shared between calls.
        """
        obj, created = model.objects.get_or_create(**data, defaults=defaults or {})
        return obj.serialize(), created

    @register_method('create')
    def create(self, model, data):
        """Insert one row; returns the serialized object."""
        obj = model.objects.create(**data)
        return obj.serialize()

    @register_method('get_or_insert')
    def get_or_insert(self, model, data):
        """Delegate to a manager-level get_or_insert.

        NOTE(review): ``get_or_insert`` is not part of Django's default
        manager — confirm the target models define a custom manager.
        """
        obj, created = model.objects.get_or_insert(**data)
        return obj.serialize(), created

    @register_method('update_or_create')
    def update_or_create(self, model, data, defaults=None):
        """update_or_create; returns (serialized obj, created flag).

        Mutable-default fix as in get_or_create.
        """
        obj, created = model.objects.update_or_create(**data, defaults=defaults or {})
        return obj.serialize(), created

    @register_method('delete')
    def delete(self, model, search_params):
        """Delete matching rows; returns Django's deletion summary."""
        return model.objects.filter(**search_params).delete()

    @register_method('multi_insert')
    def multi_insert(self, model, rows):
        """Insert *rows* atomically; returns how many were created."""
        counter = 0
        with transaction.atomic():
            for data in rows:
                model.objects.create(**data)
                counter += 1
        return counter

    @register_method('multi_put')
    def multi_put(self, model, rows):
        """Upsert *rows* keyed by 'id' (existing ids are left untouched);
        returns the number of rows processed."""
        counter = 0
        with transaction.atomic():
            for data in rows:
                model.objects.get_or_create(id=data['id'], defaults=data)
                counter += 1
        return counter

    @register_method('update')
    def update(self, model, search_params, data):
        """Bulk-update rows matching *search_params* with *data*.

        Stamps ``updated_at`` when the model defines such a field.
        Returns the row count reported by ``queryset.update()`` — the
        previous ``queryset.count()`` taken *after* the update could
        undercount, because updated rows may no longer match
        *search_params*.
        """
        field_names = tuple(f.name for f in model._meta.fields)
        if 'updated_at' in field_names:
            data['updated_at'] = datetime.datetime.now()
        queryset = model.objects.filter(**search_params)
        return queryset.update(**data)
class Command(DjangoBaseCommand, WampApp):
    """Django management command that configures and starts the WAMP app.

    The class attributes below are copied onto WampApp before running,
    so the class-level WAMP machinery picks up this command's settings.
    """
    PRINCIPAL = 'PRINCIPAL'
    METHODS_PREFIX = ''
    METHODS_SUFFIX = ''
    def wamp_run(self):
        # propagate this command's configuration to the WampApp class,
        # then start the app (run() is invoked on the class itself)
        WampApp.PRINCIPAL = self.PRINCIPAL
        WampApp.METHODS_PREFIX = self.METHODS_PREFIX
        WampApp.METHODS_SUFFIX = self.METHODS_SUFFIX
        WampApp.run()
    def handle(self, *args, **kwargs):
        # Django's entry point for `manage.py <command>`
        self.wamp_run()
|
from struct import unpack, pack
import csv
def read_vlv(file):
    """Decode one MIDI variable-length value from *file*.

    Continuation bytes have the high bit set; each byte contributes its
    low 7 bits, most significant group first.
    """
    value = 0
    byte = unpack('>B', file.read(1))[0]
    while byte > 127:
        # strip the continuation bit and shift the accumulator up 7 bits
        value = (value << 7) | (byte - 128)
        byte = unpack('>B', file.read(1))[0]
    return (value << 7) | byte
def midicsv(midifile, csvfile):
    """
    Converts midi file into csv file

    Walks the Standard MIDI File header and each track's event stream,
    emitting one CSV row per recognized event (midicsv-style output).

    :param midifile: path to the midifile
    :param csvfile: path to the csvfile
    :returns: None
    """
    with open(midifile, 'rb') as f:
        with open(csvfile, 'w', newline='') as csvfile:
            writer = csv.writer(csvfile, delimiter=',', quotechar = "'")
            # File Header: 'MThd' chunk = 4-byte length + format/ntrks/division
            file_head = f.read(4)
            if file_head == b'MThd':
                data = unpack('>LHHH', f.read(10))
                midi_format, track_number, division = data[1:]
                writer.writerow([0, 0, 'Header', midi_format, track_number, division])
                print('< 0,0,Header,1,2,240 >')
            # Track Header: one 'MTrk' chunk per track
            # NOTE(review): track_number is unbound if the MThd check fails
            for track in range(1, track_number+1):
                tick = 0
                track_head = f.read(4)
                if track_head == b'MTrk':
                    writer.writerow([track, tick, 'Start_track'])
                    print('< %d, 0, Start_track >' % track)
                    # Track Size (byte length of the event data that follows)
                    track_size = unpack('>L', f.read(4))[0]
                    print('Track Size: ', track_size)
                # event loop: delta-time (VLV) followed by an event byte,
                # until the End_track meta event breaks out
                while True:
                    # Read Tick: delta time accumulates into an absolute tick
                    v_time = read_vlv(f)
                    tick += v_time
                    # Parse Event
                    raw_e = f.read(1)
                    # MetaEvents (status byte 0xFF)
                    if unpack('>B', raw_e)[0] == 255:
                        meta_type = unpack('>B', f.read(1))[0]
                        length = read_vlv(f)
                        print('< MetaEvent Type of ', meta_type, length)
                        if meta_type == 3:
                            # 0x03: track/sequence name
                            writer.writerow([track, tick, 'Title_t', '"%s"' % f.read(length).decode()])
                        elif meta_type == 33:
                            # 0x21: MIDI port — skipped
                            f.read(length)
                        elif meta_type == 47:
                            # 0x2F: end of track — close this track's loop
                            writer.writerow([track, tick, 'End_track'])
                            f.read(0)
                            break
                        elif meta_type == 81:
                            # 0x51: tempo, 24-bit big-endian microseconds/quarter
                            int_tempo = unpack('>I', b'\0' + f.read(length))[0] # Convert 24bit to 32bit
                            writer.writerow([track, tick, 'Tempo', int_tempo])
                        elif meta_type == 84:
                            # 0x54: SMPTE offset — logged but skipped
                            print('< %d, %d, SMPTE >' % (track, tick))
                            f.read(length)
                        elif meta_type == 88:
                            # 0x58: time signature — logged but skipped
                            print('< %d, %d, TimeSig >' % (track, tick))
                            f.read(length)
                        elif meta_type == 89:
                            # 0x59: key signature — logged but skipped
                            print('< %d, %d, KeySig >' % (track, tick))
                            f.read(length)
                    # midi_event (channel voice messages; low nibble = channel)
                    else:
                        event_value = unpack('>B', raw_e)[0]
                        if 128 <= event_value <= 143:
                            # 0x8n: note off — key, velocity follow
                            curr_event = 'Note_off_c'
                            n = event_value - 128
                            kk, vv = unpack('>BB', f.read(2))
                            writer.writerow([track, tick, curr_event, n, kk, vv])
                        elif 144 <= event_value <= 159:
                            # 0x9n: note on
                            curr_event = 'Note_on_c'
                            n = event_value - 144
                            kk, vv = unpack('>BB', f.read(2))
                            writer.writerow([track, tick, curr_event, n, kk, vv])
                        elif 176 <= event_value <= 191:
                            # 0xBn: control change
                            curr_event = 'Control_c'
                            n = event_value - 176
                            kk, vv = unpack('>BB', f.read(2))
                            writer.writerow([track, tick, curr_event, n, kk, vv])
                        elif 192 <= event_value <= 207:
                            # 0xCn: program change — one data byte only
                            curr_event = 'Program_c'
                            n = event_value - 192
                            pp = unpack('>B', f.read(1))[0]
                            writer.writerow([track, tick, curr_event, n, pp])
                        else:
                            # running status: reuse the previous status
                            # (curr_event/n); raw_e is already the first data byte.
                            # NOTE(review): curr_event is unbound if the very
                            # first event relies on running status.
                            if curr_event in ['Note_on_c', 'Note_off_c', 'Control_c']:
                                kk, vv = unpack('>BB', raw_e + f.read(1))
                                writer.writerow([track, tick, curr_event, n, kk, vv])
                            elif curr_event == 'Program_c':
                                pp = unpack('>B', raw_e)[0]
                                writer.writerow([track, tick, curr_event, n, pp])
            writer.writerow([0, 0, 'End_of_file'])
    print('== MIDICSV Done ==')
def csvmidi(csvfile, midifile):
    """Inverse of midicsv() (CSV back to MIDI) — placeholder, not implemented."""
    raise NotImplementedError('csvmidi() is not implemented yet')
|
# Guess-the-number game: the player has 9 attempts to find the secret
# number (18); each wrong guess prints a higher/lower hint and the
# remaining attempt count.
n = 18
remain_guess = 9
guess = 1
while guess <= 9:
    print("----------------------")
    inp = int(input("\nEnter a guessing number:"))
    if inp == 18:
        # correct guess ends the game immediately
        print("Yeah!! u guessed it right..the number is 18\n")
        print("No of guesses u took", guess)
        break
    elif inp < 18:
        print("The number is greater than the one u entered\n")
    else:
        print("The number is smaller than the one u entered\n")
    guess = guess + 1
    remain_guess = remain_guess - 1
    print("Remaining guesses are " + str(remain_guess) + "\n")
    if remain_guess == 0:
        print("----Game over...sorry u lose..try again----")
|
import numpy as np
import cv2
# Demonstrates Contour Area (enclosed region) and Contour Perimeter (arc
# length). contourArea equals the m00 moment; arcLength's second argument
# is True for a closed contour, False for an open arc.
def contour():
    """Draw one contour of the sample image and report its area/perimeter."""
    img = cv2.imread('images/sana.jpg')
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    # binarize so findContours operates on a clean black/white image
    ret, thr = cv2.threshold(gray, 127, 255, 0)
    _, contours, _ = cv2.findContours(thr, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    # hard-coded contour index chosen for this particular sample image
    cnt = contours[164]
    area = cv2.contourArea(cnt)
    perimeter = cv2.arcLength(cnt, True)
    cv2.drawContours(img, [cnt], 0, (255, 255, 0), 1)
    print('contour 면적:', area)
    print('contour 길이:', perimeter)
    cv2.imshow('contour', img)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
contour()
|
from tkinter import *
# Minimal tkinter form: a prompt label, a nickname entry, a button and an
# (initially empty) result label, stacked top to bottom with pack().
root = Tk()
root.title("Hello")

text = Label(root, text="請輸入暱稱:", width="30", height="2")
text.pack()

name = Entry(root, width="30")
name.pack()

button = Button(root, text="執行")
button.pack()

result = Label(root, text="", width="30", height="2")
result.pack()

# blocks here until the window is closed
root.mainloop()

# File: hello_demo.py
# Author: Kaiching Chang
# Date: June, 2018
|
# Generated by Django 2.0.3 on 2018-03-14 06:10
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Make order.product a required FK to stock.Product (CASCADE delete)."""
    dependencies = [
        ('orders', '0004_auto_20180314_0609'),
    ]
    operations = [
        migrations.AlterField(
            model_name='order',
            name='product',
            # default=13 only backfills existing rows during this migration;
            # preserve_default=False drops it from the field definition after
            field=models.ForeignKey(default=13, on_delete=django.db.models.deletion.CASCADE, to='stock.Product', verbose_name='Товар'),
            preserve_default=False,
        ),
    ]
|
# sqlalchemy tollkit imports
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
# Database classes imports
from catalog_database import Base, Categories, CatalogItem, User
import datetime
# Open (or create) the SQLite database file holding the catalog data.
engine = create_engine('sqlite:///clothescategories.db')
# Bind the engine to the metadata of the Base class so that the
# declaratives can be accessed through a DBSession instance
Base.metadata.bind = engine
# Session factory bound to the engine above.
DBSession = sessionmaker(bind=engine)
# A DBSession() instance establishes all conversations with the database
# and represents a "staging zone" for all the objects loaded into the
# database session object. Any change made against the objects in the
# session won't be persisted into the database until you call
# session.commit(). If you're not happy about the changes, you can
# revert all of them back to the last commit by calling
# session.rollback()
session = DBSession()
# Create users
# Each User row is committed immediately so its autogenerated primary
# key (User1.id / User2.id) is available to the category and catalog
# rows created further down this script.
User1 = User(name="Hana Shamatah",
             email="hanashamata@gmail.com",
             picture='https://pbs.twimg.com/profile_images/2671170543/18debd694829ed78203a5a36dd364160_400x400.png') # noqa
session.add(User1)
session.commit()
User2 = User(name="hana shamatah",
             email="hanashamata31@gmail.com",
             picture='https://pbs.twimg.com/profile_images/2671170543/18debd694829ed78203a5a36dd364160_400x400.png') # noqa
session.add(User2)
session.commit()
# Add Categories
# All three categories belong to User1; CatalogItem rows below refer to
# them by name via their ``category_name`` argument.
category1 = Categories(user_id=User1.id, name="Formal") # user_id=1
session.add(category1)
session.commit()
category2 = Categories(user_id=User1.id, name="Casual") # user_id=1
session.add(category2)
session.commit()
category3 = Categories(user_id=User1.id, name="Sport") # user_id=1
session.add(category3)
session.commit()
# NOTE: additional category seeds left disabled by the original author.
# category4 = Categories(user_id=User1.id, name="Homewear")
# session.add(category4)
# session.commit()
# category5 = Categories(user_id=User1.id, name="Underwear")
# session.add(category5)
# session.commit()
# category6 = Categories(user_id=User1.id, name="Accessories")
# session.add(category6)
# session.commit()
# Add catalog items
# For category1
# Each item records its owner (user_id), the category it belongs to
# (by name), a description, a picture URL, and a creation timestamp.
catalogItem1 = CatalogItem(user_id=User1.id,
                           category_name=category1.name,
                           name="Jacket",
                           description="Make your formal wear a stylish one wearing a formal jacket. Perfect for your night events. Pair it with an off white shirt and black trousers with black leather shoes to make your outfit look picture perfect. ", # noqa
                           picture="https://the-collective.imgix.net/img/app/product/1/199864-600150.jpg?w=610&auto=format", # noqa
                           date=datetime.datetime.now())
session.add(catalogItem1)
session.commit()
catalogItem2 = CatalogItem(user_id=User1.id,
category_name=category1.name,
name="Suit",
description="For your wedding and it is perfect for very formal events", # noqa
picture="data:image/jpeg;base64,/9j/4AAQSkZJRgABAQAAAQABAAD/2wCEAAkGBxMTEhUTExIWFhUXFx0ZGBgXFxUZFxgXGB4aFxgYFxcYHSggGh0lHRcXITEhJSkrLi4uGB8zODMtNygtLisBCgoKDg0OGxAQGislHR0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLf/AABEIAJMBVgMBIgACEQEDEQH/xAAcAAABBAMBAAAAAAAAAAAAAAAHAAMFBgECBAj/xABFEAACAQIDAwoCBgkDAwUBAAABAgMAEQQSIQUxUQYHEyJBYXGBkaEysRRScpLB8CNCYoKissLR4XOz8RVTYyQ0Q4OjM//EABgBAQEBAQEAAAAAAAAAAAAAAAABAgME/8QAIBEBAQACAgMAAwEAAAAAAAAAAAECESExAxJBBEJRE//aAAwDAQACEQMRAD8AKIrINJa2ArSMhq5MQ9ieJFdcjqoLMQFAJJJsABqSSdwqg7R5xcIJCqLJIv11CheF1zEEj0qi6rqL1oSajth7chxcXSQsSobKQwswYW3jzBqRY0Qje1ba0qyKDcU3IacWqxyg5Zw4eQx9HJIynrZQAq9trk6mxG71oqy5jatSxNQeweV2HxbFI86uFzZXWxtuNiCQbXFTN++g2cm6DiT7KT+Fd8dcEy9aPXcSfHqsPx9q70NQbFqdDU2BW6i1A5es5q0Bqi8pucAwzGDDxCVwcpJubvuKoq2JsdCb76W6WS3pfL1mqryP5TyYl5IcTB0EyDMo1yyR6Ast+DEAi5+JeNWqhZZxWVFYNKsiiErUr1iozb23YMJH0k8gUH4RvZjwVRqfkO21BKUqoI528BmylZwO1+jUqO8hXLegNXjDYlJEWSNgyOoZWU3DKdQQaB6kawtI0GazWlbUCrBNYJrANBms1VuUvLfDYNujYl5ALlFt1QdRmJ3Ejs1O7des8leXGGxxyR5kky5sjgXIBsSpBIO8etBaK0NbXrWgaasXrdhXNisSkcbO7BUUEsxNgAO00HMBrfiT+NYtVOPObhAxHRzFR+sAlvIFr1a9mY+LERJNE2aNxcHcdNCCDuIIII7qGjpNasK2KVrbjxP+KISUqyo7e21Kg6I3rcPWsQArZqquXauHEsMkZAYMPhvYNbWxOuhtY91VrlbswT4aRQiJYdRtCcw13WFtdN9WjGYuOJC8rqijeWIA/wA1SWx0s0SthADEw+JmYsC2qgx70OUhtbXDA61w80vFen8ezVxv1FchMC+DxrYZpM/SYYSvpYB1cAW4izNrpcEURqpHIDk+0E2JllIMlwls12X9dhIN6sR0ZsdbVeLCu2G9cuHk17XRLW1q0RbaCtwarDOtCCLk1jHxJzm4kAlZ7HLeQ3Kk6Wsb+ml72ovu4AJJAAFySbAd5J3VTOTmaGLMsj4qBmIDlT1dcvWW5/Rm2YEaWN+NuPluUm49H48xuWsnHsbkQMPi4pUxOYi5ICgXFiGHxG41t51eglVHFcqcPh8WExAMQyFVZUYrcMDZsovuI0AI11tpVlwG2MPNrFPG/crC/wB3f7Vrx79d1nz+vvrF0yn9JH+9/KakVqNb44/E38Mp3+ld6mtuR8G1bXvTV6yrUDwFDzkzsWCJZZIpbylyskjDMwdDlkTr/D1rm3fV5x2MjjQtLIqLuuzBfQ8fDWh1sxII0zYbEMuZ3cDEMueQMxCuQSM6tbRhre4OoIrn5ZbOHo/Hyky5TuwVMmPMmdSEjkAA0IDMgF+0/CfarmCaoHI3GYaLFTrLiYfpDrHYA2QKbnJE56smu+xOot2UQBW8ZqcuflymWVsZvSzmsViq5tgageVWz45wiSRq4IYXIBKZgLEA+HYewVOXqN29hnZA0SgyKdLsFGU3vmPDtrOctx4dPFZMpaCW3eSJwsBleRSTJkVQpsV62pPYeodNbC2tEPmcxzPgmibXopLL
9lwHt5EtVB5c4rM56SZHlBACwPniRBvBftYm2gGltanubLlRhYZHhZegEgSztIWRpEzA3LAdHe+g3adlwKnjxz1y15bh+ovA0r1pesmtuLN6yTTd6yKoRakDWtq2IqAcbF2G0EuImnQSyyySFnLXsMxKIqsNARbXw4Vy4fZrjauGxCKsYF1kRQDmzhxqV00W5v8Asiu/aLSCWYlxiF6Q36EZejcBbRtdiL5CpvfeT32zyPw0n0/PiSqyHDkxRZ+vYsMz5DqdNM1uNcJL7vblcf8AIQr1qTSam81d3iYMtRu3tnieEqRmAdJMt7BujdXym+ljl7e6pGlJuNLNzS43VlUTlpskT4eQLGiWH6N+qTcd1ri9cXNzA2FmnwZkzgRxzcMrt1ZBbgR0ZFSO1oMTYLGFSLKS800gCICbXAvp/mq7zZvBHjcTGkwlLICrhWVWyklwpYAneDrvsSNK4+LHLb1fkZYWcCWxrU07WrCuzxm0FZpRmlQPiqjzmbYlw+HToZCjPJYkWzZQpJtw1y6j8atYahfzt4i88Mf1Yy332t/RWoKLjsbJJdndna29mLG3iTRd5s+T2FxmDhxEyM0yI2HJEkigKjMUtkIscjLrQdQXUUWeYjG2GJw54rKviRkf+VPWrelil8sg2A2pLHhppRlVLszlmJyBusT8Q13HSrnyB5WyYt3ilVcyJmDrpcXCnMu6+o1HpVG5b4uKTauLkkEjLnKjo2RTeNVi3srC3UPZwro5r8UBj+rcK8cigEgmwyuLkAXNkOth4UqQZga4Nv44wYaaZApZEJUNuJ7L2t6V2q1VznGmy4CX9ooP4gfkpqAU7e25icUP00pZd4QWVB+6ND4m5or83OwMNi8DhJ5ULSQLJEBmYLbO+9QddG9+4UGo3vccP+aMfMjiv0OIhv8AC4ceDi3zQ+tLdxZwqfPJsHD4aTDrh4+jDCV2ALEXJjGgJNh1ToNKpLRjhRD55MQDjo9AyxRJdWvlJLMxDWINiCu4g0P9qbSzsnUiQagCNAo1HafibcPiJ96m+AZuTG0xPhcLJclj1GP7aKyvfxyk+YqzqaEPNftXLIcMf1n6RD3hCrD0sf3TRajN6geY1CcsdoPBg5pUbK4ACmwNizBdL9upqZzVTOdfEFcGq9jyqD4AM3zAqwCnH7Sll68kjO1t7MWOved1F/mm2dhsRgY5ZII3miLw5mUMcmZnC66fDJQXjF18z8zRV5isbb6TAeKSr4/A3yStXmHVULnFw8Ue1JooI1jjjyqFUWXVFdrD7TmpXm25QYhcbh4OmboXJUxkkr8DlcoPwnMF3Wqv7S2wHxuIxHRxyiWR2XpAxAUt1CArDULYa3rl2btLJiY8QQqiOeNyFGVQqMrMABuFgat/iR6cvSNalqzeuasGgXy929NNip4zK/QpI0YjBITqHKSVGjEkE3N99HS9ea9vyH6RiGGt5pD6uxvWoObJWCK16XT89u6tnNhT2NLNyO5V4jCyxoJCYCyho2N1CkgEpf4SAb6W76PTCvMMLHpI1G8uo8yQB716fc1LdjQCtwKbLVnPQNY3ECON5DuRWY34KCT8qAm1+WuMxC/pJiFP6idRfAhdWH2iaLXOLi+j2diD9ZQn32Cn2JoAobjzPzq4pRg5tOSUeIwcWIefFLm6RTFHPJHFmEjgSWQg5rBe3sFDbl0cmPeOOWRxhz0Yldrys4JZ2dwASwZimbfaNeFFTm02yuH2LLM26BpTbidHVfMuB50HcM0Dl3xLz9IzEno0jIYt1mJLuLEknsNanFqfFv5Mc5WLjeOKYidCyqS+kgDELcON9r36178aND15fw0lpUKjdKuW9r6EWB7K9PSdtZqxhTWJrWpE6U3inARmbcFJPgBc1FCznBxUKyZoiRisnQswOiRlQzWvuYhguYWIGbiKocMxiIKEqym6sNCCO0GpXaeNjlLPkfpGckuZFKWJJssYS4vcalj4cIjEVrYOnJLbH0rCxzH4iMr/AG10
bTsB3+BFS5qg8zjN9DmJ3HENl8Mkd/e9XsNWUYWlWinU0qB0GhFzrS/+tF/1YlHkcx+Zotigtzo9bGSrwVB/AD+NUV3ANdF8KtfN9tpcJjFkY2XI6t3qRmA+8q1UsDLmUG4ud9v7VviO42PyqNGMbMXdmJubm54sdWPrU1zcTWxmHPFpAPNXFQU4AWu/kLIFxOGJOnTgfeYr+NEHrDk9tU3nYxuXDxR5tXkzW4hAd/ddl9qugNVbl7ycOKiEkes0QOVfrqbEr46XHmO2qgT7Pw3UefNp0qxEd5VnDH0t61d+bjbyYbGR9I2VZLxsezX4CfBtO6965+ROyoZ4sVBiHeMq8ZMeisG6wznMLg6BfPXeKnsfyOwax6JI1t5EiAgb7t0jWtp2C/dXPLyY43T0YeDPOe06VnnI2j0uOmAN7SEHwTqKPPLeq7sgQHExjEW6IBy+87o3IsBre4Fu+1EbZHJLCohaaMtcfFIZAxvrcaIu76oPjUPzg7OwUcGFbDwrHI7MBlFuliAFnY/rEMbA7zc1Mc5ldJ5PDlhN1T9k4ySGaKWMZmRxlXUMxvbL+9u869GQsbA2t3cKHXJDkd9HaOaezTXuq9keh8mbtv2W0vvohxtXRxPlhQ+54pgI8Ol97O3kAo/qq9uaGXPBKS8A32Rj6kD+mgHmz3uG+2fnU5yc202ElaRd5jkT7ym3owU+FQWz8oUAHXXN4lifkQPKnMStxVl0OQa3PZuHgK0WI5XBFswuO9SLX9QaenawrIj6kb9hQqftI7X0+yyUHprZs4eKJ1NwyKwPEFQQafL1WuQM+bZ2F7ogv3CU/pqeZqg2xWJyo7DeqkjyBNeaWPV1O8bz+NH/AJUYkpg8QwNiImAPAkWv7158xrWW1PgxhtQPzup1Tc37BurSJdAPWnrWqKd2PCZMZhkAuWnj07s63PkAT5V6XZqBvNZswy7REturBGXJ/bfNGg9C5/do2mqlK9bA03ethRFA56MfkwsUIOsslz9mMa/xMlBnANcN9o2ol8+QJfDA7ujk8zdb39B60NtnxWjW3j661YJ+Lb5TZ0uEG6SdZD9lV1H30Q+Rqu3NOFdTXPiD2Ai/yq27Iw4IRt4OpHHiD6WNeosNKWjRjvKqT4kAmvL2IcsCb3DIuvAiNQwPgbjyr09A90U2tdQbcLish9zuqp86G1OhwDops87LCvGzdaQ93UVteJFWkndVD53EHQ4c9olb0yG/yFFDFt48a58YbC/A0uszg7lG4cdN5P4VrjXsvjRRe5rYsuzoid7NI3rI1vYCrYTpUHyOhyYDDL29ChPiwzH3apZmoy2jcXOtKmlsDcDU1ig6gaD3OTGRjpDxVCPuhdfumi2rVXtt8lIsTiRNKzZcgXKhAJYE7yVNhY9mvhVtkm6uGNyuoDOzz1B5+xIpybdU1yt2IuExBWPSJ7lBcnKRlzqSdTqwb97uqt7TxGXKo3lgf3RqazLvlrLG43Vaym9dfJkDpsOCcv8A6hN/+oCK4nYWNXnZ/IVxBh5lnBe8cjxMhUi5VyquCwJAPaFvTZMbZbPgotKK2jbvrhCKdQxNbiPvNac1W5wZBBJh8RCFjxGZv0qquYhQBla4s4625r9lUhtrzlWTpnytvGY6+e8DuFWvnMQgQakjrjX9yqMDUslbxtnRvpCRlzMVGgXMbADsAvYDurWUkqLknKuVbknKo3Kt9wFzoONcmCm1a5+Jiw8OFdch76gOGFnLpC+8sqH7wF/YmpqNqrHJoH6NhCd/Rpv/ANM1ZVGgrTJx3oYc65PTQm3/AMZ9Qx09x60SnOlQXKHk5FijEZGcKlx1CoJzWsLkEDceyp0uMtuoCezD1P3m/mNPTnSpXlJsZcJiGjjv0T3aO5zEWyh1JsL2JB3frDhVc21iMqqo+IkHyGvztTe+mssbjdVu50rXC4glcmpCszffEY/orV5Ljxq5cieS+HnwjyzGQOztlMbqLItk1VlIPWDdo3ilsnZjhcrqCZze
MP8Ap2Gym/UN/HM2YeRuPKrEdKg+RuyvouEjhz9IBmYNly3DsZBpc7s3Gpo0YVXnLxuXAsu7pHRPfOfZDQMxT5nCjz8KOHOLsubE4ZI4UDN0oJGZF0CuL3YgdooKyYJopnje2ZDY5SGF7A6MND5VGpLrZ+PcPCstSXcPCtZOyqCVzNYwZsRDYXISQHtIF1YeV1+8aKFAjm62iIdoRXIAkvGf3x1f4wlHJnoycIFZBrn6SkJaAbc9kV/ozd0i/wC2b/njQ3PCilzxxO0ELhf0aOS7adUtlCXG+xNxfjahPPiABf8AN60G+mUsVW+Zfi3WsbWy+9/KmnAsSd3boK5IIypz7yTc11yyXFSVTuy8GZZI8OAc0jqlhvGcgE+ABJJ7q9MWA0G4aeVDLmi5NLlGPchmbMsI+qASjuf2iVIHAX46EqSSiNydaoXO4f0MH+o3pl1q7iSq1zgbHfE4b9H8cbZgPrAghlHedLd4oA5FiRra5tw/OtRm0pCde3fbuHZU3DstzC0yMHCXMii3SRgfrMh16Ow+IXA1vbtrmIckMSb99RofuReIzYDDHhGF+51f6amr1UebCYnZ8Y7VZ1NyRrmLW3ftVZ3lI4ev+KrBzNes1zLc7/mf8Vmmh0h6juUu0RBh2lK5suWw3XJYLv7N9dqmq1zkuRg7fWkUHw6zfNRSzfC45WXcDflhtnpyAjEIbOQQcyyKvR3zftLluN10Brii2BfAvjmLZumEcYJ0ZdzMe34uqNew1y4i5IC6sSAPtHQDvN6LXK7YyJso4dd0MatfiUYM5Pe3WJ7zWZjqabzzuV3QbkiGS17m2/W2tGzYMrSQxSahXUSBTbQMihVuN4AF9e09gsKCs7dXh8/E0cNkRZYIV+rGg9FAp6y097jLJ9SSmuhDXDfjTq+Nbc0DzjQhsJmtqjqb8Abqf5h6UJcU4sQSx8D+NGrlHhukwsyDeYzbxXrD3AoITDfr5VmrE0uwbbPhxak6yOrjfYZiqEHhdSP3hXAqdtvSwPrRW5L4JJdmQxOOq8VjbeMxJuO8HXxoXTQGN3jJ1RmU9lypK3t5U0uxN5vpy2EhBJJSRl1N9AGZQDwAIFXaJrjwodc2ZJjkW+6YEDhmQg/y0Q8OLA+NVDtqU0AZCu7gflStpW8bdlNbJbLuAJyv2l0zAR3tnzte4dXCiNrG9srBVJFt6Xvqaj+TOxDjcUYl3iKRx3si2QHuLlRXJtQkFvtH8au/Mdg74jEzfUiVB/8AY2Y/7YrMmuHTPO5XdUVQCL2oi81CHERTYdjlSLIQQNWV3kcg33EEHXgRppc1nlxs8QY7EIBYZ868LSASW8LsR5VZeZT/APri/sRfOWmpe0mVx5gsIlrAbhuremxWxrTmZxmHzrYaHXfe1iLEG3ca8+7bIOImKqVBc6FrnMNGPqN1ehQa897YFsRMP/K4/iNZ9ZvbpM76+vxvyb5Py4xplR2HRRFwAfie9kTuvZte6oO54k+JJ040UeZbDtlxUv6jOiDjmQMzeVpF96pHLLZpw+Mmi3DOXT7L9cAeGa3karK48y0g6TEq1j1Y2W4BtYuGIO/tWir0goJ80UpGPK8YHuPBoyPlRnarEpxnFJTTVtKytEVPnXxWXAhLX6SVV8lDP80FA+ca7t3t50X+d6f9DAnGRm+6tv66EWJkNtP8nwpWo3g2fL0LYjKTEJRFmJ1zlS9rW1AA38WFNSGwtRV5XbKXCbDSDLdg0VyP+6zZ5Cf4h5ihLI+89g9zw8qg9B83EATZuFHGPP8AfZn/AKqn5TXDyew3RYXDxfUhjU+IUA+9dZNVGqNXLiZbsoN9Dfs1tf11t6V2RJoDXDtA9ZPtH2U0AJ27pPKN1pHHkGI/sKhGKk9b4b9a3Dt9r1I7UnzvJI2hdma172zEm1+3fvqLkGlj2i5893t86ivSiRqosigC5NgAB1jcnTiTfzpSR3ri2Lii8ELXBDRobjdqo3V3O9aZN4dLD/mlS6S1KiNI5QTV
S51MVlgiS/xSE+OVSP6qmVxf7NUXnKxOYwjgH9yn9qtmiVXOSsPTbQwqH/uhrd0d5D/JRg5aa4LE/wCi3yoTcgSRjlcC5RHbXwyf11f+VO0GfCTjKBeM8b27fapJwtB/EnqN4UfY3AUeAHoO6gPHDmZE+u6rb7RA/GjUk54e5pjFydZl7j6f3rKOfqt7f3rj6XgvuaXTntHuaumdu4yH6p9B/egftOLo3eP6rFfukj8KMX0g/kmhXy0gIxcn7dmH7w1971mxqVe+bbGlsEFFz0bsnrZwPR6qPKkWxc/Z17+oBqf5unC4aRQL/pTfUj9VPz51A8sP/dym1r5T/Ao/Cnw+p/mwxJE0ycVVvQsP6qKMbXH58KE3Nk9ppz+ynHi9Ef6dbsPqaSFqZvWt7andUMcc19B7muLb22+jw0zNcWjYXv2sMo7OJFXSbA/bWKDyMw0DOzacCbj50WuZfDhMHJIf/kmNvsoqr881BqVbsvnRn5A4jocBCuU65n3i3XZmHsRUjVVvnbUDGqw/WgU+YZx8gKe5msUFxGIj7WiRh4RsQf8AcFc/OlJnkge1uoynyIYfzGoPkDtDocfE1rhgyHwZSR/Eq0+nx6BQ1hjUGu3F+q3tTg22vBvarqspXN3V572y36eY/wDlf+Y0bf8ArK/te1A7a73mmPGR/djUqwWeaiMrs5dPikkP8WX+mq7zxYIdJBLbV1ZD+4Qy+fXb0qW5vNqhcBEpzGzONLbs7H8a4+dKZZsKhAYGOUHW25gV7O/LTXBvlV+bWYptGHW2cOhvwKlgO/rKtG8mvPnJ2cpicOxO6ZDfttmF/ajX/wBbT6r/AMNJCpm9a5wCB2m5A7ha/wAxUUNtrwf0WsrtpP2/RaukDzndxxOJSP6kQPm5a/sFqtchdj/SsbGhPVQ9K/ErGQbDjdig8CeFS/OxKrYhJFvrEAb23qzcO5hTnNJIiSzytfMEVBa2gclm9Si+lRfi8c6S32ZMT2NEf/1Qad+poIwQ53SP67qgH2yF/Gi7zi7YVsBKgvdig6wFtHVr6dvV0oYck0DY7DA7llVvufpP6aHx6MJAprPUWdqrxb0H9qbO1E4t6D+1XSbS4awtULymkZYJnB1WKQjuOXT01pz/AKsnFvAgfO1RPKTaIbDTC9rxSDW1rspA7KaAZkS+/wCEb/7Adnzrhkku3jXViJdLX0G/x7B+eFSfKHZYjw2DkC2YrlfvLkyL6XYelRqr/wA2OPDYPJfrRuwtvNm66+5YeVWzpDQm5v8AaBhxJUbpEKkXtcr1gb9wzetEKbaZbTX1qxmpYtfu8jSqCmxZIFiRalV0htG/OlVDnBYZobcG+a1aVXuvVQ5faGH7LfNa1l0k7a83CXmmbggH3mv/AE1buU8o+iTH9i3qQPxqsc20F1nbtzKPQMfxqW5cyZMKQTbOyrc245z7LbzqTpb2qvIbCdJjY8w+G769gUdX+IrRXkhWhtza5GxMhLC4i08CwzG33fWiX0SW0JpjODK8mOhpLg79tPCKtxYVpk0uzjVD5ydmZJYZBuZSp8VNx7P7UR0kHCqnzoSr9FTNv6UZe45WJv5XqZThZeURzXvm+kJ2gqw8CCp/lHrXHy/Urite2JT7sPwrXmvx2XFlBa0kbDXTVbOPYNXbzqR2xET/AForfdYn+v2rH6tfse5tE/8AcN29QfzmrtnP/FUnmqlBbERm25XHH9ZT819avxwh/wCa1j0mXbnSQ8aguXoJwb2I0ZN3DMKsnRW7Ne6q/wAtiy4KYhL6C/hmW58qtSUIrG5OpO4ePCjfgcB0cSRg6IirbwAH4ULOSWAE+KVexP0rabwhXq+ZI8r0WzGRv971nGNZVUOcfDn6PG1vhlHb2MrD52qk8nsYkWKhkcdUNqeFwQGsd4BIPgDRF5cYXNgpP2SreQYX9jQtw2HEsiRfXZV+8QvtelnJOhtaI9mtIQG/Z63rsEI3Zd3eayYe6taZ25ei7vf8KD22lInmHaJH
/mNGcpb/AIoTcugBjJQot8JPC5RSTWco1iuHIQ3wUev6zjzzt/iuvlTh8+EmHBM33Ov/AE1zc29/oep06RrcD8PZ43qyzYdHUqb2YEHwItVk4TfIKYNuult+dbeooyFNe2gqxMbH6yH+JT/ijpFOpAuOwH11qYrk48h4fKtujJ3X9K78yn8604tuJ8xWtM7DPnNwxCwva3xr29uUjf4GmeasE/ST2fox/uf4qe512UYRL7zMLeSuT7fOoDmjnAmnjuOsisL9uQkH/cFZ+tfExy+cDCWNutIo9Lt/TVV5BRXxY0+BGb5J/VU3zsYgZoYQQCLuwHf1U08nrHNPgCfpEpH1UHjqze2SmuTfC5BvLyrNj+RXecMaw2HrWmduDId+ntUJy1kK4VgbWZlXdwObf+6atKoO81QOcza0d0w6G7q12sNxIsAbdtiT6VKs7UzC4czzRwjczBfI6sfJbmiFyz2bnwcmUapZ1Fvqb/4c1QPNps7PPJKd0a2v+3J/ZQfvURmiBuD2ixFtCKmM4MqC2DxJjkSQaFWB8hvHmLii6mHU/rad2tCfbOzzBM8TXsDoeKnVT5i3vRL5L41ZsLG9tQMjccyaE+ejedMVySC4ex0F6VPKvpWK2wYSD86VQucea8safUQsbcXOgPgEB/eq6bRxcqJeKLpGvYC+7vIvrwqoryYxOKkzTgRKSWZiVMjE2tZQSABYb7bt1W/xJ/XVzXqDBKe3pde4BVt+NTPLTZzTYYhEzurBgBbNwOW5HYT41Icm9hx4SMpGWbMbsWIuTu3CwGlTaUmPGi5c7CjkbtFMLOyyqAWGUkjrRkHUEnUA6XHcp7KJivWmN2Rh5GzvBE7fWZFJ9ba10k93oKsmi3bUk8BWxjP5P4VsJCadViKukc0rdGMzkBRvJsAPEmqJy+2/DNGkaHOqvmZhr1rEAKfAt7d9XvaOESZckoDre4B7CL6jXfqda5oNiwKQywIGG5soJHgxuRWbKsoW8lNmYoYyGQQSqqyAklCoCnQ6sBfqk1N87U1pMNfdlk7e9KIoQ8RTcuHRuq4UjgwDD3vU9ONL7chVyB2tHh8SJG3MpRzwBysG0vm1W1v+KL+Ex6SC8ciuOIN7eNt1QO0eSmEmIZogpAteMmM/w6HzFO7G5PQYZs0YYG1rs7todbb7e1JLC3awFxvP586GvOPt2OWPoY3DKhDMQdGb4VVeIFyb9wogTRhlKmxDAggjQgixB8jUVheT+GRukGHTODcHViDxGYmx76tiSqfzVbPnjnkeSKRY2iIzOuUFgykZb2O7NqNKJmXgBXPnPGsq/fUk0W7O4rDLIjRsuZXUqw4g6Ee9A7bGyVgxMkNi4R/1rBmQG9uzUjtGm+jgk3GojlJychxoUSFlK3ystri+8G4II0GhFLFl0kMPKsirIjXVhcdm/wDGnQxHGuPZOzFgiEQdmAuczZb6m/YAK6so41dJtnOKDHK/FLPipJALfqMBfel1Fwe2wHpRmsO/8+VRu2eT2HxQXpoySNxDZT6i1x3Gs2bWXTn5FIBgoAQL5SdO9mINxv0NT1xpamMBg44o1iQAKgyqL3IA7zvroKAVZEA3FYWR5pCqlryOb6WsWJvfhRF5PcoYjAoxEixyp1WG+9tzDLvBHvemH5v4C7P08wVmZsoKgakki9u+u2DkPhBvMrjg0hI04kAG3desyVq2VO4cI6h0YMpFww1B8K1xWKSP43VfEgfOnoYlVQqgKoFlFrAACwAHhUftfYkWIy9IL5TpZmU694N7VpFE5ytorN0WVwVTMdG0ZmsPOwG/tuah+bPFFNoR7jnV0I13ZS9+G9B60R8NyPwqNn6AOw7ZGaT0DsRXbDsuCNs0cEatvuqKra94FZ0uw95z8CPpQkHWzxrcH6wzAAeIXd3HjV95I7Ojw+FjjjfNcZywFsxexv4WsB3AVEcquSiYuRJekyMFC2IzCwJIsLix6x1uRU/s6ERRJGCSEUKCbXIGlzarpLUnnrQs
a5+kHZ+f7Vq0xsQNDbQ77HsNr61Uc+29prh4mlbW24biWO4f5oI7SxDZ2ZgS7EsSeLXJPvV/xWw9oPI56cZWupJdxdD2dGLgDuvXRheQWHsDMWlbxMaAcAqm/qalm2pdJXkRsk4bCKrg9I95HHe24eShR41xY7lnGoNoZLi9g4ytpxTVqshk7PLfWpkYb6aTYM46PEzu0vQTNmNywjk14b77hpYVdObyKREmVo5Il6hUSAqWfrBiCwHYEGnCrmH/ADam5pbWHZfhTS+zAe4FxupVnQ9vypVWTYQVl2sdKVKujB9d3nWElOmtKlQOlvlSznjxpUqK0kkN99K+tKlRGwGtO2rNKgbU1sppUqimz8XpTqnqk0qVRWe2nFUfKlSoho762ApUqKbZjc69lOEfOlSoHEOlZkGlKlQcskhFYDX/AD3UqVA6TurLHSlSoGW3/nhWI2Obf2UqVA8nZTbm26lSoNovPdxNOKL7+ysUqDUxj3761Ci9uzSlSqDdcOpvp7msywKNQOzvpUqoZjGldHQrYm3uaVKoOUD2P9qyDWKVFYU1pOKVKoFCopUqVB//2Q==", # noqa
date=datetime.datetime.now())
session.add(catalogItem2)
session.commit()
catalogItem3 = CatalogItem(user_id=User1.id,
category_name=category1.name,
name="Dressy Jacket",
description="Create your formal look for business or evening wear with Dressy Jackets", # noqa
picture="http://www.gifttolive.com/wp-content/uploads/2018/01/jackets-womens-calvin-klein-dressy-denim-seamed-topper-jacket-indigo.jpg", # noqa
date=datetime.datetime.now())
session.add(catalogItem3)
session.commit()
catalogItem4 = CatalogItem(user_id=User1.id,
category_name=category1.name,
name="Tie",
description="Black bow tie (silk, satin, or twill)", # noqa
picture="data:image/jpeg;base64,/9j/4AAQSkZJRgABAQAAAQABAAD/2wBDAAoHBwgHBgoICAgLCgoLDhgQDg0NDh0VFhEYIx8lJCIfIiEmKzcvJik0KSEiMEExNDk7Pj4+JS5ESUM8SDc9Pjv/2wBDAQoLCw4NDhwQEBw7KCIoOzs7Ozs7Ozs7Ozs7Ozs7Ozs7Ozs7Ozs7Ozs7Ozs7Ozs7Ozs7Ozs7Ozs7Ozs7Ozs7Ozv/wAARCAFWAVYDASIAAhEBAxEB/8QAHwAAAQUBAQEBAQEAAAAAAAAAAAECAwQFBgcICQoL/8QAtRAAAgEDAwIEAwUFBAQAAAF9AQIDAAQRBRIhMUEGE1FhByJxFDKBkaEII0KxwRVS0fAkM2JyggkKFhcYGRolJicoKSo0NTY3ODk6Q0RFRkdISUpTVFVWV1hZWmNkZWZnaGlqc3R1dnd4eXqDhIWGh4iJipKTlJWWl5iZmqKjpKWmp6ipqrKztLW2t7i5usLDxMXGx8jJytLT1NXW19jZ2uHi4+Tl5ufo6erx8vP09fb3+Pn6/8QAHwEAAwEBAQEBAQEBAQAAAAAAAAECAwQFBgcICQoL/8QAtREAAgECBAQDBAcFBAQAAQJ3AAECAxEEBSExBhJBUQdhcRMiMoEIFEKRobHBCSMzUvAVYnLRChYkNOEl8RcYGRomJygpKjU2Nzg5OkNERUZHSElKU1RVVldYWVpjZGVmZ2hpanN0dXZ3eHl6goOEhYaHiImKkpOUlZaXmJmaoqOkpaanqKmqsrO0tba3uLm6wsPExcbHyMnK0tPU1dbX2Nna4uPk5ebn6Onq8vP09fb3+Pn6/9oADAMBAAIRAxEAPwD2aiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACqepavp2jW63Gp31vZxM21XnkCAn0Ge/FW68E+M2sjV/GsOkIcwaXFh/+ujgM3P02D6g0Ae7Wt3bXsC3FpcRXELfdkicMp+hFTV8pWct1plwLnTL24sZx/HbyFCR6HHUV1+m/FzxjpqrHcNZ6mg/ini2vj0yhA/Eg0Dse/UV5Zpvx00xwF1fRry0f+9AVmX6nO0j8jXXaT8Q/CWtYW01u2WQ9I528ps+gDYz+GaBHS0UisrqGUhlIyCDwaWgAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAoorE1bxn4a0TeNR1uzhkj+9EJQ8g/4AuW/SgDbory7V/jjpkYaPQ9Mub6XoJZ/3UX17sfpgVxGp/Ejxnq7knVFsI2GPKsk2Y99xy360Ae86prmlaJCJdU1C3s0Y4UzSBd30HerqOsiK6MrowBVlOQQe4NfKlxAbtpZ7qaW5nk5aaZy7k+pJr3b4R622seA7aKViZ9OY2jk+i4K/+OlR+BoA7eiiigAooooA
KKKKAK9/ewabp9zf3LFYLaJpZCBkhVBJ/QV8qm7m1PULvU7k/vryZpXx0ySTx7c17p8Z9Y/szwHLaof3uoypbrg8hc7mP5Lj/gVeFQJsiVfagaJs8cUcUA89KOTmkMQjI5GaheCJ/vRjJ74qbgY55pKALGmavrWiEf2RrF5aAHPlpKfLJ91PB/Gu00f40+IbAiPWLG31KIdZIv3Mn6fKfyFcG1JQKx71ofxa8JazsjkvTp1w3HlXq7Mf8D+7+tdojK6B0YMrDIIOQRXyc8SOcOoOav6Lr+ueGZS+iapNbKTkwE74mPupyPx60wsfUlFeR+H/AI4xkpb+JtOMB6G7tMsn4oeR+BP0r0/S9Y03W7QXel3sN3Cf4onzj2I6g+xoEXaKKKACiiigAooooAKKKKACiiigAooooAKKKKACiis7Wdf0nw9Z/a9Xv4bSLtvPzP7Ko5Y+wBoA0aQkAZJAA6k15Br3xxMm+Dw1phbsLu84H1CDr7En8K881nxBr3iRidZ1ae4Q/wDLFTsiH/AFwPx60Ae6638UPCWhs0cuprdzr/yxs181ifTI+UH6kVwesfG/VbnMeh6RFaJ0867bzGPuFGAD9Sa82SJIxhVAqQDAoHY0NT8TeJtdJGp69dyo3WKN/LjP/AVwD+VZSWkSH5UHHc1Pj1p2OBSHYaqgduBTwcYoAwKXp+FADhzx6V3HwU1U2fivUtHdh5d7CJY8n+ND0H1DHP8AuiuGAzyRUujaodA8W6VrAbYkFwvmn/YPD/8AjpagGfUVLRRTJCiiigAooooA8N+Omqm68S6Zo6YK2kBmcg/xOehHsEB/4FXAoPlGetaHi/Um13x9rF9kFBcGKIqSQUT5FI+oUH8aqIhHI5pDQgGKOox3p2KMUDEx2xk0mRTh/kUEA0AMP503FSY/CjH60AQ9qToOakxxxTStAEZ6HPIp+n319o16t7pN5LZXA/jibAI9COhHsaQqOvamsvHagR6x4U+NiSMln4qt1gY4Vb63UlD/AL69R9Rx7CvV7e5gvLeO4tZo54JBuSSNgysPUEcGvk1hkYIyPetjwv4x1vwdc+Zpk/mWrNmWylJMb+pA/hPuP16UxH0/RXMeDvHuj+Mrb/RHMF6i5ms5T86e4/vD3HtnFdNQAtFFFABRRRQAUUUUAFFFJQAtQ3V3b2NrJdXc8cEES7nlkYKqj3JrF8XeNNI8HWH2jUJd88gPkWsfMkp9vQepPH48V4D4o8X6z4yu/M1Oby7VWzDZREiNPQn+83ufXsOKAO+8W/Gosz2PhOHceVa/nT5R7op6/Vvyry67nutTvGvdSu5ry5brLO5Y/TnoPao1XAwBjHYU9RSHYAMDilpQvOMYp4XPWgY0AcU8LzkUu3PUUvB7UAAHWjntSgflSgAn9aAEAHenDjkHigL6Uu00gDpVPUY/Mt2x25FXccmoplyhU/jTA+i/AuqnWvBGkXzOXke2VJGJyWdfkY/mprfrzD4Fap5/hm+0l3LSWF1uVT/DG4yAP+BK5/GvT6ZIUUUUAFZfiXVP7E8M6lqeVDWts8ibuhcA7R+JwPxrUrgPjVfGz+HssIJBvbmKDj6l/wD2SgDwjT4yYt7ZJY5J9av9OBUdvH5cKr2xUwFIoQ8Un8qfjvSYpANxgGkPXNPI6UYNAEZXP40beeuKf3pMc80wGEevWk254xUh560hHHBzQBCVxn0prLg1Pg4pCo7fnQBWKkdqiZDVwpnk9aYycUAV4Zp7S7iu7Sd7e5ibdHNGxVlP1r2r4f8AxXj1h4tH8RNHbaicLFcfdjuT6eisfToe2OBXjDR/n6U0IsimOZcqeh9KYrH1tRXiXgD4t/2VjRfFE7vbIMW98VLMg7K4HJHoeT65HI9Gh+JPgye3M6eIbQIDjDko3/fJAP6UCOnorn18e+EWIA8R6dz63CitS11bTL4gWeo2tyT0EMyvn8jQBcoqtcalYWkgjub23hc8hZJVU/kTVW48S6DaR+Zc61p8
S+r3KD+tAGlXI+P/AB9aeC7BVRFudUuB/o9sTxjpvbHRR+Z6DuRV1H4v+EbWxu5LLUPtl1Cp8qBInHmv0ADFcYz1Pp614Tf32oa5qs+qanKZbqdsseyjsoHYDoBQAy+vr7V9Sk1LU7lrq7mOXkbt7AdAB6Cmqn41IsQHQVIqY+tIqxGqnP1qUJ2xzTto7UuOMUANx2IpQBmnAfjS4yf60AMC8804L2ApwFKBikA0jtS4J9qXFFAABiigZooAP5etRt09z+tSYpjd8dqBnS/CDUP7O+I4tSTs1C2eLGTjcPnBx6/IR+NfQVfKmmaj/Y3ivS9U3MEtrpHkKnBKBhuH4jI/GvqoEMAQQQeQR3qiBaKKKACvKvjxcN/ZOi2Y+5LdtIfqq4H/AKGa9VryD47t/pHh5O2+c/8AoFAHnC+mc06mrjPFOqSwooooEFFFFAARmkP0paDQA3GQKXHNA6ml6UAJjtmm49RTs+1L1oAbimstPHXrSkZFAEJjz1pnlZYVY28UhXtTAx9RQteHA4CgVUMZHata5i3TMxHtUJg/WgRnbOaVBJG4ZGKsOhBwRV77PmlEAximBQYO7bnYsfUnNJsNaIt6U2+e1AWItOt9xeQjkcCtRUCrgHmo7OLy4yPfNWcUhjNmelKF/CnUUhiEc0uBRRQAYFHfgUUUCCiilxQMSjGKXiigQlLQKQigApCOABR+OKF680AZOprgZ96+rtHuY7zRbG6h/wBXNbRyJ9CoIr5W1RcKTX0v4Fk83wHoTZBxYQrx7IB/SqEzfooooEFeO/HWQNqPh+EA7gJm/D5P8K9irw741XLHxxp1u/8Aq49PDqPdpGB/9BFAHFDhc5oHUZNJvGPUUB8c9qkod36Uv40zeecUhfNADyaQt2qMuAM5phl5460ATF9vWkMvvVVpuetRtN6UwLvnAHk003CjvVBpmPaoyznsfxoFc0Deqpz1ph1ADnFUSkh9BR9mJ6uTQBbOqIvaj+14vRvyqutmmehP41IttGP4F/KgCUatAezflTv7UQjiJz+FNWFQOmPpUixDI4oGNilNwGcqUOehp3l/jU0MQBZvU1JsXOcUBYrbCelOEVWAgANGwYxQBD5YzSrGD71LsFKFA7UDK5uoIpGjd8MKcLq37zLUckCtK5HUnrUTQL3UH8KBFsXNueki/nS+bF2dfzrPa0jJwUX8qjNlF2Uj6GgDU82L++v50nnx5+8DWT9k29GNL5LqODQFzV8+L++KBMhHBrLBdTyKcJSOuc0WC5p+YvHNKGzWcJRxUnmnPWiwXL2aXNUxLg5z1p4noAs5HrSZ96h87NKHB70gHnmimj1JyKXI9aAK+pLvs2YDp1r6C+Frh/hvoxBziJh+TsK8EcJLG8ZPDDFe0/Ba5E3w/SDfuNpdSwt7HIbH/jwpoTO/ooopiCvEPjjbrH4r0m6Gd01m0R+isSP/AEM17fXj3x6jCz+H5/8AamT/ANAoA8yjbAII6frSscHPY0xuGz1oLZFIoUuQaY0nPWmOTk4qPHpQA9pT24ppye9IFzT1SgBmMijZUoQU4Lk9KAIhHjtSiPnmpduBT1TnkUARCP0pwTg1KqfgacFAHtQBGsecelPEYFPC+tOA5oAaEApeACT2p+Oajn4iPvSAjgmCowPJ3Gn/AGgVQjbaWB9aRpOc0wuaH2lc0hugPpWd5nvSFyeetAXNA3WD0oF2PQVnFyaVXIwaAuaEcga4b0apSvFUYG+fd3zV8E7etAEewHtTSgxkGpqTGKAIfL4yaaY88kVPg+uB6UhU5OaAKxj9RTSgxxVormmbMUAVTHjmk2kcc1aKenWk2EnpQBWwacCRUuzjPemlOxoAQMfpTg/vSYPpTWHcUASb+2aN+epqHJB5p4OfrQBKhwkjnspxXsnwHBHge7Y/xalIf/IcdeNyYWxkPqK9t+CUZT4eoxA/eXUrD9B/SmJnoVFFFAgry3482av4a02/+bfb3uzjoFdSST+KL+depVyX
xR0w6p8PNVjQZeCMXC+2whm/8dDUAfPuNyA+1MI6ii1fzbdD7YNPI54pFEZXNJs9qkwCeKcFoAjCcDinAe1PA49qXbjkGgBgXtTgvB/Sn4pcUANC04LTsYHFL1oAQDBzS4xS5wKXntSAQjjig5pc55FA4oAM1XuXyMZqRnABOcY6mptE8Pav4svTa6LaGbBxJcv8sUQ/2m/oOfY0wMVnCuwPBPIqFpV7n8q9ysfgVoQsIk1K/vJ7zIaWWFgin/ZAIPHv1+nSuksfhf4LsECpocMxxy1wzSk/99Ej8qZJ8z+cGYBVLE9hVkWGpgAnTbrDdD5Lc/pX1ZYaHpGlf8g7S7O0J4JggVCfxAq/QB8kLpOslgo0e+JboBbvz+lFzpGs2UH2m70e+t4M48yW3dVz9SMV9b0hAIIIyD2oA+Q7eZWOMYJORnvWpE2VAr6K8T+C9F8VaWLG9tljaPJt54lCvCT3X29R0P5V4N4j8L6p4N1IWWpjfDIT9mvEHyTAevo3qP5jkoaKXHakyenakBzjmlpDDFFHTrRQAmOKQjjmnUdu1ADCB0pCOcCpMg8ikIHYc0wIyuaTaOhP0qQjA4pp5PHrzQBHtNJt9qlxgA55ox9aAICgBOQackfIOOKlAz24pyjuRx1oAr6gVW0IHHGK97+EEJi+GumkqF8wyv8AX943P6V8+apJ+7CjuetfUXhSx/szwlpNkUCNDZxK4H97aNx/PNMTNaikooELUN3axXtnNaXCB4Z42jkU9GVhgj8jU1FAHyYtvLYXt3p02RLazPEw6EFSQf1FTEADIre+JWmjSfiXqCxx7I7zbcp7lh8x/Fw1YgGBSKQ0Aen40uKX8PwpeKAEx2pwz+XWk780cZoAM84FKBg4pcUoHekADA4pfagc0ZAoAX3pNwpMZPFRvOquEHzyMcKijLMfQCgCUsAKiBea5S1topLi5kOEhhQs7H0AFdl4b+FniHXik+pg6NYE5IkGbhx7L/D/AMC5Hoa9c8NeD9E8J23laVaBJGGJLiT5pZP95v6DA9qdhXPNPDHwbvdQZLzxVL9mg4IsIGy7f77jp9Bk+4r17T9OstJso7LT7WO2t4hhY41wB/8AX96s0UxBRRRQAUUUUAFFFFABVPVdJsNb0+XT9TtUubaUfNG/8wRyD7jmrlFAHgvjD4aan4XdrzTBLqWk9SAMzW4/2gPvD3H4478dHMkqAxsCD3r6qrifFPwr0LxC8l3aA6XqDZPnW4+Rz/tp0P1GD70h3PD8dqD71sa94M8SeGHY32ntc2q5P2y0UyRgercZX8axI5o5F3I4b6UhklFHfNFABRSUZ5wKAEY+lGMDIoOc5pMd6AFPJHpSkDGDTaXAxzQAvtjpSkfLu6GkHTPahztQmgBdB0s+IPGulaXt3rNcKZRn/lmDuf8A8dBr6mrxD4GaP9r8Qalrsg+S1jEEQI/ic5JB9guP+B17fVEhRRRQAUUUlAHjnx301Y73RdaReTvtpWz2HzIP1evOewNe5/F7Sxqfw8vXVS0tk6XMeDjG04Y/98s1eExP5kKOP4hQND+1LgE4BNHUZ7jtS/UYpDDntSj0pR0oyMikAAUuDQW+lPsLW/1m8FlpFlLfXPdIhwvuzdFHuTQAxtoHJ6d6SATX10tpYW0t5dP92KBC5/TtXpvhz4L79l14ovPMzybG1Yqo9mk6n6DH1Nel6VoumaHai10uxhtIh1WJMbvcnqT7mnYVzyHRPg9rmplJtcu49LgPWGEiSY/U/dX9a9N8O+CPD/hdQdOsF8/+K5m+eVv+BHp9BgVv0UxCUtFFABRRRQAUUUUAFFFFABRRRQAUUUUAFJS0UAFcf4g+GHhnX3a4+ytYXbf8t7MhCT7rjafyz712FFAHg2t/CrxRoxeSyWPWLZehh+SXHuh6/gTXHu5huGtrmKS2nTh4pkKOp9wa+qKztY8PaPr8Ii1bTre7UfdMiZZfo3UfgaVh3Pmr3FIQTye1etax8EtNmYya
Hqc+nHtDKPOj+gJO4fma8+1/wd4l8Lgy6np/nWi/8vdqfMT6t3Ue5AosO5jYGPej+lNjmjlXcjAinY9+KQAKMflQeB9aOAM0AKOlQ3snlW7t7cVMDlsflVa8hlvry1063G6a5lWNB6liAP1NMD3n4QaQNK+Htm5BEt87XUmT/e4XH/AVWu3qvYWcOnafbWNuCIbaJYowTkhVAA5+gqxTJCiiigAooooAr39lFqOnXNjPu8m5ieGTacHawIOPwNfLMUL2Xm2dyQk1rK0Ui56MpIIr6urite+FXhrWZ7+/Fs8OoXYLCZJWUJJj7+0ccnk+vNAJ2PCwy9cde9Llj0p8Gna5LePp0ei3s2oQHZPEkJOw+px0+vSut0r4SeLdTVXvZLXSom6iRvMkA/3V4/MilYq5xzOqL87gfjVrSNI1bxBP5OiabNeEHDSgbY0+rngV7LoXwi8M6TtlvIn1a5GCXu+Uz7RjjH1zXbQwxW8KwwRJFEgwqIoVVHoAOlFhXPLNA+CykpP4n1Dz+5s7QlU+jP8AeP4Y+tel6ZpOn6NZrZ6bZw2kC9EiXAz6n1PuauUUxBRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFAHHeI/hd4a8QM862x0+8bn7RafJk/wC0v3T78Z96801r4VeK9G3SWaxavbrk7oDslA90PU/QmvfKKAPlOZ5bW5NveQS2k6/einjKMPwIpfMDDOOPavp/UdJ03WIPI1Kwt7yPss8YfHuM9D7iuF1j4K+H7vdJpNzdaVKR8qo3mxfirc/kwpWHc8ejwzZB6VvfC7SxrfxMhmdA0OnI1wwPTI4X8QzA/wDAan1v4XeK9DkMtvAurWwPEln/AKwD3jPP5Z+tegfCHwld+HdFu73U4GgvdRlDGJ/vJGudoI7EksfpiiwNnoVFFFMQUUUUAFFFFABSUtFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQB//2Q==", # noqa
date=datetime.datetime.now())
session.add(catalogItem4)
session.commit()
catalogItem5 = CatalogItem(user_id=User1.id,
category_name=category1.name,
name="Dress Coat",
description="A woman's dress that resembles an overcoat, usually with collar, lapels and front fastenings similar to a coat", # noqa
picture="https://www.suzannah.com/images/product/zoom/couture-princess-coat-navy-blue-twill.jpg", # noqa
date=datetime.datetime.now())
session.add(catalogItem5)
session.commit()
catalogItem6 = CatalogItem(user_id=User1.id,
category_name=category1.name,
name="Shirt",
description="White pique wing-collared shirt with stiff front", # noqa
picture="https://static5.cilory.com/235372-large_default/french-connection-white-formal-shirt.jpg", # noqa
date=datetime.datetime.now())
session.add(catalogItem6)
session.commit()
catalogItem7 = CatalogItem(user_id=User1.id,
category_name=category1.name,
name="Cufflinks",
description="Cufflinks are items of jewelry that are used to secure the cuffs of dress shirts. ", # noqa
picture="data:image/jpeg;base64,/9j/4AAQSkZJRgABAQAAAQABAAD/2wCEAAkGBxMTEhUSEhMVFhUXFhgWFRUVFhUVFxgVFRUXFxUXFhgYHSggGBolGxUWITEhJSkrLi4uFx8zODMsNygtLysBCgoKDg0OGhAQGi0lHR0tLS0tLS0tLS0tLS0tLS0tLS0tLS0rLS0tLS0tLS0tLS0tLS0tLS0rLS0tLS0tLS0tLf/AABEIAJ8BPgMBIgACEQEDEQH/xAAcAAACAgMBAQAAAAAAAAAAAAACAwEEAAUGBwj/xAA7EAABAwMBBQYFAwMDBAMAAAABAAIRAyExQQQSUWFxBSKBkaHwBhOxwdEHMuFCUvEjYnIzNKKyFIKS/8QAGQEBAQEBAQEAAAAAAAAAAAAAAAECAwQF/8QAIBEBAQEBAAICAgMAAAAAAAAAAAERAgMxEiFBUQQTIv/aAAwDAQACEQMRAD8A8dqxnXPLwTARk5mcEx6e5SnDQWjqLhMpERc343ONBwC6xypbo4BN+YYkDhg59woJ3uHs+o92UB3GD0PhNsoal8yJBDuA/lNZJi+DwvEHXzSnOnPrx18UQJiJ53/P49VUDUbGPce/VPaMY0JnSc2yI95hKeDrBwbHpkC/0RU2SLgmNBoOYyiM3RJtMdbgXJ4+9E1tSMgzOB1wOHRB8vFo1jPToSjawaX93jl7MqiKbifxiSL248b/AGTXVSXAwCeWs6wLH0kzzUOAnDTbT1xbyteLo8WmNRzm0n3+EQQcIvmMcHE/WL25AQus/TPtx2zbfSkgU6x+TUmTO9+w5yH7ok/3OHTknHegXOmDJkcJmb40EImVIuJBFw4SI4R9ihK+sKjUlzFU+D+2G7ZsdGuMuaBUFju1G2eDH+4FbdzFxrqofLWCmrbmoSEXCAxFupiEqKiFKiVCApU7yFYgJ1xBXA16cOcwiQHFt+AJj7LvVxvatICtU5w7zH8FcfPP8vR/HudEbAYsfCcxzWyDhhaljIPv3qrtI6LyR7OlwdVDCOCW10ZR/ObxFlpz0TeiVtLgL3B8B9UNTbmaQSqNavwjyQ9ibXJN/VHIEW6pNKrbAjpCI1tCfMR9Ua0zpcdMKWXtjlFiEojUHrBKF7vHhx8LIumhwB7wgnWZH8YStoDY14GMzz5XVSrtd+8M51QN2wC0/wDEkzbgqmpdM7pPeGOY5oIadIPX3KRWq2seh1BVSvtPnyMKLK8bqEuz/m0qaQ49L48ff84wkiw6nPgiaySAPPh1819V8dDxj7c+eqgdPHkfH3zTHNGRPU6xqeaIDx43ubnHhHmFcTSidOfv7IgYNwDiQbeunTzRluZbjXERi3O3MqG+/tnKJoXAnIidTrqDzmdNPNG1pkx1HlczgdVLHG97GcYk6kDw8kbXWjd6xwB/JCGhLI4nWTx5hMjeBJxPOOkXve2gQTAgD2DhM3jb8zxMG9vsiMa0RbkY1zGnh0lTEEkg2zbUmwMYEm/FCXd2BbXSTiJ4wfyipyNdNAbDx6RzQMc0ZHqZEkaHw/krKbABgkiD053zp0lQ9pIE2EW4e5myxoxG8dPGL38/BB7V+g3aO9s9fZzALKge0a7tQevea7zXqLl8/wD6P9oml2kGOxWY6naI3mw8EzgQ1wjovf3Fc+/bpzfopyWUbillYb1BQlSShlBixCSolAcrJS95C56Gmly5X4mcG1WE4LT/AOJt9V0Jqrk/jh3cpu4PjwIM/QLPk53mt+K53Fd20DKs0a4jVaTZanEq8zaw3Wfei8OPfevpsTtJF93zKrVdtnEKhUrONwDHM28kLHxciFUFVquJQ0c6qw2mMot4DggbSjOPRFUeBlUam1AKhtHaUJFutx8wDl0SK21ALQ1O17KjX7XVypre19rC1dXaoyVpa3a3BUH7VUf+0E/TzWpxal7kb/aO0wBcrTbV25eAVVbsr3nvO8Bf1wtls/Y0DEfXzXXnxOHXm/TiWQIt4XB8EUa2
+8+wsotGY8DYe/4TTA0I94le54AmTxjX8+iiB1ymDr6xH4GUE8vD+ERj6ZkTacAm8ex1QRpEX9iE5rWkyedjfhg+8KCZvGOsAeylEFo1N9TEjpHhnnhQ1uk4n3CYXze29x4mcnnzQti82t90E8o/I/OueKyk2Ymw5n3ZHMieAF/PXUc+aERwM36Y/PnKBgaYMxpkY1sdM9EtkZHDS9+iZuEgSM3aJgnjFuWOiHAIAxxziCPQaIGUxEuLp+50+x4rA8XtY356+t5UFoFiQDbOB14ITMa/bnJ4JRs/h3aRQ2rZ6s2bWY4kT+3eh2ud0m3VfUm9ZfJR1iZ48yvp74c7RFfZKFYGd+kxxOLlom2l5ss9t8Nm5yAlCXKN5c20lDKFz0t1VAwlLc9V6lZV31lcFx1RJfVVN1dKdXVxFs1Vzfxu6dnPJzT/AOQ/K23zFpvimrNB3gfUKdT6rXPuOboscRew4ytjRpgDJPOP4Wup7XDRj6oK23mLXXgyvoNy/aG4QVNoYLW8lzdSrVzhU31HnLinxWOnO08DHvgk1dvHFc6NpdgSVLg85gDmfsE/rtW+TmL219p8Fqa+3E6pvym6yfRX9m2RsftH1XXnxPP3540vePFQNmJzK6I7KpGy3wus4cb5LWqodnckxmx3grf06FkhlHvLWMaRsWxCcLafIRbNRurzqS1EeKURw9cYuYx4qajfLnnl9VLWmD5/wUNU6Rce4XZwZGvO0fzomFpBtzgjFvDkhb796o3mToBYCY5C6GotGTJnjjrxlC5s4OeP09MhGw/0i+keUwRhYTvEbx8BoBm1rwqgXyABa8ybGdIP4QOHHyB1vroFh6e+ajFyD/nJ9yopgmxGMW/pzJI0xlYagMxy45GcGEDBr+L+akkiY6GOEXsiHS0xJOsjhiN3j0+qESSYBiBYE4EQsc4W4xi/E88yseOdjPDn9eSozeFp/wAm2nWcKBeSSJ689FkQAR5xbJt6qCeo6fz76qKMPNzacE349I9he3/pH2iX7A1hJPyqj6emLPaLcnrxCCAIxx58DPTBXpH6N7bD9ppWiGVNZm7XDpZvPCln01z7eumqkv2lVa9dayvtJWMb1ta22BVam3LT1K5SvmFXE1tnbYku2pap9QpZqFXDW2O1IDXWra4pglBsPmrR/FW1NFB5cQBa/VwC2LCuS/UWpFBrf7qg8g1x+sKdemufbn3fEbQCKbC6NXWHkL/Rabbe1KtRwl5HAMJaPT7qts7bFSynLh1XKcyOt7tbLaXO3Wy5xni4omVHggBzgOElWq9Cd0JtPZ++jOrux7OSLz4q1UowFc2WhACfWoS0qYjge2u03NdujRBsPxE8ABZ25sn+qZWp2WhLrcVqT6Zr1Tsl/wAymHK2aKR8OUYpBbV9NFV2ssk0qfeV8Msh2WjdEM2aldXH0UWzUrq66ktD573jwxxv1N0txn0gDSI80w/x788oBj09cErq4pYLE2JkXzBOpRkWnHnrwQU37pkTMaWufeEdSbWtbGJj0NlYlSGyeZjNuOPTzQuJiTrYcY58lJpyQ1o0xzsD6hC+lxvFjy08vRBjiIIuPp48EOOMyPDj1RlskyQPCB+ZshAnW+oPAwLcUBAaEcb6jA8cYUFuNB9ZMEn3ooYB+7MSYn7hSb5ERr9ZHFBLTpYifveCBbCwZuT5aTCyk2ZjJxgfxPJSCQ28ek9Ol9UCmO0gx7zysiLe93rnX3HkpB558LRe+n8KHWHPT/bPvCisDnTwgQYnl6Z4rrv0rr7u37p/qpPaLzcFrh/6m+q5Jp52BniD1/PVbP4V20Udu2eoCYFRrXG1xU7jgeQDjfoiz297qEkc1QqtV+oYMKpVCy3VCoECdVSHFVC3hYGqQmspkqAGtTG01apbMrDaARVEU1ovijsn5+z1IHepw8eu8PJdeNnOgR7F2eQ94cO69v0mfqpVjwvZdisjbsneHVdRV7L+VUqUiP2uI8Jt6Qk//D7yxjoS3Z7t6IqN
LvrbHZrjol7Ns3eKYNjs1KwVj5CPZ6dlbaxMZrnNv+HGVDJVbZvg2m0gwuuZTVptBMRrtl2MNAaE+pSWwZRS9opoKLadk3Y6KYGWVzZKNlRGy0rq46mo2andWXNUHzLXOnueQ8NEoEZxGPwf5RvdkmPLySxfl7ESuzisUXD0tqPLgjIzfM4vvXOuuiVPT/MH31TSy8SAdfT34KxACpfHG9p5343WPdAz4YtgzzQvJPK59Yv7wpMYmxETz+15uOKCL2mYP92sH3jCwu4CBrrkwB0QubJ05z6o2TgjGmCOc6Z6IoQ6Dm/LjOnuFjuBxPKOZ62UA6AAx7kBZrJI+u9y638ERAgmI4D7KS2+etzaLQZ054Q03A6T448fyjJAnjoLjrveg1UULZLrRbiBYDKINtOvlwEx9xcLAffG/wDnGFhcPHic8AAceNkGbskydZniZxwn/KirYS3S4zYgyOsH/JwoYdPt+c4wmVmk204Z0mx163RXv2xbWK1GnVGHsa8X/uaD90b3WXP/AKZbX83s9rSRNJ76ZiMTvNt0cPJb6sLqNqdd/BJ+S4rebD2Rvd51mi5JsAOZOFOzdtbD88bPSeypVgmxlg3ciRl2scAbrF6xZy1+ydmuOi2tDssrV1/iyts+8dppbOGMqFrjLqbjTcR8p9NrA9xmS2CMtze3YbLWa9rajDLHtDmzYwdCNCMKfJcUKXZnFWqewNGivhYSpq4UygBog2oQARp7KcXJbyVFcF8ZbEBXbUGHtv1b/BHktBSo95dp8V0ZY08HW6ELmaNO5WoaEsyo2OjlWAyxTtjpWVDqbLJzWI2U7J7KaiUqjTV1tNRRpq4GIhTKar7WxbBrFW2tqiqTWWWw2anZIZTWwossrUBRZdOc1ZSbdMeFB8r7QbwipxMkxAze3KMpbzE+/OfoVjL3NowM63PRdnE5tS8iw6BYXiQQNbiZBMW5xyQGIgfnB/nVEBESJ5nmcD8qoAuJMkwf599FPzdQBPEQOp/myyoQLHxH0vx9yi3oAA5YzPP3CBZAA96o3Oc60yPtcxxjkl1HX0PnH/15Kd0f3Wxf7cfqorHiPL1AjKkU+OfZudPVQ5xn2MHXn6LC8A45ePPh4IC3Ztre3hp5Z5ImtEwMcenLhyzfwQh2pvPDyFtOnBEyqbm/AngOvOYkyggtE88Hz/HHlKY1nIiM3xzn30SyyInrGLXPvMymbpIxbgPEWE3HIJAFPNotxxGvdj3wR2ifvknUHwyELvDzsbZ6zfiITCwa5i4sL210MaHzRXf/AKN7f/q16BNnsFRvDeY7dffiQ9vWF6rQ2KbleEfAm3Cj2ls7ps53ynZxUBaBf/cW+i+hX05YQDBIsRodCsdN8uQ+L9sLqj6bHOb/APEc2ruM3XGpTa0CoTS3hffMbzuoWib2dVG0MqbHSY0VR875m7Lt4nvhz3SKbRqGAEybq18X7e3Y9po7eKQLa7vlbSNA8Ddqtdxltx4rY/Cr3U3bRs9QyxhNTZ6liDReA4C3AEaa8ljl0N+NezmMY3bQ2RAp14GKRcO/iTuOh3gqX6Z/EdWpU2jYdoaG1KZL6W7MODTu1GtnNgHDiCuk7L+INmql2yh/zC5pdESCLBwvyODzXDbBSqu7T2djD8t9CruFrZf/AKdyN50SAaLSL/2jjclesU6iPeVWk8GSMEmOk2TwoaOUJWKQiqXamxipTLddOowuGbTgkHIseoXo7guS+Idj3am+MOz1C1KjWU6dlY2OmipUu6rWx0lQbKVkbGKy1llgYpqhosVoMQUmKy1qlZC1qqbS262IaqNcXUUtjFeptskNarbBZELpi6N4UsCJwQfJjnXsep+ywPEfjJ5f5WEWtjXppdZQgSbe9Qu7kyJvPvkOSJhdFvO/S/8AhHUjM34a+P2QNcIOb8NTpI19ERm7HLUGbeCljJFyAPCeNucLHETAv/uuJjgOH1QudbH58+HuyCHj3kcBHsFHTaNb8Oca9ENOk5+Qd0cO
PU6rHMLLWI8DecSMacMouGOAEe4Ik+eL4vBWUxAkAZzcjpf1m5lLcZN5nyTGzpIi034X5+iIkj6c7Rp5XjRNdcCDgZwWmRJMZ6j0SiyI8xPXSMiR5i6IMByeMD36nlZUSb+8nUgadOaY1zRczGtpjAAbiehhA+IsRnMkWzk9JvfmUYp4sd7UYjh4+fQqAHNkzpPPnknXnqmtf4kzbSckwOuL+Epe7gCeWpAzgTHhbJGE7dsABbjkE8o4crzlIFPeW94G4Ie0zEOEEHrMGRwhfTfZO2itRp1m/tqMa8dHNB+6+ZiJtm97g3+5m2l5vZe4fpNt/wAzs9jD+6i91Ig6Qd5vP9rh5LPTfLbfEvZLa9N9Exu1gN0nDNoZek7kD+09Vw2zfEW0bI0HaqX+tSBpPpkH/ovMte2f6mkOE8COK9Tq0g9pa4SCqlTYauG13FvCo1lUj/i54Jlc8bed0ewK7NsG0UX06Oy74qCo+xcHTv02NHefki1odmy7bYdhZ811alSIe9gY+u+z3M4MZ/SOZur7OzGA7z5e+3eed4yNQdPBXqaAqTYEJgQhEijClAplRRKj2nsoewjy66JXafbVGgJqPAOjRdx6AXXP7R23tNe1GmKTD/XVu6OTB91UWqVLuq1sdJRsmzlrAC4uOrjAJJyYFgr+y00tVDaakU1Z3UJaomEsanAIWBNAQiCFReLrYPwqbRdCiYFZaLJQCeMIQtgUvUtQ1Sg+TCwnPgPrI+6Dd9/XoiFXhnN75+qkWM5OfPrrfIXZyE1jYbJLZJxczoY4GwzbgpNEhxaZPDQnhHJZUbLYAuI0EmwNyMnOmmqmuSd03uOehvy1+qqJAkG0RwsJH9MffSEsnPp46og22c2N40nXT0WGY4c+v5/PBBhB3d0GYM6mOHQ/lT83epCcAmfEC3oP/wBKKDolsCHanTd71ibT9lsdn7Dqhu9UNOkyo2W/PqMpGTgBj3BxFhcAjqpbiyNXvYESdON9B+Posa8ajjcZ5zx9FlalumJY/iabw8eGqOkJuCD9bTB5xHgkulEx28RgdbX0k9PDqUQfEEiRrobi1/eqyJA9yPemvDVMZBFjfqZ8NfEX6KsgqXuLW0m2oiL+PO/BTRa7DRJ4cxMx01iyZVA8ZvH0tr0i0zdS0d2J8MSdIOoxbSOcoBLdTJ8RHCTb8EJku0IFwD/TN+JsDHpqpqCZvOO9xtGuk6n6KA0ECZEfYz1Bv6iCgEy0EWB1mx1MAGwtoZEY1XpH6J7VFTaaNoLWVBmbEtPhDm2XnJdHIAeUEXPC+uniV0n6cbZ8rtGiZAFTepHAnfbIFrTvBuOGJIUrXPt74xGEoORyuddBKZQysLlASKVz/b/xZsuyD/WqtDokMmXmODRcryv4n/VitVmnsrTTbcb743o5NFh4z0TDXsfaPbtKjYkuf/YwbzvHh4rQ7V2rtNb9pFFn+3vVPPDfCVzvwYHP2VlR7i57xLnHJMrpaNOyuKHYuy2M74EvOXuO889SVsGU8I2N7qNowgthtlZ2cJMKxQFlkgyEJCYhKKWAmNCEBMCAamFVpC6uVhZVqARBpuiWjRQgpdYoglVlUr5SmLEg2gdZ04FQ2prwBv799EdSmC4WIJg+Y4Y55SSPK8n8Lq5DNRufXA0tEZ8fFWdn2So6kXtpPcwOMODHEccgWHOcwqez7buO3gxrnx3d8bzWcw02c7m6RyUbZ2vtFX/qV6r+Re6B0bMAcgs3qtfEdwAYImYm0xZ0HW9ljqgjOtgIPlw6joqG8VaovkHkPtE/VWUvI2bWWEOae+P2kidyP6gMb3A6Zyq9Rxc7ecS5zjcuJJJPEnJQSm7O/ddvEbwH9Phx0WQot8E3Zal4Ov1QeETfM20TaTYLJi5J8MfZIL2768Z0AmY8zrHJMbTmbxHTOdf6o6RqSsB4aRnjNic5OmOKJrg3nBi82MdeWMLowwUotm8THDSPspAnJOI5nlPDF7zoETBv40AOuJDZJ4yR00QN
qwIzm3ACziLemDwRDSNWnxmDJAsScWJ4jyQsp8RcxEDS4mR4+uTZSakwTb1HeOeOtzk8lJccC83g88XzedCMGUVlWnawiQS0GZyb2ze2kyAU3Zdo+U+nUDZ+W9tQEWncdvwLXPdI43OEjJvbNxBuJ0xEkDSxsiLSe7Ek2z/ugCYvcZKD6WZUBDXAyCAQRqCJH1TnOXH/AAN2wx3ZlGo+Wik35TiRrR7sgN0gBcj8T/q3mnsLJ0+bUBiZjussT1MdFzvt1emds9uUdmpmpXqBjRxyeQAuTyC8k+Lf1WrVZp7GDSZrUcAah/4jDR1k9FwfaPaNbaHmpXqOqO4uOOQAsB0VYNT4sXr9Mqvc9xe9xc4mS5xJJPMnKGEZahIWmXt3wD/2FHp9yulpLmf07/7Cl4/+xXT0lmus9L2gRNyFjkQFwsquKxRwq6fRwoQxQpKgooQmNSwmsQBtDrJVBTtBWUET8iKIoXKThFAEqsmBIrm6qP/Z", # noqa
date=datetime.datetime.now())
session.add(catalogItem7)
session.commit()
catalogItem8 = CatalogItem(user_id=User1.id,
category_name=category1.name,
name="Gloves",
description="Ladies' evening gloves or opera gloves are a type of formal glove that reaches beyond the elbow. ", # noqa
picture="data:image/jpeg;base64,/9j/4AAQSkZJRgABAQAAAQABAAD/2wCEAAkGBxISEhUSEhIVFRUVFRcVFxcXFRUVFxUVFxgXFxUXFxUYHSggGBolHRgVITEhJSkrLi4uFx8zODMtNygtLisBCgoKDg0OFxAQFy0dHR0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLSstLS0tLS0tLS0tLS0tLf/AABEIAOEA4QMBIgACEQEDEQH/xAAcAAACAgMBAQAAAAAAAAAAAAAAAQIEAwUHBgj/xABCEAACAQICBwQHBgMGBwAAAAABAgADEQQhBQYSMUFRYRMicfAHMoGRobHBFEJSYoLRIzNyQ1OSk+HxFRZUY6LS4v/EABgBAQEBAQEAAAAAAAAAAAAAAAABAgME/8QAHhEBAQEBAQADAQEBAAAAAAAAAAERAiExQVESA2H/2gAMAwEAAhEDEQA/AOoCEUlI0BGICMShiEBCEOEI4BHFHAI4o4BCEcBRwMIBEY4QFCOEBQjhAUDCEBQjhaAjFJRQFaKPZ85whWGEUCbSIleSEwmoJxH0nabxYx7IK1WnTVaZpLTqPTXZIF27pG022HF/y24Smu57UYact1B15eufs+IYdsBdW3dsBvy4OBnlvFzlYzoNLGjjMf37la/lsbxgyqa4tPB+kLXirg2oJh9naZmd9tdpWRLDY5i5beMxszTLo95KaDQmnlxNGnXTJagBte5U7mUnmGBHsm7pveSdatjII5ESQmkOEIQogY4QFCEBAIRxQCEIQCEIQhQgIGFHnz8IQhAXnfCHnfCBRxhbYbZNmsbHkbZH3z5p7TE4euxNSrTxCt/EbabbLbyWN++Dvubgg8bztHpM1lxGCp0jh1Qmo5VmcFgtl2gAARmbHP8ALOU6V1kXG2GKpKlUZLVpg5dCpOa/lJ8CDHmes769bqx6Q2qWpYiy1dysMkqdLfdfpuPC26PW2kmNp7I7tandqZPE8UPRrAdCAeE56mE2j2bAX4HgR+IHlNlgNLPh6gSsxemcg+9l+rAct/LlM+z4WyVUw9Huq4JDZMCCVKkZixGYInVtSdPfaqNnP8allU3DaX7tQDkdx5EHgRPD6f0cEtiqPeo1M6mzYhWbdUH5W49c+M1mCx1TDVkr0jZl4Hcyn1kbmp/Y7wJnqb6683ZjtGJxBVTOP+kTEl8TTJ3Cns/+TX+azp2j9M0cZQ7Skc9zofWpt+Fh8juM5dr9R7wflcexrfUCTjzxjr9er9E+lxapg3P/AHaV+IP8xR4GzfqblOp4StwnzZorSLUalKuubUmV7fit6y+0XX2z6EweIVlV0O0rAMp/EpF1PtFjJuVuzW9UyQMr0XmYGdZXNkEciDHKGI4o4CjhCAoRwgEUcICvCEIAIQMIBFHCAsuUJGEg0Ws2haeMoNRqjI7iPWVhmrKeYP1G4mcA1k0DVwtTsaw35pUAOzUXmOu668PCxP0owmi0/oiliKbUqyBlOfIg52ZW+62e/wCl5ue+MXx870MQUsr3FvVffs+PNfPh0XU7QtHE4Z6lZadVy4VRYmmuzZyWcjInIWGQBG0bHLzGsegXwlXsqnept/LqWsHXl0ccR7dxlfQmn8Vo/bGHZWp1CC1Oou2lxua1wQ27MEHIXmbz9NS+bHo/SFU+y7FLCp2NPE0WapS7rIFYgB6a/wBmW74sLL3bgXuZ5XA3ZLNwyueXCZnxjYqo9asSajHvcl5BRwUf73NyUwKzOfrU/wCIYerVoVBUouUccRxHEMNzKeRmfT+mFxNMlhsVAMxwJGd1PjwOfjvlWpUvMDpeYvldLNihhXytOxei7TQq4f7Ox79DIdaRPcP6Tdell5zkr4e2a7/nNrqfpb7Pi6NQnZXa7Op/Q/da/QGzfpkvrM88r6Fw1SXVaalGl2jUl46SxeBkgZhRpkUzoyyCOREcqHAQhCiEIQCEDCARRwgIxw8+fhFAIGEICvCE
IGvx2KSlTerUbZRFZ2bkqi5OXSc+xHpUwm0R2OIC7torT3cyocke6dB0jgkrU3pVBdKilGHNWFiJyrTnopqpdsJWDj+7q5NbkKgyPtA8ZYzc+24GsmBxiFNpKinejjqSCVOYI5754rWPVnsQatBu0ogFmViC9MbyQfvqPeLcd80GldXMRQzr0KlIA+tshkB/rW6j3zXu1TZsarFDw22IPsvaa2/bM5+42VAAZjcZKu8qYNyo8JkapeZrrKLRGSvFaYsalQvK+Io38fmOszkZyBmPhryu16i6Y+04Sm7G9RR2VTntpYXPVhst+qelRpyP0VaR2MRUoXsKtPbA5vTI3dSjOT0TpOsI0xfKjYUnllWmupPLVN505rNi2DJXmFWkwZtlkjkLyV5QR3kbxwHCKEBxQEBAcIQgEUcUBbRhJWhIK0RWShKipXwwM85pvU3CYhWD0UVm/tEVUqKRnfaAzz4G4nriJBkmp0zeXGdP+jirSUvhXNYAZ03t2n6GUBX8LA+JynhnBBKsCrKbFWBVgeRU5gz6Yq0cp5fWzVCjjV73cqj1aqjvDLcw++u7I7s7EXmslTbPlxJTJAZy/p3QWIwT7NdO6TZaozpvyseDflOfiM5QQ5jxmManSFQZ25RVFzA88/lM1Ib25Z/tEqZXO85fv7z8hMWOkrHgcS1GqldPWpuGAzANj6p6EXB8Z3XQelqeJopWp+q/A71YZMrdQQR7juInDHW2U32o2sP2StsOf4FUgPfdTfILU8NwbpY/dnLqNu0oZYpvKdMzKhmZUsbBHmVWlJHmdHnaVixZvJAzCGkw00jJeMGQvGDAkIRXheBKEV4ShiOKAgOKEDAjCT2xCQVxHEIxKAQtHCERImKpSmeIiXUxqsbg1dSjqGRsirAMpB4EHIicy121Ep0aT4rC7SimNupSzcbO0NpqZOa2BJINxYcJ1yqkqsls/Gbl1izPh85JUDABTe54TPsXPQZfvPfa56khWqYzChVCptVKIU98gku6EGynZsdm2ZU8TPAU6tzlmDmD45zNjfN1F7FrD3ysy75awg73vPxtPQ6j6tDF4g7duxo96sdrZNmVyiqbHeyi/S/Sc7zrrOsez9GeHxRwu1iCeyJX7PtAlyhAN78aZ2hs8cm4bM9VcXyNxzni/Sfrh2AOFw5C1WXvMALUaZuQoFrB2HDOwIPKazULXddgYfF1LOLCnUe9nB+677gQbd5rXuBvFzx6n3Fl/XTFaZUeVA0yK0kpYuq8yq0oB5nR50nTNi2DJAzArSYab1Ga8d5iBkryonHeQvHeBOF5G8d5Q4CK8IBlCLzwhIMYjkYxCJQijEoIQhAi6zC6yzMZEsqWNe65+/6WvOGa0aD+w4spa1FyXonhsHet+aHI9Nk8Z3qovnwz/aarTOiKOJTs69MOu8XuCrW3qwzVrcQefCb+WPiuHYKizOqKNp6jBFHibAnkL5zrWk8Smh9GhFN6lrAbR7+IqAbRA/CPWt+FeMp6nak/ZsbVrsS1OlcYa97ntBYklSLlQXp2IN9sNwnite9NHG41wrE0cPemg7wBcfzH2Taxv3d25Os59fjpK8hiC7lnqMWdiWZmNyzE94k8zcxbAYWltqeR9vwX/wCh7pEpsqJix0j23o21nZSuCxBJByoMeBt/Kvy37PK2zxFumU2nzt2rBldTZkIdSPuspupHgQDOp6la7fa27GsqpWtdSuS1QM2sD6rDM2zyueE5dT7Wfj3YaZEaVVf6TKDJKLatMitKatMytOkqWLQaTDSsrTIrTWsswMleYg0d5pGW8YMx3jvAneEjeF5RK5ihCBCMRRiQOOEIBHFHKhQaOBEDA6/OV6q/Pz8PlLpEw1EmpWbGu0oz/Z6i0h/EKuF/rK9298reeU+fq+DfDVHw72DU2KMRuNrWI6EEEHrPot6eU5t6SdUnqk4vDqWewFVFF2YAWDqBmzAAAjeQBbcb3PtP6xzwt3bAZn4C97+60WJXcDwFz+0DUFwR
mDb3XzmfEJm3Uge/L6zOOkqhUokAeA+QjwOJahUp1qZ79Nww5G3A9CLg9CZYxS7THpK1RMvb9JzsdHYNVdbKON2lVTTqJmabEG63zZWHrDhwIJGWc9IrfCfPeCxdSi4q0XKOu5h8QQciDyOU61qframMXYYBK6jvJwcDe1PjbmN4vxGc43nPYr1qmZFeVVfz59syholRaV5kVpUDTIrzcqWLYaSBlZXkw83rKwDHeYA0mGmkZbx3mINDalGXbhMW1HAzCMRCEIlCEBAccQjgBgIQgKRcScDKKzLKtRN9vJ4S84mB1mpWLHGfSXq0aFQ4qkP4NVruB/Z1TvPRXOf9RI4i/nhmwPUH4TveKw6urI6hkYWZWF1YEZgqd4/0nHtc9W2wNQNTBOGc90m7Gk2/s2PEcVY7xlvFzbDm/VaJlzbxPzmGuvdHUnz8JYvmepv75GtTyX9R+MxZ46y+teRvmNWZSGVirKbqykgqRxBG4y1WSwHU/KYGE5WNx03UzXQYn+BiLJWAye4C1bb7D7r2z2dxsSLbp7NXnzvUpzompeu4IpYTEDvd2klUW2SALIKlzcNkBtC9yRunPrn7iukh5MPKavJ7czKLgqSYeUu0khVnSVle25IPKPbSQrTcrK92kNuaLEawUKdVKJqA1XZUWmpDPdjvIB7oG+5tuyucjuRTNs5rVnOsvaRSOx1hGr/FXxHIiMSuaUcjHAlCKOAXjMQjgEIo4EWmJxMxkWEqVUdZRx2DSojU6ihkfJlO4i493iONptGWV3WblYscN1p0G+BrbBu1J86VTmBvVvzru6ixG+w1yNcAdT8c527TeiaeJpPRqi6sL3G9WF7Op4EXv13biZxfTGiquDrGjV8UcerUTgy9eBHA+wlYvPX1VWpT+H7yo1M3l4G8hWXiPZ4zFjtK11ReUw1Els087TC63+k52NPW6r69mivZYss6KO5UA2nFtyN+Lod/A9Oh4PHLVppVQ911DrfI7LAEX5GcGqLfw8+f95udC604jCjZB7SmFIWmxyQ7wQwF7Dle1id059cfia7J24t/uZA4i05yfSJkAKJLWFyGCrcqC1gQTk20PAA3mlOsOLrXDVmA4hAE39VF/jHPHVL1HRdNa30cPdf5lQfcU7uW225fDM9J4LT+tOLrggv2aX9SndR+pvWb326TXrTtMeIFwR0nbniRLrNqtiezxeHbgK9K/gXUE+4mfSRp2nyxRYjMbxuPUbp9P4HE9pSp1Pxorc/WUHfxl6X/ADZ9nzeEjfr8RCR1WYxFGJXmOOKMQHCEIAI4QgEcUYgEUcIGNhMLCWDIMssRTqJ5+c02sWg6WMomlVFs7o49am/4l6cCNxGU37LMFRZuVixwPSmjauFqmjWFiM1YX2ai3sGXp03g5TAc52nWfQNPGUjTfukG6OBc03ta/Vd1xxHK2XF8dgqmHqtRrLsuhseII4Mp4qeB+oIixrnpXqLllx8mYWTlvOQ+sskzE9yb9LDpzmLHRUdR9B9TK9dcvHyZaqCU8S3x+UxnrVvivSW5+Pu8/GbTRvqnxlSillYnov7yxh2soE25yerbNMTRbUTmHRRGRI6z6B9H+K7TR9A3vsp2fhsHZ+k+f6vreInYfQ1jdrC1aPGnV2h/S4B+YaOj/O+vebB5/AQk7xzD0auCMSMkJp4zEcjJCA4RRwHCEIBHeIQEBxQjMAMiRGIGUYmXz585zE4lhhIMJYmKjJPO62atU8agDdyqtxTqWvsk/db8SHiPaM56grMJTP5eP+k3Kxj550jhKuHqtRrpsOu8bwRwZT95TwP1uBWLzu2sWr9DGU9isuYvsOMnpn8p5ZZg5G043rRqticEb1Bt0ibLVUHZPIMPuN0OR4EyWN89frT1CDv3Sm52nvwHyH7mZibAknw8Zhp5ZWzPkTGNVkrblT2n2yYMwPlvzY/KSUwsrLtQvIRgy4axYgZg+ye59EGP7PGNTO6rTI/Uh2l+BaeIqi4PTOWtBaROHr0q
676bhvEbmHuJ98VJc6fSWyeZ+H7Rzyn/ADzg/wC+T/EITnsenK9sDJCQjE08qccjHeBK8IoCBKAihKJXiEISBwiBhKHAxQgEVpKKBjImMrM5kSvnz4y6mKz05hq4cMCGAIYWIIBBW24g5Hj75d2YtjKXUxzTTfoqo1H28NVNC+ZRl7SmCd+z3gyjoSfZulCj6JGA72NFzyoE/E1Z1nY8+esRSNMrlC+iIbzjGO/dQA8P7Qxt6J0/6p/8pf8A2nVSnn4yJp5y6mOXL6KafHE1T4Ig+d5aoei/CD1qld+hdF6/dQH4zo/Z+fnF2Ui48bhtQMAm7DhurvUf5tl7JewGqmDoC1PDUhxuVDt/ie5+M9J2fnz7Y+z8+7z7ZFxqf+DYf+4pf5SftCbfs4QZExGIQmWjkoQgOEIQCMwhKBpKKEAjihCCAhCA4oQgKI7/ADzEcIC5eeUIQlC8/ORP0hCA5EfT9oQgMRQhCD9v2gPpFCFEIQhX/9k=", # noqa
date=datetime.datetime.now())
session.add(catalogItem8)
session.commit()
# Seed item: "Shoes" under category1 (owned by User1; exact category name
# is defined earlier in the file — presumably a clothing category, confirm there).
catalogItem9 = CatalogItem(user_id=User1.id,
category_name=category1.name,
name="Shoes",
description="Step out in style in a pair of handcrafted leather shoes.", # noqa
# Remote thumbnail URL stored as-is; the app hot-links it (no local copy).
picture="https://encrypted-tbn0.gstatic.com/images?q=tbn:ANd9GcSgsdNYm36qeuf2AOCkT8GETykunitDOwyIALGKzOA00a3KGKi-QQ", # noqa
date=datetime.datetime.now())
session.add(catalogItem9)
# Commit per item so each seeded row is persisted independently.
session.commit()
# For category2
catalogItem10 = CatalogItem(user_id=User1.id,
category_name=category2.name,
name="Tshirt",
description="Look great and stay comfortable with short sleeves and a round neckline", # noqa
picture="data:image/jpeg;base64,/9j/4AAQSkZJRgABAQAAAQABAAD/2wCEAAkGBxAQEBUSEBAPDxAVDw8QEA8QFQ8PDxUPFRUWFhUSFRUYHSggGBolGxUVITEhJSkrLi4uFx8zODMtNygtLisBCgoKDQ0NDg8NDisZFRkrKysrKysrKysrKysrKysrKysrKysrKysrKysrKysrKysrKysrKysrKysrKysrKysrK//AABEIAOYA2wMBIgACEQEDEQH/xAAcAAEAAgMBAQEAAAAAAAAAAAAAAQIDBwgFBAb/xABNEAACAgEBAwkCCQUMCwEAAAAAAQIDEQQFEiEGBxMxQVFhcYGRwQgUIjJSYpKhsSNygqLCJCVCRGN0o7LR0uHwFzQ1RVNUZIOTlNMV/8QAFAEBAAAAAAAAAAAAAAAAAAAAAP/EABQRAQAAAAAAAAAAAAAAAAAAAAD/2gAMAwEAAhEDEQA/AN4gAAAAAAAAiUklltJJZbfBJd5+K5Q86Wy9JmMbnq7Vw6PS4tWe52ZUF5Zz4Aftjx+UPKjRaCO9qr4VtrMal8q6f5ta4vz6u9mkOU3O/tDU5jp93Q1Ph+T/ACmoa8bZLh+jFNd5r226U5OU5SnOTzKc25zk++Unxb8wNj7V569oLVuVFdEdMsKGnti5SceHGc4yT3uvq4LueMv3dBz91NfujQXQfa6bIWp+klHBpe+rfWV85Lq70fHkDqTk7zj06+ErNNodpThCW5OShpmlPCe7wty+DXtPSlysx/u/anpQn+2c58lOXmq2bTKmmrS2wnd076eN0pKzdUeDhZHhhI/Sf6bde+L0mz2+35OoX3dIBsnlHzqV6KLc9mbUXFJTtqhTQ5POI9JvPjwfDBqvlPzw7T1eY0SjoKvo0Nyuf51rWfsqJ5vLDnG1e06Fp76tNXWro3J0q1TzFTSi3KbTj8vPUupH4+uDk8L29i8wNx8hOeS2uMa9qZvhlpaquKV0V2b8FwmvFYfhJm6tl7So1VSt09sLqpdU62pLPan3NdqfFHHdmFiK7Os9PYG3tVobOl0t06ZvG9u4cJpdk4PhJea4dmAOvAal5K89NNmIbRqdE+C+MUqU6X4yhxnD03vQ2hs7aNGprVmntrvrfVOqUZx8srt8APqAAAAAAAAAAAAAADwOWHKzT7Mp6S5uU5ZVVEcb9kl+EVwzJ9Xi2kw9y22MIuU5RjFJuUpNRikuttvqRq/lbzxUU5r2fBaqzq+MTytMn9VLErPTC48GzWPLDlxrNotq6e7TnMdPXmNSx1N/Tfi/TB+TlYB73KLlXrde38a1E7Y5yqc7lC7sVxxF473l+J4UrCm8VbAybxKkjFklAZG+4rJxl85ev+JBVgQ9PHsk17GFpfr/AHf4k5AEOmC65OXhwX4cSzt7IrdXgUcQkBZIlzSKYK4AzK0+7Ze1rtPPpKLbKLPp1SlBtdzx1rwfA80Jgbb5N882qqxHWVw1cOCdkMU6jzePkS8sR8zavJvlxs/aGFRela1/q9v5O70i/necW0co7xKsf4P1A7QBzlyM51tbo3GGolLWafgnGx5vjH6lj4vylnzXWdA7I2nTq6IX0TU6rI70ZdXg012NPKa7GgPsAAAAAAABg1+rhRVO2x7tddc7JvujFNv7kcscreUFuv1M9Ra8OTxCHWoVLO7WvLPq232m9OefaSo2VOOcSutqoXe1npJL7Nb9pzlOWW/MDFkwsvIq2BBBO8QBJJAAkZIyQBYEZGQDIBGQJIGSALAZAEkSJRWbAsng2/zB8o3C+zQzl8i2LupT7LoJb8V+dBZ/7b7zTx7XJHafxTW6fUZwq9RXKT/k84s/Ucl6gddAAAAAAAA0n8IbaX5TSaZPqhbqJr85qFb/AFbDTrl8rzSfuZ+255Nf022L0uKqhTp4/ow35frWSXofhr1wyutcfTtQETluyz2Pg/MvKPcUliS8Gv8ALKU2Y4PrX+cgWcSplkUYBAglACCxDAIB
AAQSVAAACUSVDYE5Euz2kIpKfHCAyMvX1Y7+HoY2yYvs/wA5A665F7S+NbP013XKenr3+38rFbti+0pHtGrPg+7Y6XQ3advMqNTKUV3U3fKX66tNpgAAADYPG5Za56fZ+qtXzoaW5x/P3Go/e0Byzyg13xjVX35yrdRdan9WU24/c0fBkmaE1wAxxjh47Otf2GLUQ7V1r8DOn2ewx2zWOIFanlLyf4l2imknwfmZwMZKRfdJwBjwRgyNEAUSJwSSBRorgy4KtAUwQXwRgCCmSzKMCyeDHWu3s6/P/AWPgRFt8OztfgBkz29i6vFkw7yuM9XV2GRx7ANgcxO1/i+1I1yeIamudL7ukiukrb+zKP6Z0ocbaDVz09td1fCyqyu2H58JKS9Mo7A2ZrYaimu+t5rtqrtg/qzipL8QPpAAA/Dc9Gr6PZFkV1220VL7am/ugz9yePyp5N6faVHQalT3FNWQlXJwnGxJxUl2PhJ8GmuPUByXJ8e78CcG2Nv8yGohmWh1ML1/wtQuis9JxTjJ+aia521yc1mieNXprtP9eSzU33KyOYN+oHktHz3cPJ/cz6pJ+ZgtWU17PMDFon8p+R9zXBH0cn+T9uoo1eqjwr0tVcp/WlZZGKj6R35forvMElhICsesq2Wr7X4FF1gS2VyWkVQABgAGABUMCQENGFGZMxT62BS5cPUmK4JLt6ybq5bm9iW6pKLlh7qk02k31ZaT4eD7hUBniiyiVTJ3vQC8jobmI2079muiT+XprpVrv6Gfy636Nzj+gad5Lcgto7RxKihwpf8AGL81UY74trM/0UzevNvyBWyI2t6iWotuVSsaj0dUVXvtKMctv574t+iA/agAAAABWcFJNSSkmsNNJpruaLAD8Tyg5rNlavLVL0lj49JpWquPXxrw4P7OfE/A7U5iNRl/FtdTNdkb651v1lByz7Eb0AGtNgchZbO2DrNNdKuy+6nWWWyrzKCfROMIxbim0lFPiuts0FqUdfbUr3qLY/Sptj7YtHIN/F+wCsViLMUTPbwivaYKwJmVRMyEBJDJIABAAVYYZLAx9pW3r9ETYLOx+AGxNg7AldyX1tkKp22vaFEq4wi5zar6KDlFLL4K232M/LbL5EbU1DxVs/VP6063TD7VmF95vjmIX7zQ/nGp/rs2GBz9sPmR19mHqrqNJHhmMc6i7xWFiK895mzOTPNfszQtS6J6q5cVdqt2xp98YYUI+eM+J+1AAAAAAAAAAAAAABWyOU13pr2nHUVnHkjsc48isPyz9wHzaqXErX1FbXlmVLEQMUmEQyUBJAYAEEsgCGSQwgKWrgVXGPkzJJcGYqe1eAHSfMNLOx4+Gp1K/Wz7zYprbmAf70Pw1l/4QfvNkgAAAAAAAAAAAAAAAADjvfzFy7+r14nYcnhZ7lk43tliuC7XGLfsA+eKyzNaNPHt9Sk2BRliESwIAAAgACGAwAMVXzseaMrMdnCSfkwOi/g/v96p+GtuX6lT95ss1h8Hz/Zt67to2peXQac2eAAAAAAAAAAAAAAAABh1rxVN/wAnP8Gcc2PefhhY8jsDbE93T3S7qLX7IM5Arj+CAvLhHzPnZmufEwsAgwQwJQYIYAEEgQwGQwJZS5cE/QsWispoDf3weJ52ff8Az+T/AKChe42mam+DpL9w6ldq1ucedVf9htkAAAAAAAAAAAAAAAADzeUssaLUvu0mof8ARyOSa1x8jq3lxbubM1ku7Q6p+vRywcqL+E/HAGCZQtYUAkglhACCSABBJAAglkAGXq6yjJh1gbo+Dlfh62ruelsXm+li/wCrE3UaE5gL93aOoh2T0XSesLIf/Rm+wAAAAAAAAAAAAAAAAPzPOXZu7I1fjp3D7bUfecwWLCx6vzZ0pzuW7uyL/rS08f6aD9xzVqpAfLJ5BCDAEkIkAyAQwAAAEBkACYviVyMgbM5kLN3a8Pr6PUQ9jhL9k6JOZ+aG3d2vpO5yvi/J6e337p0wAAAAAAAAAAAA
AAAABr/nvu3dmJfT1VMPZGc/2DnXUPib85+p/uLTrv1e97KrF+0aBs62BjIBIBkEkAQCcACpJBIEMglkAQwGAP1/Nhdu7U0j/wCpjH7ScfedTHJPIu/c1+kl3a7Sex3RT+5nWwAAAAAAAAAAAAAAAAGofhAanC0leet6mxry6OK/rM0lYjcvPloNTbqaXXRdbTHTYU6652RVjnLei3FPDwodZqizZt666L151WL3Aea0Qj6p6Sxdddi84yXuMbpl9GX2X/YBgbBk6J9z9g6J/RfsAxkMy9C/ov2MstLN/wACb8ot+4DAD6Y7PufVTc/Kub/BGWOxtU+rS6p+VFz/AGQPhIPTWwdY+rR6x+Wnv/ullyb17/iGu/8AW1H9wDyJBHqy5O65deh1q89PqF+yYv8A8PWf8nq//Bf/AHQJ2NbuXVTfVG+mf2Zxl7jsQ5Bq2DrZLEdFrHJrEUtPf87s/gnXserj19oEgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAP/Z", # noqa
date=datetime.datetime.now())
session.add(catalogItem10)
session.commit()
# Seed item: "Jeans" under category2 (owned by User1).
catalogItem11 = CatalogItem(user_id=User1.id,
category_name=category2.name,
name="Jeans",
description="A casual pants for daily use", # noqa
# NOTE(review): this is a Google Images *redirect* URL (www.google.com/url?sa=i...),
# not a direct image link — it will likely not render as an <img> source.
# Confirm and replace with the direct image URL it points to.
picture="https://www.google.com/url?sa=i&rct=j&q=&esrc=s&source=images&cd=&cad=rja&uact=8&ved=2ahUKEwjcqe_4sMPeAhVCKBoKHT1UCRoQjRx6BAgBEAU&url=https%3A%2F%2Fwww.ebay.com.au%2Fitm%2FBoys-Ripped-Denim-Jeans-Kids-Pants-Trousers-Bottoms-Children-Fashion-FREE-BELT-%2F401415995742&psig=AOvVaw2z7SCtjUvZGQFykEWtoNO3&ust=1541717910057094", # noqa
date=datetime.datetime.now())
session.add(catalogItem11)
# Commit per item so each seeded row is persisted independently.
session.commit()
catalogItem12 = CatalogItem(user_id=User1.id,
category_name=category2.name,
name="Shorts",
description="Worn in warm weather or in an environment where comfort and air flow are more important than the protection of the legs", # noqa
picture="data:image/jpeg;base64,/9j/4AAQSkZJRgABAQAAAQABAAD/2wCEAAkGBxMTEhUSEhIWFhUXGBoXFxYYFxcVGBoVGBUYFhgXFhcaHSggGB8lGxcXITEhJSkrLi4uFx8zODMtNygtLisBCgoKDg0OGhAQGi0lHx8rLS0tKysuLS0rLS0tLS0tLS0tLzYtLS0rLS0tLS0tLS0vLS4tLSstLS0tLS0rLy0tLf/AABEIAOEA4QMBIgACEQEDEQH/xAAbAAEAAgMBAQAAAAAAAAAAAAAABAUCAwYHAf/EAEcQAAEDAgMEBwQFCwMCBwAAAAEAAhEDIQQSMQVBUYEGEyJhcZGhMlLB8EKCkrHRBxQjJDNicqKy4fFTY8IVdBY0Q3OTw/L/xAAaAQEAAwEBAQAAAAAAAAAAAAAAAQMEBQIG/8QAKxEAAgEDAwIFBAMBAAAAAAAAAAECAwQREiExMkEFIlFhkRMzQrEUofBx/9oADAMBAAIRAxEAPwD3FERAEREAREQBERAEREBUdIGwGP4Eg8x/ZaGPkWVttDD9ZTcziLeIuPVc/gsRZASTVeNxWP548ag8wtraoWXWhSSa2487wsvzsHuX0kcAsCO5AZir3eS3seFDnu+fFSablAJLXd6yzLUCswVIM5XwlY5klAZKBtHZ1GqTnpMd3xB+0LqaVpN4A1NlDSa3JTa3R570h6M06NOpiqbMn6ZjYDnOGTKWk9q/tuA+qtOANgu76b4cHZ9Zo+iGu+y9rvguB2ZoFyr2CTWDqWk3Km8+pe02AhQ8cpdGYUTFlYS9clLlRboReslh7EiIvoT54IiIAiIgCIiAIiICv6QY3qcNWre5Te4eIaY9YXn+ydstqU2VWmzwOThZzT3gyFa/lj2j1eA6sG9V4b9VvbPqGjmvEujm2qlCpkaM7KjmtdTmJcSGhzTudcDv0O6KpTxLBfClmGT3nAtz73BW1PDRvKhbMohjQJVm0q4pMeqC+dTwW8IQgIzqCx6uFILTxXwtKA0yVsBKyLV9aEAaFmTCxIWdN0oDCpVGizwLJJdwsPitb7mBvVhTYGgAblBBWdLGTgsSP9mofJhPwXmuyDYL1baVHPRqM95jm+bSF5HsJ/Yb4LnX64Z0bF+WSOjYbKHi1Lpmyi4hc1mxFZkRbeSKMnvJ62iIvpD58IiIAiIgCIiAIi116oY1z3GGtBJPAASSgPJfyw4nrMQyiLikyT/E+/8ASG+a4voNs1rto0c+XKwOqdqAJaIbrvDnNcPDuV3tesa9apWIM1HkgamNGjkIHJbmbNbRph5jM49o2Ikt7I+zn9e5YYSc5uXZHRmlCnGHdnq+GbIkQR3fipTW9y8cwNMNdLeyeLSWnzC6TBbbxLCIrFw914DxzJ7XqvSvYZ3RW7OXZnoIYvsLnsJ0oP8A6tLmw/8AB2n2lcYba1F9hUAJ+i7sGeAza8lohWhPhmeVKceUSUhZPIGoRjgdCCrSsxyrB9OLjyUjKvhYgNIMiVrqOgTvW3LB7io+IaS9rBo6879/wCgGWzzndPu6+O5Wi10aLWiGiB8eJWxCAvEtlHI5zPdc5v2XEfBe2rxerhT+eYmBYV6sf/I5Yr1ZgjdYvzNex0VB3ZWmut1FkNWmsLLks3kPKizhFB6yeqoiL6Q4AREQBERAEREAXJflA2llptw7T2qt3d1MH4n0BXWErzCvihiMQ+s72S4Nb3MByj8eZVFeTxpjy9i+hFZ1S4juVmFwsOzEaez3nQnTd4aqNtjaGUNoFvWVKmU1GezAc+WBhi1S1rmA6SCHLOhjIa6pUnsNc5zQb9ltOWg7jqO4uCqthVHVHOrPEuJ6x4aO1myhrWgmzYADhuAELRRoqnDT8merWdSev4LN2BdTdlzsfvEPp5o72hxv4SOBUygHA9ppb/EC371zeKx1TEDM99rkNAAY3uAG6IF5VnsRxzOgPAa3IWMdADyM0kN1khjZ/wB08sLtKdVNweDe7qpS
xGaz7o6SgZC3FtoVHS2rUdUe9rh1Qe5ohrDDWmGEktLjmaJJ4lWX/U4GYlkDXsumfEP/AOKrfhtRcNBeIU3ymSGVXs/Z1Hs7muIb9n2fRYVsZWcL1nA+8G02kcwxR2bVa/2WtPg54/8AqMLN2LFhkH2yDyBpiU/i3Udk/wCyf5Nu+f0YUn41t27QreDmUHj1pz6qXT6S7Ro3cKGJbwg0Kn2gXNP2Qvjag4O/lI85C0169Maui8XNIX1/1F6jTul/kHWtnz+i7wX5QcG/s1i/D1PcqtgH+F4lp8we5dNs2oyoDUa4OBsCCCIgaFeSYt2HqVabC5xLg4tLKjbObETlEjWxDpJgC5Xs4IAmwETO7xWyGvHnW5kqOnnyGSwq1WtBc4gAakmAqLaXSimzs0h1juP0Bz+ly81zeJr1a7pqOJ4DRo8B8lUVbqENluy2nbSlu9kXu0+lOraAk++Rb6o38/IrnadIklxuSSSeJNyVvZhwFhVrALmVa06j8xvp0ow6TMv3So1S6xdUkr4+oFTyWpHxfVH68cV9TSz1g9VREX0ZwAiIgCIiAIiICi6a4/qsK+D2qn6Nv1va/lDl5zVxQpU5ykiNwJiJiY8xxgjUFdL+UGv1lelQBsxpqP8ArGAPGx+0uL2gMzn5TDTqIDhdwFgdNTPgF4pwcqmv02RZUmo0lDu93/zsWW0hTqUwDAa6SQ0AZi5wffxIvxzu7iK3B7QOGYXMpntdlr4zyGBrWtjs75Ahw320UahLKYYTOSQCd7QYb5CByVxTymlEuYWhpzSQM1RoObLoYzuPiO5bMIxZZz7Mawve9wDnuF4boZkuaCwsBJtZx11Ktdj4wU8I+oJJlxgwHZpyN0Jvm6k/NvmOwbXluYSYaSRIkOcSActoyui+k79VmNksqzSaMocGh9YOcTGduUOYRlzl2UyDuPKJcbHpNdz7hcPRZTa5ji2AGkOLWSDfK4Py8jO7ymM2O0vEEmbjsyBA0Lwbab1js40sOOrY52YBhcSbFxpZzoAAP0ggNjQcForbZc9xDKTH31c92ltPaBFneaZkQ0jfUwBLQ4FrATlAIcztA/S7JIKj1tlknJV6uoYsBUDXNHcHRmH4ahZ4SpXfOSjR7g2GgHd2uq/uIVuNludDnzm49Y8+WXLG9VVLmFPqZbTt5z6UU46Pl5GcBzW9kGm6HdwdNifx1WT9nPmCykGAaGC5wHjefu4roKWymidTJBgucRIEAiSeClUsEBuCyT8RX4L5NUPD3+b+DkWdHjUqMqGG5HtfTAbdmR4eGgzHEE95XU1etqANqPJaAAG6C2nZFj4lSsoC1VMQAufWualTqfwb6VCnT6Ua24cDVfXVgNFExGK71ArYpZs+hoUck+vilCqVVCOJLjlaC4nQAEme4K92Z0RxFWDVIpN77v8As7uZ5KyFCc3sjzOcKfUyoqYsNCmbN2LicQRDCxh+m8ZRHEA3dyXc7K6NYehBazM8fTf2nctw5BXC30rJLeRhqXjfQjkv/AtL/Vf6IutRafoU/QzfXqeoREVpUEREAREQBEVP0p2r+b0C4e27ssHfFzyF/JSlkhvB59tjHdZXq1QZzVCPq0+w0eQn6ypnnK7Nu3/PiAtWAqyHtm4eZ5gGfvW/EU5ETu08d3kr4x0rBROep5IePb2bG5BHM/4VptFpAdD80EZRpYdYcp3GzQOKpw452MO97RPi8a8lbYitkDexlkzlmYdLHXBMkGXDmV6PIe8jOJgMLoAm49jndg8l9p1DTaQLmxO72Dm9CFAa9wu4mZiJmD7X4fipBrSL7rz5FMbEdzbtHDPrPaWWYWiToMzezHH2Aw297cplHZgtJJA5f/nTco2ysaGzScRBu07hvnuGodwAYdGldDh2yCIg6GVyryrWhLCeEdazpUpxy1lk3A4YZbCApQaotN+UAStgrtAkmPFc9Y7m55NrnQolbFqm2v0nw1FxFSuwHhICpK/TfC7qk/wte7W40CfTqS6UxqhH
lo6Wviyq+tjFQVOlIcexReWi7nuhjQ36RvJMCdQF7Hs/o7hqcObTDjqHO7Z8RNhyCuhYze8tiqV5Tj07nnuC2diK/wCypEj3j2W+ZsV0Wz+ge/EVSf3GWH2iPgu3CLZC1hH3Ms7upLjYhbO2VRoCKVNreJ1cfFxuVNRFpSwZm88hERCAiIgCIiAIiIAiIgC8+6aYvPicm6mAPrHtE/cOS9AJXhe2doODzijJDiS/eQCZBA4DhuHNWU1vkqqvbBGqHq8SJHZqAj6w7TfQEc1Yu0J3fFR9pAVqHWUzLmgVG75Lbg+FvVbWVGkA8/MT8VeUEHFUTmp2mHsPfAcCp2IrNJY19RoIZmLiIJa5rm28xN7ZbpTMuBgmCDa/0hbv3FVu1KIzAPeKrzHa9k5IsRaLiNOPFR3PSLKqGQGtIIFwRfUifjdGwOB7uGqjUoDYyxPkBPqtrHE7/D7zF1JAq0sxjS+ukHj58e9TMDtR1MZag7IEAi2lgBwBjQiBuLZWpjToO4eJgaKTSAEAjWT521O6F4qQjNYkj3CcoPMWWVLaDHaPHMhsG+ricsyDYOOirNv0qlYNYyo1rbl5FSmDAgBoJdbUmRfs96M2ewOzBhEiTkcaY0d7uhgkcyptLDgAHNVJn/VeNdd8xBI5rJGypxnqXwa5XtSUdP8AZzrth4fD0zUfTp20Gen2qhm0zd1j32Vm7YtJhMZTBAAiIDP0YvebN+9Ta2Bp1A3O0nI9rgXOeSH5gCRLt+d08ZSsz2bzpflc91ytazkyPGCLh8M19XqcgILBlsD2zWptaYIiwnkSvYKFPK1reAA8hC802DT/AFhhjR9LyLqs/j4gL09V1OSynwERFWWBERAEREAREQBERAEREAREQGFX2T4H7l4nQYCIjdH4r1npTjeqwtV0wS3KPF3Z+K8jw7jOsenzuV1LuU1uxTVqbsG4mD+bPmd/VOO/+C/IyeKnYWoHRlIII7MXGnHfZWbyHNIcJER88VzWP2JVwzjUwsmnMuo/Gmdx1tpqrOCpPJeUDGaCAYsTcAyI9RCxxFM9YSWiG21BENNgIA4eqq8BtdlUFwJB0ew6tdKsH4oF8GLuNu+/4J3J4NjnGb+Pha1l8NtbeW6PwRrtY+RKwcf78/8AK9EE2lOU63jTx/H7ltY8k+m/un571r2bU8h/kLfVac2m7u3/AHWkcl5BJpuLh/fz8NPNZhhMATvnl8+irmyD66kD5hSG1zpqe74RzUEllSHZ8Mp3++03WrF1AHezbu0iBf11W6m626CI36gyPUBaKzgXHTu4aDRQuSexK2JT/WGGbZ6X9T/nmvS1590UpTXa07jm+yJtzXoKqqcltPgIiLwWBERAEREAREQBERAEREAREQHK9MK4c5tLWBmI1ubAEeE+a5Gr0da6TSdkMeyRLJ3QNW+vcFTflG2pWGPe6i8sIIYI3xDbg2N51Umj0wfTcW1cO57ASA+nBdE72OInz5KIXEVs9j3O1m8Nb5FbZVZs5mGws5vbaSPC45gKGMNmE9a5rho5pPZ1nsmxFuC6vZ3SnB1SA2s1rybMfNJ08AHgTylWmKwVKpd7QTuOh197VaFUTMkqbTPGtv4SoHdYQM40rUmwbX/SUyTIvulRtm7TL3NzRmBEgXBvq07x8+Ppu1+jIcD1TyP3XXHhOq8l2zsurh8U0EAB03abXBjUDuUtrlErfZnccvuWNQj54LDDvBa13ETyj8CtlQ3MKwrN+zqpBI4hWLnAjv01jQTyVRh7XOk/irUO9ncORtcfBQDY6nJ08N+6Y+9ZMNwbfO63h83X1wsRvPh4D0Oq103HX5mI5/3UEm17wZZDjabay1zAde5xWBbcx3/eZX1vbMe6W/Sy2d1m/jLNEzdrv+EqEGdB0TP6yw78rgfKQu9XBdD2frTT/tv397APQld6qZ8l9PgIiLwewiIgCIiAIiIAiIgCIiAL4SvqjbTrhlGo8/RY4+hhAjwfpCTVxYqe9WaT4GoCVcbP
aCear8Qz9Iw/vE+TXO+Cn7PMLkTllHcjFLj0LR2z6TxDqbXA8QCssLsemz9kX0o06t7mtH1JyHmCt1EqY0rxGTjwyJRT2ZGqVq7BdzKo/eGR3NzZbyyhefdMT1r2uFNzSx2d2hGUETcd3HivQsY6GlUmFwtOtQc+zg5+V3gH5S30K32tapOWlvYwXVCnCGpLcpcBaiybwxs+GRboHzdZYrZRoDUupgANdw3Q8ce/Q+i1ZvICbecrrI5DJDGndHHXd8VIwskwQbD8PnyUZoiw+bwfu9VuovnyH9vipIJwf2d3H0uvnWC/hqB3/wCF9LRw1vu7llUAg6D7vPyXkkjdYc4gGPZmYPYDXDdu607xqpWFBJvy8IP4LEFwIDS3Lclupmckyb/Q4rOgb9wB5WNvUISdd0GuXn934rr1y3QSjDHu7w0chJ+8LqVnnyaIdIREXk9BERAEREAREQBERAEREAXP9OMTkwpbvqODB/UfRp810Cgba2a3EUiw66tPB4Bg+pHgV5mm4tI902lNN8HjWM/aMHAOP8pHxUzBNsoWOJ66HCHBrwRwIc1pCscDuXIlsju9y0oKQHKPTC2uVZ5ZF2tU/RlcD0K28KGIq4WqYp1ny1x0bVGk9zoDfEBdvtY9leVbXwn6Y21Wq1nplkpuKeung9w6vcQLgjjI+PgqnGdFqbr0ndWfdjMyfDVvKw4Lj9gbdxNAZGjraYtkeTaPdd9H1HcuuwnTGgbVW1KB07TS9nJ7Zgd7gF04XEX3wcmpaVI9sr2K6tsuvTsaZcONM55t7tnem5ROsa3sv7J4PBYdODgCu3we0qVYfoatOp/A9ro8QDKmOuIIkd+nktCqGVwOFZW04WPrBHkVtw7hoTu/x6LqnbLoE/smCeDQ23JRa+xaIEsYQRoM7o5iYKnUiNJRsqPzzHYtFuMEwdd6+UD2nDgQDzMlXx6P0LAh5kyQatQi94jNA5LY3ZtJphtMTa8XJ87qNaGlnV9FKYGGaQQcxLjF98fcArdRNlYfJSa0iDFx4mVLVD5NC4CIigkIiIAiIgCIiAIiIAiIgCIiA8o6f7NNLGGoAclWmXAxYPzMDmzxtm5qFgDou/8Ayhj9Sf3OZ/UAvPdn6Bcy6jplsde1m509+2xd0tFk4LXSWRcsheV+0GyCuK2lhpqCy7jF6Ll8RSmswcXAeqtpPc9S6GfNl078/iugo0gdQqXZbdfner6ivNR7krg019j0X3fTaSNDAnkVo/6SWmaWIxFPgG1akfZJj0VoCi8KpKPDIlGL5RV9RjR7OOqfWbSd5yyVuZX2hBDsQ0iInqmTprpCsWI5Wq5q+pU6FJ/iiocMXADsU4xewY06RcgDcu5/J3gHBj61RznEnK0uJNhckTxJA5Ll6gOguTYBen7JwYpUWU/dF/4jcnzJWy1lObzJmS7UIxSiluS0RFtOeEREAREQBERAEREAREQBERAEREBz/T1k4Gr3ZT/O1ebbONgvUulrJwdcfuE+UH4LyrZrtFz7zlHTsX5Wi+ZovhX2kbLFxWE2GjE6Kk2TSz46gP8Acb/UrjFaKJ0QZOPYfdD3eVNx+9XUF50RVeKbKnZJkA8b+avqJsqLYw7DfAfcrukq58luCQCgCxC+sVJBsasXLJoWLzqvcUeSy6LYPrcS0n2Wds+I9keceS9EXOdCMHlomoReof5W2HrK6Ndi3hpgvc41zPVUfsERFeUBERAEREAREQBERAEREAREQBERAVPSsxg6/wDAfWy8m2cvWOlw/U6/8B+8LynALn3vKOpYdLLunovpKwpaLBxWE24NeKdZa+hhjFVXH6NCof5V9rixWnoqf0+I/wC1rf0q+360VXH22VWyRDR4BXVNU+zBYK5p6KmZebAsgsQsmqshmwFYNpl7msbq4ho8SYWZKtuh2Ez4jPFqYn6xsPieSvow1SSKKs9MWzucNQDGNY3RoAHgBC2oi7RxAiIgCIiAIiIAiIgCIiAIiIAiIgCIiArekv8A
5Sv/AO2/+kryTZ+gRFz73lHT8P4kXNH8PitdVEWE39zVU9kqL0c/bYn/ALSv/SvqK+360VXH22QNmaBXDNERUyLjYvoRF4RDM3LqOgGlbxZ/yRFstPuGO7+2zrURF1DkhERAEREAREQBERAEREB//9k=", # noqa
date=datetime.datetime.now())
session.add(catalogItem12)
session.commit()
# Seed item: "Turtleneck" under category2 (owned by User1).
# NOTE(review): the variable name `catalogItem13` is reused for the next
# item ("Blouse") further down — harmless only because this object is
# added and committed before the rebind; consider renaming one of them.
catalogItem13 = CatalogItem(user_id=User1.id,
category_name=category2.name,
name="Turtleneck",
description="A high collar that covers most of your neck even when the collar is folded over itself", # noqa
# Remote thumbnail URL stored as-is; the app hot-links it (no local copy).
picture="https://encrypted-tbn0.gstatic.com/images?q=tbn:ANd9GcRPFGfzuD9lOhHU3VxTM0tLJLhTr7ZoBWZj1gonZvm8COqb5zZ2zQ", # noqa
date=datetime.datetime.now())
session.add(catalogItem13)
# Commit per item so each seeded row is persisted independently.
session.commit()
# Seed item: "Blouse" under category2 (owned by User1).
# Fix: the original rebound the name `catalogItem13` (already used for the
# "Turtleneck" item above) — a copy-paste slip that only worked because the
# earlier object was added and committed before the rebind. Use a distinct
# local name so the two rows cannot be confused; nothing later in the file
# references this variable, so the rename is purely local.
catalogItem13_blouse = CatalogItem(user_id=User1.id,
category_name=category2.name,
name="Blouse",
description="a loose-fitting garment resembling a shirt for women. A long-sleeved, collared button-down shirt is an example of a blouse.", # noqa
# Remote product-image URL stored as-is; the app hot-links it (no local copy).
picture="https://lp.hm.com/hmprod?set=source%5B%2Fmodel%2F2018%2FH00%200711288%20001%2099%20ab1fc14bbb9813801aa79cfafa7e487285150d7a.jpg%5D%2Cmedia_type%5BDESCRIPTIVE_STILL_LIFE%5D%2Ctshirt_size%5BL%5D%2Cquality%5BH%5D%2Chmver%5B2%5D&call=url%5Bfile%3A%2Fstudio2%2Fv1%2Fproduct.chain%5D", # noqa
date=datetime.datetime.now())
session.add(catalogItem13_blouse)
# Commit per item so each seeded row is persisted independently.
session.commit()
# Seed item: "Skirt" under category2 (owned by User1).
catalogItem14 = CatalogItem(user_id=User1.id,
category_name=category2.name,
name="Skirt",
description="a free-hanging part of an outer garment or undergarment extending from the waist down", # noqa
# Remote product-image URL stored as-is; the app hot-links it (no local copy).
picture="https://s7d4.scene7.com/is/image/JCPenney/a0a3b8ab-8ebc-11e8-8780-b95ec8b54e4f.jpg?wid=350&hei=350&op_usm=.4,.8,0,0&resmode=sharp2", # noqa
date=datetime.datetime.now())
session.add(catalogItem14)
# Commit per item so each seeded row is persisted independently.
session.commit()
# For category3
catalogItem15 = CatalogItem(user_id=User1.id,
category_name=category3.name,
name="Jersey",
description="a shirt worn as part of a uniform in sports such as football", # noqa
picture="data:image/jpeg;base64,/9j/4AAQSkZJRgABAQAAAQABAAD/2wCEAAkGBw8REBAQDxEQDw8REBAPEhYQEg8QDxUQFRUWFxYTFhUYHCggGBolHRMVITEhJSkrMC4uFx8zOTMsNyguLisBCgoKDg0OFw8QFysdHh0tLTcrLS0rKy0tNystKystNystLS0tKy0rLSstLS0tLS0tNzctKysuLS0tKystLS0tK//AABEIAQMAwgMBIgACEQEDEQH/xAAcAAEAAgIDAQAAAAAAAAAAAAAAAQcGCAIDBQT/xABFEAACAQIDAwkEBwUFCQAAAAAAAQIDEQQFEgYhMQcTIkFRYXGBkTJSodEIQmKCsbLBcnOSotIUIzNDoxdTVWSDs8LT4v/EABgBAQEBAQEAAAAAAAAAAAAAAAABAgME/8QAHhEBAQEBAAICAwAAAAAAAAAAAAECEQMhEjEiQXH/2gAMAwEAAhEDEQA/ALxAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAB8uOzGjRV6s1Fv2YpOVST7IQjeUn3JMrHavlVUoyo4DnMPV1SjKtWpRloUXZ6aTftftWt2djgtgGvFLbTOl7ObU5fvqFOK+FNn0rbfP8A/iOX/wAMP/UX40X8DXyrtrnL9vNqUf3GHpzfxpo+nZ/lJxeEqN4ivWzClN9NVY06bj307cPB7hwX0Dwcj2twmKS0ydOpojOVOonCrBNfWi99vtW0vqbPdjJNXTTT4Nb0QSAAAAAAAAAAAAAAAAAAAMZ2t2peEhNYel/aa8Vqcb2jGPXJ9bt7q3lLZ9tdj8ZdV60lD/d0706Xg4r2vvNmvjU6u7Ots8BhbqpiKWtfVjLXLzjBSkvQwPOuVSMrqhGtJeMcNBrva11H5OBV6v2Ii6Lwe9jtr8ZV1KNRYeMt0v7OpU5yXVrqturPzkzGcQ2pNtt6m2297u+N2d0ku04XKONOR2I6pU11bvDgdbpz97d5/Miu6c7dZ0pOf7PX8hGh27/gj0svy6VZVNM6UObg52qS0XS6o9TfdxJ9D1cHtfioqMK7jjKcH0ViU51Id9OumqlOXY1LcZfkm39ONrV6uHfXDFxeIpeEcRSWv71SnN95XDy2tzCxFlzLnzWpShfXa9tN7o+Ru3aX7GxmU7Ywqq8qbnHcnUwso4ulf/pXnFd84wMgwOYUKyvRqQqJbnpkm0+xrqNVKdRxalG8ZLg02pLwa4HtYParHQcXz7q6eHPqNdruU5pyj5NE4NmwVXsnt7jJpPEUUqSi5a052klpvaM25v2l0tTXGyZZOXZhSrwc6UlKzcZJNNxkuMX3kubE6+sAEUAAAAAAAAPgz7Mo4XC4jEy3qjSnUt2uKul5uy8z7zAOW7MeayuVNO0sRWpUvup85L4Qt5gYNsvtHLEpOvLVWVScaj95VW5J/mX3ThnGUUsQlKNqdXRS6SW6Tc3DpLr3Jb+O4r7LswlQqKpFXspK17J3i0n5N38jN8Pi9OmUXro3pyi11U474R/ae5tdVz041LOVy1LL2MWxNKpSloqJxl8GrtXT61dP0OpyMyxUaVeloqb3GCs1bVGcql93fZv1MTzLL6uHm4VFuvLTJK0ZJO112dW7qujO8WNZ1106hc6ri5zbc7A4XFwOS3libC5bgZYaTxWHhUn0qqlPRK9FNx3NvotSjJdXV5V0mZjsfm2HjDRWs3GE4uOqjSc4upCpBxlKUeckmpLQ3wta+9HPyd56WMozPZ7Kp0azjTqYecY3ulVgtWpxSs7xfTVrWfHqumVTLdulua3NPc0+tNFq4yNPm60YLE87OMra6VSEU1NTk6lRKz0uz1p8LWcis8TReIxNfmbOLq1Z6tTlBRc3ZuTV3e68bk8Orq2GvT5aUXOUYQi5Tk1FJcW27L8TJspyWnBKdXTUnJdGPGEddObj4u6XmjuwOFp4eL0K
8ldym7anzdWL8lZR3HO05PTG0dL0Xe5KUZOcL9zTa8z2ZxJ7rjd2+o7s8z9U6c5qzlLnFFXX+bCEk7daTv6HPkZ2klSx8qFWbccXdu7/AM7jq8XvMDzbGqrU6P8Ahw1Qp9rhrk4t99mvRHVl+KlRq06sd0qc4zXkzl5NfKt5zyNvgfHlGNVehSrLhUpxl6o+w5NAAAAAAAABTH0hMU9WApX6OmvVa77win+Yucoj6QNW+Nwsfdwrf8VSX9Igq1s93ZPMVCpzNR/3dVpRfu1PlLh4qPDeeCzgpNO63Nb14mhbMMHGhaTScZce7saPg2uzKiqTpTSqVZcF7ji46ar61eMpLvPYyPFRxGHpSdm3BRku1bmYLtJlFWhUlNuVSnKc4qb3vVG3Rl2bpQt47uB2u+TjnM++vHlIKRwkyLnJ0dmoajqciNQHfrIcrnTqJ1AfQqjdlJzcE72UvJtX3J23XsZ3lkMM6EXQSUHpi19bW5ap6r8WoxSv3LqK81GSbL5bXd6t9FFxas+NRybpR0rqtKUt/wBiSN+PXxrOp2PbVOVV302g3VTS4vW73XkjzNrcaqVKNGLXOVElO3VSj2rtbtv7pGV4txowe62m0YrsSXzKtzzFOdebf1bQXl1erZreuzrOZ7fISmcEcji6tluSXFc5ldDfdwTg/IzIrXkKq3wE4+7VfxbLKM1AAAAAAAAAoHltoV6uZOUaNR06dClTUlFyTfSk3u/bt5F/HTiMLTqK1SEZr7STA07l2Pc+84SNqsw2Iy6tfXQjd9iT+DuY5j+R/Lp+xeD7rpfytF6K12QrTpYalVlK8JzqQircND4d/W/C/YePtdm7r4iSXsU5Sslw1tRU5fyRX3e8szbrZyllmWYbm96o42nJ8fZnCcZLfdv2mVtn+XQf97Bpbr34xkuN3buT3+COk/LP8Z7yvB1HByOFwYacri5AA5aiNRBAHNMzXY/PU4So1G5VIRjODk/ahCpKpJNt75LnZvwj3GEU4ttJWu9yv2mUZVlUYdK+qW9auFrN+yuxxlF7+03jNtZ1eR6+f5soqVRu6u9C96fyRhWfYGpRruNRpzlGNZ2Vl01eyXqvI9HEVViMXShHfBVIU49jV+k/Pf6IvDOeTTB41UatRONVUacG1qV0lffZrtY8mvfDM/bXKJzpwcnaKcn2JNv4GwmF5I8vjbUtT8G/zNmR4DY7AUUlGknbt3L0Vkc+tMQ5DKFWnh60KkZQvJSSkrOz6+4s866FCEFphGMIrqikkdhAAAAAAAAAAAAAAYLy0UtWUVn7tWhL/US/U19r5g5Uo0bNKO5tu9431W7t/wCWPYbI8qdPVk+O7qSn/DOL/Q1gXX4ms3icSiTiArkQAAADAHqrOXzMqbT1yjpvuSt2+abXkjyjiamrPpLJWR7C4fnMww0ft3+Fv1Np0jWzkio6s0o/ZTf80TZQxVAAQAAAAAAAAAAAAAAAAY5yjRvlOYr/AJSs/SN/0NWFwNsNtaerLcfHtwmI/wC3I1QX6FggAFEhAIAAyEBJxkciJLcBY/IbQ1ZhKXuwT/N8jYMo36P1K9bET7IqPov/AKLyJQABAAAAAAAAAAAAAAAAB5+0ENWExUe3DV16wkaiQ4LwRuDmv+BX/c1Pys0+o70n3IsHJkEsgolhAgCSCUQBJKIQ6wLj+j1Hdi39pePCJcxTv0fVaOL/AG/0gXEZoAAAAAAAAAAAAAAAAAAD485lbDYh9lCq/wCRmocNyXgjbbaaenBYyXZhcQ/9ORqS+CRYOJJBJQIJIAlBkEgEScSUBc/0fpdHFrruvwj8i4ik+QGp/e4mP2U/w+RdhKAAIAAAAAAAAAAAAAAAAMc5RMVzWVY6fBvDzprxqdBfmNWZGwXLtj+by2NFPfXxFOP3YJzfxjH1NfrFggBgogAACUQAJsESgBZ3IPWtjasfepL4XL6NauSTHqjmlDU7Kpen5vh+psqSgACAAAAAAAAAAAAAAAACkOX7EzlicJSt
JU6dGc72elznKzV+1KmvUqeRtrneR4bFwcMRTjNWteyuiu8w5GMPJt0akop9V2vxuWCjCbFu1uROf1K/q4v/AMUedW5G8avZqJ+Uf6i9FYgsOpyQ5iuDi/L5SPlnyWZkvqp/dqfIDBibGbf7Lsz9xelT+klcl2Z9cF/DU/pAwlMkz6lyS5i7Xsr/AGX+rR9mG5HcY2tc7LuUF+Mh0V/lWKdGvRqrdzdSE/JPf8Lm2uAxCqUqdRb1OEZeqK2yLkgw9NqVeWtrt6fwskvRllYHCwo040oXUIKyvvZKO8AEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAf/9k=", # noqa
date=datetime.datetime.now())
session.add(catalogItem15)
session.commit()
# Seed item: "Shorts or Leggings" under category3 (owned by User1;
# category3 appears to be sportswear based on neighbouring items — confirm
# against its definition earlier in the file).
catalogItem16 = CatalogItem(user_id=User1.id,
category_name=category3.name,
name="Shorts or Leggings",
description="comfortable shorts are a must for a successful trip to the gym", # noqa
# Remote thumbnail URL stored as-is; the app hot-links it (no local copy).
picture="https://encrypted-tbn0.gstatic.com/images?q=tbn:ANd9GcQ7ju1y7Znmm1wNQJPS78xrM2s6QkmUWuONrvTMxULSliNgR4YWLw", # noqa
date=datetime.datetime.now())
session.add(catalogItem16)
# Commit per item so each seeded row is persisted independently.
session.commit()
catalogItem17 = CatalogItem(user_id=User1.id,
category_name=category3.name,
name="Fitness Tank or Tee",
description="A lightweight tank top offers plenty of ventilation and flexibility", # noqa
picture="data:image/jpeg;base64,/9j/4AAQSkZJRgABAQAAAQABAAD/2wCEAAkGBxMSEhITEhIVFRUXFRUXFhYYFRcVFRUVFRUXFhUVFxUYHSggGBooGxYVITEhJSkrLi4uFx8zODMtNygtLisBCgoKDg0OGxAQGy4lHyY3Ly0tLS0tLy0tKy0rKy8tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0rK//AABEIAOEA4QMBIgACEQEDEQH/xAAcAAEAAQUBAQAAAAAAAAAAAAAAAQIDBQcIBgT/xABNEAACAQIDBAYDCA8HBAMAAAAAAQIDEQQSIQUxQVEGBxMiYXGBkaEjMlJykrHBwhQ0QlRzgpOisrPR0uHw8SUzU2J0g8NDY2SjFSQ1/8QAGAEBAAMBAAAAAAAAAAAAAAAAAAECAwT/xAAhEQEAAgICAwEBAQEAAAAAAAAAAQIRMQMTEiFBYTIiUf/aAAwDAQACEQMRAD8A3iAAAAAAAAAABidq9JsJhpqnXrwpzcVLK73yttJ6Lmn6jLGgOtCTe08Tmei7JLwj2UH87Zatcyre2Ibo2P0mwmLnOnh68ak4JSlFXTyt2urpXV9NN11zMuc0bDxdbZ2LjVySjUpyWenLRypySbg78JRaa8cr4HR2zMfTxFKnWpSzQnFSi/B8GuDT0a4NMWrhFbZfSACq4AALdetGEZTm1GMU5Sk9EoxV22+VjVdHrjcnK2DTjmeX3Zxllv3XJdm7O3A+3rh6SKNNYGm+/USlWafvad7qD8ZNbvgp398jUlPBzyyqqL7NSUHO3dVRxclDzypv+qNaVjGZZXvOcQ6O6KbdWOw0K6hkbcoyhmzZZRk1bNZX0s9y3mYPAdS7f2FVv98Tt+TpfTc9+Z2jEtKzmAAEJAAAAAAAAAAAAAAAAAAAAAA0h1h04rbGqupSwzkuFu7Fp+hG7zRPWtNraVRrfGFJrzUE17S/Htnyaez62+jHbUvsulH3WjH3RJazorV+bhrLycvA8T0A6aywE+zqXnhpu8orWVKT31ILiucfStbp7xwWIjWpU6kdY1IRmuKcZxTXsZz9042A8Fi6lJK1OXulF8Ozk/e/iu8fJJ8S1JzGJReMT5Q6BwGOp16calGcakJK6lF3T/j4H0HM+xdtYjCSz4erKm375KzhP40Ho/Pf4nt8D1v14q1bC0qj+FCpKkvTFxn85E8c/ExyR9bhPOdNullPZ9HM0p1p3VKnf3z4ylyguL8lvZ4HGdb9eUWqWFpU38KVSVVL8VRh854Pau06uJqurXqOc3pd6WS3RilpFK70XMV45+otyR8RVqVcTWcpXqVq1Rec5zaSSXBbkuCXkbS6YdHo4PYioqzlCpSnOS+7qTmozl5d6y8EkYrqd6P9pWljJruUrwpX41JLvzXxYu3nN8j2PWzK2zK3jOj+ugTaf9RCKx/mZY3qVqXwlePLEP1OlT/YzYRrLqRre54uHKdOXyoyX1DZpS+2lP5AAVWAAAAAAAAAAAAAAAAAAAAAA0J1qT/tKv4Kkv8A1R/ab7Oe+sbEqptHFNO6U4w9MIRhL85Neg049s+TTbvVtjFV2dhtdYRdN+DptxX5qi/SYrri2UquDVdLv0Jp349nUahNeV3CX4h53qZ27GFSrhJyt2jU6V+M0rTivFxUWvis2F05ins7HX+9a79Kpya9qRE+rEe6ucwQDdiklJuySu27Jc29EvWUmW6I0lLHYOMtzxFJ/JkpL2pEDoDo1slYTC0aEfuIJSfwpvvTl6ZOT9J5frkr5cBGPw69OK9EZz+qe7NVddu0It4WgpLMs9WS5JpQhfz90+SzCvuze3qr5+pHEJVsXT4yp05einKSf6xG3TQPVntDsdo0OVTNSf467v58YG/ieTZxz6AAUXAAAAAAAAAAAAAAAAAAAAKK1WMIylJqMYpuUm7JJK7bb3KwGC6c9IVgcJOqrdpJ5KSfGpJOztxSScn8U56nJttt
tttttu7berbfFno+n3Sh4/EZot9hTvGit172zVGucrK19yS3Ns8yjelcQ572zKqnNxalFuLTTTWjTTumnwdzau0+mscTsWu5NLEPJQqRWl5TaTmlwjKmpvwaa4GqSicb+ZMxlEThIuUqfB6MqLILl/Z+LdGrSqrfTqQqfIkpfQWCly9LIHS+2tvUcNhniZyvDKnC2+o5K8Iw5t/x3HPO2NpVMTWqV6tnOpK75RW6MV4JJL0FuttCtUp0adSpKUKUXGlF7oJu9kuPK74JLciwVrXC1rZVYetKnOM46SjKMovlKLvH2pHTWzcZGtSpVYe9qQjNeUkn69TmFo2L1b9PFh1HC4nSlmtTqf4eZ+9n/ku9/C+um6L1zCeO2JbiABi3AAAAAAAAAAAAAAAAAAB8+Px9KhB1K1SNOC3yk0l7d78DS/T/AKfSxt6GHvDDfdNq061npdfcw/y73xtuPj61nfada+tlSUb62TpQby396r8jyLkbVpG2N7zpJNym5JozSW1iIfCW+29b+RWedaVmknvvfc8qzaNc9ZbuRW04WrXLPSqwe+UfWuV/mKXKK/6iX4y/nivWYXGyTUsu7MreXZL6CuFK9Rp7pQf6C/gV80+DMvLZtyulv10Vt9yY1oW0lG1r3ut17X9ZjUrYa/O0m/FyTZ8k593S3vfQ/d17CZsRRnu1Xwluvv4c/IU6qkrqV14GFoSglJSe+juVr3zTckl4a+oyOyY9x6fdPX4Tsru3Dl6BFsomuH2L0k3BFy6rbHVv0+TVPB4qVnpGjWb0lwjTqX3S4KXHRPX320DlZpbv6HuuinWTXwqVKvF4ikklF5rVoJcFJ6TXhKz8eBlan2Glb/JbvB8GwtqwxdCnXpxnGM02lNJSVm1qk2t64M+8ybAAAAAAAAAAAAAAAANFdb8LbRk+dKk/nj9U8TFHueuNf2h/sUv0qh4c6K6c1tyWFgLlkIs+ZjKUmu/ZOVSTyrgl8J+UV/NzKNGG7emowcoyfddtbWjF5cum9aekpZar6YYqWZLJG1rysucb6J7tLesbPqOV3l3tyk2rWbtaK5923qLuPqRir5LymsumjtZ8SMLiIyd0mu5GT5NNaK3ND6n4srGSu/e5LtWtwV9fZLTkiJYyajfJBXtlXJNOWvoV7Frt1K8uz7qTk4prVTVs2nhe/HVF+GJhpF03vi9Xd3k2lK/oK5Tj8RUxu+0ErK2trudr6W4LeffhqWWEVvstfPi/WYypUi1OfZtJS1d73tJXS5Xdj7MJWck2lazaaeuqLRPtW0en2pgtxnfzK7l1USRMWGUQ3gdIdCaWTAYRf9mm/lRzfSZsxvRn7Twn+no/q4mSOadumNAAISAAAAAAAAAAAAANHdcf/wCh/sU/0qh4Wx7Prcq32lUXwadKP5ub6x42x0V05rbkAILITc83Wptwho9FJ7v87/aeibMBSn7nN9pPPyzPRZlqZ3XoyW0W12crN2ld2V/uWWqNX3SWaLTnCKSs99vYUSpPtKazz70VJ958FuXqL213/dptpNyvZ23JW+cfqY/4+XDwfZ1e617lGOqa7yvdFdaD7SLs91Hhp4+oqpVHej3m+5Pi7Oye/mfNh68ssrTk/cm3q9JZrK3LS3rKrL9HSFVWlJ69x3y2zb1bj+w+7ZN8mt7Znlvo3HhcsYyUuwpu73QzNb7W11Po2Unk1lmV3ld793+blo2pbT6ZU0ylPgy4UyiaKKimS1EXfQXA6Y6M/aeE/wBPR/VxMkY7o39qYX8BR/VxMicsumAABIAAAAAAAAAAAAA566yambaeLf8Amgvk0acfoPNMyvSvEdpjcXPnXq28ozcY+xIxVzpjTlnaEyJMiTIenmSJk7acTCUqUssoKCvrm0WZ3s1G/Dh60ZqKMdCeWMe9aVTvSfGMXrZeL0S/gUsvVfdGXaUXbRQs3ydijaN5yTjByUJNNc7pP2EKU1LWTtGN3fVPuttt8dbJeTLmAz7pWVtWuMpS1zeW9f0I
36Ne1ihhJrsbxekZ38G72XtLWHwlSMZLK+9B33aSTdvZ85e+yZKTm3L30u7rZqKbtbklHfzkTKc8t1OTzNJ5dydm2o6acI35kYhbMp92dNRUHFxy8V30lZr5j6NlQtB8E5Oy8N3zpnz1q8+8s2qWVJeSTlLxbdl6zJ0YKMVFbkkvUi0bVtpUgQSXUW5LiVN8fAlooXFAdNdG/tTC/gKP6uJkTE9EambA4N88PS/QRljmnbpjQACEgAAAAAAAAAAFvEVVCEpPdGLk/JK5cMH05xXZbPxcr2fYzivOayL2yQhEuc6lRybk97bb827stslkHU5kbtSlLiEr6srW8CUjAUsa1GVlFWatZeaf0HoWjyUjO84accZZvHYlwpwateS1ur6ZdSViW6Knpmule198rM+TaFm6EW7LIr+Tt+wpw8//AK01ykva4v8AaRn2nx9LscQ3Uhbfrfj8Z+pWXmyKWNryuoJO3glblx8BsWznJvflVvK+v0FjZ1aopSUIp3avpu1fj5kZTh9eLxM+0cEk7JSS03pKV3z8j79m4p1IZmtb2dt2n9T4Kn2zP4j/AEEX9hf3b+M/miWifasx/lkb6kETJuaM0sixNyFvA6C6tq2fZuFfKDj8ico29h6Y8V1QVs2zoL4FSpH1yz/XPanNbbprqAAEJAAAAAAAAAAAPC9cmMybPyf4tanH0RvVv66a9Z7o1H14429TC0VwjOo18ZqMf0Z+stTat/5avKXq/AS5Ibjoc6pk7t5SubJjHUAm2YB4GeVvI735cLP6fnPQtFORviVtXKa2wxVTBynUhmi8qhFN7t0W/nLVPCTUK0cr1cbeNpPd6LGYyvgSqnNEeELecsThcLOnOnJRdmkpeF3Z39jLdOFWnmywbvzXK/iZ65DHgebDYiFTtZSjC9428NYpM+/ZFBwhaWjbbty3L6C69Hcu34omK4nKJtmMKmRDkSmUbmWVXEyFvDY4gbc6jsTfD4mnf3tWM7fHhl/42bMNO9Rte1fFQ+FShL8nNr/kNxHPfben8gAKrgAAAAAAAAAAGgutfGdptKsuFONOn6o537ajN+nMfSfFOrjMXU+FiKtviqbjH81I049s+SfTGpiOupS3wK0tDZiFTQSJaApzMqTAAIhxBKAoytbiqMr+ZOYiULgJIt05WdnuK0+DKasQLi0EyinK6sVLkBKegiyIOzITswPbdTtfLtFR+HSqx9Vp/VN7HPHVzWybUwvjKa+VTmvpR0OY8m23HoABm0AAAAAAAAAABRVnljKXJN+pXOU87er3ttv0s6c6T1smDxUvg0Kr9KpyscwrcvSa8bHl+JW8uS3FG4VJaLzNWatklJPICoMXIAPxC1KUrvyK0BJFwSgDRAW8lgWHo7lyfBoVYlNF8AJkTJXsyGiab4AZfofO2PwT4rEUl6JSUX86OlTlZVHBxnB5ZRacWt6a1T9DSOqTHka8QADNqAAAAAAAAAADBdOp22djX/49X2xaObXJHRnWI/7Nxn4J+1o5zZtx6Y8m1KdxU3xRMNShvveguzXiQtCLkiSGw2EBKJAAqbBSSAkSmCIgQ3zLLdmXyirTunzW4gXGr6lmStqV4eV0VSJFNVOUdFv3eb0OrDlrZsL1aa4OpTX58TqUy5PjXi+gAMmoAAAAAAAAAAPMdZkktmYu7sskfbUgc4uvFrRp+k61PjxeyaFX+9oUqnx6cJfOi9b4UtTLlmFeP8tFuFXVt8fE6dXRTAXv9hYa/wCAp/ukrorgfvLDfkKf7pbsU65czvFRZTLER52Omo9FcCt2Cw35Cn+6fVR2NhoWyYejG261KCt6kOw6nLCxlP4S9aMjSwFaSzRoV5LmqNRr1qNjqKEEtyS8lYqHYnqcvrZtf72xHl2NT90S2bXW/DV150ai+qdQAdh1frl6nsvEStlw2IlfdahVd/K0dSp7KxF7fY2Jvy+x6yfqynT4I7Dq/XMH/wAZiPvbEfkKv7oezMQt+GxC86FVfVOnwT2H
V+uXns6v971/yNT90sVacqdu0hKnfdnjKN/LMjqgDsOr9cnU6sU3aSt5l9Ti2le7e5LVvwS4nUOIwFKo1KdKnOS0TlCMmlyTaKsNg6dP+7pwh8WKj8yHYjq/XPWwOjWMq1qUoYStlVSm3KUHTjZSTbzTsnpfcdGAFLWy0rXxAAVWAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAH//2Q==", # noqa
date=datetime.datetime.now())
session.add(catalogItem17)
session.commit()
# Seed the remaining catalog items for the sports-gear category.
# NOTE(review): assumes `session`, `CatalogItem`, `User1`, `category3` and
# `datetime` were set up earlier in this script — confirm against the file head.
catalogItem18 = CatalogItem(user_id=User1.id,
                            category_name=category3.name,
                            name="Sports Bra",
                            description="For women a good sports bra is everything, you will feel pretty miserable by the end of your workout",  # noqa
                            picture="https://encrypted-tbn0.gstatic.com/images?q=tbn:ANd9GcRnsczWVotVflGG1uvOZ02FigSYkvxWpSfZ8864K1fKPermgf_D",  # noqa
                            date=datetime.datetime.now())
session.add(catalogItem18)
session.commit()
catalogItem19 = CatalogItem(user_id=User1.id,
                            category_name=category3.name,
                            name="Socks",
                            description="The socks lightweight running socks or low-cut socks with moisture-wicking, breathable fabric and a touch of arch support are a pretty big part of your gym comfort equation.",  # noqa
                            picture="https://encrypted-tbn0.gstatic.com/images?q=tbn:ANd9GcTrB1W65qFulipHAEQkq8Ll649pwrC1QOXQMpTTdBakUacqKj9sCQ",  # noqa
                            date=datetime.datetime.now())
session.add(catalogItem19)
session.commit()
catalogItem20 = CatalogItem(user_id=User1.id,
                            category_name=category3.name,
                            name="Training Shoes",
                            description="The most essential item in sports",  # noqa
                            picture="https://cms-static.asics.com/media-libraries/861/file.original.jpg?20173107192907",  # noqa
                            date=datetime.datetime.now())
session.add(catalogItem20)
session.commit()
# BUG FIX: `print "..."` is a Python-2-only statement and a SyntaxError under
# Python 3; the call form below behaves identically on both.
print("added menu items!")
|
# sql:
# \copy (select st."1:2"-st."1:3" as ibl, st."1:1"-st."1:2" as ebl, count(1) as c,nblocks as b,ngenes as g from (select * from recomb as rb join gene_trees as gt on gt.id=rb.inf where topology!='(4,(3,(1,2)));' ) as foo join species_trees as st on foo.sid=st.id group by b,g,ebl,ibl order by c) to 'discordant.recomb.csv' csv header
# \copy (select st."1:2"-st."1:3" as ibl, st."1:1"-st."1:2" as ebl, count(1) as c,nblocks as b,ngenes as g from (select * from recomb as rb join gene_trees as gt on gt.id=rb.inf where topology='(4,(3,(1,2)));' ) as foo join species_trees as st on foo.sid=st.id group by b,g,ebl,ibl order by c) to 'concordant.recomb.csv' csv header
from itertools import combinations
from pathlib import Path
from statistics import mode
import numpy as np
import pandas as pd
import statsmodels
import statsmodels.api as sm
from sklearn.linear_model import LinearRegression
from statsmodels.formula.api import ols
from sklearn.inspection import PartialDependenceDisplay
import statsmodels.api as sm
from statsmodels.gam.api import BSplines, GLMGam
import matplotlib.pyplot as plt
from sklearn.ensemble import HistGradientBoostingRegressor
def calc_anova():
    """Fit an OLS model of the concordant-tree fraction on the full
    g*b*ebl*ibl interaction and print the ANOVA terms with PR(>F) < 0.2."""
    concordant = pd.read_csv('concordant.recomb.csv')
    discordant = pd.read_csv('discordant.recomb.csv')
    merged = concordant.merge(discordant, on=['ibl', 'ebl', 'b', 'g'],
                              how='outer')
    # Fraction of concordant replicates per parameter combination.
    merged['n'] = merged.c_x / (merged.c_x + merged.c_y)
    fitted = ols(formula='n ~ g*b*ebl*ibl', data=merged).fit()
    table = sm.stats.anova_lm(fitted, typ=2)
    print(table[table['PR(>F)'] < .2])
def calc_GradHist_PDP(df,
                      s,
                      predictors=('Length', 'Genes', 'Fraction', 'ebl', 'ibl'),
                      features=('Length', 'Genes'),
                      target='abs_err',
                      common_params=None):
    """Fit a HistGradientBoostingRegressor and save its partial-dependence
    plots (two 1-D PDPs with ICE curves plus one 2-D PDP) to ``s``.

    :param df: frame holding the predictor columns and the target column.
    :param s: output path for the saved figure.
    :param predictors: columns used as model inputs.
    :param features: pair of columns plotted individually and jointly.
    :param target: name of the regression target column.
    :param common_params: extra keyword arguments forwarded to
        ``PartialDependenceDisplay.from_estimator``; ``None`` selects the
        subsampled, centered configuration used throughout this analysis.
    """
    # BUG FIX: the original used mutable defaults ([] and {}), which are
    # shared across calls; immutable tuples plus a None sentinel avoid that.
    if common_params is None:
        common_params = {
            "subsample": 50,
            "n_jobs": 6,
            "grid_resolution": 20,
            "centered": True,
            "random_state": 0,
        }
    # list(...) because pandas does not accept a tuple as a column selector.
    X_train, y_train = df[list(predictors)], df[target]
    est = HistGradientBoostingRegressor(random_state=0).fit(X_train, y_train)
    _, ax = plt.subplots(ncols=3, figsize=(9, 4))
    display = PartialDependenceDisplay.from_estimator(
        est,
        X_train,
        features=[*features, tuple(features)],
        kind=['both', 'both', 'average'], ax=ax, **common_params
    )
    display.figure_.subplots_adjust(wspace=0.4, hspace=0.3)
    # plt.tight_layout()
    plt.savefig(s)
def calc_glmgam(df: pd.DataFrame,
                s: Path = Path(
                    '/N/project/phyloML/deep_ils/results/bo_final_2/test')
                ):
    """Fit GLM-GAM models over several interaction formulas for each target
    variable, write every model summary as LaTeX under ``s / 'glm_fits'``,
    and print the lowest-AIC formula per target."""
    s /= 'glm_fits'
    s.mkdir(parents=True, exist_ok=True)
    # create spline basis for weight and hp
    # NOTE(review): four columns are supplied but df=5*[8] / degree=5*[3]
    # describe FIVE smoothers — confirm a column wasn't dropped from the list.
    bs = BSplines(df[['ebl', 'ibl', 'y_true', 'log_true']],
                  df=5*[8], degree=5*[3])
    # penalization weight
    # NOTE(review): this GLM is constructed but never fitted or used below.
    glm = sm.GLM(
        endog=df.ERROR,
        exog=df[['ebl', 'ibl', 'Length', 'Genes', 'Fraction']])
    models = []
    # 'target' is a placeholder substituted with each response variable below.
    model_names = ['target~(Length+Genes+Fraction)**2',
                   'target~Length*Genes*Fraction',
                   'target~Length*Genes*Fraction-Length',
                   'target~Length+Genes+Fraction', ]
    aic_values = np.empty(len(model_names))
    for target in ('preds', 'np.log(preds)', 'abs_err', 'ERROR'):
        for i, m in enumerate(model_names):
            m = m.replace('target', target)
            gam_bs = GLMGam.from_formula(
                m,
                data=df,
                smoother=bs)
            res_bs = gam_bs.fit()
            models.append(res_bs)
            summary = res_bs.summary()
            aic_values[i] = res_bs.aic
            # One LaTeX summary file per fitted formula.
            with open(s/(m+'.tex'), 'w') as f:
                f.write(summary.as_latex())
        best = np.argmin(aic_values)
        print(model_names[best].replace('target', target),
              aic_values[best], '\n-----\n')
    # NOTE(review): `models` accumulates across all targets while `best`
    # indexes within one target's formulas, so models[best] may not be the
    # printed winner.  Also, PartialDependenceDisplay is a scikit-learn API;
    # passing a statsmodels results object here likely fails — verify this
    # code path actually runs.
    PartialDependenceDisplay.from_estimator(
        models[best],
        df.sample(500),
        ['Length', 'Genes', 'Fraction'],
        kind='both')
if __name__ == '__main__':
    df = pd.read_pickle(
        '/N/project/phyloML/deep_ils/results/bo_final_2/test/rec_fraction.pd.pkl').reset_index()
    df['log_true'] = np.log(df.y_true)
    # df['log_preds'] = np.log(df.preds)
    # normalize
    # Z-score the predictor columns so spline/GLM scales are comparable.
    predictors = ['Length', 'Genes', 'Fraction',
                  'ebl', 'ibl', 'y_true', 'log_true']
    df[predictors] = (df[predictors]-df[predictors].mean()) / \
        df[predictors].std()
    for c in combinations(['Genes', 'Length', 'Blocks'], 2):
        print(c)
        # NOTE(review): `rs`, `z` and `s` are not defined anywhere in this
        # script — this call raises NameError as written; it looks like a
        # stale copy from another module where those names exist. Confirm.
        rs.calc_GradHist_PDP(z.query('Blocks==2'),
                             s/f'blocks2-{"_".join(c)}-abserr.png', features=c, predictors=['Length', 'Genes', 'Blocks', 'ebl', 'ibl'],)
    calc_glmgam(df)
|
# Read three integers x, y, z and print them in ascending order.
x = int(input("x:"))
y = int(input("y:"))
z = int(input("z:"))
# sorted() returns a new ascending list; printing one value per line.
for value in sorted([x, y, z]):
    print(value)
|
import json
import treq
from twisted.trial.unittest import SynchronousTestCase
from mimic.model.heat_objects import RegionalStackCollection, Stack
from mimic.rest.heat_api import HeatApi
from mimic.test.fixtures import APIMockHelper
from mimic.test.helpers import request
class HeatObjectTests(SynchronousTestCase):
    """Unit tests for the Heat model objects (Stack and its collection)."""

    def setUp(self):
        """
        Build a collection and a stack for each test to exercise.
        """
        self.coll = RegionalStackCollection(
            tenant_id='tenant123', region_name='XYZ')
        self.stack = Stack(stack_name='foo', collection=self.coll)

    def test_update_stack_action(self):
        """
        Updating to an unknown action raises ValueError.
        """
        self.assertRaises(ValueError, self.stack.update_action, 'foo')

    def test_update_stack_status(self):
        """
        Updating to an unknown status raises ValueError.
        """
        self.assertRaises(ValueError, self.stack.update_status, 'foo')

    def test_links_json(self):
        """
        links_json returns a single self-link built from the URL generator.
        """
        prefix = 'http://foo.bar/baz/'
        expected_href = '{}v1/{}/stacks/{}/{}'.format(
            prefix, self.coll.tenant_id,
            self.stack.stack_name, self.stack.stack_id)
        links = self.stack.links_json(lambda suffix: prefix + suffix)
        self.assertEqual(links, [{'href': expected_href, 'rel': 'self'}])
class HeatAPITests(SynchronousTestCase):
    """
    Tests for the mimic Heat API: stack CRUD, stack actions, tags,
    template validation and stack preview.
    """

    def get_responsebody(self, r):
        """
        Util for getting JSON response body.
        """
        return self.successResultOf(treq.json_content(r))

    def query_string(self, query_params=None):
        """
        Util for building a query string from a parameter mapping.
        """
        return ('?' + '&'.join([k + '=' + v for (k, v) in query_params.items()])
                if query_params else '')

    def list_stacks(self, query_params=None):
        """
        Return a stack list, asserting that the request was successful.
        """
        query = self.query_string(query_params)
        req = request(self, self.root, b"GET", self.uri + '/stacks' + query)
        resp = self.successResultOf(req)
        self.assertEqual(resp.code, 200)
        data = self.get_responsebody(resp)
        return data

    def create_stack(self, stack_name, tags=None):
        """
        Request stack create, assert that the request was successful, and return
        the response.

        :param: stack_name The user-defined name of the stack to create.
        :param: tags A list of strings to tag the stack with. Defaults to None.
        """
        req_body = {'stack_name': stack_name}
        if tags:
            req_body['tags'] = ','.join(tags)
        req = request(self, self.root, b"POST", self.uri + '/stacks',
                      body=json.dumps(req_body).encode("utf-8"))
        resp = self.successResultOf(req)
        self.assertEqual(resp.code, 201)
        data = self.get_responsebody(resp)
        return data

    def update_stack(self, stack_name, stack_id, resp_code=202):
        """
        Request stack update and assert that the response matched the one
        provided.

        :param: stack_name The user-defined name of the stack.
        :param: stack_id The ID of the stack provided by Heat.
        :param: resp_code The response code to expect from the update call.
            Defaults to 202.
        """
        # (Docstring fixed: it previously claimed the default was 201.)
        req_body = {'foo': 'bar'}
        req = request(self, self.root, b"PUT",
                      '{}/stacks/{}/{}'.format(self.uri, stack_name, stack_id),
                      body=json.dumps(req_body).encode("utf-8"))
        resp = self.successResultOf(req)
        self.assertEqual(resp.code, resp_code)

    def stack_action(self, stack_name, stack_id, resp_code=201,
                     req_body=None):
        """
        Request a stack action and assert that the response matched the one
        provided.

        :param: stack_name The user-defined name of the stack.
        :param: stack_id The ID of the stack provided by Heat.
        :param: resp_code The response code to expect from the action call.
            Defaults to 201.
        :param: req_body The data to include in the request. Defaults to the
            one to trigger action-check.
        """
        # BUG FIX: the default used to be the mutable dict {'check': None},
        # shared across calls; a None sentinel preserves the behavior (an
        # explicit empty dict is still sent as-is) without the shared state.
        if req_body is None:
            req_body = {'check': None}
        req = request(
            self, self.root, b"POST",
            '{}/stacks/{}/{}/actions'.format(self.uri, stack_name, stack_id),
            body=json.dumps(req_body).encode("utf-8"))
        resp = self.successResultOf(req)
        self.assertEqual(resp.code, resp_code)

    def delete_stack(self, stack_name, stack_id, resp_code=204):
        """
        Request stack delete and assert that the response matched the one
        provided.

        :param: stack_name The user-defined name of the stack.
        :param: stack_id The ID of the stack provided by Heat.
        :param: resp_code The response code to expect from the action call.
            Defaults to 204.
        """
        req = request(self, self.root, b"DELETE",
                      '{}/stacks/{}/{}'.format(self.uri, stack_name, stack_id))
        resp = self.successResultOf(req)
        self.assertEqual(resp.code, resp_code)

    def setUp(self):
        """
        Test initialization.
        """
        helper = APIMockHelper(self, [HeatApi()])
        self.root = helper.root
        self.uri = helper.uri

    def test_list_stacks_empty(self):
        """
        Test stack list, ensuring correct JSON response.
        """
        resp = self.list_stacks()
        self.assertEqual(resp, {'stacks': []})

    def test_create_stack(self):
        """
        Test stack creation, ensuring correct JSON response.
        """
        foo_resp = self.create_stack('foostack')
        self.assertEqual(set(foo_resp['stack'].keys()), set(['id', 'links']))

    def test_create_stack_id_unique(self):
        """
        Test uniqueness of created stack IDs.
        """
        foo_resp = self.create_stack('foostack')
        bar_resp = self.create_stack('barstack')
        self.assertNotEqual(foo_resp['stack']['id'], bar_resp['stack']['id'])

    def test_list_stacks_one(self):
        """
        Test stack list output for one stack.
        """
        foo_resp = self.create_stack('foostack')
        foo_stack_list = self.list_stacks()['stacks']
        # BUG FIX: assertTrue(len(x), 1) treats 1 as the failure *message*
        # and passes for any non-empty list; assertEqual checks the count.
        self.assertEqual(len(foo_stack_list), 1)
        self.assertEqual(foo_stack_list[0]['id'], foo_resp['stack']['id'])
        self.assertEqual(foo_stack_list[0]['stack_name'], 'foostack')
        self.assertEqual(
            set(foo_stack_list[0].keys()),
            set([
                'creation_time',
                'description',
                'id',
                'links',
                'stack_name',
                'stack_status',
                'stack_status_reason',
                'tags',
                'updated_time',
            ]))

    def test_list_stacks_two(self):
        """
        Test stack list output for two stacks.
        """
        self.create_stack('foostack')
        self.create_stack('barstack')
        two_stack_list = self.list_stacks()['stacks']
        self.assertEqual(set(stack['stack_name'] for stack in two_stack_list),
                         set(['foostack', 'barstack']))

    def test_check_stack(self):
        """
        Stack updates with correct check status.
        """
        foo_resp = self.create_stack('foostack')
        self.stack_action('foostack', foo_resp['stack']['id'])
        new_stack_list = self.list_stacks()['stacks']
        self.assertEqual(new_stack_list[0]['stack_status'], 'CHECK_COMPLETE')

    def test_invalid_action(self):
        """
        Invalid stack-action requests generate 400.
        """
        foo_id = self.create_stack('foo')['stack']['id']
        self.stack_action('foo', foo_id, req_body={'foo': None}, resp_code=400)

    def test_missing_action(self):
        """
        Invalid stack-action requests generate 400.
        """
        foo_id = self.create_stack('foo')['stack']['id']
        self.stack_action('foo', foo_id, req_body={}, resp_code=400)

    def test_multiple_actions(self):
        """
        Supplying more than one action in stack-action requests generate 400.
        """
        foo_id = self.create_stack('foo')['stack']['id']
        self.stack_action('foo', foo_id,
                          req_body={'check': None, 'cancel_update': None},
                          resp_code=400)

    def test_disabled_actions(self):
        """
        Using disabled actions generates 405.
        """
        foo_id = self.create_stack('foo')['stack']['id']
        for action in ('cancel_update', 'resume', 'suspend'):
            self.stack_action('foo', foo_id, req_body={action: None},
                              resp_code=405)

    def test_check_stack_missing(self):
        """
        Trying to check a stack that doesn't exists returns 404.
        """
        self.stack_action('does_not', 'exist', resp_code=404)

    def test_update_stack(self):
        """
        Stack updates with correct status.
        """
        foo_resp = self.create_stack('foostack')
        self.update_stack('barstack', foo_resp['stack']['id'])
        new_stack_list = self.list_stacks()['stacks']
        self.assertEqual(new_stack_list[0]['stack_status'], 'UPDATE_COMPLETE')

    def test_update_stack_missing(self):
        """
        Trying to update a stack that doesn't exists returns 404.
        """
        self.update_stack('does_not', 'exist', resp_code=404)

    def test_delete_stack(self):
        """
        Test stack deletion.
        """
        self.create_stack('foostack')
        bar_resp = self.create_stack('barstack')
        self.create_stack('bazstack')
        self.delete_stack('barstack', bar_resp['stack']['id'])
        new_stack_list = self.list_stacks()['stacks']
        # BUG FIX: was assertTrue(len(...), 2), which never checked the count.
        self.assertEqual(len(new_stack_list), 2)

    def test_delete_stack_not_found(self):
        """
        Test correct response from attempting to delete nonexistent stack.
        """
        self.delete_stack('nonexistent', 'bad_id', resp_code=404)

    def test_list_deleted_stacks(self):
        """
        Test listing stacks, including those that have been deleted.
        """
        foo_resp = self.create_stack('foostack')
        self.create_stack('barstack')
        self.delete_stack('foostack', foo_resp['stack']['id'])
        stack_list = self.list_stacks({'show_deleted': 'True'})['stacks']
        # BUG FIX: was assertTrue(len(...), 2), which never checked the count.
        self.assertEqual(len(stack_list), 2)

    def test_create_stack_tags(self):
        """
        Test creation of stacks with varying tags.
        """
        self.create_stack('zero_stack')
        self.create_stack('one_stack', tags=['one'])
        self.create_stack('two_stack', tags=['first', 'second'])
        stack_list = self.list_stacks()['stacks']
        self.assertEqual(stack_list[0]['tags'], '')
        self.assertEqual(stack_list[1]['tags'], 'one')
        self.assertEqual(stack_list[2]['tags'], 'first,second')

    def test_list_stack_tags(self):
        """
        Test listing of stacks with various tag combinations.
        """
        foobar_tags = ['foo', 'bar']
        barbaz_tags = ['bar', 'baz']
        self.create_stack('zero_stack')
        self.create_stack('foobar_stack', tags=foobar_tags)
        self.create_stack('barbaz_stack', tags=barbaz_tags)
        foo_stack_list = self.list_stacks({'tags': 'foo'})['stacks']
        bar_stack_list = self.list_stacks({'tags': 'bar'})['stacks']
        baz_stack_list = self.list_stacks({'tags': 'baz'})['stacks']
        foobar_stack_list = self.list_stacks({'tags': 'foo,bar'})['stacks']
        foobaz_stack_list = self.list_stacks({'tags': 'foo,baz'})['stacks']
        wrong_stack_list = self.list_stacks({'tags': 'f'})['stacks']
        another_wrong_stack_list = self.list_stacks({'tags': 'o,b'})['stacks']
        self.assertEqual(len(foo_stack_list), 1)
        self.assertEqual(foo_stack_list[0]['stack_name'], 'foobar_stack')
        self.assertEqual(len(bar_stack_list), 2)
        self.assertEqual(bar_stack_list[0]['stack_name'], 'foobar_stack')
        self.assertEqual(bar_stack_list[1]['stack_name'], 'barbaz_stack')
        self.assertEqual(len(baz_stack_list), 1)
        self.assertEqual(baz_stack_list[0]['stack_name'], 'barbaz_stack')
        self.assertEqual(len(foobar_stack_list), 1)
        self.assertEqual(foobar_stack_list[0]['stack_name'], 'foobar_stack')
        self.assertEqual(len(foobaz_stack_list), 0)
        self.assertEqual(len(wrong_stack_list), 0)
        self.assertEqual(len(another_wrong_stack_list), 0)

    def test_template_validate(self):
        """
        Test template validation, ensuring correct JSON response.
        """
        req_bodies = {'url': {'template_url': "http://bogus.url/here"},
                      'inline': {'template': "http://bogus.url/here"},
                      'wrong': {}}
        requests = dict(
            (key, request(self, self.root, b"POST", self.uri + '/validate',
                          body=json.dumps(body).encode("utf-8")))
            for (key, body) in req_bodies.items()
        )
        responses = dict(
            (key, self.successResultOf(req)) for (key, req) in requests.items()
        )
        resp_bodies = dict(
            (key, self.get_responsebody(resp))
            for (key, resp) in responses.items() if key != 'wrong')
        self.assertEqual(responses['url'].code, 200)
        self.assertEqual(responses['inline'].code, 200)
        self.assertEqual(responses['wrong'].code, 400)
        self.assertTrue('Parameters' in resp_bodies['url'])
        self.assertTrue('Parameters' in resp_bodies['inline'])

    def test_stack_preview(self):
        """
        Test stack preview, ensuring correct JSON response.
        """
        req_body = {'foo': 'bar'}
        req = request(self, self.root, b"POST", self.uri + '/stacks/preview',
                      body=json.dumps(req_body).encode("utf-8"))
        resp = self.successResultOf(req)
        body = self.get_responsebody(resp)
        self.assertEqual(resp.code, 200)
        self.assertTrue('stack' in body)
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
#
# Copyright © 2021
#
# Distributed under terms of the MIT license.
"""
Description:
"""
import asyncio
import logging
import patterns
import view
from themesong import ThemeSong
import sys, getopt
from socket_client import SocketClient
# Module-wide logger; all records go to view.log at DEBUG level.
# NOTE(review): file logging presumably keeps stdout free for the View UI —
# confirm against view.py.
logging.basicConfig(filename='view.log', level=logging.DEBUG)
logger = logging.getLogger()
class IRCClient(patterns.Subscriber):
    """Bridge between the text UI (View) and the network layer.

    User input arrives through update() (observer callback) and is echoed
    to the view and forwarded to the SocketClient.
    """

    def __init__(self, HOST, PORT, music):
        super().__init__()
        self.username = ""
        self._run = True
        self._s = SocketClient(HOST, PORT)
        self._s.set_irc(self)
        self.music = music

    def set_view(self, view):
        self.view = view

    def set_username(self, username):
        self.username = username

    def update(self, msg):
        # TODO Will need to modify this
        if not isinstance(msg, str):
            raise TypeError("Update argument needs to be a string")
        if not msg:
            # Nothing to forward for an empty string.
            return
        logger.info(f"IRCClient.update -> msg: {msg}")
        self.process_input(msg)

    def process_input(self, msg):
        # TODO Will need to modify this
        self.add_msg(msg)
        self._s.setMsg(msg)
        if msg.lower().startswith('/quit'):
            # Command that leads to the closure of the process
            raise KeyboardInterrupt

    def add_msg(self, msg):
        self.view.add_msg(self.username, msg)

    async def run(self):
        """
        Driver of your IRC Client
        """
        self._s.start()

    def close(self):
        # Terminate connection
        logger.debug("Closing IRC Client object")
def get_help_menu():
    """Return the CLI usage text shown for -h/--help or bad arguments."""
    return """usage: irc_client.py [-h] [--server '<SERVER>' --port '<PORT>'] [-m]
optional arguments:
-h, --help Show this help message and exit
--server '<SERVER>' Target server to initiate a connection to
--port <PORT> Target port to use
-m Enable music
using IRC commands, after launching the client use:
USER <username> to initialize your username
NICK <nickname> to set your nickname
after that you will automatically join the #global channel
and you can start chatting!
"""
def init_client(cmdargs):
    """Parse command-line arguments and construct the IRCClient.

    Recognizes -h/--help, --server/-s, --port/-p and -m (music); prints the
    help menu and exits on parse errors, explicit help, or no arguments.

    :param cmdargs: argument list (typically sys.argv[1:]).
    :return: a configured IRCClient instance.
    """
    HOST = ''
    PORT = 0
    music = False
    try:
        opts, arguments = getopt.getopt(cmdargs, "hms:p:",
                                        ["help", "server=", "port="])
    except getopt.GetoptError:
        print(get_help_menu())
        sys.exit(2)
    for opt, arg in opts:
        # BUG FIX: the original tested `opt != '-h' or opt != '--help'`,
        # which is always true, so -h/--help never showed the menu, and the
        # trailing else printed the menu and exited for --server/--port too.
        if opt in ('-h', '--help'):
            print(get_help_menu())
            sys.exit()
        elif opt in ('-s', '--server'):
            HOST = arg
        elif opt in ('-p', '--port'):
            PORT = arg
        elif opt == '-m':
            music = True
    if len(opts) < 1:
        print(get_help_menu())
        sys.exit()
    return IRCClient(HOST, int(PORT), music)
def main(args):
    """Wire up the client, the view and (optionally) theme music, then run
    the UI and network coroutines until the user quits.

    :param args: command-line arguments (sys.argv[1:]).
    """
    client = init_client(args)
    logger.info("Client object created")
    with view.View() as v:
        logger.info("Entered the context of a View object")
        client.set_view(v)
        logger.debug("Passed View object to IRC Client")
        v.add_subscriber(client)
        logger.debug("IRC Client is subscribed to the View (to receive user input)")
        # BUG FIX: music_thread must be bound even when music is disabled;
        # the original referenced it unconditionally in the interrupt
        # handler, raising NameError on Ctrl-C / /quit without -m.
        music_thread = None
        if client.music:
            music_thread = ThemeSong()
            music_thread.start()

        async def inner_run():
            # Run the view loop and the client driver concurrently.
            await asyncio.gather(
                v.run(),
                client.run(),
                return_exceptions=True,
            )
        try:
            asyncio.run(inner_run())
        except KeyboardInterrupt:
            if music_thread is not None:
                music_thread.stop()
                music_thread.join()
            logger.debug("Signifies end of process")
    client.close()
if __name__ == "__main__":
    # Pass everything after the program name straight to main().
    # (Removed the unused `args = None` local from the original.)
    main(sys.argv[1:])
|
from itertools import cycle
from math import sqrt, acos, sin, cos
import numpy as np
from matplotlib import cm
from matplotlib.animation import FuncAnimation
from matplotlib.patches import Ellipse, Arrow
import matplotlib.pyplot as plt
from picp.util.geometry import apply_tf_on_point_cloud
from python.pypm.icp import Penalty
# Color palette cycled over scan/step indices when plotting trajectories.
SCANS_ID_TO_COLOR = ["blue", "red", "green", "orange", "magenta"]
# SCANS_ID_TO_COLOR = cm.get_cmap('tab20b').colors
# SCANS_ID_TO_COLOR = cm.get_cmap('Dark2').colors
def draw_elipse(ax, avg, cov, color, label=None):
    """Draw the 1-sigma ellipse of a 2x2 covariance matrix on `ax`.

    :param ax: matplotlib axes to draw on.
    :param avg: (x, y) center of the ellipse.
    :param cov: 2x2 covariance matrix (assumed symmetric).
    :param color: edge color.
    :param label: optional legend label; when given the ellipse is added via
        add_patch so it participates in the legend.
    :return: one-element list containing the Ellipse artist.
    """
    # eigh is the right routine for a symmetric covariance: real eigenvalues,
    # eigenvectors as COLUMNS of v.
    λ, v = np.linalg.eigh(cov)
    order = np.argsort(λ)[::-1]
    λ = λ[order]
    # BUG FIX: the original did `v = v[sort_indices]`, which reorders ROWS;
    # eigenvectors are columns, so the plotted orientation was wrong.
    v = v[:, order]
    # The angle is the angle of the eigen vector with the largest eigen value.
    angle = np.rad2deg(np.arctan2(v[1, 0].real, v[0, 0].real))
    # NOTE(review): the sign flip (angle=-angle) is kept from the original —
    # confirm it matches the plot's axis convention.
    ell = Ellipse(xy=avg,
                  width=2 * sqrt(λ[0]),  # λ[0] == σ² along the major axis
                  height=2 * sqrt(λ[1]),
                  angle=-angle,
                  edgecolor=color,
                  label=label)
    ell.set_facecolor('none')
    # Patches added via add_patch show up in the legend; add_artist ones don't.
    if label is not None:
        ax.add_patch(ell)
    else:
        ax.add_artist(ell)
    return [ell]
class ExperimentationPlot:
    """One subplot showing a single experiment: per-sample ICP trajectories
    (line + per-step colored scatter) and the penalty covariance ellipses."""

    def __init__(self, label, ax, trajectories, penalties):
        self.ax = ax
        self.label = label
        # One entry per sample; each trajectory is a list of
        # (final_tf, iteration data) steps — see AnimatedRegistration.
        self.trajectories = trajectories
        self.penalties = penalties
        # Artists created lazily in init().
        self.trajectories_plot = []
        self.trajectories_scatter = []
        self.ellipses = []

    def generate_colors(self):
        """Return one palette color per trajectory step, cycling as needed."""
        nb_step = len(self.trajectories[0])
        return [c for c, _ in zip(cycle(SCANS_ID_TO_COLOR), range(nb_step))]

    def init(self):
        """(Re)build the artists: empty trajectory lines and scatters,
        penalty ellipses, title and fixed axis limits."""
        self.ax.clear()
        nb_step = len(self.trajectories[0])
        self.trajectories_plot = [self.ax.plot([], [], '-', c='black', alpha=0.5)[0] for _ in self.trajectories]
        self.trajectories_scatter = [self.ax.scatter([0] * nb_step, [0] * nb_step, c=self.generate_colors(), marker='o') for _ in self.trajectories]
        self.ellipses = []
        # Only the first penalty ellipse carries the legend label.
        show_label = True
        for penalty in self.penalties:
            if isinstance(penalty, Penalty):
                self.ellipses += draw_elipse(self.ax, penalty.translation, penalty.cov, color='green',
                                             label='Penalties covariance' if show_label else None)
                show_label = False
        self.ax.set_title(self.label)
        self.ax.axis('equal')
        self.ax.set_xlim(-3.5, 3.5)
        self.ax.set_ylim(-5, 5)

    @property
    def static_lines(self):
        # Artists that do not change between animation frames.
        return self.ellipses

    @property
    def lines(self):
        # Artists updated on every animation frame.
        return self.trajectories_plot + self.trajectories_scatter

    def update(self, i):
        """Move every trajectory's line/scatter to its state at iteration i."""
        # Each sample is a trajectory
        for trajectory, plot, scatter in zip(self.trajectories, self.trajectories_plot, self.trajectories_scatter):
            first_tf, _ = trajectory[0]
            x = [first_tf.x]
            y = [first_tf.y]
            c = self.generate_colors()  # NOTE(review): computed but never used here
            # A trajectory is made of step (one step per pair of scan), which content iterations
            for _, step in enumerate(trajectory[1:]):
                final_tf, iters = step
                max_iter_sample = len(iters)
                iter = iters[min(max_iter_sample - 1, i)]  # A registration might end before the i-th iteration
                tf = iter["tf"]
                # Translation lives in the last column of the homogeneous tf.
                xi = tf[0, 2]
                yi = tf[1, 2]
                # Heading; only consumed by the commented-out arrow drawing.
                th = acos(tf[0, 0])
                x.append(xi)
                y.append(yi)
                # raw_arrow = Arrow(xi, yi, cos(th), sin(th), width=0.1, color='black')
                # if len(arrow) <= j:
                #     arrow.append(ax.add_patch(raw_arrow))
                # else:
                #     arrow[j].remove()
                #     arrow[j] = ax.add_patch(raw_arrow)
            # print(i, list(zip(x, y)))
            plot.set_data(x, y)
            scatter.set_offsets(list(zip(x, y)))
        self.ax.axis('equal')
class AnimatedRegistration:
    """Figure-level driver that animates several ExperimentationPlot panels
    side by side (one per experiment) over the ICP iterations."""

    def __init__(self, title, experiments, scans, gt_poses, walls, pairwise_dumps):
        self.title = title
        self.experiments = experiments
        # Move each raw scan into the world frame with its ground-truth pose.
        self.scans = [apply_tf_on_point_cloud(scan, pose) for scan, pose in zip(scans, gt_poses)]
        self.gt_poses = gt_poses
        self.walls = walls
        self.dump = pairwise_dumps
        """ experiments = { label => (trajectories, penalities), ...}
            trajectories = [(so2_tf, iter_datum), ...]
            penalities = [penalty, ...]
        """
        # Longest iteration count over every step of every trajectory —
        # bounds the animation's frame range.
        self.max_iter = max([len(iters) for trajectories, penalties in experiments.values() for trajectory in trajectories for final_tf, iters in trajectory])
        self.fig = plt.figure(figsize= (5* len(experiments), 6))
        # One panel per experiment, laid out on a single row.
        self.exp_plots = [ExperimentationPlot(label, self.fig.add_subplot(1, len(experiments), i + 1), trajectories, penalties) for i, (label, (trajectories, penalties)) in enumerate(experiments.items())]
        # self.axis = [(label, self.fig.add_subplot(1, len(experiments), i + 1), exp[0], exp[1]) for i, (label, exp) in enumerate(experiments.items())]
        self.func_ani = None

    def init_animation(self):
        """Create the FuncAnimation over [0, max_iter) frames."""
        self.func_ani = FuncAnimation(self.fig, self.update, frames=range(0, self.max_iter), init_func=self.init, blit=False)

    def start_animation(self):
        plt.show()

    def plot_iter(self, i):
        """Render a single static frame at iteration i."""
        self.init()
        self.update(i)
        plt.show()

    def plot_last_iter(self):
        self.plot_iter(self.max_iter)

    def init(self):
        """Build all static and animated artists; returns them for blitting."""
        # self.arrows = []
        self.lines = []
        self.static_lines = []
        self.fig.tight_layout()
        self.fig.subplots_adjust(top=0.85)
        for exp_plot in self.exp_plots:
            exp_plot.init()
            self.lines += exp_plot.lines
            self.static_lines += exp_plot.static_lines
        # The last step will have for reference a fusion of all scan except the last one, we then take the first iteration
        descriptors_all_refs = self.dump[-1][0]["filtered_ref"]
        # descriptors_read = self.dump[0]["filtered_read"]
        # NOTE(review): `exp_plot` here is the loop variable left over from
        # the for-loop above, so the reference cloud and the ground-truth
        # scans are drawn only on the LAST panel — confirm this is intended.
        self.static_lines += self._plot_scan(exp_plot.ax, self.gt_poses[0].to_tf() @ descriptors_all_refs.numpy, color="black", descriptors=descriptors_all_refs)
        for id, (scan, gt_pose, color) in enumerate(zip(self.scans, self.gt_poses, cycle(SCANS_ID_TO_COLOR))):
            self.static_lines += self._plot_scan(exp_plot.ax, scan, gt_pose, f"Scan #{id}", color)
        # exp_plot.ax.legend(loc='upper left')
        return self.static_lines + self.lines

    def update(self, i):
        """Advance every panel to iteration i and refresh the title."""
        self.fig.suptitle(f"Iter {i:02} - {self.title} \n Distribution of registration in a simulated corridor ")
        for exp_plot in self.exp_plots:
            exp_plot.update(i)
        return self.static_lines + self.lines

    def save(self, filename):
        """Write the animation to disk (requires init_animation first)."""
        self.func_ani.save(filename, dpi=80, writer='imagemagick')

    def _plot_scan(self, axis, scan, origin=None, label="", color="blue", animated=False, descriptors=None):
        """Scatter a 2xN point cloud, optionally with its origin marker,
        normal arrows and per-point covariance ellipses."""
        if origin is not None:
            lines = [axis.scatter(scan[0, :], scan[1, :], c=color, s=1, animated=animated)]  # , label=label+" scan points"
            lines += axis.plot(origin.x, origin.y, 'x', color=color, label=label+" origin" , animated=animated)
        else:
            lines = []
        if descriptors is not None:
            des_by_labels = descriptors.descriptors_by_labels
            if "normals" in des_by_labels:
                normals = des_by_labels["normals"]
                for x, y, nx, ny in zip(scan[0, :], scan[1, :], normals[0, :], normals[1, :]):
                    arr = axis.arrow(x, y, nx/2, ny/2)
                    lines.append(arr)
            if "covariances" in des_by_labels:
                covs = des_by_labels["covariances"]
                # Each column of `covs` is a flattened 2x2 covariance.
                for i, (x, y, cov) in enumerate(zip(scan[0, :], scan[1, :], covs.transpose())):
                    lines += draw_elipse(axis, (x, y), cov.reshape(2, 2), color=color)
        return lines

    def _plot_map(self, ax, map):
        """Draw the corridor walls as black segments."""
        return [ax.plot((wall.p1.x, wall.p2.x), (wall.p1.y, wall.p2.y), 'black') for wall in map]
|
import web
import pymysql
import fileMgr
import wayPoint
# Template renderer: pages extend templates/layout.html; `hasattr` is exposed
# so templates can probe post objects for optional fields.
render = web.template.render('templates/', base='layout',
                             globals={'hasattr': hasattr})
# URL → handler-class routing table for web.py.
# NOTE(review): '\d' inside a non-raw string is a deprecated escape sequence
# on modern Python — consider raw strings r'...' (left unchanged here).
urls = (
    '/', 'Index',
    '/view/(.*)', 'View',
    '/manage', 'Manage',
    '/manage/create', 'Create',
    '/manage/hide/(\d+)', 'Hide',
    '/manage/show/(\d+)', 'Show',
    '/manage/edit/(\d+)', 'Edit',
    '/fileop', 'fileMgr.FileMgr',
    '/app/waypoint/(.*)', 'wayPoint.WayPoint',
    '/waypoint', 'WayPoint'
)
# Single module-level MySQL connection and dict-cursor shared by all request
# handlers (autocommit on, utf8).
# NOTE(review): one shared cursor is not safe under concurrent requests —
# confirm the deployment runs single-threaded.
con = pymysql.connections.Connection(host='localhost',
                                     user='webuser',
                                     database='heli',
                                     autocommit=True,
                                     charset='utf8')
con.connect()
cur = pymysql.cursors.DictCursor(con)
def get_posts():
    """Return every row of the posts table as a list of dicts."""
    cur.execute("select * from posts;")
    rows = cur.fetchall()
    return rows
def get_post(id):
    """Fetch a single post by primary key (None if absent).

    SECURITY FIX: `id` comes from the URL; the original interpolated it
    directly into the SQL string (injection vector). Use a parameterized
    query so the driver escapes the value.
    """
    cur.execute("select * from posts where id=%s;", (id,))
    return cur.fetchone()
def add_post(title, content):
    """Insert a new visible post.

    SECURITY FIX: title/content come from the web form; parameterized
    placeholders replace the original string interpolation (SQL injection,
    and a crash on any value containing a quote).
    """
    cur.execute("""insert into posts(title, content, hidden)
                   values(%s, %s, 0);""", (title, content))
def set_post(id, title, content):
    """Update a post's title/content and unhide it.

    SECURITY FIX: parameterized query instead of the original string
    interpolation of web-form input (SQL injection vector).
    """
    cur.execute("update posts set title=%s, content=%s, hidden=0 where id=%s;",
                (title, content, id))
def hide_post(id):
    """Mark post `id` as hidden (soft delete).

    SECURITY FIX: `id` comes from the URL; use a parameterized query
    instead of string interpolation.
    """
    cur.execute("update posts set hidden=1 where id=%s;", (id,))
def show_post(id):
    """Make post `id` visible again.

    SECURITY FIX: `id` comes from the URL; use a parameterized query
    instead of string interpolation.
    """
    cur.execute("update posts set hidden=0 where id=%s;", (id,))
class Index:
    """GET /: render the public post list."""

    def GET(self):
        return render.index(get_posts())
class View:
    """GET /view/<id>: render a single article."""

    def GET(self, id):
        return render.article(get_post(id))
class Manage:
    """GET /manage: admin overview of every post (including hidden ones)."""

    def GET(self):
        posts = get_posts()
        return render.manage(posts)
class Create:
    """GET shows an empty editor form; POST creates the post and redirects."""

    def GET(self):
        return render.edit({})

    def POST(self):
        form = web.input()
        add_post(form.title, form.content)
        raise web.seeother("/manage")
class Edit:
    """GET shows the editor pre-filled with the post; POST saves and redirects."""

    def GET(self, id):
        return render.edit(get_post(id))

    def POST(self, id):
        form = web.input()
        set_post(id, form.title, form.content)
        raise web.seeother("/manage")
class Hide:
    """POST /manage/hide/<id>: hide a post, then return to the manage page."""

    def POST(self, id):
        hide_post(id)
        raise web.seeother("/manage")
class Show:
    """POST /manage/show/<id>: un-hide a post, then return to the manage page."""

    def POST(self, id):
        show_post(id)
        raise web.seeother("/manage")
class WayPoint:
    """GET /waypoint: render the waypoint page."""

    def GET(self):
        return render.waypoint()
if __name__ == "__main__":
    # Build the web.py application from the routing table and serve it.
    app = web.application(urls, globals())
    app.run()
|
# coding=utf-8
import cv2
import os, shutil
#
import tensorflow as tf
from tensorflow.python.keras.applications.resnet50 import ResNet50
from tensorflow.python.keras.applications.vgg19 import VGG19
from tensorflow.python.keras.models import load_model
import numpy as np
import sys
font = cv2.FONT_HERSHEY_SIMPLEX
from keras.optimizers import Adam
from PyQt5 import QtCore, QtGui, QtWidgets
from PyQt5.QtWidgets import QMessageBox
from PyQt5.QtGui import *
from PyQt5.QtWidgets import *
from PyQt5.QtCore import *
import utils
from scipy import misc
# Class labels, in the same order as the model's output vector.
CLASSES = (
    'NORMAL','PNEUMONIA')
#classification
# Load the trained classifier; change the file name to load a different model
# (there are two models under model/).
model = load_model('model/model-ResNet50-final.h5')
# Initialize the visualization interface.
class Ui_MainWindow(QtWidgets.QWidget):
    """Main window of the X-ray classifier demo.

    The user opens an image file; the loaded model predicts NORMAL vs
    PNEUMONIA, and the label plus confidence are drawn onto the image and
    shown in the window.
    """

    def __init__(self, parent=None):
        super(Ui_MainWindow, self).__init__(parent)
        # Initialize the parameters of the interface.
        # self.face_recognition = face.Recognition()
        self.timer_camera = QtCore.QTimer()  # timer (camera flow is disabled below)
        self.timer_camera_capture = QtCore.QTimer()  # timer (unused in this flow)
        self.cap = cv2.VideoCapture()  # camera handle; not used by the image-file flow
        self.CAM_NUM = 0  # default camera index
        self.set_ui()  # build widgets and layout
        self.slot_init()  # wire signals to handlers
        self.__flag_work = 0
        self.x = 0

    def set_ui(self):
        """Build the UI: one button, one line edit, one image label."""
        self.__layout_main = QtWidgets.QHBoxLayout()
        self.__layout_fun_button = QtWidgets.QVBoxLayout()
        self.__layout_data_show = QtWidgets.QVBoxLayout()
        # the open button
        self.pushButton = QtWidgets.QPushButton(u'Open the Image')
        # self.addface = QtWidgets.QPushButton(u'create the database')
        # self.captureface = QtWidgets.QPushButton(u'collect')
        # self.saveface = QtWidgets.QPushButton(u'save')
        # button height
        self.pushButton.setMinimumHeight(50)
        # self.addface.setMinimumHeight(50)
        # self.captureface.setMinimumHeight(50)
        # self.saveface.setMinimumHeight(50)
        # line edit that will display the predicted class name
        self.lineEdit = QtWidgets.QLineEdit(self)  # create QLineEdit
        # self.lineEdit.textChanged.connect(self.text_changed)
        # edit box size
        self.lineEdit.setMinimumHeight(50)
        # edit box location
        # self.opencamera.move(10, 30)
        # self.captureface.move(10, 50)
        self.lineEdit.move(15, 350)
        # label that shows the annotated image
        self.label = QtWidgets.QLabel()
        # self.label_move = QtWidgets.QLabel()
        # fix widget sizes
        self.lineEdit.setFixedSize(100, 30)
        # size of the image area
        self.label.setFixedSize(641, 481)
        self.label.setAutoFillBackground(False)
        self.__layout_fun_button.addWidget(self.pushButton)
        # self.__layout_fun_button.addWidget(self.addface)
        # self.__layout_fun_button.addWidget(self.captureface)
        # self.__layout_fun_button.addWidget(self.saveface)
        self.__layout_main.addLayout(self.__layout_fun_button)
        self.__layout_main.addWidget(self.label)
        self.setLayout(self.__layout_main)
        # self.label_move.raise_()
        self.setWindowTitle(u'HeartLab_Test')

    def slot_init(self):
        """Connect UI signals to their slots."""
        self.pushButton.clicked.connect(self.button_open_image_click)
        # self.addface.clicked.connect(self.button_add_face_click)
        # self.timer_camera.timeout.connect(self.show_camera)
        # self.timer_camera_capture.timeout.connect(self.capture_camera)
        # self.captureface.clicked.connect(self.button_capture_face_click)
        # self.saveface.clicked.connect(self.save_face_click)

    def button_open_image_click(self):
        """Open an image, run the classifier, and display the annotated result."""
        # clear the previous image
        self.label.clear()
        # clear the previous prediction text
        self.lineEdit.clear()
        # ask the user for an image file
        imgName, imgType = QFileDialog.getOpenFileName(self, "Open the Image", "", "*.jpg;;*.png;;All Files(*)")
        # read the image from the selected path
        self.img = misc.imread(os.path.expanduser(imgName), mode='RGB')
        # NOTE(review): imread returned RGB, so this call actually swaps to BGR;
        # the constant name is misleading but the channel permutation is the same.
        self.img = cv2.cvtColor(self.img, cv2.COLOR_BGR2RGB)
        # self.detection = self.img
        # resize the picture to the display size
        self.img = cv2.resize(self.img, (640, 480), interpolation=cv2.INTER_AREA)
        # image is none or not
        if self.img is None:
            return None
        # pre-processing: encode the image the way the network expects
        code = utils.ImageEncode(imgName)
        # prediction
        ret = model.predict(code)
        print(ret)
        # index of the class with the highest score
        res1 = np.argmax(ret[0, :])
        # print the category with the greatest similarity
        print('result:', CLASSES[res1])
        # draw the confidence (as a percentage) on the image
        print ('max:',np.max(ret[0, :]))
        cv2.putText(self.img, str(float('%.2f' % np.max(ret[0, :])) * 100) + '%', (1, 80),
                    cv2.FONT_HERSHEY_SIMPLEX, 2, (0, 0, 255),
                    thickness=2, lineType=2)
        # draw the predicted class name on the image
        cv2.putText(self.img, str(CLASSES[res1]), (1, 160),
                    cv2.FONT_HERSHEY_SIMPLEX, 2, (0, 0, 255),
                    thickness=2, lineType=2)
        # convert to a 4-channel buffer for Qt
        self.img_rgb = cv2.cvtColor(self.img, cv2.COLOR_BGR2BGRA)
        # wrap the pixel buffer in a QImage
        # NOTE(review): the buffer is BGRA while the format says RGB32 —
        # colors may be channel-swapped on screen; confirm against expected output.
        self.QtImg = QtGui.QImage(self.img_rgb.data, self.img_rgb.shape[1], self.img_rgb.shape[0],
                                  QtGui.QImage.Format_RGB32)
        # show the image in the label
        # self.label.resize(QtCore.QSize(self.img_rgb.shape[1], self.img_rgb.shape[0]))
        self.label.setPixmap(QtGui.QPixmap.fromImage(self.QtImg))
        print(CLASSES[res1])
        # echo the predicted class in the line edit
        self.lineEdit.setText(CLASSES[res1])

    def closeEvent(self, event):
        """Ask the user for confirmation before closing the window."""
        ok = QtWidgets.QPushButton()
        cacel = QtWidgets.QPushButton()
        # confirmation dialog
        msg = QtWidgets.QMessageBox(QtWidgets.QMessageBox.Warning, u"Close", u"Close or Not?")
        # attach confirm/cancel buttons with their roles
        msg.addButton(ok, QtWidgets.QMessageBox.ActionRole)
        msg.addButton(cacel, QtWidgets.QMessageBox.RejectRole)
        ok.setText(u'Confirm')
        cacel.setText(u'Cancel')
        # msg.setDetailedText('sdfsdff')
        if msg.exec_() == QtWidgets.QMessageBox.RejectRole:
            event.ignore()
        else:
            event.accept()
if __name__ == '__main__':
    # Start the Qt event loop with the main window shown.
    app = QtWidgets.QApplication(sys.argv)
    ui = Ui_MainWindow()
    ui.show()
    sys.exit(app.exec_())
|
# -*- coding: utf-8 -*-
from AnswerSelection import IAGru, OAGru_SMALL
from DataProcessor.MSRPD import MSRPD
from NeuralModel.IAGRU_WORD import IAGRU_WORD
from NeuralModel.OAGRU import OAGRU_small, OAGRU
from TaskBase import TaskBases
__author__ = 'benywon'
MSR = 'MSR'
class ParaphraseDetection(TaskBases):
    """Paraphrase-detection task (Python 2 code): couples the MSRP dataset
    with one of the attention-GRU models and exposes Train/Test entry points."""

    def __init__(self, MODEL=IAGru, DATASET=MSR, **kwargs):
        TaskBases.__init__(self)
        # Only the MSR paraphrase corpus is currently wired up.
        if DATASET == MSR:
            self.Data = MSRPD(**kwargs)
        # Pick the network; every variant is built in classification mode.
        if MODEL == IAGru:
            self.Model = IAGRU_WORD(data=self.Data, classfication=True, **kwargs)
        elif MODEL == OAGru_SMALL:
            self.Model = OAGRU_small(data=self.Data, classfication=True, **kwargs)
        else:
            self.Model = OAGRU(data=self.Data, classfication=True, **kwargs)

    def Test(self):
        """Evaluate on the TEST split; print and return the accuracy
        (labelled 'Precision' in the output)."""
        print '\nstart testing...'
        length = len(self.Data.TEST[0])
        total = 0.
        right = 0.
        for i in xrange(length):
            question = self.Data.TEST[0][i]
            answer_yes = self.Data.TEST[1][i]
            prediction = self.Model.test_function(question, answer_yes)
            true = self.Data.TEST[2][i]
            total += 1
            # Count a hit when the predicted index matches the gold label.
            if self.IsIndexMatch(prediction, true):
                right += 1
        precision = right / total
        print 'Precision is :\t' + str(precision)
        return precision

    @TaskBases.Train
    def Train(self):
        """Post-training hook (wrapped by TaskBases.Train): evaluate, then save
        the model with the achieved precision embedded in the file name."""
        precision = self.Test()
        append_name = self.Data.dataset_name + '_Precision_' + str(precision)
        self.Model.save_model(append_name)
if __name__ == '__main__':
    # Example run: IAGRU on MSRP with the hyper-parameters used in the experiments.
    c = ParaphraseDetection(optmizer='adadelta', MODEL=IAGru, DATASET=MSR, batch_training=False, sampling=3,
                            reload=False,
                            Margin=0.15,
                            N_out=2,
                            use_the_last_hidden_variable=False, epochs=50, Max_length=50,
                            N_hidden=50)
    c.Train()
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Wed Jul 12 10:06:13 2017
@author: wanjia
"""
class Handler(object):
    """Attaches a tag to a library item and writes the update back."""

    def addTag(self, zlib, item, tagname):
        """Add *tagname* (tag type '0') to *item* and push it via *zlib*.

        Prints error details when the write fails; presumably writeFailure is
        False on success and a dict with 'code'/'message' otherwise — confirm
        against the zlib client's API.
        """
        item.addTag(tagname, '0')
        updated = zlib.writeUpdatedItem(item)
        if updated.writeFailure != False:
            print("Error updating item")
            print(updated.writeFailure['code'])
            print(updated.writeFailure['message'])
from Jumpscale import j
class Package(j.baseclasses.threebot_package):
    """Threebot package for the farm-management web app: serves the bundled
    html/ directory as a single-page app under /farmmanagement."""

    def prepare(self):
        """
        is called at install time
        :return:
        """
        pass

    def start(self):
        """
        called when the 3bot starts: register the packaged html/ directory as
        an SPA location at /farmmanagement on the port-443 openresty website.
        :return:
        """
        ## TODO: BAD
        # self.db.models_add(path=self.package_root + "/models")
        # self.gedis_server.actors_add(j.sal.fs.joinPaths(self.package_root, "actors"))
        server = self.openresty
        website = server.get_from_port(443)
        locations = website.locations.get("threebotapp_locations")
        website_location = locations.locations_spa.new()
        website_location.name = "farmmanagement"
        website_location.path_url = "/farmmanagement"
        # website_location.use_jumpscale_weblibs = False
        fullpath = j.sal.fs.joinPaths(self.package_root, "html/")
        website_location.path_location = fullpath
        # Regenerate the web-server config for the new location, then the site.
        locations.configure()
        website.configure()

    def stop(self):
        """
        called when the 3bot stops
        :return:
        """
        pass

    def uninstall(self):
        """
        called when the package is no longer needed and will be removed from the threebot
        :return:
        """
        # TODO: clean up bcdb ?
        pass
|
from SimpleXMLRPCServer import SimpleXMLRPCServer
import logging
import os
import time
# Set up logging
logging.basicConfig(level=logging.DEBUG)
# XML-RPC server on localhost:9090; logRequests echoes each call to the log.
server = SimpleXMLRPCServer(('localhost', 9090), logRequests=True)
# Expose a function
def list_contents(dir_name):
    """List the entries of *dir_name*, after a deliberate 20-second delay
    (demonstrates a slow RPC call blocking the single-threaded server)."""
    logging.debug('list_contents(%s)', dir_name)
    time.sleep(20)
    return os.listdir(dir_name)
def list_contents2(dir_name):
    """List the entries of *dir_name* (fast variant, no artificial delay).

    Fix: the debug message previously said 'list_contents(%s)', mislabelling
    which endpoint was invoked in the server log.
    """
    logging.debug('list_contents2(%s)', dir_name)
    return os.listdir(dir_name)
# Register both handlers with the server, then serve until Ctrl-C.
server.register_function(list_contents)
server.register_function(list_contents2)
try:
    print 'Use Control-C to exit'
    server.serve_forever()
except KeyboardInterrupt:
    print 'Exiting'
from flask import Flask, render_template, url_for, Blueprint, request, redirect
from databaseIntegration.database_queries import Database
from databaseIntegration.database import create_connection, close_connection
from databaseIntegration.database_config import Config
app = Flask(__name__)
# One shared MySQL connection/cursor used by every request handler.
# NOTE(review): a single module-level cursor is not safe under concurrent
# requests — confirm the app runs single-threaded.
connection, cursor = create_connection(host=Config.database_host,
                                       port=Config.database_port,
                                       user=Config.database_user,
                                       password=Config.database_password,
                                       database=Config.database_name)
# NOTE(review): blueprint is created but never registered in this module.
blue_print = Blueprint('clinic', __name__)
@app.route('/')
def hello_world():
    """Render the landing page."""
    return render_template("index.html")
@app.route('/human_data_table')
def human_table():
    """Render the table of all patients."""
    db = Database(connection=connection, cursor=cursor)
    return render_template("human_data_table.html",
                           all_human=db.get_all_humans())
@app.route('/human_data_table/update', methods=["POST", "GET"])
def human_table_update():
    """On POST, update one patient row from the submitted form fields,
    then return to the patient table."""
    db = Database(connection=connection, cursor=cursor)
    if request.method == "POST":
        # Field order matches the human_data_upgrade() parameter order.
        fields = ("human_id", "gender", "age", "preliminary_diagnosis",
                  "admission_to_the_hospital", "arrival_date",
                  "approximate_growth", "hair_type", "room_number", "full_name")
        db.human_data_upgrade(*(request.form[name] for name in fields))
    return redirect('/human_data_table')
@app.route('/human_data_table/add_human', methods=["POST", "GET"])
def human_table_add():
    """On POST, insert a new patient row from the submitted form fields,
    then return to the patient table."""
    db = Database(connection=connection, cursor=cursor)
    if request.method == "POST":
        # Field order matches the human_add() parameter order.
        fields = ("human_id", "gender", "age", "preliminary_diagnosis",
                  "admission_to_the_hospital", "arrival_date",
                  "approximate_growth", "hair_type", "room_number", "full_name")
        db.human_add(*(request.form[name] for name in fields))
    return redirect('/human_data_table')
@app.route('/human_data_table/delete_human', methods=["POST", "GET"])
def human_table_delete():
    """On POST, delete the patient identified by the submitted human_id."""
    db = Database(connection=connection, cursor=cursor)
    if request.method == "POST":
        db.human_delete(request.form["human_id"])
    return redirect('/human_data_table')
@app.route('/room_data_table')
def room_table():
    """Render the table of all rooms."""
    db = Database(connection=connection, cursor=cursor)
    return render_template("room_data_table.html",
                           all_rooms=db.get_all_rooms())
@app.route('/room_data_table/update', methods=["POST", "GET"])
def rooms_table_update():
    """On POST, update one room row from the submitted form fields."""
    db = Database(connection=connection, cursor=cursor)
    if request.method == "POST":
        # Field order matches the room_data_upgrade() parameter order.
        fields = ("room_number", "room_id", "room_type", "full_name", "room_phone")
        db.room_data_upgrade(*(request.form[name] for name in fields))
    return redirect('/room_data_table')
@app.route('/room_data_table/add', methods=["POST", "GET"])
def rooms_table_add():
    """On POST, insert a new room row from the submitted form fields."""
    db = Database(connection=connection, cursor=cursor)
    if request.method == "POST":
        # Field order matches the room_add() parameter order.
        fields = ("room_number", "room_id", "room_type", "full_name", "room_phone")
        db.room_add(*(request.form[name] for name in fields))
    return redirect('/room_data_table')
@app.route('/room_data_table/delete_room', methods=["POST", "GET"])
def room_delete():
    """On POST, delete the room identified by the submitted room_id.

    Bug fix: this view was registered on '/room_data_table/add' — the same URL
    already used by rooms_table_add (copy-paste error), which made the delete
    endpoint unreachable. The route now mirrors the patient-table naming
    ('/human_data_table/delete_human').
    """
    human = Database(connection=connection, cursor=cursor)
    if request.method == "POST":
        room_id = request.form["room_id"]
        human.room_delete(room_id)
    return redirect('/room_data_table')
@app.route('/arrival_human_check')
def arrival_human_check():
    """List patients by arrival date.

    Generalized: the date was hard-coded to '2020-10-08'; it can now be
    supplied as ?date=YYYY-MM-DD, with the original value kept as the default
    so existing links behave identically.
    """
    arrival = Database(connection=connection, cursor=cursor)
    date = request.args.get('date', '2020-10-08')
    all_arrivals = arrival.arrival_date_check(date)
    return render_template("human_arrival_table.html",
                           all_arrivals=all_arrivals)
@app.route('/female_age_check')
def female_age_check():
    """List female patients filtered by age.

    Generalized: the age threshold was hard-coded to 18; it can now be
    supplied as ?age=N, with 18 kept as the default so existing links
    behave identically.
    """
    females = Database(connection=connection, cursor=cursor)
    age = int(request.args.get('age', 18))
    all_females = females.female_age_check(age)
    return render_template("female_age_table.html",
                           all_females=all_females)
if __name__ == '__main__':
    # Development entry point (Flask built-in server).
    app.run()
|
from panda3d.core import Vec3, BitMask32
# Viewport identifiers: one 3-D perspective view and three axis-aligned 2-D views.
VIEWPORT_3D = 0
VIEWPORT_2D_FRONT = 6
VIEWPORT_2D_SIDE = 7
VIEWPORT_2D_TOP = 8
# Bit masks keyed by viewport id; all 2-D viewport bits are OR'd together.
VIEWPORT_3D_MASK = BitMask32.bit(VIEWPORT_3D)
VIEWPORT_2D_MASK = BitMask32.bit(VIEWPORT_2D_FRONT) | \
                   BitMask32.bit(VIEWPORT_2D_SIDE) | \
                   BitMask32.bit(VIEWPORT_2D_TOP)
class ViewportSpec:
    """Base description of an editor viewport: an integer id and a display name."""

    def __init__(self, type, name):
        # Store the caller-supplied identifier and label verbatim.
        self.type = type
        self.name = name
class Viewport2DSpec(ViewportSpec):
    """Specification for an axis-aligned 2-D viewport.

    Presumably: unusedCoordinate is the world axis projected away, viewHpr the
    camera orientation, and flattenIndices/expandIndices map between 2-D view
    and 3-D world coordinates — confirm against the viewport rendering code.
    """

    def __init__(self, type, name, unusedCoordinate, viewHpr,
                 flattenIndices, expandIndices):
        super().__init__(type, name)
        self.unusedCoordinate = unusedCoordinate
        self.viewHpr = viewHpr
        self.flattenIndices = flattenIndices
        self.expandIndices = expandIndices
# Registry mapping each viewport id to its spec; the 2-D entries carry the
# dropped axis, camera HPR, and the 2D<->3D coordinate index maps.
VIEWPORT_SPECS = {
    VIEWPORT_3D: ViewportSpec(VIEWPORT_3D, "3D Perspective"),
    VIEWPORT_2D_FRONT: Viewport2DSpec(VIEWPORT_2D_FRONT, "2D Front (Y/Z)",
                                      0, Vec3(90, 0, 0), (1, 2),
                                      (-1, 0, 2)),
    VIEWPORT_2D_SIDE: Viewport2DSpec(VIEWPORT_2D_SIDE, "2D Side (X/Z)",
                                     1, Vec3(0, 0, 0), (0, 2),
                                     (0, -1, 2)),
    VIEWPORT_2D_TOP: Viewport2DSpec(VIEWPORT_2D_TOP, "2D Top (X/Y)",
                                    2, Vec3(0, -90, 0), (0, 1),
                                    (0, 2, -1))
}
|
from __future__ import print_function
from ryu.base import app_manager
from ryu.ofproto import ofproto_v1_3
class MultipleController(app_manager.RyuApp):
    """Skeleton Ryu application speaking OpenFlow 1.3.

    Bug fix: the class previously subclassed `app_mananger.RyuApp` (typo);
    the module is imported as `app_manager`, so the old name raised a
    NameError as soon as the module was loaded.
    """

    OFP_VERSIONS = [ofproto_v1_3.OFP_VERSION]

    def __init__(self, *args, **kwargs):
        super(MultipleController, self).__init__(*args, **kwargs)
|
# Generated by Django 2.1.7 on 2019-03-09 11:22
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated migration: adds Choice (FK to Question) and
    CorrectAnswer (one-to-one with Question), and makes question_text unique."""

    dependencies = [
        ('milliard', '0002_auto_20190309_0853'),
    ]

    operations = [
        migrations.CreateModel(
            name='Choice',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('choice_text', models.CharField(max_length=100)),
            ],
        ),
        migrations.CreateModel(
            name='CorrectAnswer',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('correct_answer_text', models.CharField(max_length=100)),
            ],
        ),
        migrations.AlterField(
            model_name='question',
            name='question_text',
            field=models.TextField(max_length=200, unique=True),
        ),
        migrations.AddField(
            model_name='correctanswer',
            name='question',
            field=models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='correct_answer', to='milliard.Question'),
        ),
        migrations.AddField(
            model_name='choice',
            name='question',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='variants', to='milliard.Question'),
        ),
    ]
|
from flask import Blueprint
from flask import jsonify
from shutil import copyfile, move
from google.cloud import storage
from google.cloud import bigquery
import dataflow_pipeline.pyg.pyg_ofima_beam as pyg_ofima_beam
import dataflow_pipeline.pyg.pyg_PUC_beam as pyg_PUC_beam
import dataflow_pipeline.pyg.pyg_CeCo_beam as pyg_CeCo_beam
import dataflow_pipeline.pyg.pyg_ajustes_beam as pyg_ajustes_beam
import dataflow_pipeline.pyg.pyg_puestos_beam as pyg_puestos_beam
import dataflow_pipeline.pyg.pyg_agentes_beam as pyg_agentes_beam
import os
import socket

# Blueprint grouping every P&G (PyG) ingestion endpoint.
pyg_api = Blueprint('pyg_api', __name__)
# File-server root: local mount when running on the 'contentobi' host,
# UNC/network path otherwise.
fileserver_baseroute = ("//192.168.20.87", "/media")[socket.gethostname()=="contentobi"]
@pyg_api.route("/pyg_ofima")
def pyg_ofima():
    """Ingest OFIMA CSVs: upload each file to Cloud Storage, delete the
    matching BigQuery rows (idempotent reload), run the Dataflow job, and
    archive files that processed successfully."""
    response = {}
    response["code"] = 400
    response["description"] = "No se encontraron ficheros"
    response["status"] = False
    local_route = fileserver_baseroute + "/BI_Archivos/GOOGLE/PYG/Ofima/"
    archivos = os.listdir(local_route)
    for archivo in archivos:
        if archivo.endswith(".csv"):
            # 4-char date token taken from fixed positions of the file name —
            # TODO confirm the naming convention.
            mifecha = archivo[6:10]
            storage_client = storage.Client()
            bucket = storage_client.get_bucket('ct-pyg')
            # Upload the file to Cloud Storage before handing it to Dataflow.
            blob = bucket.blob('ofima/' + archivo)
            blob.upload_from_filename(local_route + archivo)
            # Delete existing BigQuery rows for this date/source first.
            # NOTE(review): query built by string concatenation from the file
            # name; parameterized queries would be safer.
            deleteQuery = "DELETE FROM `contento-bi.PyG.ofima` WHERE fecha = '" + mifecha + "' AND FUENTE = 'OFIMA'"
            client = bigquery.Client()
            query_job = client.query(deleteQuery)
            #result = query_job.result()
            query_job.result()  # wait for the BigQuery delete job to finish
            # With BigQuery cleaned and the file uploaded, run the Beam job.
            mensaje = pyg_ofima_beam.run('gs://ct-pyg/ofima/' + archivo, mifecha)
            if mensaje == "Corrio Full HD":
                move(local_route + archivo, fileserver_baseroute + "/BI_Archivos/GOOGLE/PYG/Ofima/Procesados/"+archivo)
                response["code"] = 200
                response["description"] = "Se realizo la peticion Full HD"
                response["status"] = True
    return jsonify(response), response["code"]
##################################################################################################################################
# ____ _ _ ___ #
# / _ \ | | | | / __ \ #
# | |_| | | | | | / / \_| #
# | ___/ | | | | | | __ #
# | | \ \__/ / | \__/ / #
# |_| \____/ \____/ #
# #
##################################################################################################################################
@pyg_api.route("/pyg_PUC")
def pyg_PUC():
    """Ingest PUC CSVs: upload to Cloud Storage, clear matching BigQuery rows,
    run the Dataflow job, archive processed files."""
    response = {}
    response["code"] = 400
    response["description"] = "No se encontraron ficheros"
    response["status"] = False
    local_route = fileserver_baseroute + "/BI_Archivos/GOOGLE/PYG/PUC/"
    archivos = os.listdir(local_route)
    for archivo in archivos:
        if archivo.endswith(".csv"):
            # Date token from fixed file-name positions — TODO confirm format.
            mifecha = archivo[4:8]
            storage_client = storage.Client()
            bucket = storage_client.get_bucket('ct-pyg')
            # Upload the file to Cloud Storage before handing it to Dataflow.
            blob = bucket.blob('PUC/' + archivo)
            blob.upload_from_filename(local_route + archivo)
            # Delete existing BigQuery rows for this date (idempotent reload).
            deleteQuery = "DELETE FROM `contento-bi.PyG.PUC` WHERE fecha = '" + mifecha + "'"
            client = bigquery.Client()
            query_job = client.query(deleteQuery)
            #result = query_job.result()
            query_job.result()  # wait for the BigQuery delete job to finish
            # With BigQuery cleaned and the file uploaded, run the Beam job.
            mensaje = pyg_PUC_beam.run('gs://ct-pyg/PUC/' + archivo, mifecha)
            if mensaje == "Corrio Full HD":
                move(local_route + archivo, fileserver_baseroute + "/BI_Archivos/GOOGLE/PYG/PUC/Procesados/"+archivo)
                response["code"] = 200
                response["description"] = "Se realizo la peticion Full HD"
                response["status"] = True
    return jsonify(response), response["code"]
##################################################################################################################################
# ___ ____ ___ ___ #
# / __ \ | __| / __ \ / __ \ #
# / / \_| | |__ / / \_| / / \ \ #
# | | __ | __| | | __ | | | | #
# | \__/ / | |__ | \__/ / | \__/ / #
# \____/ |____| \____/ \____/ #
# #
##################################################################################################################################
@pyg_api.route("/pyg_CeCo")
def pyg_CeCo():
    """Ingest CeCo (cost-center) CSVs: upload to Cloud Storage, clear matching
    BigQuery rows, run the Dataflow job, archive processed files."""
    response = {}
    response["code"] = 400
    response["description"] = "No se encontraron ficheros"
    response["status"] = False
    local_route = fileserver_baseroute + "/BI_Archivos/GOOGLE/PYG/CeCo/"
    archivos = os.listdir(local_route)
    for archivo in archivos:
        if archivo.endswith(".csv"):
            # Date token from fixed file-name positions — TODO confirm format.
            mifecha = archivo[5:9]
            storage_client = storage.Client()
            bucket = storage_client.get_bucket('ct-pyg')
            # Upload the file to Cloud Storage before handing it to Dataflow.
            blob = bucket.blob('CeCo/' + archivo)
            blob.upload_from_filename(local_route + archivo)
            # Delete existing BigQuery rows for this date (idempotent reload).
            deleteQuery = "DELETE FROM `contento-bi.PyG.CeCo` WHERE fecha = '" + mifecha + "'"
            client = bigquery.Client()
            query_job = client.query(deleteQuery)
            #result = query_job.result()
            query_job.result()  # wait for the BigQuery delete job to finish
            # With BigQuery cleaned and the file uploaded, run the Beam job.
            mensaje = pyg_CeCo_beam.run('gs://ct-pyg/CeCo/' + archivo, mifecha)
            if mensaje == "Corrio Full HD":
                move(local_route + archivo, fileserver_baseroute + "/BI_Archivos/GOOGLE/PYG/CeCo/Procesados/"+archivo)
                response["code"] = 200
                response["description"] = "Se realizo la peticion Full HD"
                response["status"] = True
    return jsonify(response), response["code"]
##################################################################################################################################
@pyg_api.route("/ajustes")
def pyg_ajustes():
    """Ingest adjustments CSVs into the shared ofima table (FUENTE='AJUSTES'):
    upload, clear matching rows, run the Dataflow job, archive."""
    response = {}
    response["code"] = 400
    response["description"] = "No se encontraron ficheros"
    response["status"] = False
    local_route = fileserver_baseroute + "/BI_Archivos/GOOGLE/PYG/Ajustes/"
    archivos = os.listdir(local_route)
    for archivo in archivos:
        if archivo.endswith(".csv"):
            # Date token from fixed file-name positions — TODO confirm format.
            mifecha = archivo[8:12]
            storage_client = storage.Client()
            bucket = storage_client.get_bucket('ct-pyg')
            # Upload the file to Cloud Storage before handing it to Dataflow.
            blob = bucket.blob('ajustes/' + archivo)
            blob.upload_from_filename(local_route + archivo)
            # Delete existing rows for this date/source (idempotent reload).
            deleteQuery = "DELETE FROM `contento-bi.PyG.ofima` WHERE fecha = '" + mifecha + "' AND FUENTE = 'AJUSTES'"
            client = bigquery.Client()
            query_job = client.query(deleteQuery)
            #result = query_job.result()
            query_job.result()  # wait for the BigQuery delete job to finish
            # With BigQuery cleaned and the file uploaded, run the Beam job.
            mensaje = pyg_ajustes_beam.run('gs://ct-pyg/ajustes/' + archivo, mifecha)
            if mensaje == "Corrio Full HD":
                move(local_route + archivo, fileserver_baseroute + "/BI_Archivos/GOOGLE/PYG/Ajustes/Procesados/"+archivo)
                response["code"] = 200
                response["description"] = "Se realizo la peticion Full HD"
                response["status"] = True
    return jsonify(response), response["code"]
##################################################################################################################################
@pyg_api.route("/puestos")
def pyg_puestos():
    """Ingest puestos (positions) CSVs: upload to Cloud Storage, clear matching
    BigQuery rows, run the Dataflow job, archive processed files."""
    response = {}
    response["code"] = 400
    response["description"] = "No se encontraron ficheros"
    response["status"] = False
    local_route = fileserver_baseroute + "/BI_Archivos/GOOGLE/PYG/Puestos/"
    archivos = os.listdir(local_route)
    for archivo in archivos:
        if archivo.endswith(".csv"):
            # Date token from fixed file-name positions — TODO confirm format.
            mifecha = archivo[8:12]
            storage_client = storage.Client()
            bucket = storage_client.get_bucket('ct-pyg')
            # Upload the file to Cloud Storage before handing it to Dataflow.
            blob = bucket.blob('puestos/' + archivo)
            blob.upload_from_filename(local_route + archivo)
            # Delete existing BigQuery rows for this date (idempotent reload).
            deleteQuery = "DELETE FROM `contento-bi.PyG.puestos` WHERE fecha = '" + mifecha + "'"
            client = bigquery.Client()
            query_job = client.query(deleteQuery)
            #result = query_job.result()
            query_job.result()  # wait for the BigQuery delete job to finish
            # With BigQuery cleaned and the file uploaded, run the Beam job.
            mensaje = pyg_puestos_beam.run('gs://ct-pyg/puestos/' + archivo, mifecha)
            if mensaje == "Corrio Full HD":
                move(local_route + archivo, fileserver_baseroute + "/BI_Archivos/GOOGLE/PYG/Puestos/Procesados/"+archivo)
                response["code"] = 200
                response["description"] = "Se realizo la peticion Full HD"
                response["status"] = True
    return jsonify(response), response["code"]
##################################################################################################################################
@pyg_api.route("/agentes")
def pyg_agentes():
    """Ingest agentes (agents) CSVs: upload to Cloud Storage, clear matching
    BigQuery rows, run the Dataflow job, archive processed files."""
    response = {}
    response["code"] = 400
    response["description"] = "No se encontraron ficheros"
    response["status"] = False
    local_route = fileserver_baseroute + "/BI_Archivos/GOOGLE/PYG/Agentes/"
    archivos = os.listdir(local_route)
    for archivo in archivos:
        if archivo.endswith(".csv"):
            # Date token from fixed file-name positions — TODO confirm format.
            mifecha = archivo[8:12]
            storage_client = storage.Client()
            bucket = storage_client.get_bucket('ct-pyg')
            # Upload the file to Cloud Storage before handing it to Dataflow.
            blob = bucket.blob('agentes/' + archivo)
            blob.upload_from_filename(local_route + archivo)
            # Delete existing BigQuery rows for this date (idempotent reload).
            deleteQuery = "DELETE FROM `contento-bi.PyG.agentes` WHERE fecha = '" + mifecha + "'"
            client = bigquery.Client()
            query_job = client.query(deleteQuery)
            #result = query_job.result()
            query_job.result()  # wait for the BigQuery delete job to finish
            # With BigQuery cleaned and the file uploaded, run the Beam job.
            mensaje = pyg_agentes_beam.run('gs://ct-pyg/agentes/' + archivo, mifecha)
            if mensaje == "Corrio Full HD":
                move(local_route + archivo, fileserver_baseroute + "/BI_Archivos/GOOGLE/PYG/Agentes/Procesados/"+archivo)
                response["code"] = 200
                response["description"] = "Se realizo la peticion Full HD"
                response["status"] = True
    return jsonify(response), response["code"]
|
from django.contrib import admin
from events.models import Events,Profile,CampusRepresantative
# Register your models here.
class EventAdmin(admin.ModelAdmin):
    """Admin change-list configuration for Events."""
    model = Events  # NOTE(review): ModelAdmin does not use `model`; kept as-is.
    list_display = ('EventName' ,'EventDescription','eventCost')
class ProfileAdmin(admin.ModelAdmin):
    """Admin change-list configuration for Profile."""
    model = Profile
    list_display = ('Name','PhoneNo','Institute_Uni')
class CampusRepresentativeAdmmin(admin.ModelAdmin):
    """Admin change-list configuration for CampusRepresantative.

    NOTE(review): the class name contains a typo ('Admmin'); it is kept
    because the register call at module level refers to it by this name.
    """
    model = CampusRepresantative
    list_display = ('Name','Phone','email','Institute')
# Register each model with its admin configuration.
admin.site.register(Events,EventAdmin)
admin.site.register(Profile,ProfileAdmin)
admin.site.register(CampusRepresantative,CampusRepresentativeAdmmin)
|
from sys import version_info
import numpy as np
print "enter the nuber of columns for first matrix"
#m = input('enter the value for m:')
#n = input('enter the value for n:')
a=np.matrix([[1,2,3],[3,4,5],[5,2,1]])
b=np.matrix([[1,1,3],[6,4,5],[5,2,99]])
#matrix = m*n
i=0
j=0
#a=10,10
for i in range(len(a)):
for j in range(len(a)):
print np.matrix[i][j]
# print np.matrix[i][j]
|
# Polynomial-regression demo: fit salary vs. position level (degree 4) and plot.
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np

dataset=pd.read_csv('Position_Salaries.csv')
# X: the level column, kept 2-D via the 1:2 slice; Y: the salary column.
X=dataset.iloc[:,1:2].values
Y=dataset.iloc[:,2].values
from sklearn.preprocessing import PolynomialFeatures
# Expand X into [1, x, x^2, x^3, x^4] polynomial features.
pl_reg=PolynomialFeatures(degree=4)
X_poly=pl_reg.fit_transform(X)
from sklearn.linear_model import LinearRegression
lin_reg=LinearRegression()
lin_reg.fit(X_poly,Y)
# Scatter the raw data in red and overlay the fitted polynomial curve.
plt.scatter(X, Y,color='red')
plt.plot(X,lin_reg.predict(X_poly))
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.