# Title:
# Filename: .py
# Usage:
# Description:
# Author: Nevan Lowe
# Version: 1.1
# Python Revision: 3.*
# IPython Revision: n/a
# TO DO:
#
#---------------------------------------------------------
import random
from datetime import datetime
startTime = datetime.now()
# creates counters to track wins and losses.
wincount = 0
losecount = 0
cycles = int(input('How many tests do you want to run? '))
for x in range(cycles):
    # runs the game the number of times requested above
contents = [1, 0, 0]
# sets the doors and their contents. 1 = car, 0 = donkey
random.shuffle(contents)
# places the car inside a random door
pick = random.randint(0, 2)
# you pick a random door
contents.insert(3, contents.pop(pick))
pick = 1
# moves your door to the end (this is to help make 'revealing' an incorrect door easier). This is done by removing the door you picked and inserting it at the end
if contents[2] == 0:
# checks if there is a donkey behind your door
contents.remove(0)
# if there is, 'reveal' the only other door with a donkey. This is done by checking for the first door with a donkey behind it and deleting it
else:
contents.pop(0)
# if there isn't, 'reveal' one of the donkeys. This is done by deleting the first item in the list. Because your pick is the car and it's been moved to the end of
# the list, the first item in the list is guaranteed to be a donkey
pick -= 1
# this is effectively switching
if contents[pick] == 1:
# this is where the car is revealed
wincount += 1
# if it is under your door, I add 1 to the win counter
else:
losecount += 1
# if the car is not under your door, I add 1 to the loss counter
summary = ('Over the past {} games, switching won you a car {} times, while if you stuck with your '
           'original choice you would have won {} times, & switching caused you to win {}% of the time.')
percentage = wincount / (cycles / 100)
print(summary.format(cycles, wincount, losecount, percentage))
# these are the end results of the simulated Monty Hall games
print(datetime.now() - startTime)
# says how long the program took to run. This is done by comparing the time the program started to the current time
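# Hedged sanity check (not part of the original script): enumerating every car
# position and every initial pick shows that switching wins exactly when the
# first pick misses the car, i.e. 6 of 9 equally likely cases, or about 66.7%.
# Uncomment to compare the simulated percentage against the analytic value.
# switch_wins = sum(1 for car in range(3) for first_pick in range(3) if first_pick != car)
# print('Analytic switching win rate: {:.1f}%'.format(100 * switch_wins / 9))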
# import random
# from datetime import datetime
#
#
# # set a start time to evaluate the efficiency of the script
# startTime = datetime.now()
#
# # set counters to monitor the number of cycles and wins/losses
#
# wincount = 0
# losecount = 0
# wincount2 = 0
# losecount2 = 0
#
# for x in range(100):
# # sets the doors and their contents. 1 = car, 0 = donkey
# doors = [1, 2, 3]
# contents = [1, 0, 0]
#
# # places the car inside a random door
# random.shuffle(contents)
#
# # you pick a random door
# pick = random.randint(0, 2)
#
# # moves your door to the end (this is to help make 'revealing' an
# # incorrect door easier). This is done by removing the door you picked and
# # inserting it at the end
# doors.insert(3, doors.pop(pick))
# contents.insert(3, contents.pop(pick))
# pick = 1
#
#
# # checks if there is a donkey behind your door
# if contents[2] == 0:
# # if there is, 'reveal' the only other door with a donkey. This is done
# # by checking for the first door with a donkey behind it and deleting it
#
# doors.pop(contents.index(0))
# contents.remove(0)
# else:
# # if there isn't, 'reveal' one of the donkeys. This is done by deleting
# # the first item in the list. Because your pick is the car and it's been
# # moved to the end of the list, the first item in the list is guaranteed
# # to be a donkey
# doors.pop(0)
# contents.pop(0)
# # this is effectively switching
#
# pick -= 1
# if contents[pick] == 1:
# # this is where the car is revealed
# wincount += 1
# wincount2 += 1
# # if it is under your door, I add 1 to both wincounters
# else:
# losecount += 1
# losecount2 += 1
# # if the car is not under your door, I add 1 to both loss counters
# if wincount2 + losecount2 == 3000000:
# print('Over the past 3 million games, switching won you a car '+str(wincount2)+' times, while you won an ostrich '+str(losecount2)+' times, & switching caused you to win '+str(wincount2/30000)+'% of the time.')
# wincount2 = 0
# losecount2 = 0
# # this segment is just showing the results of 3 million games, rather than
# # waiting for the entire game to end. This is done by adding both of the
# # second counters
# # together and checking if the sum = 3000000. Then both of the second
# # counters are reset.
#
# print('Over the past 100 million games, switching won you a car '+str(wincount)+' times, while you won an ostrich '+str(losecount)+' times, & switching caused you to win '+str(wincount/1000000)+'% of the time.')
# # this is the end results of a 100 million Monty Hall Problems
#
# print(datetime.now() - startTime)
# # says how long the program took to run. This is done by comparing the time the
# # program started to the current time
#
|
class error_handler:
    """
    Manages non-fatal program errors. Errors are added with add(); the
    argument should be a string containing the error message.
    """
    def __init__(self):
        self.error_cache = ""
    def add(self, error):
        self.error_cache += "\n"
        self.error_cache += error
    def toString(self):
        if self.isEmpty():
            return "No errors reported"
        return self.error_cache
    def isEmpty(self):
        return self.error_cache == ""
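# Hedged usage sketch (not part of the original module): the handler is meant to be
# instantiated once, fed messages via add(), and dumped via toString().
# errors = error_handler()
# errors.add("config file missing, using defaults")
# errors.add("retrying connection (1/3)")
# print(errors.toString())   # the cached messages, one per line
# print(errors.isEmpty())    # False once anything has been added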
|
import random as rn
num=rn.randint(1,6)
count=0
while True:
inp_num=int(input("enter the number: "))
if inp_num == num:
count += 1
print(f"you guessed no {count} attempts" )
break
    elif inp_num < num:
        print("guessed no is less than actual no")
        count += 1
    else:
        print("guessed no is greater than actual no")
        count += 1
if count==3:
print("better luck next time")
break
|
from django.contrib import admin
# Register your models here.
from archives.models import ClassVideo, ClassNote, GroupLink
admin.site.register(ClassVideo)
admin.site.register(ClassNote)
admin.site.register(GroupLink)
|
from flask import Flask, render_template
from flask_mail import Mail, Message
from flaskext.mysql import MySQL
from queries import get_group_email_ids
app = Flask(__name__)
app.config['MAIL_SERVER']='smtp.gmail.com'
app.config['MAIL_PORT'] = 465
app.config['MAIL_USERNAME'] = 'idea.management.system4@gmail.com'
app.config['MAIL_PASSWORD'] = 'seproject'
app.config['MAIL_USE_TLS'] = False
app.config['MAIL_USE_SSL'] = True
mail = Mail(app)
mysql = MySQL()
app.config['MYSQL_DATABASE_USER'] = 'root'
app.config['MYSQL_DATABASE_PASSWORD'] = 'seproject'
app.config['MYSQL_DATABASE_DB'] = 'IMS_DB'
app.config['MYSQL_DATABASE_HOST'] = 'localhost'
mysql.init_app(app)
conn = mysql.connect()
cursor = conn.cursor()
@app.route("/email_welcome/<user_name>/<email_id>")
def email_welcome(user_name,email_id):
msg1 = Message('Welcome to IMS', sender = 'idea.management.system4@gmail.com', recipients = [email_id])
msg1.html=(render_template('welcome.html',sending_mail=True,name= user_name))
mail.send(msg1)
return "sent"
@app.route("/email_post_problem/<g_id>/<g_name>")
def email_post_problem(g_id,g_name):
list_emails = get_group_email_ids(cursor,g_id)
msg1 = Message('IMS PROBLEM STATEMENT', sender = 'idea.management.system4@gmail.com', recipients = list_emails)
msg1.html=(render_template('post_problem.html',group_name=g_name,sending_mail=True))
mail.send(msg1)
print(list_emails)
return "sent" + ' '.join(list_emails)
@app.route("/email_event/<g_id>/<g_name>/<e_date>")
def email_event(g_id,g_name,e_date):
list_emails = get_group_email_ids(cursor,g_id)
msg1 = Message('IMS EVENT INVITE', sender = 'idea.management.system4@gmail.com', recipients = list_emails)
msg1.html=(render_template('event_create.html',sending_mail=True,group_name=g_name,date=e_date))
print(list_emails)
mail.send(msg1)
return "sent" + ' '.join(list_emails)
@app.route("/email_group/<g_id>/<g_name>")
def email_group(g_id,g_name):
list_emails = get_group_email_ids(cursor,g_id)
msg1 = Message('IMS GROUP INVITE', sender = 'idea.management.system4@gmail.com', recipients = list_emails)
msg1.html=(render_template('group_create.html',group_name=g_name,sending_mail=True))
mail.send(msg1)
return "sent" + ' '.join(list_emails)
@app.route("/add_user/<g_name>/<email_id>")
def add_user(g_name,email_id):
msg1 = Message('IMS GROUP INVITE', sender = 'idea.management.system4@gmail.com', recipients = [email_id])
msg1.html=(render_template('group_create.html',group_name=g_name,sending_mail=True))
mail.send(msg1)
return "sent" + email_id
if __name__ == '__main__':
app.run(debug = True,port=5002)
|
# -*- coding: utf-8 -*-
class ObjectView(object):
    """Wraps a dict so its keys can be accessed as attributes."""
    def __init__(self, d):
        self.__dict__ = d
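# Hedged usage sketch (the dict contents are made up for illustration): ObjectView
# simply exposes a dict's keys as attributes by rebinding __dict__.
# config = ObjectView({'host': 'localhost', 'port': 8080})
# print(config.host, config.port)   # -> localhost 8080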
|
a = input().split()
if a[0] == a[3]:
print("F")
else:
print("V")
|
from azure.storage.blob import BlockBlobService
import pandas as pd
import time
import dash
import dash_core_components as dcc
import dash_html_components as html
import plotly.express as px
import numpy as np
import plotly
import plotly.graph_objs as go
from plotly.offline import plot
import random
from dash.dependencies import Input, Output
from app import app
#main concept: the word list and the weight list are kept as two separate lists iterated in parallel (no zip)
input_blob = 'topwordsfortopics.csv'
df = pd.read_csv(input_blob)
def Average(lst):
return sum(lst) / len(lst)
aspect_number = 1
#find dataframe rows how many
#print(len(df.index))
#1.access dataframe--------------------------------
dataframerows = len(df)
aspect_num1_string = df.at[0, 'top_words']
#2.put string and slice to list---------------------
aspect_num1_list = (aspect_num1_string.split(':'))
#3.same thing aspect num 1 word_impt slicing to list
aspect_num1_wordimpt = df.at[aspect_number-1, 'word_impt']
aspect_num1_wordimptlist = aspect_num1_wordimpt.split(':')
#print(aspect_num1_wordimptlist)
#4.using list comprehension to perform conversion
#have to float convert
#if round to int lose too much info
#list comprehension just like for loop but better performance
aspect_num1_wordimptlist = [float(i) for i in aspect_num1_wordimptlist]
#5.make word bigger
aspect_num1_wordimptlist_scaled = [i*0.45 for i in aspect_num1_wordimptlist]
#6.find average
aspect_num1_wordimptlist_average = Average(aspect_num1_wordimptlist_scaled)
#7.
#iterate and check if > average
#make word bigger or smaller based on the relative value compared to average
#access word weightage for element 0
#aspect_num1_wordimptlist[0]
#have to access weight list in sync with word list iterate together
#word and weightage lists will run in conjunction
aspect_num1_wordimptlist_scaled = [i * 1.3 if i > aspect_num1_wordimptlist_average else i*0.3 for i in aspect_num1_wordimptlist_scaled]
#8.scale word size up
scaled_words_final = []
#aspect_num1_list is words
#aspect_num1_wordimptlist_scaled is weightage but scaled
for weightage in aspect_num1_wordimptlist_scaled:
if weightage > aspect_num1_wordimptlist_average:
        scaled_words_final.append(1.4 * weightage*2) #this is actually just appending the scaled weightage to the scaled_words_final list
#scale the words based on preset float * weightage
elif weightage < aspect_num1_wordimptlist_average:
scaled_words_final.append(0.8 * weightage* 6)
#print('shawn word')
#print(scaled_words_final)
#dash scatter graph
#split needs to be string
#print(aspect_num1_list[0][0])
#9.
#plotly color scheme you can change
colors = [px.colors.sequential.Viridis[random.randrange(1,10)] for i in range(30)]
weights = scaled_words_final
#10.
#without this, the default 'trace' label would pop up in every word's hover tooltip;
#the '<extra>Weightage</extra>' suffix stored in mystring replaces it
#remove trace text
mystring = '<extra>Weightage</extra>'
aspect_num1_wordimptlist_str = [str(s) + mystring for s in aspect_num1_wordimptlist]
#11.
#legacy structure: the right chart originally requested two y-axes (trace0 and trace1), so the data is still built as a list of traces
#for chart2
trace0 = go.Bar(
name = 'Weight',
x = aspect_num1_list,
y = aspect_num1_wordimptlist,
width=0.5,
marker={'color': '#80e5ff'}
)
datas = [trace0]
variable_indicators = df["aspect_num"]
#12.
#layout.purely for display.
#html.H1 is dash built in way of display h1 html.
#html div styling make sure add it after , not )
layout = html.Div(children=[
html.H1(children='Word Cloud'),
html.Div([
html.Div([
dcc.Dropdown(
id="variable_choice",
options=[{"label": i, "value": i} for i in variable_indicators],value=1
)
], style={"width": "50%", "display" : "inline-block"}),
]),
#put graph in div encapsulation.Then can style.
#left chart
html.Div([
dcc.Graph(
id='scatter_chart',figure = {
'data' : [
go.Scatter(
x = [4,8,8,7,8,5,4,5.5,9,10,8,6,4,5,5,3,2,2,2,2],
y = [3,2,3.5,5.5,7,9,7,8,9,10,11,11,11,10,10.5,10,8,6,5,4],
mode = 'text',text = aspect_num1_list, textfont={'size': weights,'color' : colors, 'family' : 'Georgia'}, marker ={'opacity':0.01},
hovertemplate = aspect_num1_wordimptlist_str
)],
'layout' : go.Layout(
title = 'Word Cloud',
xaxis = {'showgrid':False, 'showticklabels':False, 'zeroline': False},
yaxis = {'showgrid':False, 'showticklabels':False, 'zeroline': False},
hovermode = 'closest', paper_bgcolor = "rgb(0,0,0,0)",
plot_bgcolor = "rgb(0,0,0,0)", #transparent bg
font_color = "#CECECE"
)
}, style={'display': 'inline-block', 'width' : '50%'}
),
#right chart
dcc.Graph(
id='weightage_chart',figure = {
'data' : datas,
'layout' : go.Layout(
title = 'Word Frequency and Weightage',
showlegend = False,
xaxis = {'title': '','showline': True},
yaxis = {'title': 'Word Count','side': 'left','showline': True}, paper_bgcolor = "rgb(0,0,0,0)",
plot_bgcolor = "rgb(0,0,0,0)", #transparent bg
font_color = "#CECECE"
)
}, style={'display': 'inline-block', 'width' : '50%'}
)
])
])
#wordcloud update
#dash built in callback for no refresh and instant response
#output to scatter chart
#but below callback update_graph_1 is standard format to override the data in the current chart
@app.callback(
Output("scatter_chart", "figure"),
[Input("variable_choice", "value")])
def update_graph_1(variable_choice):
#fixed x error on dropdown
    if variable_choice is None:
variable_choice = 1
#df create new copy as good practice
dff = df[df["aspect_num"]==variable_choice]
#update graph vars when click
#variable_choice = 1 for testing since no default var
aspect_number = variable_choice
#print(aspect_number)
aspect_num1_string = df.at[aspect_number-1, 'top_words']
#split the string from df and put inside first list of nested list
aspect_num1_list = (aspect_num1_string.split(':'))
aspect_num1_wordimpt = df.at[aspect_number-1, 'word_impt']
aspect_num1_wordimptlist = aspect_num1_wordimpt.split(':')
#remove trace text
mystring = '<extra>Weightage</extra>'
aspect_num1_wordimptlist_str = [str(s) + mystring for s in aspect_num1_wordimptlist]
#aspect_num = variable_choice
return {
'data' : [
go.Scatter(
x = [4,8,8,7,8,5,4,5.5,9,10,8,6,4,5,5,3,2,2,2,2],
y = [3,2,3.5,5.5,7,9,7,8,9,10,11,11,11,10,10.5,10,8,6,5,4],
mode = 'text',text = aspect_num1_list, textfont={'size': weights,'color' : colors}, marker ={'opacity':0.01},
hovertemplate = aspect_num1_wordimptlist_str
)],
'layout' : go.Layout(
title = "Topic " + str(variable_choice),
xaxis = {'showgrid':False, 'showticklabels':False, 'zeroline': False, 'rangemode' : 'tozero'},
yaxis = {'showgrid':False, 'showticklabels':False, 'zeroline': False, 'rangemode' : 'tozero'},
hovermode = 'closest',paper_bgcolor = "rgb(0,0,0,0)",
plot_bgcolor = "rgb(0,0,0,0)", #transparent bg
font_color = "#CECECE"
)
}
#word freq chart update
@app.callback(
Output("weightage_chart", "figure"),
[Input("variable_choice", "value")])
def update_graph_2(variable_choice):
#fixed x error on dropdown
    if variable_choice is None:
variable_choice = 1
#print(variable_choice)
#df create new copy
dff = df[df["aspect_num"]==variable_choice]
#update graph vars when click
#variable_choice = 1 for testing since no default var
aspect_number = variable_choice
#rint(aspect_number)
aspect_num1_string = df.at[aspect_number-1, 'top_words']
#split the string from df and put inside first list of nested list
aspect_num1_list = (aspect_num1_string.split(':'))
aspect_num1_wordimpt = df.at[aspect_number-1, 'word_impt']
aspect_num1_wordimptlist = aspect_num1_wordimpt.split(':')
#for chart2
trace0 = go.Bar(
name = 'Weight',
x = aspect_num1_list,
y = aspect_num1_wordimptlist,
width=0.5,
marker={'color': '#80e5ff'}
)
datas = [trace0]
#remove trace text
mystring = '<extra>Weightage</extra>'
aspect_num1_wordimptlist_str = [str(s) + mystring for s in aspect_num1_wordimptlist]
#aspect_num = variable_choice
return {
'data' : datas,
'layout' : go.Layout(
title = 'Word Weightage',
showlegend = False,
xaxis = {'title': '','showline': False, 'showgrid':False, 'zeroline':False},
yaxis = {'title': 'Word Weightage','side': 'left','showline': False,'showgrid': False,'zeroline' : False}, plot_bgcolor = "rgb(0,0,0,0)", #transparent bg
paper_bgcolor = "rgb(0,0,0,0)",
font_color = "#CECECE"
)
}
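# Hedged illustration with made-up toy data (not from the original CSV) of the
# word-sizing pipeline used in steps 2-7 above: colon-separated weights are parsed
# to floats, scaled, then boosted or shrunk relative to their average so the
# heaviest words stand out in the cloud.
# _demo_weights = [float(w) for w in "0.9:0.5:0.1".split(':')]
# _demo_scaled = [w * 0.45 for w in _demo_weights]
# _demo_avg = sum(_demo_scaled) / len(_demo_scaled)
# _demo_final = [w * 1.3 if w > _demo_avg else w * 0.3 for w in _demo_scaled]
# # -> the largest weight grows while the rest shrink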
|
import json
import textwrap
from typing import Iterable, Mapping, Sequence
from ai.backend.client.output.fields import scaling_group_fields
from ai.backend.client.output.types import FieldSpec
from .base import api_function, BaseFunction
from ..request import Request
from ..session import api_session
__all__ = (
'ScalingGroup',
)
_default_list_fields = (
scaling_group_fields['name'],
scaling_group_fields['description'],
scaling_group_fields['is_active'],
scaling_group_fields['created_at'],
scaling_group_fields['driver'],
scaling_group_fields['scheduler'],
)
_default_detail_fields = (
scaling_group_fields['name'],
scaling_group_fields['description'],
scaling_group_fields['is_active'],
scaling_group_fields['created_at'],
scaling_group_fields['driver'],
scaling_group_fields['driver_opts'],
scaling_group_fields['scheduler'],
scaling_group_fields['scheduler_opts'],
)
class ScalingGroup(BaseFunction):
"""
    Provides access to scaling-group information for the current user.
The scaling-group is an opaque server-side configuration which splits the whole
cluster into several partitions, so that server administrators can apply different auto-scaling
policies and operation standards to each partition of agent sets.
"""
def __init__(self, name: str):
self.name = name
@api_function
@classmethod
async def list_available(cls, group: str):
"""
List available scaling groups for the current user,
considering the user, the user's domain, and the designated user group.
"""
rqst = Request(
'GET', '/scaling-groups',
params={'group': group},
)
async with rqst.fetch() as resp:
data = await resp.json()
print(data)
return data['scaling_groups']
@api_function
@classmethod
async def list(
cls,
fields: Sequence[FieldSpec] = _default_list_fields,
) -> Sequence[dict]:
"""
List available scaling groups for the current user,
considering the user, the user's domain, and the designated user group.
"""
query = textwrap.dedent("""\
query($is_active: Boolean) {
scaling_groups(is_active: $is_active) {
$fields
}
}
""")
query = query.replace('$fields', ' '.join(f.field_ref for f in fields))
variables = {'is_active': None}
data = await api_session.get().Admin._query(query, variables)
return data['scaling_groups']
@api_function
@classmethod
async def detail(
cls,
name: str,
fields: Sequence[FieldSpec] = _default_detail_fields,
) -> dict:
"""
Fetch information of a scaling group by name.
:param name: Name of the scaling group.
:param fields: Additional per-scaling-group query fields.
"""
query = textwrap.dedent("""\
query($name: String) {
scaling_group(name: $name) {$fields}
}
""")
query = query.replace('$fields', ' '.join(f.field_ref for f in fields))
variables = {'name': name}
data = await api_session.get().Admin._query(query, variables)
return data['scaling_group']
@api_function
@classmethod
async def create(cls, name: str, description: str = '', is_active: bool = True,
driver: str = None, driver_opts: Mapping[str, str] = None,
scheduler: str = None, scheduler_opts: Mapping[str, str] = None,
fields: Iterable[str] = None) -> dict:
"""
Creates a new scaling group with the given options.
"""
if fields is None:
fields = ('name',)
query = textwrap.dedent("""\
mutation($name: String!, $input: CreateScalingGroupInput!) {
create_scaling_group(name: $name, props: $input) {
ok msg scaling_group {$fields}
}
}
""")
query = query.replace('$fields', ' '.join(fields))
variables = {
'name': name,
'input': {
'description': description,
'is_active': is_active,
'driver': driver,
'driver_opts': json.dumps(driver_opts),
'scheduler': scheduler,
'scheduler_opts': json.dumps(scheduler_opts),
},
}
data = await api_session.get().Admin._query(query, variables)
return data['create_scaling_group']
@api_function
@classmethod
async def update(cls, name: str, description: str = '', is_active: bool = True,
driver: str = None, driver_opts: Mapping[str, str] = None,
scheduler: str = None, scheduler_opts: Mapping[str, str] = None,
fields: Iterable[str] = None) -> dict:
"""
        Updates an existing scaling group.
"""
if fields is None:
fields = ('name',)
query = textwrap.dedent("""\
mutation($name: String!, $input: ModifyScalingGroupInput!) {
modify_scaling_group(name: $name, props: $input) {
ok msg
}
}
""")
query = query.replace('$fields', ' '.join(fields))
variables = {
'name': name,
'input': {
'description': description,
'is_active': is_active,
'driver': driver,
'driver_opts': None if driver_opts is None else json.dumps(driver_opts),
'scheduler': scheduler,
'scheduler_opts': None if scheduler_opts is None else json.dumps(scheduler_opts),
},
}
data = await api_session.get().Admin._query(query, variables)
return data['modify_scaling_group']
@api_function
@classmethod
async def delete(cls, name: str):
"""
Deletes an existing scaling group.
"""
query = textwrap.dedent("""\
mutation($name: String!) {
delete_scaling_group(name: $name) {
ok msg
}
}
""")
variables = {'name': name}
data = await api_session.get().Admin._query(query, variables)
return data['delete_scaling_group']
@api_function
@classmethod
async def associate_domain(cls, scaling_group: str, domain: str):
"""
Associate scaling_group with domain.
:param scaling_group: The name of a scaling group.
:param domain: The name of a domain.
"""
query = textwrap.dedent("""\
mutation($scaling_group: String!, $domain: String!) {
associate_scaling_group_with_domain(
scaling_group: $scaling_group, domain: $domain) {
ok msg
}
}
""")
variables = {'scaling_group': scaling_group, 'domain': domain}
data = await api_session.get().Admin._query(query, variables)
return data['associate_scaling_group_with_domain']
@api_function
@classmethod
async def dissociate_domain(cls, scaling_group: str, domain: str):
"""
Dissociate scaling_group from domain.
:param scaling_group: The name of a scaling group.
:param domain: The name of a domain.
"""
query = textwrap.dedent("""\
mutation($scaling_group: String!, $domain: String!) {
disassociate_scaling_group_with_domain(
scaling_group: $scaling_group, domain: $domain) {
ok msg
}
}
""")
variables = {'scaling_group': scaling_group, 'domain': domain}
data = await api_session.get().Admin._query(query, variables)
return data['disassociate_scaling_group_with_domain']
@api_function
@classmethod
async def dissociate_all_domain(cls, domain: str):
"""
Dissociate all scaling_groups from domain.
:param domain: The name of a domain.
"""
query = textwrap.dedent("""\
mutation($domain: String!) {
disassociate_all_scaling_groups_with_domain(domain: $domain) {
ok msg
}
}
""")
variables = {'domain': domain}
data = await api_session.get().Admin._query(query, variables)
return data['disassociate_all_scaling_groups_with_domain']
@api_function
@classmethod
async def associate_group(cls, scaling_group: str, group_id: str):
"""
Associate scaling_group with group.
:param scaling_group: The name of a scaling group.
:param group_id: The ID of a group.
"""
query = textwrap.dedent("""\
mutation($scaling_group: String!, $user_group: UUID!) {
associate_scaling_group_with_user_group(
scaling_group: $scaling_group, user_group: $user_group) {
ok msg
}
}
""")
variables = {'scaling_group': scaling_group, 'user_group': group_id}
data = await api_session.get().Admin._query(query, variables)
return data['associate_scaling_group_with_user_group']
@api_function
@classmethod
async def dissociate_group(cls, scaling_group: str, group_id: str):
"""
Dissociate scaling_group from group.
:param scaling_group: The name of a scaling group.
:param group_id: The ID of a group.
"""
query = textwrap.dedent("""\
mutation($scaling_group: String!, $user_group: String!) {
disassociate_scaling_group_with_user_group(
scaling_group: $scaling_group, user_group: $user_group) {
ok msg
}
}
""")
variables = {'scaling_group': scaling_group, 'user_group': group_id}
data = await api_session.get().Admin._query(query, variables)
return data['disassociate_scaling_group_with_user_group']
@api_function
@classmethod
async def dissociate_all_group(cls, group_id: str):
"""
Dissociate all scaling_groups from group.
:param group_id: The ID of a group.
"""
query = textwrap.dedent("""\
mutation($group_id: UUID!) {
disassociate_all_scaling_groups_with_group(user_group: $group_id) {
ok msg
}
}
""")
variables = {'group_id': group_id}
data = await api_session.get().Admin._query(query, variables)
return data['disassociate_all_scaling_groups_with_group']
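# Hedged illustration (not part of the client API) of how the GraphQL queries above
# are assembled: the '$fields' placeholder is replaced with the space-joined
# field_ref values of the requested FieldSpec objects before the query is sent.
# _example_query = textwrap.dedent("""\
#     query($name: String) {
#         scaling_group(name: $name) {$fields}
#     }
# """)
# _example_query = _example_query.replace('$fields', ' '.join(f.field_ref for f in _default_detail_fields))
# # _example_query now lists name, description, is_active, ... inside the braces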
|
import os
import pickle  # module for reading/writing Python data to files.
import re
import requests
from bs4 import BeautifulSoup
from .data import Episode, Webtoon, WebtoonNotExist
class Crawler:
ROOT_PATH = os.path.dirname(os.path.abspath(__file__))
SAVE_PATH = os.path.join(ROOT_PATH, 'saved_data')
def __init__(self):
self._webtoon_dict = {}
        os.makedirs(self.SAVE_PATH, exist_ok=True)  # with exist_ok=False this raises OSError if the folder already exists.
def get_html(self):
"""
        Returns the HTML of the full webtoon list.
        If it is already saved to a file, the file is read from ./saved_data/weekday.html.
        If it is not saved, the page is requested from the web.
        :return: the HTML content as a string.
"""
root = os.path.dirname(os.path.abspath(__file__))
dir_path = os.path.join(root, 'saved_data')
file_path = os.path.join(dir_path, 'weekday.html')
if os.path.exists(file_path):
html = open(file_path, 'rt').read()
else:
os.makedirs(dir_path, exist_ok=True)
response = requests.get('https://comic.naver.com/webtoon/weekday.nhn')
html = response.text
open(file_path, 'wt').write(html)
return html
@property
def webtoon_dict(self):
"""
        Returns every webtoon held in _webtoon_dict.
        Reads the HTML, parses the webtoons for each weekday, and adds a new Webtoon object for any parsed title not yet in _webtoon_dict.
:return: _webtoon_dict
"""
if not self._webtoon_dict:
html = self.get_html()
            soup = BeautifulSoup(html, 'lxml')  # the lxml parser is faster than the default html.parser.
            day_columns = soup.select_one('div.list_area.daily_all').select('.col')  # select(): picks tags with CSS selectors.
element_list = []
for col in day_columns:
day_element = col.select('.col_inner ul > li')
element_list.extend(day_element)
for element in element_list:
                # select_one(): returns the first of the selected tags. HTML attributes are accessed with brackets.
href = element.select_one('a.title')['href']
                # re.search(): returns the substring of href matching the regex, e.g. titleId=1234
matched_querystring = re.search(r'titleId=(\d+)', href)
                # group(1): returns only the captured id digits, e.g. 1234
                webtoon_id = matched_querystring.group(1)
                title = element.select_one('a.title').get_text(strip=True)  # strip whitespace from both ends of the string.
url_thumbnail = element.select_one('.thumb > a > img')['src']
if title not in self._webtoon_dict:
new_webtoon = Webtoon(webtoon_id, title, url_thumbnail)
self._webtoon_dict[title] = new_webtoon
return self._webtoon_dict
def webtoon_list(self):
"""
        Prints the full webtoon list by title.
"""
        # use dict.items() to iterate over a dictionary's keys and values together.
for title, webtoon in self.webtoon_dict.items():
print(title)
def get_webtoon(self, title):
"""
        Fetches the Webtoon object whose title matches the given title.
:param title:
:return: Webtoon
"""
try:
return self.webtoon_dict[title]
except KeyError:
raise WebtoonNotExist(title)
def save(self):
"""
        Writes _webtoon_dict to saved_data/crawler.pickle.
"""
        # pickle reads and writes Python objects in binary mode.
with open(os.path.join(self.SAVE_PATH, 'crawler.pickle'), 'wb') as f:
pickle.dump(self._webtoon_dict, f)
def load(self):
"""
        Reads the saved_data/crawler.pickle file and updates _webtoon_dict from it.
"""
with open(os.path.join(self.SAVE_PATH, 'crawler.pickle'), 'rb') as f:
self._webtoon_dict = pickle.load(f)
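# Hedged usage sketch (the title is a made-up placeholder):
# crawler = Crawler()
# crawler.webtoon_list()                      # print every webtoon title on the weekday page
# webtoon = crawler.get_webtoon('SomeTitle')  # raises WebtoonNotExist for unknown titles
# crawler.save()                              # persist _webtoon_dict to saved_data/crawler.pickle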
|
import config
import SocketServer, socket
import threading, time, struct
import logging
class VariableWatch_c(object):
#Class to keep track of variables that are being watched
def __init__(self, v):
self.var = v
self.addr = v.addr
self.change_count = -1 #This makes it so variable will be sent first time.
def check_change(self):
if self.change_count != self.var.change_count:
self.change_count = self.var.change_count
return True
else:
return False
class VariableWatch_list(object):
def __init__(self, variables):
self.list = []
self.addr_list = []
self.variables = variables #Reference to variable module
def update_addr_list(self):
self.addr_list = []
for v in self.list:
self.addr_list.append(v.addr)
def add(self, addr):
if addr in self.addr_list:
logging.info("GlassController: Address %04X allready in watch list. Not Added", addr)
else:
v = self.variables.get(addr)
if v!=None:
self.list.append(VariableWatch_c(v))
self.update_addr_list()
logging.debug("GlassController: Added Variable %04X to watch list.", addr)
def check(self):
change_list = []
for v in self.list:
if v.check_change():
change_list.append(v)
return change_list
class GlassConnection(object):
#This keeps track of all the data in the glass connection.
#Variables that need to be watched ....
#This does all the communicating with the aircraft module
def __init__(self, variables):
#self.aircraft = aircraft
self.response = None
self.variables = variables #Reference to variable module
#self.events = events
#List to watch variables
self.var_watch = VariableWatch_list(variables) #List of variables watched
self.name = "Undefined"
def getsend(self): #Checks and returns any data that needs to be sent.
response = []
        #Check for variable changes to send
var_change = self.var_watch.check()
if len(var_change)>0:
r = ''
desc = 'Var Change:'
for v in var_change:
r+= struct.pack("H", v.addr) + struct.pack(v.var.pack_format, v.var.data.value)
s_value = str(v.var.data.value)
format = "%" + v.var.format_s
value = format %v.var.data.value
desc+= '0x%04X' %v.addr + '=' + value + ','
response.append(['VD',r, desc]) #Response is command_id, then data
return response
def AddVariable(self, addr): #This adds a variable to watch
self.var_watch.add(addr)
def SetVariable(self, addr, value):
#Set the value of the variable
self.variables.set(addr,value)
def SetName(self, name):
#Set the name of the connection. (i.e. Co-Pilot PFD, RTU, etc.)
self.name = name
class ThreadedTCPRequestHandler(SocketServer.BaseRequestHandler):
#def __init__(self, request, client_address, server):
# print "MICHAEL", request, client_address, server
# SocketServer.BaseRequestHandler.__init__(self, request, client_address, server)
# return
def log_comm(self, dir, command_byte, command_data, desc):
#Keeps log of last 50 transmissions in both direction for each connection.
command_data_s = ""
for byte in command_data:
command_data_s += "%02X " %ord(byte)
time_stamp = "%7.2f" %(time.time() - self.start_time)
self.log.insert(0,[dir,time_stamp,command_byte,command_data_s, desc, ])
if len(self.log) > 50:
self.log.pop()
#def __init__(self, aircraft):
# self.aircraft = aircraft
# print aircraft
def setup(self):
self.connection = GlassConnection(self.server.variables)
self.TX_bytes = 0
self.RX_bytes = 0
self.start_time = time.time()
self.log = []
connections.add(self)
self.commands = ["AV","SV","SE","SN","PI"]
logging.info("GlassController: Connected to client %r", self.client_address)
#self.request.send('hi ' + str(self.client_address) + '\n')
#self.socket.settimeout(10)
self.request.setblocking(0)
self.variables = self.connection.variables
#self.events = self.connection.events
#Time of last transmission of data.
# -- Used so data is always sent to client, to make sure client hasn't disconnected.
self.time_lastTX = 0
def process_data(self, command_byte, command_data):
logging.debug("GlassController: Process Data Byte = %r Data = %r", command_byte, command_data)
data_len = len(command_data)
desc = "UNKNOWN"
if command_byte == "AV":
desc = "Add Variables to Watch "
for i in range(0,data_len, 2): #Decode addr every 2 char.
addr = struct.unpack("H",command_data[i:i+2])[0]
self.connection.AddVariable(addr)
desc += " %04X" %addr
elif command_byte == "SN":
self.connection.SetName(command_data)
desc = "Set Connection Name to %s" %command_data
elif command_byte == "SV":
i=0
while i<data_len: #Loop through till data runs out.
#Get addr
addr = struct.unpack("H",command_data[i:i+2])[0]
i+=2
#Determine size of data value.
#Make sure it exists.
if self.variables.exists(addr):
#Get variable
var = self.variables.dict[addr]
#Determine size
size = var.pack_size
value_s = command_data[i:i+size]
i+= size
#Try to write to it, if var is not writable will get error.
#print addr, "%r" %value_s
v = struct.unpack(var.pack_format,value_s)[0]
#print "V= ",v, var.pack_format
var.setvalue(v)
#self.connection.SetVariable(addr,value_s)
format = "%" + var.format_s
value = format %v
desc = "Set Variable 0x%04X %s to " %(addr,var.name) + value
elif command_byte == "SE": #Send Event
            #Events don't repeat.. so certain events can be passed through server,
#with server not needing to know of them.
#If events repeat then server needs to know size of data of each event,
#for it to parse it correctly.
addr = struct.unpack("H", command_data[:2])[0]
data = command_data[2:] #Data is all data after addr.
#Send addr, and data to event obj on server.
#self.events.process(addr,data)
#To see if server is aware of event and needs to process it.
#Automatically forward event to all clients.
#*** STILL NEED TO DO ****
self.log_comm('TX', command_byte, command_data, desc)
def parse_data(self):
#Go through data and find command and end codes to parse recv buffer
data = self.recv_buffer
buffer_length = len(data)
if buffer_length>0:
go=True
else:
go=False
while go:
go=False #If data is found then go will be set to true
#1st two byte command code
command_id = data[:2]
if command_id in self.commands: #Check to make sure it is command code
#Now get length two bytes
length = struct.unpack("H", data[2:4])[0]
#Check if buffer is long enough for all data.
if buffer_length >= length + 4:
#Parse data
command_data = data[4:length+4]
logging.debug("GlassController: Command %r %r", command_id, command_data)
self.process_data(command_id, command_data)
#Delete from recv buffer all data before end point
self.recv_buffer = data = data[length+4:]
#print "AFTER %r -- %r" %(self.recv_buffer, data)
if len(data) > 0: #If more data then check next data
go = True
#print self.recv_buffer, i_start, i_end
else:
logging.warning("GlassController: ERROR: Command %s Not Valid " ,command_id)
self.recv_buffer = ''
def sendrecv(self):
        #Send and receive data
time.sleep(1/60.)
temp_time = time.time()
#Send data if available
if len(self.send_buffer)>=1:
self.request.send(self.send_buffer)
logging.debug("GlassController: Sending %r", self.send_buffer)
self.send_buffer = '' #Clear buffer after it has been sent.
self.time_lastTX = temp_time
elif temp_time - self.time_lastTX > 3:
self.add_to_send([['PI','','Ping from Server']])
        #Try to receive data
try:
data = self.request.recv(1024)
#print "DATA %r" %data
self.RX_bytes += len(data)
self.recv_buffer += data
except socket.error, e:
            if e[0]==11 or e[0]==10035: #Standard error if no data to receive
                pass
            else: #Something is wrong, a true error occurred
                logging.warning("GlassController: Socket Error %r", e)
                self.go = False #Quit connection
def add_to_send(self, response):
#Takes data to send in list form
for id, data, desc in response: #Cycle through all data that needs to be sent.
length = len(data)
send_s = id + struct.pack("H",length) + data
self.send_buffer += send_s
self.TX_bytes += len(send_s)
self.log_comm('RX', id, data, desc)
#print id, length, "%r" %data
def handle(self):
self.connected = False
self.initial_sent = False
self.recv_buffer = ''
self.send_buffer = ''
self.go = True
self.request.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY,1)
while self.go:
self.add_to_send(self.connection.getsend()) #Sees if any response is needed
self.sendrecv()
if self.go:
self.parse_data()
def finish(self):
logging.info("GlassController: Disconecteed %r", self.client_address)
#self.request.send('bye ' + str(self.client_address) + '\n')
del(self)
class ThreadedTCPServer(SocketServer.ThreadingMixIn, SocketServer.TCPServer):
#Threads will close if main program stops.
daemon_threads = True
#Allows server to reuse addresses, if sockets don't shutdown correctly.
allow_reuse_address = True
def __init__(self, server_address, RequestHandlerClass, variables):
SocketServer.TCPServer.__init__(self, server_address, RequestHandlerClass)
#self.aircraft = aircraft_data #This is so the handler can access the aircraft data.
self.variables = variables
#self.events = events
class Glass_Server_c():
def __init__(self, variables, port):
self.port = port
self.server = ThreadedTCPServer(('', self.port), ThreadedTCPRequestHandler, variables)
#print self.server.server_address
self.go = True
server_thread = threading.Thread(target=self.serve_forever)
# Exit the server thread when the main thread terminates
server_thread.setDaemon(True)
server_thread.start()
logging.info("Glass Server running in thread: %r", server_thread.getName())
logging.info("Glass Server Address: %r", self.server.server_address)
def serve_forever(self):
self.server.timeout = 3.0
while self.go:
self.server.handle_request()
def shutdown(self):
#self.server.shutdown()
self.go = False
class mod_data_c(object):
def __init__(self, mod_list, variables):
#Go through mod_list and make list of data objects.
#Used to run test, comp, or comp_second on all modules.
self.mod_data = []
for mod in mod_list:
self.mod_data.append(mod.data(variables))
def test(self):
#print "MOD DATA TEST"
for mod_data in self.mod_data:
mod_data.test()
def comp(self):
#print "MOD DATA COMP"
for mod_data in self.mod_data:
mod_data.comp()
class Glass_Connections_c(object):
def __init__(self):
self.list = {}
self.count = 0
def add(self, connection):
self.list[self.count] = connection
self.count += 1
def AJAX_list(self):
out = []
for key in self.list:
conn = self.list[key]
out.append([conn.go, key, conn.connection.name, conn.client_address[0], conn.client_address[1], conn.TX_bytes, conn.RX_bytes])
return out
def AJAX_log(self, index):
if index in self.list.keys():
return self.list[index].log
else:
return [["Key not found", "YO"]]
class Glass_Controller_c(object):
def setup_Modules(self):
import variables.variable as variable
import modules
# try:
# import modules
# except ImportError:
# print "IMPORT ERROR - GlassServer.py Modules"
# sys.path.append(os.path.split(sys.path[0])[0])
# import config
#self.variables = variable.variable_c()
self.variables = variable.variables
self.variables.parse_variable_files(modules.variable_files)
self.mod_data = mod_data_c(modules.mod_list, self.variables)
#create var file
self.variables.create_var_file('variables/var.txt')
#self.tests = test.Test_c(variable.variables)
import gstest
self.gstest = gstest
def setup_FSComm(self, variables):
import FlightSim.comm
#Load module data
FlightSim.comm.FS_Comm.load_mod_data(self.mod_data)
self.FS_Comm = FlightSim.comm.FS_Comm
self.FS_Comm.setup_sim(config.connection.active)
def __init__(self):
logging.info("Glass Controller Initializing")
self.setup_Modules()
self.setup_FSComm(self.variables)
self.init_server()
def init_server(self):
self.Glass_Server = Glass_Server_c(self.variables, config.general.glassserver_port)
logging.info("GlassServer is Running")
def comp(self):
#print "COMP"
self.mod_data.comp()
self.gstest.run_test()
def quit(self):
logging.info("Glass Controller Quiting")
self.FS_Comm.quit()
self.Glass_Server.shutdown()
controller = Glass_Controller_c()
connections = Glass_Connections_c()
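#Hedged sketch (not part of the original module) of the wire format parse_data()
#and add_to_send() use above: a 2-character command id, a native unsigned-short
#length, then the payload. A client could set its connection name like this:
#def pack_command(command_id, data):
#    #Mirrors add_to_send()/parse_data(): id + 2-byte length + payload
#    return command_id + struct.pack("H", len(data)) + data
#sock.send(pack_command("SN", "Co-Pilot PFD"))   #sock is a hypothetical connected socket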
|
import pytest
import logging
import sys, os
import eons
import esam
sys.path.append(os.path.join((os.path.dirname(os.path.abspath(__file__))), "data"))
from SimpleDatum import SimpleDatum
def test_datum_creation_via_self_registering():
logging.info("Creating SimpleDatum via self Registration")
# datum = SelfRegistering("SimpleDatum", name="R4ND0M N4M3") #TODO: How do?
datum = eons.SelfRegistering("SimpleDatum")
logging.info(f"datum = {datum.__dict__}")
# logging.info("Done")
assert(datum is not None)
def test_datum_creation_via_direct_init():
logging.info("Creating SimpleDatum via direct initialization")
datum = SimpleDatum("SimpleDatum")
logging.info(f"datum = {datum.__dict__}")
# logging.info("Done")
assert(datum is not None)
|
def men_from_boys(arr):
even, odd = [], []
[even.append(x) if x%2==0 else odd.append(x) for x in set(arr)]
return sorted(even) + sorted(odd,reverse=True)
'''
Scenario
Now that the competition gets tough it will Sort out the men from the boys .
Men are the Even numbers and Boys are the odd
Task
Given an array/list [] of n integers, Separate The even numbers from the odds,
or Separate the men from the boys
Notes
Return an array/list where Even numbers come before the odds
Since Men are stronger than the Boys, sort the Even numbers in
ascending order While the odds in descending order.
Array/list numbers could be a mixture of positives , negatives .
'''
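# Hedged usage example (inputs made up, not from the kata statement):
# print(men_from_boys([7, 3, 14, 17]))        # -> [14, 17, 7, 3]
# print(men_from_boys([2, 43, 95, 90, 37]))   # -> [2, 90, 95, 43, 37]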
|
# Copyright 2021 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import annotations
from typing import Iterable
from pants.backend.swift.goals import tailor
from pants.backend.swift.target_types import SwiftSourcesGeneratorTarget, SwiftSourceTarget
from pants.engine.rules import Rule
from pants.engine.target import Target
from pants.engine.unions import UnionRule
def rules() -> Iterable[Rule | UnionRule]:
return tailor.rules()
def target_types() -> Iterable[type[Target]]:
return (
SwiftSourceTarget,
SwiftSourcesGeneratorTarget,
)
|
from cctpy.baseutils import Vectors, Equal, Debug
import unittest
import numpy as np
class BaseUtilsTest(unittest.TestCase):
def test_equal_float(self):
self.assertTrue(Equal.equal_float(1., 1., 1e-5))
self.assertTrue(Equal.equal_float(np.sqrt(2), np.sqrt(2), 1e-5))
self.assertTrue(Equal.equal_float(np.sqrt(2), np.sqrt(2) + 1e-6, 1e-5))
def test_length(self):
self.assertEqual(Vectors.length(np.array([1., 1.])), np.sqrt(2))
self.assertEqual(Vectors.length(np.array([1., 1., 1.])), np.sqrt(3))
def test_update_length(self):
arr1 = np.array([1.])
Vectors.update_length(arr1, 2)
arr2 = np.array([2.])
self.assertTrue(Equal.equal_vector(arr1, arr2, 1e-10))
arr1 = np.array([1., 1.])
Vectors.update_length(arr1, 2)
arr2 = np.array([np.sqrt(2), np.sqrt(2)])
self.assertTrue(Equal.equal_vector(arr1, arr2, 1e-10))
def test_normalize_locally(self):
arr = np.array([1., 1.])
Vectors.normalize_self(arr)
self.assertTrue(Equal.equal_float(Vectors.length(arr), 1.))
arr = np.array([23., 12.])
Vectors.normalize_self(arr)
self.assertTrue(Equal.equal_float(Vectors.length(arr), 1.))
arr = np.array([0, -1.])
Vectors.normalize_self(arr)
self.assertTrue(Equal.equal_float(Vectors.length(arr), 1.))
def test_rotate_self_z_axis(self):
v2 = np.array([2.0, 3.0])
r1 = Vectors.rotate_self_z_axis(v2.copy(), 0.1)
r2 = Vectors.rotate_self_z_axis(v2.copy(), 0.2)
r3 = Vectors.rotate_self_z_axis(v2.copy(), -0.1)
r4 = Vectors.rotate_self_z_axis(v2.copy(), 1.0)
# print(r1, r2, r3,r4)
self.assertTrue(Equal.equal_vector(r1, np.array([1.6905080806155672, 3.184679329127734])))
self.assertTrue(Equal.equal_vector(r2, np.array([1.3641251632972997, 3.3375383951138478])))
self.assertTrue(Equal.equal_vector(r3, np.array([2.2895085804965363, 2.785345662540421])))
self.assertTrue(Equal.equal_vector(r4, np.array([-1.4438083426874098, 3.3038488872202123])))
def test_debug(self):
Debug.print_traceback()
if __name__ == '__main__':
unittest.main(verbosity=1)
|
#-*- coding: utf-8 -*-
from googlefinance.client import get_price_data
import pandas as pd
import datetime
stock_list = pd.read_csv('data/krx-list.csv')
stock_code = stock_list.code
# parameter setting
param = {
'q': "005930", # Stock code (ex: "005930": Samsung Electronics)
'i': "86400", # Interval size in seconds ("86400" = 1 day intervals)
'x': "KRX", # Stock exchange symbol on which stock is traded (ex: "NASD")
'p': "365d" # Period (Ex: "1Y" = 1 year)
}
# check file existence
# assign file path
PATH_FILE = 'data/'+param['q']+'.csv'
# figure out number of days to update
with open(PATH_FILE, 'r') as f:
lines = f.readlines()
last_row = lines[-1].split()[0] # the last day of current data set
last_date = datetime.datetime.strptime(last_row, '%Y-%m-%d').date()
today = datetime.date.today() # today
diff = today - last_date # number of days to update
# loop
for x in stock_code:
param['q'] = x # assign stock code
df = get_price_data(param) # get data via googlefinance
df.to_csv('data/'+param['q']+'.csv', sep=',') # save as csv
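# Hedged sketch (an assumption, not in the original script): the 'diff' computed above
# could drive the request window so only the missing days are fetched instead of a
# fixed 365d period, e.g. before the loop:
# param['p'] = str(diff.days) + 'd'   # fetch only the days since the last saved row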
|
#!/usr/bin/env python
# ----------------------------------------------------------
# aircraft_data MODULE for GlassCockpit project RJGlass
# ----------------------------------------------------------
# This module handles and stores all aircraft data, and communicates with FSX via SimConnect
#
# Copyright 2007 Michael LaBrie
#
# This file is part of RJGlass.
#
# RJGlass is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
# RJGlass is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# test mod for CVS
# ---------------------------------------------------------------
import time
import variables.valid_check as valid_check
class xpdr_check(object):
#XPDR specific valid check. Digit from 0000 to 7777
def test(self, value):
valid = True
        temp_v = int(value) #Make sure the value is a positive int
if ((7778 < temp_v) or (temp_v < 0)):
valid = False
digits = [temp_v / pow(10,i) %10 for i in range(4)]
for i in digits:
#If any digit >7 then not valid Transponder Code
if (i>7):
valid = False
if valid:
return value
else:
return None
class radio_c(object):
def __init__(self,variable):
self.NAV1_ACTIVE = variable.byName('NAV1_ACTIVE') #self.IAS.value is value read from FSX
#variable.add_check(valid_check.within(118.000,135.000), ['NAV1_ACTIVE'])
#Set up Valid Checks
navs = ['NAV1_ACTIVE','NAV1_STANDBY','NAV2_ACTIVE','NAV2_STANDBY']
variable.add_test(valid_check.within(108.000,117.990), navs)
variable.add_test(valid_check.roundto(0.05), navs)
coms = ['COM1_ACTIVE','COM1_STANDBY','COM2_ACTIVE','COM2_STANDBY']
variable.add_test(valid_check.within(118.000,136.990), coms)
variable.add_test(valid_check.roundto(0.025), coms)
adfs = ['ADF1_ACTIVE','ADF1_STANDBY','ADF2_ACTIVE','ADF2_STANDBY']
variable.add_test(valid_check.within(100.0,1799.99), adfs)
variable.add_test(valid_check.roundto(0.1), adfs)
variable.add_test(xpdr_check(),['XPDR'])
obs = ['NAV1_OBS','NAV2_OBS']
variable.add_test(valid_check.within(0,360), obs)
#self.NAV1_ACTIVE.add_test(valid_check.within(118.000,135.000))
#self.NAV1_ACTIVE.add_test(valid_check.roundto(0.05))
def test(self):
pass
def comp(self):
#Computations per frame
pass
class data(object):
def __init__(self, variable):
self.variable = variable
self.radio = radio_c(variable)
def comp(self):
#Client is true, if RJGlass is in client or test mode.
#global_time = globaltime.value
#Computer delta_t = Time between last comp and this one
self.radio.comp()
def comp_second(self):
pass
def test(self):
#time.sleep(0.01)
#self.airspeed.IAS.value += 1
pass
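#Hedged illustration (not part of the original module) of the digit check in
#xpdr_check.test(): integer-dividing by powers of ten pulls out each of the four
#transponder digits, every one of which must be 0-7.
#value = 1278
#digits = [value / pow(10, i) % 10 for i in range(4)]   #Python 2 division -> [8, 7, 2, 1]
##8 > 7, so 1278 would be rejected as a transponder code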
|
################################################################################
# #
# PLOT ONE PRIMITIVE #
# #
################################################################################
import hdf5_to_dict as io
import plot as bplt
from analysis_fns import *
import units
import matplotlib
import matplotlib.pyplot as plt
import sys
import numpy as np
from scipy.signal import convolve2d
# TODO parse lots of options I set here
USEARRSPACE=False
UNITS=False
SIZE = 100
window=[-SIZE,SIZE,-SIZE,SIZE]
#window=[-SIZE/4,SIZE/4,0,SIZE]
FIGX = 10
FIGY = 10
dumpfile = sys.argv[1]
if len(sys.argv) > 3:
gridfile = sys.argv[2]
var = sys.argv[3]
elif len(sys.argv) > 2:
gridfile = None
var = sys.argv[2]
# Optionally take extra name
name = sys.argv[-1]
if UNITS and var not in ['Tp']:
M_unit = float(sys.argv[-1])
if gridfile is not None:
hdr = io.load_hdr(dumpfile)
geom = io.load_geom(hdr, gridfile)
dump = io.load_dump(dumpfile, hdr, geom)
else:
# Assumes gridfile in same directory
hdr,geom,dump = io.load_all(dumpfile)
# If we're plotting a derived variable, calculate + add it
if var in ['jcov', 'jsq']:
    dump['jcov'] = np.einsum("...i,...ij->...j", dump['jcon'], geom['gcov'][:,:,None,:,:])
dump['jsq'] = np.sum(dump['jcon']*dump['jcov'], axis=-1)
elif var in ['divE2D']:
JE1g, JE2g = T_mixed(dump, 1,0).mean(axis=-1)*geom['gdet'], T_mixed(dump, 2,0).mean(axis=-1)*geom['gdet']
face_JE1 = 0.5*(JE1g[:-1,:] + JE1g[1:,:])
face_JE2 = 0.5*(JE2g[:,:-1] + JE2g[:,1:])
divJE = (face_JE1[1:,1:-1] - face_JE1[:-1,1:-1]) / geom['dx1'] + (face_JE2[1:-1,1:] - face_JE2[1:-1,:-1]) / geom['dx2']
dump[var] = np.zeros_like(dump['RHO'])
dump[var][1:-1,1:-1,0] = divJE
dump[var] /= np.sqrt(T_mixed(dump, 1,0)**2 + T_mixed(dump, 2,0)**2 + T_mixed(dump, 3,0)**2)*geom['gdet'][:,:,None]
elif var in ['divB2D']:
B1g, B2g = dump['B1'].mean(axis=-1)*geom['gdet'], dump['B2'].mean(axis=-1)*geom['gdet']
corner_B1 = 0.5*(B1g[:,1:] + B1g[:,:-1])
corner_B2 = 0.5*(B2g[1:,:] + B2g[:-1,:])
divB = (corner_B1[1:,:] - corner_B1[:-1,:]) / geom['dx1'] + (corner_B2[:,1:] - corner_B2[:,:-1]) / geom['dx2']
dump[var] = np.zeros_like(dump['RHO'])
dump[var][:-1,:-1,0] = divB
dump[var] /= np.sqrt(dump['B1']**2 + dump['B2']**2 + dump['B3']**2)*geom['gdet'][:,:,None]
elif var in ['divB3D']:
B1g, B2g, B3g = dump['B1']*geom['gdet'][:,:,None], dump['B2']*geom['gdet'][:,:,None], dump['B3']*geom['gdet'][:,:,None]
corner_B1 = 0.25*(B1g[:,1:,1:] + B1g[:,1:,:-1] + B1g[:,:-1,1:] + B1g[:,:-1,:-1])
corner_B2 = 0.25*(B2g[1:,:,1:] + B2g[1:,:,:-1] + B2g[:-1,:,1:] + B2g[:-1,:,:-1])
corner_B3 = 0.25*(B3g[1:,1:,:] + B3g[1:,:-1,:] + B3g[:-1,1:,:] + B3g[:-1,:-1,:])
divB = (corner_B1[1:,:,:] - corner_B1[:-1,:,:]) / geom['dx1'] + (corner_B2[:,1:,:] - corner_B2[:,:-1,:]) / geom['dx2'] + (corner_B3[:,:,1:] - corner_B3[:,:,:-1]) / geom['dx3']
dump[var] = np.zeros_like(dump['RHO'])
dump[var][:-1,:-1,:-1] = divB
dump[var] /= np.sqrt(dump['B1']**2 + dump['B2']**2 + dump['B3']**2)*geom['gdet'][:,:,None]
elif var[-4:] == "_pdf":
var_og = var[:-4]
dump[var_og] = d_fns[var_og](dump)
dump[var], dump[var+'_bins'] = np.histogram(np.log10(dump[var_og]), bins=200, range=(-3.5,3.5), weights=np.repeat(geom['gdet'], geom['n3']).reshape(dump[var_og].shape), density=True)
elif var not in dump:
dump[var] = d_fns[var](dump)
# Add units after all calculations, manually
if UNITS:
if var in ['Tp']:
cgs = units.get_cgs()
dump[var] /= cgs['KBOL']
else:
unit = units.get_units_M87(M_unit, tp_over_te=3)
if var in ['bsq']:
dump[var] *= unit['B_unit']**2
elif var in ['B']:
dump[var] *= unit['B_unit']
elif var in ['Ne']:
dump[var] = dump['RHO'] * unit['Ne_unit']
        elif var in ['Te']:
            ref = units.get_cgs()
            dump[var] = ref['ME'] * ref['CL']**2 * unit['Thetae_unit'] * dump['UU']/dump['RHO']
elif var in ['Thetae']:
# TODO non-const te
dump[var] = unit['Thetae_unit'] * dump['UU']/dump['RHO']
fig = plt.figure(figsize=(FIGX, FIGY))
# Treat PDFs separately
if var[-4:] == "_pdf":
plt.plot(dump[var+'_bins'][:-1], dump[var])
plt.title("PDF of "+var[:-4])
plt.xlabel("Log10 value")
plt.ylabel("Probability")
plt.savefig(name+".png", dpi=100)
plt.close(fig)
exit()
# Plot XY differently for vectors, scalars
if var in ['jcon','ucon','ucov','bcon','bcov']:
axes = [plt.subplot(2, 2, i) for i in range(1,5)]
for n in range(4):
bplt.plot_xy(axes[n], geom, np.log10(dump[var][:,:,:,n]), arrayspace=USEARRSPACE, window=window)
elif var not in ['divE2D', 'divB2D']:
# TODO allow specifying vmin/max, average from command line or above
ax = plt.subplot(1, 1, 1)
bplt.plot_xy(ax, geom, dump[var], arrayspace=USEARRSPACE, window=window, vmin=1e10, vmax=1e12)
plt.tight_layout()
plt.savefig(name+"_xy.png", dpi=100)
plt.close(fig)
fig = plt.figure(figsize=(FIGX, FIGY))
# Plot XZ
if var in ['jcon', 'ucon', 'ucov', 'bcon', 'bcov']:
axes = [plt.subplot(2, 2, i) for i in range(1, 5)]
for n in range(4):
bplt.plot_xz(axes[n], geom, np.log10(dump[var][:,:,:,n]), arrayspace=USEARRSPACE, window=window)
elif var in ['divB2D', 'divE2D', 'divE2D_face', 'divB3D']:
ax = plt.subplot(1, 1, 1)
bplt.plot_xz(ax, geom, np.log10(np.abs(dump[var])), arrayspace=USEARRSPACE, window=window, vmin=-6, vmax=0)
if var in ['divE2D', 'divE2D_face']:
#JE1 = -T_mixed(dump, 1,0)
#JE2 = -T_mixed(dump, 2,0)
JE1 = dump['ucon'][:,:,:,1]
JE2 = dump['ucon'][:,:,:,2]
bplt.overlay_flowlines(ax, geom, JE1, JE2, nlines=20, arrayspace=USEARRSPACE)
#bplt.overlay_quiver(ax, geom, JE1, JE2)
else:
bplt.overlay_field(ax, geom, dump, nlines=20, arrayspace=USEARRSPACE)
else:
ax = plt.subplot(1, 1, 1)
bplt.plot_xz(ax, geom, np.log10(dump[var]), vmin=-3, vmax=1, arrayspace=USEARRSPACE, window=window)
norm = np.sqrt(dump['ucon'][:,:,0,1]**2 + dump['ucon'][:,:,0,2]**2)*geom['gdet']
JF1 = dump['ucon'][:,:,:,1] #/norm
JF2 = dump['ucon'][:,:,:,2] #/norm
#bplt.overlay_quiver(ax, geom, dump, JF1, JF2, cadence=96, norm=15)
bplt.overlay_flowlines(ax, geom, JF1, JF2, nlines=100, arrayspace=USEARRSPACE, reverse=True)
plt.tight_layout()
plt.savefig(name+"_xz.png", dpi=100)
plt.close(fig)
|
import cv2
import tensorflow as tf
import matplotlib.pyplot as plt
CATEGORIES = ["Dog", "Cat"]
def prepare(filepath):
IMG_SIZE = 50
img_array = cv2.imread(filepath, cv2.IMREAD_GRAYSCALE)
new_array = cv2.resize(img_array, (IMG_SIZE, IMG_SIZE))
return new_array.reshape(-1, IMG_SIZE, IMG_SIZE, 1)
print('Loading trained model')
model = tf.keras.models.load_model("Cats_Dogs.model")
print('There are 12500 images in the test set')
print('Select an image between 1-12500')
i=input()
file="C:\\Users/Jidin/Downloads/PetImages/test1/"+ i +".jpg"
img_array=cv2.imread(file,cv2.IMREAD_GRAYSCALE)
cv2.imshow('image',img_array)
prediction = model.predict([prepare(file)])
print("^PREDICTION^")
print(CATEGORIES[int(prediction[0][0])])
cv2.waitKey(0)  # wait for a key press before closing the window
cv2.destroyAllWindows()
|
def countzero(string):
output = string.count('()')
for x in string:
if x in 'abdegopq069DOPQR':
output += 1
elif x in '%&B8':
output += 2
return output
'''
Gigi is a clever monkey, living in the zoo, his teacher (animal keeper)
recently taught him some knowledge of "0".
In Gigi's eyes, "0" is a character contains some circle(maybe one, maybe two).
So, a is a "0",b is a "0",6 is also a "0",and 8 have two "0" ,etc...
Now, write some code to count how many "0"s in the text.
Let us see who is smarter? You ? or monkey?
Input always be a string(including words numbers and symbols),
You don't need to verify it, but pay attention to the difference
between uppercase and lowercase letters.
Here is a table of characters:
one zero abdegopq069DOPQR () <-- A pair of braces as a zero
two zero %&B8
Output will be a number of "0".
'''
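# Hedged usage example (inputs made up, not from the kata statement):
# print(countzero("abc"))   # -> 2  ('a' and 'b' each contain one circle)
# print(countzero("8()"))   # -> 3  ('8' counts twice, the brace pair counts once)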
|
#!/usr/bin/python3
def delete_at(my_list=[], idx=0):
    """Deletes the item at index idx and returns the list; the list is returned unchanged if idx is out of range."""
new_list = my_list.copy()
len_list = len(my_list)
if my_list:
if idx < 0 or idx >= len_list:
return my_list
else:
my_list.clear()
for i in range(len_list):
if i != idx:
my_list.append(new_list[i])
return my_list
else:
return my_list
|
import dash_bootstrap_components as dbc
from dash import html
spinners = html.Div(
[
dbc.Spinner(color="primary"),
dbc.Spinner(color="secondary"),
dbc.Spinner(color="success"),
dbc.Spinner(color="warning"),
dbc.Spinner(color="danger"),
dbc.Spinner(color="info"),
dbc.Spinner(color="light"),
dbc.Spinner(color="dark"),
]
)
|
from sklearn import datasets
iris = datasets.load_iris()
x = iris.data
y = iris.target
from sklearn.naive_bayes import GaussianNB
clf = GaussianNB()
pre = clf.fit(x,y).predict(x)
from sklearn.metrics import accuracy_score
accuracy = accuracy_score(pre, y)
print(accuracy)
|
customers=['grandpa','grandma','cousin','sister']
print("I have found a bigger table to maintain more customers:")
customers.insert(0,"ant")
customers.insert(2,"prince")
customers.append("wife")
print("Hello "+customers[0]+", Welcome to my party!")
print("Hello "+customers[1]+", Welcome to my party!")
print("Hello "+customers[2]+", Welcome to my party!")
print("Hello "+customers[3]+", Welcome to my party!")
print("Hello "+customers[4]+", Welcome to my party!")
print("Hello "+customers[5]+", Welcome to my party!")
print("Hello "+customers[6]+", Welcome to my party!")
|
import torch.nn as nn
def get_criterion(version):
if version == 1:
criterion = nn.BCEWithLogitsLoss(reduction="mean")
else:
raise Exception(f"Criterion version '{version}' is unknown!")
return criterion
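# Minimal usage sketch for get_criterion (the tensor shapes and values are
# illustrative assumptions): BCEWithLogitsLoss expects raw logits and float targets.
import torch
criterion = get_criterion(version=1)
logits = torch.randn(4, 1)  # raw model outputs, no sigmoid applied
targets = torch.tensor([[1.0], [0.0], [1.0], [0.0]])
loss = criterion(logits, targets)
print(loss.item())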
|
import math
from collections import OrderedDict
from functools import partial
from typing import Any, Callable, Dict, List, Optional, Tuple
import torch
from torch import nn, Tensor
from ..ops.misc import Conv2dNormActivation, SqueezeExcitation
from ..transforms._presets import ImageClassification, InterpolationMode
from ..utils import _log_api_usage_once
from ._api import register_model, Weights, WeightsEnum
from ._meta import _IMAGENET_CATEGORIES
from ._utils import _make_divisible, _ovewrite_named_param, handle_legacy_interface
__all__ = [
"RegNet",
"RegNet_Y_400MF_Weights",
"RegNet_Y_800MF_Weights",
"RegNet_Y_1_6GF_Weights",
"RegNet_Y_3_2GF_Weights",
"RegNet_Y_8GF_Weights",
"RegNet_Y_16GF_Weights",
"RegNet_Y_32GF_Weights",
"RegNet_Y_128GF_Weights",
"RegNet_X_400MF_Weights",
"RegNet_X_800MF_Weights",
"RegNet_X_1_6GF_Weights",
"RegNet_X_3_2GF_Weights",
"RegNet_X_8GF_Weights",
"RegNet_X_16GF_Weights",
"RegNet_X_32GF_Weights",
"regnet_y_400mf",
"regnet_y_800mf",
"regnet_y_1_6gf",
"regnet_y_3_2gf",
"regnet_y_8gf",
"regnet_y_16gf",
"regnet_y_32gf",
"regnet_y_128gf",
"regnet_x_400mf",
"regnet_x_800mf",
"regnet_x_1_6gf",
"regnet_x_3_2gf",
"regnet_x_8gf",
"regnet_x_16gf",
"regnet_x_32gf",
]
class SimpleStemIN(Conv2dNormActivation):
"""Simple stem for ImageNet: 3x3, BN, ReLU."""
def __init__(
self,
width_in: int,
width_out: int,
norm_layer: Callable[..., nn.Module],
activation_layer: Callable[..., nn.Module],
) -> None:
super().__init__(
width_in, width_out, kernel_size=3, stride=2, norm_layer=norm_layer, activation_layer=activation_layer
)
class BottleneckTransform(nn.Sequential):
"""Bottleneck transformation: 1x1, 3x3 [+SE], 1x1."""
def __init__(
self,
width_in: int,
width_out: int,
stride: int,
norm_layer: Callable[..., nn.Module],
activation_layer: Callable[..., nn.Module],
group_width: int,
bottleneck_multiplier: float,
se_ratio: Optional[float],
) -> None:
layers: OrderedDict[str, nn.Module] = OrderedDict()
w_b = int(round(width_out * bottleneck_multiplier))
g = w_b // group_width
layers["a"] = Conv2dNormActivation(
width_in, w_b, kernel_size=1, stride=1, norm_layer=norm_layer, activation_layer=activation_layer
)
layers["b"] = Conv2dNormActivation(
w_b, w_b, kernel_size=3, stride=stride, groups=g, norm_layer=norm_layer, activation_layer=activation_layer
)
if se_ratio:
# The SE reduction ratio is defined with respect to the
# beginning of the block
width_se_out = int(round(se_ratio * width_in))
layers["se"] = SqueezeExcitation(
input_channels=w_b,
squeeze_channels=width_se_out,
activation=activation_layer,
)
layers["c"] = Conv2dNormActivation(
w_b, width_out, kernel_size=1, stride=1, norm_layer=norm_layer, activation_layer=None
)
super().__init__(layers)
class ResBottleneckBlock(nn.Module):
"""Residual bottleneck block: x + F(x), F = bottleneck transform."""
def __init__(
self,
width_in: int,
width_out: int,
stride: int,
norm_layer: Callable[..., nn.Module],
activation_layer: Callable[..., nn.Module],
group_width: int = 1,
bottleneck_multiplier: float = 1.0,
se_ratio: Optional[float] = None,
) -> None:
super().__init__()
# Use skip connection with projection if shape changes
self.proj = None
should_proj = (width_in != width_out) or (stride != 1)
if should_proj:
self.proj = Conv2dNormActivation(
width_in, width_out, kernel_size=1, stride=stride, norm_layer=norm_layer, activation_layer=None
)
self.f = BottleneckTransform(
width_in,
width_out,
stride,
norm_layer,
activation_layer,
group_width,
bottleneck_multiplier,
se_ratio,
)
self.activation = activation_layer(inplace=True)
def forward(self, x: Tensor) -> Tensor:
if self.proj is not None:
x = self.proj(x) + self.f(x)
else:
x = x + self.f(x)
return self.activation(x)
class AnyStage(nn.Sequential):
"""AnyNet stage (sequence of blocks w/ the same output shape)."""
def __init__(
self,
width_in: int,
width_out: int,
stride: int,
depth: int,
block_constructor: Callable[..., nn.Module],
norm_layer: Callable[..., nn.Module],
activation_layer: Callable[..., nn.Module],
group_width: int,
bottleneck_multiplier: float,
se_ratio: Optional[float] = None,
stage_index: int = 0,
) -> None:
super().__init__()
for i in range(depth):
block = block_constructor(
width_in if i == 0 else width_out,
width_out,
stride if i == 0 else 1,
norm_layer,
activation_layer,
group_width,
bottleneck_multiplier,
se_ratio,
)
self.add_module(f"block{stage_index}-{i}", block)
class BlockParams:
def __init__(
self,
depths: List[int],
widths: List[int],
group_widths: List[int],
bottleneck_multipliers: List[float],
strides: List[int],
se_ratio: Optional[float] = None,
) -> None:
self.depths = depths
self.widths = widths
self.group_widths = group_widths
self.bottleneck_multipliers = bottleneck_multipliers
self.strides = strides
self.se_ratio = se_ratio
@classmethod
def from_init_params(
cls,
depth: int,
w_0: int,
w_a: float,
w_m: float,
group_width: int,
bottleneck_multiplier: float = 1.0,
se_ratio: Optional[float] = None,
**kwargs: Any,
) -> "BlockParams":
"""
Programmatically compute all the per-block settings,
given the RegNet parameters.
The first step is to compute the quantized linear block parameters,
in log space. Key parameters are:
- `w_a` is the width progression slope
- `w_0` is the initial width
- `w_m` is the width stepping in the log space
        In other terms,
        `log(block_width) = log(w_0) + block_capacity * log(w_m)`,
        with `block_capacity` ramping up following the w_0 and w_a params.
        This block width is finally quantized to multiples of 8.
The second step is to compute the parameters per stage,
taking into account the skip connection and the final 1x1 convolutions.
We use the fact that the output width is constant within a stage.
"""
QUANT = 8
STRIDE = 2
if w_a < 0 or w_0 <= 0 or w_m <= 1 or w_0 % 8 != 0:
raise ValueError("Invalid RegNet settings")
# Compute the block widths. Each stage has one unique block width
widths_cont = torch.arange(depth) * w_a + w_0
block_capacity = torch.round(torch.log(widths_cont / w_0) / math.log(w_m))
block_widths = (torch.round(torch.divide(w_0 * torch.pow(w_m, block_capacity), QUANT)) * QUANT).int().tolist()
num_stages = len(set(block_widths))
# Convert to per stage parameters
split_helper = zip(
block_widths + [0],
[0] + block_widths,
block_widths + [0],
[0] + block_widths,
)
splits = [w != wp or r != rp for w, wp, r, rp in split_helper]
stage_widths = [w for w, t in zip(block_widths, splits[:-1]) if t]
stage_depths = torch.diff(torch.tensor([d for d, t in enumerate(splits) if t])).int().tolist()
strides = [STRIDE] * num_stages
bottleneck_multipliers = [bottleneck_multiplier] * num_stages
group_widths = [group_width] * num_stages
# Adjust the compatibility of stage widths and group widths
stage_widths, group_widths = cls._adjust_widths_groups_compatibilty(
stage_widths, bottleneck_multipliers, group_widths
)
return cls(
depths=stage_depths,
widths=stage_widths,
group_widths=group_widths,
bottleneck_multipliers=bottleneck_multipliers,
strides=strides,
se_ratio=se_ratio,
)
def _get_expanded_params(self):
return zip(self.widths, self.strides, self.depths, self.group_widths, self.bottleneck_multipliers)
@staticmethod
def _adjust_widths_groups_compatibilty(
stage_widths: List[int], bottleneck_ratios: List[float], group_widths: List[int]
) -> Tuple[List[int], List[int]]:
"""
Adjusts the compatibility of widths and groups,
depending on the bottleneck ratio.
"""
# Compute all widths for the current settings
widths = [int(w * b) for w, b in zip(stage_widths, bottleneck_ratios)]
group_widths_min = [min(g, w_bot) for g, w_bot in zip(group_widths, widths)]
# Compute the adjusted widths so that stage and group widths fit
ws_bot = [_make_divisible(w_bot, g) for w_bot, g in zip(widths, group_widths_min)]
stage_widths = [int(w_bot / b) for w_bot, b in zip(ws_bot, bottleneck_ratios)]
return stage_widths, group_widths_min
class RegNet(nn.Module):
def __init__(
self,
block_params: BlockParams,
num_classes: int = 1000,
stem_width: int = 32,
stem_type: Optional[Callable[..., nn.Module]] = None,
block_type: Optional[Callable[..., nn.Module]] = None,
norm_layer: Optional[Callable[..., nn.Module]] = None,
activation: Optional[Callable[..., nn.Module]] = None,
) -> None:
super().__init__()
_log_api_usage_once(self)
if stem_type is None:
stem_type = SimpleStemIN
if norm_layer is None:
norm_layer = nn.BatchNorm2d
if block_type is None:
block_type = ResBottleneckBlock
if activation is None:
activation = nn.ReLU
# Ad hoc stem
self.stem = stem_type(
3, # width_in
stem_width,
norm_layer,
activation,
)
current_width = stem_width
blocks = []
for i, (
width_out,
stride,
depth,
group_width,
bottleneck_multiplier,
) in enumerate(block_params._get_expanded_params()):
blocks.append(
(
f"block{i+1}",
AnyStage(
current_width,
width_out,
stride,
depth,
block_type,
norm_layer,
activation,
group_width,
bottleneck_multiplier,
block_params.se_ratio,
stage_index=i + 1,
),
)
)
current_width = width_out
self.trunk_output = nn.Sequential(OrderedDict(blocks))
self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
self.fc = nn.Linear(in_features=current_width, out_features=num_classes)
# Performs ResNet-style weight initialization
for m in self.modules():
if isinstance(m, nn.Conv2d):
# Note that there is no bias due to BN
fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
nn.init.normal_(m.weight, mean=0.0, std=math.sqrt(2.0 / fan_out))
elif isinstance(m, nn.BatchNorm2d):
nn.init.ones_(m.weight)
nn.init.zeros_(m.bias)
elif isinstance(m, nn.Linear):
nn.init.normal_(m.weight, mean=0.0, std=0.01)
nn.init.zeros_(m.bias)
def forward(self, x: Tensor) -> Tensor:
x = self.stem(x)
x = self.trunk_output(x)
x = self.avgpool(x)
x = x.flatten(start_dim=1)
x = self.fc(x)
return x
def _regnet(
block_params: BlockParams,
weights: Optional[WeightsEnum],
progress: bool,
**kwargs: Any,
) -> RegNet:
if weights is not None:
_ovewrite_named_param(kwargs, "num_classes", len(weights.meta["categories"]))
norm_layer = kwargs.pop("norm_layer", partial(nn.BatchNorm2d, eps=1e-05, momentum=0.1))
model = RegNet(block_params, norm_layer=norm_layer, **kwargs)
if weights is not None:
model.load_state_dict(weights.get_state_dict(progress=progress, check_hash=True))
return model
_COMMON_META: Dict[str, Any] = {
"min_size": (1, 1),
"categories": _IMAGENET_CATEGORIES,
}
_COMMON_SWAG_META = {
**_COMMON_META,
"recipe": "https://github.com/facebookresearch/SWAG",
"license": "https://github.com/facebookresearch/SWAG/blob/main/LICENSE",
}
class RegNet_Y_400MF_Weights(WeightsEnum):
IMAGENET1K_V1 = Weights(
url="https://download.pytorch.org/models/regnet_y_400mf-c65dace8.pth",
transforms=partial(ImageClassification, crop_size=224),
meta={
**_COMMON_META,
"num_params": 4344144,
"recipe": "https://github.com/pytorch/vision/tree/main/references/classification#small-models",
"_metrics": {
"ImageNet-1K": {
"acc@1": 74.046,
"acc@5": 91.716,
}
},
"_ops": 0.402,
"_file_size": 16.806,
"_docs": """These weights reproduce closely the results of the paper using a simple training recipe.""",
},
)
IMAGENET1K_V2 = Weights(
url="https://download.pytorch.org/models/regnet_y_400mf-e6988f5f.pth",
transforms=partial(ImageClassification, crop_size=224, resize_size=232),
meta={
**_COMMON_META,
"num_params": 4344144,
"recipe": "https://github.com/pytorch/vision/issues/3995#new-recipe",
"_metrics": {
"ImageNet-1K": {
"acc@1": 75.804,
"acc@5": 92.742,
}
},
"_ops": 0.402,
"_file_size": 16.806,
"_docs": """
These weights improve upon the results of the original paper by using a modified version of TorchVision's
`new training recipe
<https://pytorch.org/blog/how-to-train-state-of-the-art-models-using-torchvision-latest-primitives/>`_.
""",
},
)
DEFAULT = IMAGENET1K_V2
class RegNet_Y_800MF_Weights(WeightsEnum):
IMAGENET1K_V1 = Weights(
url="https://download.pytorch.org/models/regnet_y_800mf-1b27b58c.pth",
transforms=partial(ImageClassification, crop_size=224),
meta={
**_COMMON_META,
"num_params": 6432512,
"recipe": "https://github.com/pytorch/vision/tree/main/references/classification#small-models",
"_metrics": {
"ImageNet-1K": {
"acc@1": 76.420,
"acc@5": 93.136,
}
},
"_ops": 0.834,
"_file_size": 24.774,
"_docs": """These weights reproduce closely the results of the paper using a simple training recipe.""",
},
)
IMAGENET1K_V2 = Weights(
url="https://download.pytorch.org/models/regnet_y_800mf-58fc7688.pth",
transforms=partial(ImageClassification, crop_size=224, resize_size=232),
meta={
**_COMMON_META,
"num_params": 6432512,
"recipe": "https://github.com/pytorch/vision/issues/3995#new-recipe",
"_metrics": {
"ImageNet-1K": {
"acc@1": 78.828,
"acc@5": 94.502,
}
},
"_ops": 0.834,
"_file_size": 24.774,
"_docs": """
These weights improve upon the results of the original paper by using a modified version of TorchVision's
`new training recipe
<https://pytorch.org/blog/how-to-train-state-of-the-art-models-using-torchvision-latest-primitives/>`_.
""",
},
)
DEFAULT = IMAGENET1K_V2
class RegNet_Y_1_6GF_Weights(WeightsEnum):
IMAGENET1K_V1 = Weights(
url="https://download.pytorch.org/models/regnet_y_1_6gf-b11a554e.pth",
transforms=partial(ImageClassification, crop_size=224),
meta={
**_COMMON_META,
"num_params": 11202430,
"recipe": "https://github.com/pytorch/vision/tree/main/references/classification#small-models",
"_metrics": {
"ImageNet-1K": {
"acc@1": 77.950,
"acc@5": 93.966,
}
},
"_ops": 1.612,
"_file_size": 43.152,
"_docs": """These weights reproduce closely the results of the paper using a simple training recipe.""",
},
)
IMAGENET1K_V2 = Weights(
url="https://download.pytorch.org/models/regnet_y_1_6gf-0d7bc02a.pth",
transforms=partial(ImageClassification, crop_size=224, resize_size=232),
meta={
**_COMMON_META,
"num_params": 11202430,
"recipe": "https://github.com/pytorch/vision/issues/3995#new-recipe",
"_metrics": {
"ImageNet-1K": {
"acc@1": 80.876,
"acc@5": 95.444,
}
},
"_ops": 1.612,
"_file_size": 43.152,
"_docs": """
These weights improve upon the results of the original paper by using a modified version of TorchVision's
`new training recipe
<https://pytorch.org/blog/how-to-train-state-of-the-art-models-using-torchvision-latest-primitives/>`_.
""",
},
)
DEFAULT = IMAGENET1K_V2
class RegNet_Y_3_2GF_Weights(WeightsEnum):
IMAGENET1K_V1 = Weights(
url="https://download.pytorch.org/models/regnet_y_3_2gf-b5a9779c.pth",
transforms=partial(ImageClassification, crop_size=224),
meta={
**_COMMON_META,
"num_params": 19436338,
"recipe": "https://github.com/pytorch/vision/tree/main/references/classification#medium-models",
"_metrics": {
"ImageNet-1K": {
"acc@1": 78.948,
"acc@5": 94.576,
}
},
"_ops": 3.176,
"_file_size": 74.567,
"_docs": """These weights reproduce closely the results of the paper using a simple training recipe.""",
},
)
IMAGENET1K_V2 = Weights(
url="https://download.pytorch.org/models/regnet_y_3_2gf-9180c971.pth",
transforms=partial(ImageClassification, crop_size=224, resize_size=232),
meta={
**_COMMON_META,
"num_params": 19436338,
"recipe": "https://github.com/pytorch/vision/issues/3995#new-recipe",
"_metrics": {
"ImageNet-1K": {
"acc@1": 81.982,
"acc@5": 95.972,
}
},
"_ops": 3.176,
"_file_size": 74.567,
"_docs": """
These weights improve upon the results of the original paper by using a modified version of TorchVision's
`new training recipe
<https://pytorch.org/blog/how-to-train-state-of-the-art-models-using-torchvision-latest-primitives/>`_.
""",
},
)
DEFAULT = IMAGENET1K_V2
class RegNet_Y_8GF_Weights(WeightsEnum):
IMAGENET1K_V1 = Weights(
url="https://download.pytorch.org/models/regnet_y_8gf-d0d0e4a8.pth",
transforms=partial(ImageClassification, crop_size=224),
meta={
**_COMMON_META,
"num_params": 39381472,
"recipe": "https://github.com/pytorch/vision/tree/main/references/classification#medium-models",
"_metrics": {
"ImageNet-1K": {
"acc@1": 80.032,
"acc@5": 95.048,
}
},
"_ops": 8.473,
"_file_size": 150.701,
"_docs": """These weights reproduce closely the results of the paper using a simple training recipe.""",
},
)
IMAGENET1K_V2 = Weights(
url="https://download.pytorch.org/models/regnet_y_8gf-dc2b1b54.pth",
transforms=partial(ImageClassification, crop_size=224, resize_size=232),
meta={
**_COMMON_META,
"num_params": 39381472,
"recipe": "https://github.com/pytorch/vision/issues/3995#new-recipe",
"_metrics": {
"ImageNet-1K": {
"acc@1": 82.828,
"acc@5": 96.330,
}
},
"_ops": 8.473,
"_file_size": 150.701,
"_docs": """
These weights improve upon the results of the original paper by using a modified version of TorchVision's
`new training recipe
<https://pytorch.org/blog/how-to-train-state-of-the-art-models-using-torchvision-latest-primitives/>`_.
""",
},
)
DEFAULT = IMAGENET1K_V2
class RegNet_Y_16GF_Weights(WeightsEnum):
IMAGENET1K_V1 = Weights(
url="https://download.pytorch.org/models/regnet_y_16gf-9e6ed7dd.pth",
transforms=partial(ImageClassification, crop_size=224),
meta={
**_COMMON_META,
"num_params": 83590140,
"recipe": "https://github.com/pytorch/vision/tree/main/references/classification#large-models",
"_metrics": {
"ImageNet-1K": {
"acc@1": 80.424,
"acc@5": 95.240,
}
},
"_ops": 15.912,
"_file_size": 319.49,
"_docs": """These weights reproduce closely the results of the paper using a simple training recipe.""",
},
)
IMAGENET1K_V2 = Weights(
url="https://download.pytorch.org/models/regnet_y_16gf-3e4a00f9.pth",
transforms=partial(ImageClassification, crop_size=224, resize_size=232),
meta={
**_COMMON_META,
"num_params": 83590140,
"recipe": "https://github.com/pytorch/vision/issues/3995#new-recipe",
"_metrics": {
"ImageNet-1K": {
"acc@1": 82.886,
"acc@5": 96.328,
}
},
"_ops": 15.912,
"_file_size": 319.49,
"_docs": """
These weights improve upon the results of the original paper by using a modified version of TorchVision's
`new training recipe
<https://pytorch.org/blog/how-to-train-state-of-the-art-models-using-torchvision-latest-primitives/>`_.
""",
},
)
IMAGENET1K_SWAG_E2E_V1 = Weights(
url="https://download.pytorch.org/models/regnet_y_16gf_swag-43afe44d.pth",
transforms=partial(
ImageClassification, crop_size=384, resize_size=384, interpolation=InterpolationMode.BICUBIC
),
meta={
**_COMMON_SWAG_META,
"num_params": 83590140,
"_metrics": {
"ImageNet-1K": {
"acc@1": 86.012,
"acc@5": 98.054,
}
},
"_ops": 46.735,
"_file_size": 319.49,
"_docs": """
These weights are learnt via transfer learning by end-to-end fine-tuning the original
`SWAG <https://arxiv.org/abs/2201.08371>`_ weights on ImageNet-1K data.
""",
},
)
IMAGENET1K_SWAG_LINEAR_V1 = Weights(
url="https://download.pytorch.org/models/regnet_y_16gf_lc_swag-f3ec0043.pth",
transforms=partial(
ImageClassification, crop_size=224, resize_size=224, interpolation=InterpolationMode.BICUBIC
),
meta={
**_COMMON_SWAG_META,
"recipe": "https://github.com/pytorch/vision/pull/5793",
"num_params": 83590140,
"_metrics": {
"ImageNet-1K": {
"acc@1": 83.976,
"acc@5": 97.244,
}
},
"_ops": 15.912,
"_file_size": 319.49,
"_docs": """
These weights are composed of the original frozen `SWAG <https://arxiv.org/abs/2201.08371>`_ trunk
weights and a linear classifier learnt on top of them trained on ImageNet-1K data.
""",
},
)
DEFAULT = IMAGENET1K_V2
class RegNet_Y_32GF_Weights(WeightsEnum):
IMAGENET1K_V1 = Weights(
url="https://download.pytorch.org/models/regnet_y_32gf-4dee3f7a.pth",
transforms=partial(ImageClassification, crop_size=224),
meta={
**_COMMON_META,
"num_params": 145046770,
"recipe": "https://github.com/pytorch/vision/tree/main/references/classification#large-models",
"_metrics": {
"ImageNet-1K": {
"acc@1": 80.878,
"acc@5": 95.340,
}
},
"_ops": 32.28,
"_file_size": 554.076,
"_docs": """These weights reproduce closely the results of the paper using a simple training recipe.""",
},
)
IMAGENET1K_V2 = Weights(
url="https://download.pytorch.org/models/regnet_y_32gf-8db6d4b5.pth",
transforms=partial(ImageClassification, crop_size=224, resize_size=232),
meta={
**_COMMON_META,
"num_params": 145046770,
"recipe": "https://github.com/pytorch/vision/issues/3995#new-recipe",
"_metrics": {
"ImageNet-1K": {
"acc@1": 83.368,
"acc@5": 96.498,
}
},
"_ops": 32.28,
"_file_size": 554.076,
"_docs": """
These weights improve upon the results of the original paper by using a modified version of TorchVision's
`new training recipe
<https://pytorch.org/blog/how-to-train-state-of-the-art-models-using-torchvision-latest-primitives/>`_.
""",
},
)
IMAGENET1K_SWAG_E2E_V1 = Weights(
url="https://download.pytorch.org/models/regnet_y_32gf_swag-04fdfa75.pth",
transforms=partial(
ImageClassification, crop_size=384, resize_size=384, interpolation=InterpolationMode.BICUBIC
),
meta={
**_COMMON_SWAG_META,
"num_params": 145046770,
"_metrics": {
"ImageNet-1K": {
"acc@1": 86.838,
"acc@5": 98.362,
}
},
"_ops": 94.826,
"_file_size": 554.076,
"_docs": """
These weights are learnt via transfer learning by end-to-end fine-tuning the original
`SWAG <https://arxiv.org/abs/2201.08371>`_ weights on ImageNet-1K data.
""",
},
)
IMAGENET1K_SWAG_LINEAR_V1 = Weights(
url="https://download.pytorch.org/models/regnet_y_32gf_lc_swag-e1583746.pth",
transforms=partial(
ImageClassification, crop_size=224, resize_size=224, interpolation=InterpolationMode.BICUBIC
),
meta={
**_COMMON_SWAG_META,
"recipe": "https://github.com/pytorch/vision/pull/5793",
"num_params": 145046770,
"_metrics": {
"ImageNet-1K": {
"acc@1": 84.622,
"acc@5": 97.480,
}
},
"_ops": 32.28,
"_file_size": 554.076,
"_docs": """
These weights are composed of the original frozen `SWAG <https://arxiv.org/abs/2201.08371>`_ trunk
weights and a linear classifier learnt on top of them trained on ImageNet-1K data.
""",
},
)
DEFAULT = IMAGENET1K_V2
class RegNet_Y_128GF_Weights(WeightsEnum):
IMAGENET1K_SWAG_E2E_V1 = Weights(
url="https://download.pytorch.org/models/regnet_y_128gf_swag-c8ce3e52.pth",
transforms=partial(
ImageClassification, crop_size=384, resize_size=384, interpolation=InterpolationMode.BICUBIC
),
meta={
**_COMMON_SWAG_META,
"num_params": 644812894,
"_metrics": {
"ImageNet-1K": {
"acc@1": 88.228,
"acc@5": 98.682,
}
},
"_ops": 374.57,
"_file_size": 2461.564,
"_docs": """
These weights are learnt via transfer learning by end-to-end fine-tuning the original
`SWAG <https://arxiv.org/abs/2201.08371>`_ weights on ImageNet-1K data.
""",
},
)
IMAGENET1K_SWAG_LINEAR_V1 = Weights(
url="https://download.pytorch.org/models/regnet_y_128gf_lc_swag-cbe8ce12.pth",
transforms=partial(
ImageClassification, crop_size=224, resize_size=224, interpolation=InterpolationMode.BICUBIC
),
meta={
**_COMMON_SWAG_META,
"recipe": "https://github.com/pytorch/vision/pull/5793",
"num_params": 644812894,
"_metrics": {
"ImageNet-1K": {
"acc@1": 86.068,
"acc@5": 97.844,
}
},
"_ops": 127.518,
"_file_size": 2461.564,
"_docs": """
These weights are composed of the original frozen `SWAG <https://arxiv.org/abs/2201.08371>`_ trunk
weights and a linear classifier learnt on top of them trained on ImageNet-1K data.
""",
},
)
DEFAULT = IMAGENET1K_SWAG_E2E_V1
class RegNet_X_400MF_Weights(WeightsEnum):
IMAGENET1K_V1 = Weights(
url="https://download.pytorch.org/models/regnet_x_400mf-adf1edd5.pth",
transforms=partial(ImageClassification, crop_size=224),
meta={
**_COMMON_META,
"num_params": 5495976,
"recipe": "https://github.com/pytorch/vision/tree/main/references/classification#small-models",
"_metrics": {
"ImageNet-1K": {
"acc@1": 72.834,
"acc@5": 90.950,
}
},
"_ops": 0.414,
"_file_size": 21.258,
"_docs": """These weights reproduce closely the results of the paper using a simple training recipe.""",
},
)
IMAGENET1K_V2 = Weights(
url="https://download.pytorch.org/models/regnet_x_400mf-62229a5f.pth",
transforms=partial(ImageClassification, crop_size=224, resize_size=232),
meta={
**_COMMON_META,
"num_params": 5495976,
"recipe": "https://github.com/pytorch/vision/issues/3995#new-recipe-with-fixres",
"_metrics": {
"ImageNet-1K": {
"acc@1": 74.864,
"acc@5": 92.322,
}
},
"_ops": 0.414,
"_file_size": 21.257,
"_docs": """
These weights improve upon the results of the original paper by using a modified version of TorchVision's
`new training recipe
<https://pytorch.org/blog/how-to-train-state-of-the-art-models-using-torchvision-latest-primitives/>`_.
""",
},
)
DEFAULT = IMAGENET1K_V2
class RegNet_X_800MF_Weights(WeightsEnum):
IMAGENET1K_V1 = Weights(
url="https://download.pytorch.org/models/regnet_x_800mf-ad17e45c.pth",
transforms=partial(ImageClassification, crop_size=224),
meta={
**_COMMON_META,
"num_params": 7259656,
"recipe": "https://github.com/pytorch/vision/tree/main/references/classification#small-models",
"_metrics": {
"ImageNet-1K": {
"acc@1": 75.212,
"acc@5": 92.348,
}
},
"_ops": 0.8,
"_file_size": 27.945,
"_docs": """These weights reproduce closely the results of the paper using a simple training recipe.""",
},
)
IMAGENET1K_V2 = Weights(
url="https://download.pytorch.org/models/regnet_x_800mf-94a99ebd.pth",
transforms=partial(ImageClassification, crop_size=224, resize_size=232),
meta={
**_COMMON_META,
"num_params": 7259656,
"recipe": "https://github.com/pytorch/vision/issues/3995#new-recipe-with-fixres",
"_metrics": {
"ImageNet-1K": {
"acc@1": 77.522,
"acc@5": 93.826,
}
},
"_ops": 0.8,
"_file_size": 27.945,
"_docs": """
These weights improve upon the results of the original paper by using a modified version of TorchVision's
`new training recipe
<https://pytorch.org/blog/how-to-train-state-of-the-art-models-using-torchvision-latest-primitives/>`_.
""",
},
)
DEFAULT = IMAGENET1K_V2
class RegNet_X_1_6GF_Weights(WeightsEnum):
IMAGENET1K_V1 = Weights(
url="https://download.pytorch.org/models/regnet_x_1_6gf-e3633e7f.pth",
transforms=partial(ImageClassification, crop_size=224),
meta={
**_COMMON_META,
"num_params": 9190136,
"recipe": "https://github.com/pytorch/vision/tree/main/references/classification#small-models",
"_metrics": {
"ImageNet-1K": {
"acc@1": 77.040,
"acc@5": 93.440,
}
},
"_ops": 1.603,
"_file_size": 35.339,
"_docs": """These weights reproduce closely the results of the paper using a simple training recipe.""",
},
)
IMAGENET1K_V2 = Weights(
url="https://download.pytorch.org/models/regnet_x_1_6gf-a12f2b72.pth",
transforms=partial(ImageClassification, crop_size=224, resize_size=232),
meta={
**_COMMON_META,
"num_params": 9190136,
"recipe": "https://github.com/pytorch/vision/issues/3995#new-recipe-with-fixres",
"_metrics": {
"ImageNet-1K": {
"acc@1": 79.668,
"acc@5": 94.922,
}
},
"_ops": 1.603,
"_file_size": 35.339,
"_docs": """
These weights improve upon the results of the original paper by using a modified version of TorchVision's
`new training recipe
<https://pytorch.org/blog/how-to-train-state-of-the-art-models-using-torchvision-latest-primitives/>`_.
""",
},
)
DEFAULT = IMAGENET1K_V2
class RegNet_X_3_2GF_Weights(WeightsEnum):
IMAGENET1K_V1 = Weights(
url="https://download.pytorch.org/models/regnet_x_3_2gf-f342aeae.pth",
transforms=partial(ImageClassification, crop_size=224),
meta={
**_COMMON_META,
"num_params": 15296552,
"recipe": "https://github.com/pytorch/vision/tree/main/references/classification#medium-models",
"_metrics": {
"ImageNet-1K": {
"acc@1": 78.364,
"acc@5": 93.992,
}
},
"_ops": 3.177,
"_file_size": 58.756,
"_docs": """These weights reproduce closely the results of the paper using a simple training recipe.""",
},
)
IMAGENET1K_V2 = Weights(
url="https://download.pytorch.org/models/regnet_x_3_2gf-7071aa85.pth",
transforms=partial(ImageClassification, crop_size=224, resize_size=232),
meta={
**_COMMON_META,
"num_params": 15296552,
"recipe": "https://github.com/pytorch/vision/issues/3995#new-recipe",
"_metrics": {
"ImageNet-1K": {
"acc@1": 81.196,
"acc@5": 95.430,
}
},
"_ops": 3.177,
"_file_size": 58.756,
"_docs": """
These weights improve upon the results of the original paper by using a modified version of TorchVision's
`new training recipe
<https://pytorch.org/blog/how-to-train-state-of-the-art-models-using-torchvision-latest-primitives/>`_.
""",
},
)
DEFAULT = IMAGENET1K_V2
class RegNet_X_8GF_Weights(WeightsEnum):
IMAGENET1K_V1 = Weights(
url="https://download.pytorch.org/models/regnet_x_8gf-03ceed89.pth",
transforms=partial(ImageClassification, crop_size=224),
meta={
**_COMMON_META,
"num_params": 39572648,
"recipe": "https://github.com/pytorch/vision/tree/main/references/classification#medium-models",
"_metrics": {
"ImageNet-1K": {
"acc@1": 79.344,
"acc@5": 94.686,
}
},
"_ops": 7.995,
"_file_size": 151.456,
"_docs": """These weights reproduce closely the results of the paper using a simple training recipe.""",
},
)
IMAGENET1K_V2 = Weights(
url="https://download.pytorch.org/models/regnet_x_8gf-2b70d774.pth",
transforms=partial(ImageClassification, crop_size=224, resize_size=232),
meta={
**_COMMON_META,
"num_params": 39572648,
"recipe": "https://github.com/pytorch/vision/issues/3995#new-recipe",
"_metrics": {
"ImageNet-1K": {
"acc@1": 81.682,
"acc@5": 95.678,
}
},
"_ops": 7.995,
"_file_size": 151.456,
"_docs": """
These weights improve upon the results of the original paper by using a modified version of TorchVision's
`new training recipe
<https://pytorch.org/blog/how-to-train-state-of-the-art-models-using-torchvision-latest-primitives/>`_.
""",
},
)
DEFAULT = IMAGENET1K_V2
class RegNet_X_16GF_Weights(WeightsEnum):
IMAGENET1K_V1 = Weights(
url="https://download.pytorch.org/models/regnet_x_16gf-2007eb11.pth",
transforms=partial(ImageClassification, crop_size=224),
meta={
**_COMMON_META,
"num_params": 54278536,
"recipe": "https://github.com/pytorch/vision/tree/main/references/classification#medium-models",
"_metrics": {
"ImageNet-1K": {
"acc@1": 80.058,
"acc@5": 94.944,
}
},
"_ops": 15.941,
"_file_size": 207.627,
"_docs": """These weights reproduce closely the results of the paper using a simple training recipe.""",
},
)
IMAGENET1K_V2 = Weights(
url="https://download.pytorch.org/models/regnet_x_16gf-ba3796d7.pth",
transforms=partial(ImageClassification, crop_size=224, resize_size=232),
meta={
**_COMMON_META,
"num_params": 54278536,
"recipe": "https://github.com/pytorch/vision/issues/3995#new-recipe",
"_metrics": {
"ImageNet-1K": {
"acc@1": 82.716,
"acc@5": 96.196,
}
},
"_ops": 15.941,
"_file_size": 207.627,
"_docs": """
These weights improve upon the results of the original paper by using a modified version of TorchVision's
`new training recipe
<https://pytorch.org/blog/how-to-train-state-of-the-art-models-using-torchvision-latest-primitives/>`_.
""",
},
)
DEFAULT = IMAGENET1K_V2
class RegNet_X_32GF_Weights(WeightsEnum):
IMAGENET1K_V1 = Weights(
url="https://download.pytorch.org/models/regnet_x_32gf-9d47f8d0.pth",
transforms=partial(ImageClassification, crop_size=224),
meta={
**_COMMON_META,
"num_params": 107811560,
"recipe": "https://github.com/pytorch/vision/tree/main/references/classification#large-models",
"_metrics": {
"ImageNet-1K": {
"acc@1": 80.622,
"acc@5": 95.248,
}
},
"_ops": 31.736,
"_file_size": 412.039,
"_docs": """These weights reproduce closely the results of the paper using a simple training recipe.""",
},
)
IMAGENET1K_V2 = Weights(
url="https://download.pytorch.org/models/regnet_x_32gf-6eb8fdc6.pth",
transforms=partial(ImageClassification, crop_size=224, resize_size=232),
meta={
**_COMMON_META,
"num_params": 107811560,
"recipe": "https://github.com/pytorch/vision/issues/3995#new-recipe",
"_metrics": {
"ImageNet-1K": {
"acc@1": 83.014,
"acc@5": 96.288,
}
},
"_ops": 31.736,
"_file_size": 412.039,
"_docs": """
These weights improve upon the results of the original paper by using a modified version of TorchVision's
`new training recipe
<https://pytorch.org/blog/how-to-train-state-of-the-art-models-using-torchvision-latest-primitives/>`_.
""",
},
)
DEFAULT = IMAGENET1K_V2
@register_model()
@handle_legacy_interface(weights=("pretrained", RegNet_Y_400MF_Weights.IMAGENET1K_V1))
def regnet_y_400mf(*, weights: Optional[RegNet_Y_400MF_Weights] = None, progress: bool = True, **kwargs: Any) -> RegNet:
"""
Constructs a RegNetY_400MF architecture from
`Designing Network Design Spaces <https://arxiv.org/abs/2003.13678>`_.
Args:
weights (:class:`~torchvision.models.RegNet_Y_400MF_Weights`, optional): The pretrained weights to use.
See :class:`~torchvision.models.RegNet_Y_400MF_Weights` below for more details and possible values.
By default, no pretrained weights are used.
progress (bool, optional): If True, displays a progress bar of the download to stderr. Default is True.
**kwargs: parameters passed to either ``torchvision.models.regnet.RegNet`` or
``torchvision.models.regnet.BlockParams`` class. Please refer to the `source code
<https://github.com/pytorch/vision/blob/main/torchvision/models/regnet.py>`_
for more detail about the classes.
.. autoclass:: torchvision.models.RegNet_Y_400MF_Weights
:members:
"""
weights = RegNet_Y_400MF_Weights.verify(weights)
params = BlockParams.from_init_params(depth=16, w_0=48, w_a=27.89, w_m=2.09, group_width=8, se_ratio=0.25, **kwargs)
return _regnet(params, weights, progress, **kwargs)
@register_model()
@handle_legacy_interface(weights=("pretrained", RegNet_Y_800MF_Weights.IMAGENET1K_V1))
def regnet_y_800mf(*, weights: Optional[RegNet_Y_800MF_Weights] = None, progress: bool = True, **kwargs: Any) -> RegNet:
"""
Constructs a RegNetY_800MF architecture from
`Designing Network Design Spaces <https://arxiv.org/abs/2003.13678>`_.
Args:
weights (:class:`~torchvision.models.RegNet_Y_800MF_Weights`, optional): The pretrained weights to use.
See :class:`~torchvision.models.RegNet_Y_800MF_Weights` below for more details and possible values.
By default, no pretrained weights are used.
progress (bool, optional): If True, displays a progress bar of the download to stderr. Default is True.
**kwargs: parameters passed to either ``torchvision.models.regnet.RegNet`` or
``torchvision.models.regnet.BlockParams`` class. Please refer to the `source code
<https://github.com/pytorch/vision/blob/main/torchvision/models/regnet.py>`_
for more detail about the classes.
.. autoclass:: torchvision.models.RegNet_Y_800MF_Weights
:members:
"""
weights = RegNet_Y_800MF_Weights.verify(weights)
params = BlockParams.from_init_params(depth=14, w_0=56, w_a=38.84, w_m=2.4, group_width=16, se_ratio=0.25, **kwargs)
return _regnet(params, weights, progress, **kwargs)
@register_model()
@handle_legacy_interface(weights=("pretrained", RegNet_Y_1_6GF_Weights.IMAGENET1K_V1))
def regnet_y_1_6gf(*, weights: Optional[RegNet_Y_1_6GF_Weights] = None, progress: bool = True, **kwargs: Any) -> RegNet:
"""
Constructs a RegNetY_1.6GF architecture from
`Designing Network Design Spaces <https://arxiv.org/abs/2003.13678>`_.
Args:
weights (:class:`~torchvision.models.RegNet_Y_1_6GF_Weights`, optional): The pretrained weights to use.
See :class:`~torchvision.models.RegNet_Y_1_6GF_Weights` below for more details and possible values.
By default, no pretrained weights are used.
progress (bool, optional): If True, displays a progress bar of the download to stderr. Default is True.
**kwargs: parameters passed to either ``torchvision.models.regnet.RegNet`` or
``torchvision.models.regnet.BlockParams`` class. Please refer to the `source code
<https://github.com/pytorch/vision/blob/main/torchvision/models/regnet.py>`_
for more detail about the classes.
.. autoclass:: torchvision.models.RegNet_Y_1_6GF_Weights
:members:
"""
weights = RegNet_Y_1_6GF_Weights.verify(weights)
params = BlockParams.from_init_params(
depth=27, w_0=48, w_a=20.71, w_m=2.65, group_width=24, se_ratio=0.25, **kwargs
)
return _regnet(params, weights, progress, **kwargs)
@register_model()
@handle_legacy_interface(weights=("pretrained", RegNet_Y_3_2GF_Weights.IMAGENET1K_V1))
def regnet_y_3_2gf(*, weights: Optional[RegNet_Y_3_2GF_Weights] = None, progress: bool = True, **kwargs: Any) -> RegNet:
"""
Constructs a RegNetY_3.2GF architecture from
`Designing Network Design Spaces <https://arxiv.org/abs/2003.13678>`_.
Args:
weights (:class:`~torchvision.models.RegNet_Y_3_2GF_Weights`, optional): The pretrained weights to use.
See :class:`~torchvision.models.RegNet_Y_3_2GF_Weights` below for more details and possible values.
By default, no pretrained weights are used.
progress (bool, optional): If True, displays a progress bar of the download to stderr. Default is True.
**kwargs: parameters passed to either ``torchvision.models.regnet.RegNet`` or
``torchvision.models.regnet.BlockParams`` class. Please refer to the `source code
<https://github.com/pytorch/vision/blob/main/torchvision/models/regnet.py>`_
for more detail about the classes.
.. autoclass:: torchvision.models.RegNet_Y_3_2GF_Weights
:members:
"""
weights = RegNet_Y_3_2GF_Weights.verify(weights)
params = BlockParams.from_init_params(
depth=21, w_0=80, w_a=42.63, w_m=2.66, group_width=24, se_ratio=0.25, **kwargs
)
return _regnet(params, weights, progress, **kwargs)
@register_model()
@handle_legacy_interface(weights=("pretrained", RegNet_Y_8GF_Weights.IMAGENET1K_V1))
def regnet_y_8gf(*, weights: Optional[RegNet_Y_8GF_Weights] = None, progress: bool = True, **kwargs: Any) -> RegNet:
"""
Constructs a RegNetY_8GF architecture from
`Designing Network Design Spaces <https://arxiv.org/abs/2003.13678>`_.
Args:
weights (:class:`~torchvision.models.RegNet_Y_8GF_Weights`, optional): The pretrained weights to use.
See :class:`~torchvision.models.RegNet_Y_8GF_Weights` below for more details and possible values.
By default, no pretrained weights are used.
progress (bool, optional): If True, displays a progress bar of the download to stderr. Default is True.
**kwargs: parameters passed to either ``torchvision.models.regnet.RegNet`` or
``torchvision.models.regnet.BlockParams`` class. Please refer to the `source code
<https://github.com/pytorch/vision/blob/main/torchvision/models/regnet.py>`_
for more detail about the classes.
.. autoclass:: torchvision.models.RegNet_Y_8GF_Weights
:members:
"""
weights = RegNet_Y_8GF_Weights.verify(weights)
params = BlockParams.from_init_params(
depth=17, w_0=192, w_a=76.82, w_m=2.19, group_width=56, se_ratio=0.25, **kwargs
)
return _regnet(params, weights, progress, **kwargs)
@register_model()
@handle_legacy_interface(weights=("pretrained", RegNet_Y_16GF_Weights.IMAGENET1K_V1))
def regnet_y_16gf(*, weights: Optional[RegNet_Y_16GF_Weights] = None, progress: bool = True, **kwargs: Any) -> RegNet:
"""
Constructs a RegNetY_16GF architecture from
`Designing Network Design Spaces <https://arxiv.org/abs/2003.13678>`_.
Args:
weights (:class:`~torchvision.models.RegNet_Y_16GF_Weights`, optional): The pretrained weights to use.
See :class:`~torchvision.models.RegNet_Y_16GF_Weights` below for more details and possible values.
By default, no pretrained weights are used.
progress (bool, optional): If True, displays a progress bar of the download to stderr. Default is True.
**kwargs: parameters passed to either ``torchvision.models.regnet.RegNet`` or
``torchvision.models.regnet.BlockParams`` class. Please refer to the `source code
<https://github.com/pytorch/vision/blob/main/torchvision/models/regnet.py>`_
for more detail about the classes.
.. autoclass:: torchvision.models.RegNet_Y_16GF_Weights
:members:
"""
weights = RegNet_Y_16GF_Weights.verify(weights)
params = BlockParams.from_init_params(
depth=18, w_0=200, w_a=106.23, w_m=2.48, group_width=112, se_ratio=0.25, **kwargs
)
return _regnet(params, weights, progress, **kwargs)
@register_model()
@handle_legacy_interface(weights=("pretrained", RegNet_Y_32GF_Weights.IMAGENET1K_V1))
def regnet_y_32gf(*, weights: Optional[RegNet_Y_32GF_Weights] = None, progress: bool = True, **kwargs: Any) -> RegNet:
"""
Constructs a RegNetY_32GF architecture from
`Designing Network Design Spaces <https://arxiv.org/abs/2003.13678>`_.
Args:
weights (:class:`~torchvision.models.RegNet_Y_32GF_Weights`, optional): The pretrained weights to use.
See :class:`~torchvision.models.RegNet_Y_32GF_Weights` below for more details and possible values.
By default, no pretrained weights are used.
progress (bool, optional): If True, displays a progress bar of the download to stderr. Default is True.
**kwargs: parameters passed to either ``torchvision.models.regnet.RegNet`` or
``torchvision.models.regnet.BlockParams`` class. Please refer to the `source code
<https://github.com/pytorch/vision/blob/main/torchvision/models/regnet.py>`_
for more detail about the classes.
.. autoclass:: torchvision.models.RegNet_Y_32GF_Weights
:members:
"""
weights = RegNet_Y_32GF_Weights.verify(weights)
params = BlockParams.from_init_params(
depth=20, w_0=232, w_a=115.89, w_m=2.53, group_width=232, se_ratio=0.25, **kwargs
)
return _regnet(params, weights, progress, **kwargs)
@register_model()
@handle_legacy_interface(weights=("pretrained", None))
def regnet_y_128gf(*, weights: Optional[RegNet_Y_128GF_Weights] = None, progress: bool = True, **kwargs: Any) -> RegNet:
"""
Constructs a RegNetY_128GF architecture from
`Designing Network Design Spaces <https://arxiv.org/abs/2003.13678>`_.
Args:
weights (:class:`~torchvision.models.RegNet_Y_128GF_Weights`, optional): The pretrained weights to use.
See :class:`~torchvision.models.RegNet_Y_128GF_Weights` below for more details and possible values.
By default, no pretrained weights are used.
progress (bool, optional): If True, displays a progress bar of the download to stderr. Default is True.
**kwargs: parameters passed to either ``torchvision.models.regnet.RegNet`` or
``torchvision.models.regnet.BlockParams`` class. Please refer to the `source code
<https://github.com/pytorch/vision/blob/main/torchvision/models/regnet.py>`_
for more detail about the classes.
.. autoclass:: torchvision.models.RegNet_Y_128GF_Weights
:members:
"""
weights = RegNet_Y_128GF_Weights.verify(weights)
params = BlockParams.from_init_params(
depth=27, w_0=456, w_a=160.83, w_m=2.52, group_width=264, se_ratio=0.25, **kwargs
)
return _regnet(params, weights, progress, **kwargs)
@register_model()
@handle_legacy_interface(weights=("pretrained", RegNet_X_400MF_Weights.IMAGENET1K_V1))
def regnet_x_400mf(*, weights: Optional[RegNet_X_400MF_Weights] = None, progress: bool = True, **kwargs: Any) -> RegNet:
"""
Constructs a RegNetX_400MF architecture from
`Designing Network Design Spaces <https://arxiv.org/abs/2003.13678>`_.
Args:
weights (:class:`~torchvision.models.RegNet_X_400MF_Weights`, optional): The pretrained weights to use.
See :class:`~torchvision.models.RegNet_X_400MF_Weights` below for more details and possible values.
By default, no pretrained weights are used.
progress (bool, optional): If True, displays a progress bar of the download to stderr. Default is True.
**kwargs: parameters passed to either ``torchvision.models.regnet.RegNet`` or
``torchvision.models.regnet.BlockParams`` class. Please refer to the `source code
<https://github.com/pytorch/vision/blob/main/torchvision/models/regnet.py>`_
for more detail about the classes.
.. autoclass:: torchvision.models.RegNet_X_400MF_Weights
:members:
"""
weights = RegNet_X_400MF_Weights.verify(weights)
params = BlockParams.from_init_params(depth=22, w_0=24, w_a=24.48, w_m=2.54, group_width=16, **kwargs)
return _regnet(params, weights, progress, **kwargs)
@register_model()
@handle_legacy_interface(weights=("pretrained", RegNet_X_800MF_Weights.IMAGENET1K_V1))
def regnet_x_800mf(*, weights: Optional[RegNet_X_800MF_Weights] = None, progress: bool = True, **kwargs: Any) -> RegNet:
"""
Constructs a RegNetX_800MF architecture from
`Designing Network Design Spaces <https://arxiv.org/abs/2003.13678>`_.
Args:
weights (:class:`~torchvision.models.RegNet_X_800MF_Weights`, optional): The pretrained weights to use.
See :class:`~torchvision.models.RegNet_X_800MF_Weights` below for more details and possible values.
By default, no pretrained weights are used.
progress (bool, optional): If True, displays a progress bar of the download to stderr. Default is True.
**kwargs: parameters passed to either ``torchvision.models.regnet.RegNet`` or
``torchvision.models.regnet.BlockParams`` class. Please refer to the `source code
<https://github.com/pytorch/vision/blob/main/torchvision/models/regnet.py>`_
for more detail about the classes.
.. autoclass:: torchvision.models.RegNet_X_800MF_Weights
:members:
"""
weights = RegNet_X_800MF_Weights.verify(weights)
params = BlockParams.from_init_params(depth=16, w_0=56, w_a=35.73, w_m=2.28, group_width=16, **kwargs)
return _regnet(params, weights, progress, **kwargs)
@register_model()
@handle_legacy_interface(weights=("pretrained", RegNet_X_1_6GF_Weights.IMAGENET1K_V1))
def regnet_x_1_6gf(*, weights: Optional[RegNet_X_1_6GF_Weights] = None, progress: bool = True, **kwargs: Any) -> RegNet:
"""
Constructs a RegNetX_1.6GF architecture from
`Designing Network Design Spaces <https://arxiv.org/abs/2003.13678>`_.
Args:
weights (:class:`~torchvision.models.RegNet_X_1_6GF_Weights`, optional): The pretrained weights to use.
See :class:`~torchvision.models.RegNet_X_1_6GF_Weights` below for more details and possible values.
By default, no pretrained weights are used.
progress (bool, optional): If True, displays a progress bar of the download to stderr. Default is True.
**kwargs: parameters passed to either ``torchvision.models.regnet.RegNet`` or
``torchvision.models.regnet.BlockParams`` class. Please refer to the `source code
<https://github.com/pytorch/vision/blob/main/torchvision/models/regnet.py>`_
for more detail about the classes.
.. autoclass:: torchvision.models.RegNet_X_1_6GF_Weights
:members:
"""
weights = RegNet_X_1_6GF_Weights.verify(weights)
params = BlockParams.from_init_params(depth=18, w_0=80, w_a=34.01, w_m=2.25, group_width=24, **kwargs)
return _regnet(params, weights, progress, **kwargs)
@register_model()
@handle_legacy_interface(weights=("pretrained", RegNet_X_3_2GF_Weights.IMAGENET1K_V1))
def regnet_x_3_2gf(*, weights: Optional[RegNet_X_3_2GF_Weights] = None, progress: bool = True, **kwargs: Any) -> RegNet:
"""
Constructs a RegNetX_3.2GF architecture from
`Designing Network Design Spaces <https://arxiv.org/abs/2003.13678>`_.
Args:
weights (:class:`~torchvision.models.RegNet_X_3_2GF_Weights`, optional): The pretrained weights to use.
See :class:`~torchvision.models.RegNet_X_3_2GF_Weights` below for more details and possible values.
By default, no pretrained weights are used.
progress (bool, optional): If True, displays a progress bar of the download to stderr. Default is True.
**kwargs: parameters passed to either ``torchvision.models.regnet.RegNet`` or
``torchvision.models.regnet.BlockParams`` class. Please refer to the `source code
<https://github.com/pytorch/vision/blob/main/torchvision/models/regnet.py>`_
for more detail about the classes.
.. autoclass:: torchvision.models.RegNet_X_3_2GF_Weights
:members:
"""
weights = RegNet_X_3_2GF_Weights.verify(weights)
params = BlockParams.from_init_params(depth=25, w_0=88, w_a=26.31, w_m=2.25, group_width=48, **kwargs)
return _regnet(params, weights, progress, **kwargs)
@register_model()
@handle_legacy_interface(weights=("pretrained", RegNet_X_8GF_Weights.IMAGENET1K_V1))
def regnet_x_8gf(*, weights: Optional[RegNet_X_8GF_Weights] = None, progress: bool = True, **kwargs: Any) -> RegNet:
"""
Constructs a RegNetX_8GF architecture from
`Designing Network Design Spaces <https://arxiv.org/abs/2003.13678>`_.
Args:
weights (:class:`~torchvision.models.RegNet_X_8GF_Weights`, optional): The pretrained weights to use.
See :class:`~torchvision.models.RegNet_X_8GF_Weights` below for more details and possible values.
By default, no pretrained weights are used.
progress (bool, optional): If True, displays a progress bar of the download to stderr. Default is True.
**kwargs: parameters passed to either ``torchvision.models.regnet.RegNet`` or
``torchvision.models.regnet.BlockParams`` class. Please refer to the `source code
<https://github.com/pytorch/vision/blob/main/torchvision/models/regnet.py>`_
for more detail about the classes.
.. autoclass:: torchvision.models.RegNet_X_8GF_Weights
:members:
"""
weights = RegNet_X_8GF_Weights.verify(weights)
params = BlockParams.from_init_params(depth=23, w_0=80, w_a=49.56, w_m=2.88, group_width=120, **kwargs)
return _regnet(params, weights, progress, **kwargs)
@register_model()
@handle_legacy_interface(weights=("pretrained", RegNet_X_16GF_Weights.IMAGENET1K_V1))
def regnet_x_16gf(*, weights: Optional[RegNet_X_16GF_Weights] = None, progress: bool = True, **kwargs: Any) -> RegNet:
"""
Constructs a RegNetX_16GF architecture from
`Designing Network Design Spaces <https://arxiv.org/abs/2003.13678>`_.
Args:
weights (:class:`~torchvision.models.RegNet_X_16GF_Weights`, optional): The pretrained weights to use.
See :class:`~torchvision.models.RegNet_X_16GF_Weights` below for more details and possible values.
By default, no pretrained weights are used.
progress (bool, optional): If True, displays a progress bar of the download to stderr. Default is True.
**kwargs: parameters passed to either ``torchvision.models.regnet.RegNet`` or
``torchvision.models.regnet.BlockParams`` class. Please refer to the `source code
<https://github.com/pytorch/vision/blob/main/torchvision/models/regnet.py>`_
for more detail about the classes.
.. autoclass:: torchvision.models.RegNet_X_16GF_Weights
:members:
"""
weights = RegNet_X_16GF_Weights.verify(weights)
params = BlockParams.from_init_params(depth=22, w_0=216, w_a=55.59, w_m=2.1, group_width=128, **kwargs)
return _regnet(params, weights, progress, **kwargs)
@register_model()
@handle_legacy_interface(weights=("pretrained", RegNet_X_32GF_Weights.IMAGENET1K_V1))
def regnet_x_32gf(*, weights: Optional[RegNet_X_32GF_Weights] = None, progress: bool = True, **kwargs: Any) -> RegNet:
"""
Constructs a RegNetX_32GF architecture from
`Designing Network Design Spaces <https://arxiv.org/abs/2003.13678>`_.
Args:
weights (:class:`~torchvision.models.RegNet_X_32GF_Weights`, optional): The pretrained weights to use.
See :class:`~torchvision.models.RegNet_X_32GF_Weights` below for more details and possible values.
By default, no pretrained weights are used.
progress (bool, optional): If True, displays a progress bar of the download to stderr. Default is True.
**kwargs: parameters passed to either ``torchvision.models.regnet.RegNet`` or
``torchvision.models.regnet.BlockParams`` class. Please refer to the `source code
<https://github.com/pytorch/vision/blob/main/torchvision/models/regnet.py>`_
for more detail about the classes.
.. autoclass:: torchvision.models.RegNet_X_32GF_Weights
:members:
"""
weights = RegNet_X_32GF_Weights.verify(weights)
params = BlockParams.from_init_params(depth=23, w_0=320, w_a=69.86, w_m=2.0, group_width=168, **kwargs)
return _regnet(params, weights, progress, **kwargs)
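# A hedged usage sketch for the builders above, assuming this module is consumed
# through the torchvision.models namespace (which re-exports these names):
#
#   from torchvision.models import regnet_y_400mf, RegNet_Y_400MF_Weights
#   weights = RegNet_Y_400MF_Weights.IMAGENET1K_V2
#   model = regnet_y_400mf(weights=weights).eval()
#   preprocess = weights.transforms()  # preprocessing matching the chosen weights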
|
from rtmidi.midiutil import open_midiport
import schedule
import time, datetime, copy
from Midi import MidiEvent
class Sender:
"""
MIDIイベントの送信の機能をまとめたクラス
"""
def __init__(self):
"""
コンストラクタ
"""
self.midi_out, self.port = open_midiport(None, "output", client_name = 'sender')
        self.events = []  # information about the MIDI events sent so far
        self.index = 0  # index of the next MIDI event to send
def send_message(self, message):
"""
MIDIメッセージを送信する
Parameters
----------
message : list
送りたいMIDIメッセージ
"""
self.midi_out.send_message(message)
self.events.append(MidiEvent(message, datetime.datetime.now())) # append() : O(1)
def send_events(self, events, interval):
"""
MIDIイベント群を送信する
Parameters
----------
events : MidiEvents
送りたいMIDIイベント群
interval : float
ループ間隔の値[sec]
"""
        # Compute the (absolute) send time of each MIDI message
events.to_abs_time()
        # Configure the scheduler to run the MIDI send job every interval seconds
schedule.every(interval).seconds.do(self.send_events_in_time, events, time.time())
        # Keep looping until every MIDI event has been sent
while self.index != len(events):
schedule.run_pending()
self.close_midi_out()
def send_events_in_time(self, events, start_time):
"""
送信時刻が経過時間以下のMIDIイベントを送信する
Parameters
----------
events : MidiEvents
送りたいMIDIイベント群
start_time : float
MIDIシーケンスの送信開始時刻
"""
        # Compute the elapsed time
elapsed_time = time.time() - start_time
        # Collect the MIDI events whose send time is not later than the elapsed time
events_in_time = []
for index in range(self.index, len(events)):
if events[index].time <= elapsed_time:
events_in_time.append(events[index]) # append() : O(1)
else:
break
        # Send the collected MIDI events
for event in events_in_time:
self.send_message(event.message)
        # Update the index of the next MIDI event to send
self.index += len(events_in_time)
def show_send_events_and_times(self):
"""
送信したMIDIイベントと送信時刻を表示する
"""
for event in self.events:
print('MIDI OUT : {} @ {}'.format(event.message, event.time))
def close_midi_out(self):
"""
MIDI OUTポートを閉じる
"""
self.midi_out.close_port()
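# A hedged usage sketch (the MidiEvents construction comes from the local Midi
# module and is not shown here; the interval value is an illustrative choice):
#
#   sender = Sender()
#   sender.send_events(events, interval=0.01)  # events : MidiEvents
#   sender.show_send_events_and_times()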
|
import numpy as np
import pandas as pd
from KNearestRegressor import KNearestRegressors
from sklearn.metrics import r2_score
from sklearn.metrics import accuracy_score
#taking inputs
data=pd.read_csv('Social_Network_Ads.csv')
X=data.iloc[:,2:4].values
y=data.iloc[:,-1].values
#using train_test_split function
from sklearn.model_selection import train_test_split
X_train,X_test,y_train,y_test=train_test_split(X,y,test_size=0.2)
#using StandardScaler function
from sklearn.preprocessing import StandardScaler
scaler=StandardScaler()
X_train=scaler.fit_transform(X_train)
X_test=scaler.transform(X_test)
#an object of knn
knn=KNearestRegressors(k=5)
knn.fit(X_train,y_train)
#using the predict() function (store the result so the metrics below can use it)
y_pred = knn.predict(np.array(X_test).reshape(len(X_test), len(X_test[0])))
print(round((r2_score(y_test, y_pred) * 100)))
print(round((accuracy_score(y_test, y_pred) * 100)))
|
'''
Created on 2015. 6. 20.
@author: 윤선
'''
import wx
class MyFrame(wx.Frame):
def __init__(self, parent, title):
super(MyFrame, self).__init__(parent,\
title=title, size=(700,500))
        # Call the method that initializes the UI
        self.InitUi()
        self.Center()
        self.Show()
    # Define the method that initializes the UI
def InitUi(self):
        # Panel object
        panel = wx.Panel(self)
        # Regular button
        btn1 = wx.Button(panel, label="눌러보셈", pos=(5,5))
        # Toggle button
        btn2 = wx.ToggleButton(panel, label="토글버튼", pos=(120, 5))
        btn3 = wx.Button(panel, label="종료", pos=(235, 5))
        # Assign an arbitrary id to each button object.
btn1.id = 1
btn2.id = 2
btn3.id = 3
        # Register an event handler on btn1
        btn1.Bind(wx.EVT_BUTTON, self.OnClicked)
        # Register an event handler on btn2
        btn2.Bind(wx.EVT_TOGGLEBUTTON, self.OnClicked)
        btn3.Bind(wx.EVT_BUTTON, self.OnClicked)
        # Static text labels
        wx.StaticText(panel, label="아이디", pos=(5, 40))
        wx.StaticText(panel, label="비밀번호", pos=(5, 70))
        # Text controls
        self.inputId = wx.TextCtrl(panel, pos=(100, 40))
        self.inputPwd = wx.TextCtrl(panel, pos=(100, 70))
        # Login button
        loginBtn = wx.Button(panel, label="로그인", pos=(100, 100))
        # Register an event handler on the button
loginBtn.Bind(wx.EVT_BUTTON, self.LoginBtnClicked)
    # Define the method executed when the login button is pressed
    def LoginBtnClicked(self, event):
        # 1. Read the entered id and password.
        id = self.inputId.GetValue()
        pwd = self.inputPwd.GetValue()
        msg = None
        # 2. Check whether they match the contents stored in the DB.
        if id == "gura" and pwd == "1234":
            msg = "gura 회원님 반갑습니다."
        else:
            msg = "id 혹은 비밀번호가 틀려요!"
        # 3. Notify the user
        dlg = wx.MessageDialog(self, msg, "알림", wx.OK)
        dlg.ShowModal()
        dlg.Destroy()
    def OnClicked(self, event):
        # Get a reference to the button that fired the event.
        btn = event.GetEventObject()
        # Read the id assigned to the pressed button and act on it.
if btn.id == 1:
dlg = wx.MessageDialog(self, "일반 버튼 처리", "처리", wx.OK)
dlg.ShowModal()
dlg.Destroy()
elif btn.id == 2:
isActive = event.GetEventObject().GetValue()
print("isActive : ", isActive)
elif btn.id == 3:
self.Close(True)
if __name__ == '__main__':
app = wx.App()
MyFrame(None, title="버튼")
app.MainLoop()
|
from django.db import models
from django.urls import reverse
from visits.models import PerformanceBase
# Create your models here.
class Appointment(models.Model):
creation_date = models.DateField(auto_now_add=True)
appointment_date = models.DateField(verbose_name='date')
appointment_time = models.TimeField(verbose_name='time')
patient = models.ForeignKey('patients.Patient', on_delete=models.CASCADE)
def get_absolute_url(self):
return reverse("appointments:appointment_detail", kwargs={"pk": self.pk})
class PerformanceAppointment(PerformanceBase):
appointment_id = models.ForeignKey(Appointment, on_delete=models.CASCADE)
def get_absolute_url(self):
return reverse("appointments:appointment_detail", kwargs={"pk": self.appointment_id.pk})
|
from flask import Flask
app = Flask(__name__)
from knowledge_graph import routes, config
|
# ----------------------------------------------------
# Name: Nicholas Houchois
# UNI: nbh2119
#
# Tester file for the effects.py module
# ----------------------------------------------------
import effects
def main():
print("Please choose the effect you would like to use: ")
print("1) object_filter")
print("2) shades_of_gray")
print("3) negate_red")
print("4) negate_green")
print("5) negate blue")
print("6) mirror")
number = input("Please enter a number: ")
if number == "1":
infile1_name = input("Please enter the name of input file 1: ")
infile2_name = input("Please enter the name of input file 2: ")
infile3_name = input("Please enter the name of input file 3: ")
outfile_name = input("Please enter the name of the output file: ")
# open input files
infile_1 = open(infile1_name, "r")
infile_2 = open(infile2_name, "r")
infile_3 = open(infile3_name, "r")
infile_list = [infile_1, infile_2, infile_3]
print("working...")
effects.object_filter(infile_list, outfile_name)
infile_1.close()
infile_2.close()
infile_3.close()
else:
infile_name = input("Please enter the name of the input file: ")
outfile_name = input("Please enter the name of the output file: ")
infile = open(infile_name, "r")
print("working...")
if number == "2":
effects.shades_of_gray(infile, outfile_name)
elif number == "3":
effects.negate_red(infile, outfile_name)
elif number == "4":
effects.negate_green(infile, outfile_name)
elif number == "5":
effects.negate_blue(infile, outfile_name)
elif number == "6":
effects.mirror(infile, outfile_name)
infile.close()
main()
|
from django.urls import path, include
from .views import listaproductos, detalleproductos, MyPDF, some_view, DemoPDFView, render_pdf_view, agregarproveedor, \
agregarproveedor2, buscarproducto, ListarProveedor, ProveedorCreate
from venta.apis import urls as apiurls
app_name = "app1"
urlpatterns = [
path('listaproductos', listaproductos, name="lista"),
path('listaproductos/<int:pk>', detalleproductos, name="detalle"),
path('agregarproveedor', agregarproveedor, name="agregarp"),
path('agregarproveedor2', agregarproveedor2, name="agregarp2"),
path('buscarproduto', buscarproducto, name="buscarp"),
path('listarp', ListarProveedor.as_view(), name="listarp"),
path('new', ProveedorCreate.as_view(), name="listarp"),
path('imprimir/<int:pk>/informe', some_view, name="imprimir"),
path('api/', include(apiurls.router.urls)),
# path('listarusuario', primeravista),
# path('listarusuario2', primeravista),
# path('admin/', admin.site.urls),
]
|
import os
from os import listdir
from order import Order
from drone import Drone
from warehouse import Warehouse
from math import sqrt,ceil
PATH = "../Google/"
class Main(object):
def __init__(self, filename):
super(Main, self).__init__()
self.filename = filename
self.orders=[]
self.warehouses=[]
self.drones=[]
self.currentTurn = 0
self.maxTurn=0
self.weightList=[]
self.maxWeight=0
self.busyDrone=[]
self.availableDrone=[]
def parse(self):
        f=open(os.path.join(PATH, self.filename),"r")  # input files live under PATH
line=f.readline()
rows,columns,drones,turns,maxWeight=list(map(int, line.strip().split(" ")))
self.maxTurn=turns
self.maxWeight=maxWeight
self.log=["" for i in range(drones)]
line=f.readline()
nbProductType=int(line)
line=f.readline()
productWeight=list(map(int, line.strip().split(" ")))
self.weightList=productWeight
line=f.readline()
nbWareHouse=int(line)
for i in range(nbWareHouse):
line=f.readline()
x,y=list(map(int, line.strip().split(" ")))
line=f.readline()
productStored=list(map(int, line.strip().split(" ")))
self.warehouses.append(Warehouse(x,y,productStored,i))
for drone in range (drones):
self.drones.append(Drone(self.warehouses[0].x,self.warehouses[0].y,maxWeight,drone,productWeight))
self.availableDrone.append(drone)
line=f.readline()
nbOrder=int(line)
for i in range(nbOrder):
line=f.readline()
x,y=list(map(int, line.strip().split(" ")))
line=f.readline()
nbItems=int(line)
line=f.readline()
items=list(map(int, line.strip().split(" ")))
self.orders.append(Order(x,y,nbItems,items,i))
def move(self,droneID,x,y):
        turns=sqrt( (abs(self.drones[droneID].x - x))**2 + (abs(self.drones[droneID].y - y))**2 )
self.drones[droneID].x=x
self.drones[droneID].y=y
return ceil(turns)
def load(self,droneID,warehouseID,itemID,nbItem, t):
if (self.drones[droneID].x != self.warehouses[warehouseID].x or self.drones[droneID].y != self.warehouses[warehouseID].y):
self.currentTurn+=self.move(droneID,self.warehouses[warehouseID].x,self.warehouses[warehouseID].y)
self.warehouses[warehouseID].pickProduct(itemID, nbItem)
self.drones[droneID].addItem(nbItem,itemID)
self.currentTurn+=1
#print(str(droneID)+" L "+str(warehouseID)+" "+str(itemID)+" "+str(nbItem))
if t:
self.log[droneID]+=('\n'+str(droneID)+" L "+str(warehouseID)+" "+str(itemID)+" "+str(nbItem))
def unload(self,droneID,warehouseID,itemID,nbItem):
if (self.drones[droneID].x != self.warehouses[warehouseID].x or self.drones[droneID].y != self.warehouses[warehouseID].y):
self.currentTurn+=self.move(droneID,self.warehouses[warehouseID].x,self.warehouses[warehouseID].y)
self.drones[droneID].removeItem(nbItem,itemID)
self.warehouses[warehouseID].addProduct(itemID, nbItem)
self.currentTurn+=1
#print(str(droneID)+" U "+str(warehouseID)+" "+str(itemID)+" "+str(nbItem))
self.log[droneID]+=('\n'+str(droneID)+" U "+str(warehouseID)+" "+str(itemID)+" "+str(nbItem))
def wait(self,droneID,nbWait):
#print(str(droneID)+" W "+str(nbWait))
self.log[droneID]+=('\n'+str(droneID)+" W "+str(nbWait))
def deliver(self,droneID,orderID,itemID,nbItem, t):
if (self.drones[droneID].x != self.orders[orderID].x or self.drones[droneID].y != self.orders[orderID].y):
self.currentTurn+=self.move(droneID,self.orders[orderID].x,self.orders[orderID].y)
self.drones[droneID].removeItem(nbItem,itemID)
self.orders[orderID].deliverItem(nbItem,itemID)
self.currentTurn+=1
#print(str(droneID)+" D "+str(orderID)+" "+str(itemID)+" "+str(nbItem))
if t:
self.log[droneID]+=('\n'+str(droneID)+" D "+str(orderID)+" "+str(itemID)+" "+str(nbItem))
def handleDelivery(self,orderID):
currentOrder=self.orders[orderID]
items=currentOrder.items
todo = []
for key in items.keys():
weight=self.weightList[key]
deliveryItemWeight=weight*items[key]
if deliveryItemWeight<self.maxWeight:
selectedDrone=self.availableDrone.pop()
self.busyDrone.append(selectedDrone)
todo.append((key,items[key],selectedDrone,orderID))
if not self.loadAndDeliver(key,items[key],selectedDrone,orderID, False):
return False
else:
return False
for elem in todo:
self.loadAndDeliverReally(elem[0],elem[1],elem[2],elem[3], True)
def loadAndDeliver(self,item,nbItem, droneID, orderID, t):
res = self.chooseWarehouse(self.drones[droneID].x,self.drones[droneID].y,item,nbItem)
if res == -1:
del self.busyDrone[self.busyDrone.index(droneID)]
self.availableDrone.append(droneID)
return False
else:
#self.load(droneID, res.ID, item, nbItem, t)
#self.deliver(droneID, orderID, item, nbItem, t)
del self.busyDrone[self.busyDrone.index(droneID)]
self.availableDrone.insert(0, droneID)
return True
def loadAndDeliverReally(self,item,nbItem, droneID, orderID, t):
res = self.chooseWarehouse(self.drones[droneID].x,self.drones[droneID].y,item,nbItem)
        self.load(droneID, res.ID, item, nbItem, t)
        self.deliver(droneID, orderID, item, nbItem, t)
        return True
def chooseWarehouse(self,x,y,item,nbItem):
warehouses=[]
ok = False
for warehouse in self.warehouses:
if warehouse.getProductNumber(item) >= nbItem:
warehouses.append(warehouse)
ok = True
if not ok:
return -1
maxDistance = 999999999999
closestWareHouse = None
for warehouse in warehouses:
distance = sqrt( (abs(warehouse.getLocation()[0] - x))**2 + (abs(warehouse.getLocation()[1] - y)**2 ))
if distance <= maxDistance:
maxDistance = distance
closestWareHouse = warehouse
return closestWareHouse
def simulation(self):
while self.currentTurn < self.maxTurn:
for order in self.orders:
self.handleDelivery(order.ID)
break
def writeOutput(self):
f = open("./output/"+self.filename, 'w')
lel = 0
for t in self.log:
lel+= len(t.split('\n'))-1
f.write(str(lel))
for t in self.log:
f.write(t)
print(lel)
if __name__ == '__main__':
allFiles=os.listdir(PATH)
for files in allFiles:
if (".in" in files):
parser=Main(files)
parser.parse()
parser.simulation()
print(parser.currentTurn, parser.maxTurn)
parser.writeOutput()
print(files)
|
from controllers.property import PropertyController
from core.db import session
from flask_restful import Resource
from flask_restful import abort
from flask_restful import fields
from flask_restful import marshal_with
from flask_restful import reqparse
from models.property import Property as PropertyModel
from resources.users import output_fields as user_output_fields
from resources import parser
output_fields = {
'id': fields.Integer,
'created': fields.DateTime,
'modified': fields.DateTime,
'type': fields.String,
'name': fields.String,
'address': fields.String,
'city': fields.String,
'state': fields.String,
'postal_zip': fields.String,
'landlord': fields.Nested(user_output_fields),
'tenants': fields.Raw,
'meta': fields.Raw,
}
class PropertyResource(Resource):
def __init__(self, **args):
self.landlord_id = args['landlord_id']
@marshal_with(output_fields)
def get(self, id):
try:
            property_controller = PropertyController(id=id, landlord_id=self.landlord_id)
            return property_controller.property
except Exception:
abort(404, message="Property not loaded")
@marshal_with(output_fields)
def put(self, id):
try:
            property_controller = PropertyController(id=id, landlord_id=self.landlord_id)
parser.add_argument('name', type=str, trim=True, store_missing=False)
parser.add_argument('address', type=str, trim=True, store_missing=False)
parser.add_argument('city', type=str, trim=True, store_missing=False)
parser.add_argument('state', type=str, trim=True, store_missing=False)
parser.add_argument('postal_zip', type=str, trim=True, store_missing=False)
parsed_args = parser.parse_args()
            property_controller.update(**parsed_args)
            return property_controller.property, 202
except Exception:
abort(400, message="Error editing Property")
class PropertiesList(Resource):
def __init__(self, **kwargs):
self.landlord_id = kwargs['landlord_id']
@marshal_with(output_fields)
def get(self):
return session.query(PropertyModel).all()
@marshal_with(output_fields)
def post(self):
try:
# Processing input arguments
parser.add_argument('name', type=str, trim=True)
parser.add_argument('address', type=str, required=True, trim=True)
parser.add_argument('city', type=str, required=True, trim=True)
parser.add_argument('state', type=str, trim=True)
parser.add_argument('postal_zip', type=str, trim=True)
parsed_args = parser.parse_args()
parsed_args['landlord_id'] = self.landlord_id
property = PropertyController().create(**parsed_args)
return property, 201
except Exception as e:
abort(400, message=str(e))
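# Note (a sketch, not part of the original resources): `parser` is imported at module
# level, so the add_argument() calls inside put()/post() keep accumulating arguments
# for the lifetime of the process. One common pattern is to work on a per-request
# copy instead, e.g.:
#
#   local_parser = parser.copy()
#   local_parser.add_argument('name', type=str, trim=True, store_missing=False)
#   parsed_args = local_parser.parse_args()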
|
import configparser
import os
import logging
import multiprocessing
from datetime import datetime as dt
from apscheduler.schedulers.background import BlockingScheduler
PATH = r'\\app-solaroad01\data\Setup\Data'
DB_PATH = 'db'
DB_FILE = 'processedFiles.data'
LOG_PATH = 'log'
CONFIG_FILE = 'config.ini'
formatter = logging.Formatter('%(asctime)s: %(levelname)-8s - [%(name)s] %(message)s')
logger = logging.getLogger('solaroad')
uploadTime = multiprocessing.Value('i', 2)  # 'i' = signed int typecode; 2 AM every day
scheduler = BlockingScheduler()
def doProcessing():
config = configparser.ConfigParser()
config.read(CONFIG_FILE)
currentFiles = []
processedFiles = []
logfile = os.path.join(LOG_PATH, dt.now().strftime('log_%d_%m_%Y.log'))
fh = logging.FileHandler(logfile)
fh.setLevel(logging.DEBUG)
fh.setFormatter(formatter)
logger.addHandler(fh)
dbFile = os.path.join(DB_PATH, DB_FILE)
    with open(dbFile, 'r') as fileHandle:
        for processedFile in fileHandle.readlines():
            processedFiles.append(processedFile.strip())  # strip the newline so paths compare equal
for dirPath, dirnames, fileNames in os.walk(PATH):
for fileName in fileNames:
line = os.path.join(dirPath, fileName)
currentFiles.append(line)
for file in [f for f in currentFiles if f not in processedFiles]:
logger.debug("%s needs to be processed!", file)
# process(file)
        with open(dbFile, 'a') as fileHandle:  # append so earlier entries are preserved
            fileHandle.write(file + "\n")
if 'upload_time' in config['SensorCloud']:
try:
rescheduledTime = int(config['SensorCloud']['upload_time'])
if 0 <= rescheduledTime <= 23 and rescheduledTime != uploadTime.value:
                logger.debug('Rescheduling processing_job to %s:00 hours from %s:00 hours every day!',
                             rescheduledTime, uploadTime.value)
                scheduler.reschedule_job('processing_job', trigger='cron', hour=rescheduledTime)
uploadTime.value = rescheduledTime
except ValueError:
logger.error('%s is not a valid upload time. Expecting value in the range (0-23).',
config['SensorCloud']['upload_time'])
logger.debug('End of processing for the day!')
logger.removeHandler(fh)
fh.close()
scheduler.add_job(doProcessing, 'cron', hour=str(uploadTime.value), id='processing_job')
scheduler.start()
|
#region Import Modules
from fluid_properties import *
import numpy as np
import pandas as pd
import math
from pyXSteam.XSteam import XSteam
import matplotlib.pyplot as plt
import pprint
#endregion
#region Inputs:
# Geometry
geometry={}
geometry['L']=0.69
geometry['W']=0.4
geometry['H']=2*(2/3)
# Ambient Conditions
external={}
external['emissivity']=1.0
external['v_Wind']=15
T_amb=np.array([-40,-30,-25,-20,-15,-10,-5,0])
# Internal Volume
internal={}
internal['T_in']=1
#endregion
#region Calculations
fig = plt.figure()
ax1 = fig.add_subplot(111)
Q_conv=np.zeros(len(T_amb))
Q_rademit=np.zeros(len(T_amb))
Q_balance=np.zeros(len(T_amb))
i=0
j=0
for T_ambi in T_amb:
# Ambient Conditions
external['A_sup'] = 2 * ( geometry['L'] * geometry['W'] + geometry['L'] * geometry['H'] + geometry['W'] * geometry['H']) # [m^2]
external['T_amb'] = T_ambi
sigma = 5.67 * 10 ** (-8)
# Internal Volume
internal['V_total'] = geometry['H'] * geometry['L'] * geometry['W']
# Conduction
# Calculate convection coefficient
external['T_film'] = (internal['T_in'] + external['T_amb']) * 0.5
external['mu'] = thermal_properties('mu', 'air', external['T_film']) # [m^2/(s)]
external['rho'] = thermal_properties('rho', 'air', external['T_film']) # [kg/(m^3)]
external['k'] = thermal_properties('k', 'air', external['T_film']) # [W/m*°K]
external['Pr'] = thermal_properties('Pr', 'air', external['T_film'])
#Front Face
external_facefront=external.copy()
external_facefront['L_characteristic']=geometry['H']
external_facefront['Re']=(external_facefront['rho']*external_facefront['v_Wind']*external_facefront['L_characteristic'])/external_facefront['mu']
external_facefront=nusselt_external_frontplate(external_facefront)
external_facefront['h'] = (external_facefront['Nu'] * external_facefront['k']) / (external_facefront['L_characteristic'])
external_facefront['A_sup']=geometry['H']*geometry['L']
#Laterals
external_laterals = external.copy()
external_laterals['L_characteristic'] = geometry['W']
external_laterals['Re']=(external_laterals['rho']*external_laterals['v_Wind']*external_laterals['L_characteristic'])/external_laterals['mu']
external_laterals=nusselt_external_flatplate(external_laterals)
external_laterals['h'] = (external_laterals['Nu'] * external_laterals['k']) / (external_laterals['L_characteristic'])
external_laterals['A_sup'] = 2*geometry['H'] * geometry['W']
#Top Face
external_top = external_laterals.copy()
external_top['A_sup'] = geometry['L'] * geometry['W']
#Back Face
external_faceback=external.copy()
external_faceback['L_characteristic']=geometry['H']
external_faceback['Re']=(external_faceback['rho']*external_faceback['v_Wind']*external_faceback['L_characteristic'])/external_faceback['mu']
external_faceback=nusselt_external_backplate(external_faceback)
external_faceback['h'] = (external_faceback['Nu'] * external_faceback['k']) / (external_faceback['L_characteristic'])
external_faceback['A_sup'] = geometry['H'] * geometry['L']
external['A_sup']=(external_facefront['A_sup']+external_laterals['A_sup']+external_top['A_sup']+external_faceback['A_sup'])
external['h_ext']=((external_facefront['h']*external_facefront['A_sup'])+(external_laterals['h']*external_laterals['A_sup'])+(external_top['h']*external_top['A_sup'])+(external_faceback['h']*external_faceback['A_sup']))/(external['A_sup'])
# Calculate ventilation needs:
ventilation = {}
external['Q_rademit'] = sigma * external['A_sup'] * external['emissivity'] * (
((internal['T_in'] + 273.15) ** 4) - ((external['T_amb'] + 273.15) ** 4))
external['Q_conv'] = external['h_ext'] * (external['A_sup']) * (internal['T_in'] - external['T_amb'])
Q_balance[i]=(external['Q_conv']+external['Q_rademit'])
Q_conv[i] = external['Q_conv']
Q_rademit[i] = external['Q_rademit']
i+=1
ax1.scatter(T_amb, Q_balance, label=str('Internal Temperature = '+ str(internal['T_in']) + ' [℃]'))
i=0
#endregion
#region Plots
# Plot1
ax1.set_xlabel('Ambient Temperature - T_{amb} - [ºC]')
ax1.set_xticks(T_amb)
ax1.set_ylabel('Thermal Power transferred to the exterior - [kW]')
ax1.set_title('Thermal Power Required to maintain internal temperature at T= '+str(internal['T_in'])+' [ºC]')
ax1.legend()
fig.show()
#endregion
|
from CallBackOperator import CallBackOperator
from SignalGenerationPackage.Sinus.SinusUIParameters import SinusUIParameters
class SinusOmegaCallBackOperator(CallBackOperator):
def __init__(self, model):
super().__init__(model)
def ConnectCallBack(self, window):
self.window = window
self.setup_callback_and_synchronize_slider(
validator_min=SinusUIParameters.OmegaSliderMin,
validator_max=SinusUIParameters.OmegaSliderMax,
validator_accuracy=SinusUIParameters.OmegaLineEditAccuracy,
line_edit=self.window.OmegalineEdit,
slider_min=SinusUIParameters.OmegaSliderMin,
slider_max=SinusUIParameters.OmegaSliderMax,
slider=self.window.horizontalSliderOmega,
update_slider_func=self.update_omega_slider,
update_line_edit_func=self.update_omega_line_edit
)
def update_omega_slider(self):
try:
self.update_slider(
line_edit=self.window.OmegalineEdit,
slider=self.window.horizontalSliderOmega,
calc_constant=SinusUIParameters.OmegaCalcConstant
)
except:
import sys
from LoggersConfig import loggers
loggers['Debug'].debug(f'SinusOmegaCallBackOperator: {sys.exc_info()}')
def update_omega_line_edit(self):
try:
self.update_line_edit(
line_edit=self.window.OmegalineEdit,
slider=self.window.horizontalSliderOmega,
calc_constant=SinusUIParameters.OmegaCalcConstant,
update_model_func=self.update_omega
)
except:
import sys
from LoggersConfig import loggers
loggers['Debug'].debug(f'SinusOmegaCallBackOperator: {sys.exc_info()}')
def update_omega(self, val):
self.model.omega = val
|
from typing import Optional
from dataclasses import dataclass, field
from .base import StoreCfg, Schema
@dataclass
class NewsgroupStoreCfg(StoreCfg):
path: str = 'flex'
name: str = 'newsgroupbao'
local: bool = True
schema: Optional[Schema] = field(default_factory=lambda: Schema(
example_id=None,
document=['text'],
))
formatter: str = 'fewshot.formatters.newsgroupbao'
@dataclass
class ReutersStoreCfg(StoreCfg):
path: str = 'flex'
name: str = 'reutersbao'
local: bool = True
schema: Optional[Schema] = field(default_factory=lambda: Schema(
example_id=None,
document=['text'],
))
formatter: str = 'fewshot.formatters.reutersbao'
@dataclass
class HuffpostStoreCfg(StoreCfg):
path: str = 'flex'
name: str = 'huffpostbao'
needs_local_datasets_dir: bool = True
local: bool = True
schema: Optional[Schema] = field(default_factory=lambda: Schema(
example_id=None,
document=['headline'],
label='category',
))
formatter: str = 'fewshot.formatters.huffpostbao'
@dataclass
class FewrelStoreCfg(StoreCfg):
path: str = 'flex'
name: str = 'fewrelbao'
local: bool = True
schema: Optional[Schema] = field(default_factory=lambda: Schema(
example_id=None,
document=[], # It's complicated
label='wikidata_property_name',
))
formatter: str = 'fewshot.formatters.fewrelbao'
@dataclass
class AmazonStoreCfg(StoreCfg):
path: str = 'amazon'
local: bool = True
schema: Optional[Schema] = field(default_factory=lambda: Schema(
example_id=None,
document=['text'],
))
formatter: str = 'fewshot.formatters.amazon'
|
""" File: shapes.py
Author: Abraham Aruguete
Purpose: So instead of doing something, I'm going to now implement some data structures. Please oh God be easy. """
def shape_alpha():
    x = [None, None]  # placeholder slots (None), filled in below
    x[0] = [10, None, None, 40]
x[1] = [[1.1, -17], [123, 456]]
x[0][1] = "abc"
x[0][2] = "jkl"
def shape_bravo():
    x = [None, None]
    x[0] = [None, None]
    x[1] = [None, None]
x[1][1] = "rufus"
x[1][0] = ["bogus" , "righteous" ]
x[0][1] = x[1][0]
x[0][0] = ["whoa", "excellent"]
def shape_charlie(arg1):
    x = [None, arg1]
    x[0] = [None, arg1]
    x[0][0] = [None, arg1]
x[0][0][0] = [None, arg1]
def shape_delta(arg1, arg2):
    x = [arg1, arg2, None, 10]
    x[2] = [arg1, None, 20, arg2]
x[2][1] = [arg1, arg2, [arg1, arg2], 30]
def shape_echo(arg1, arg2, arg3):
    x = [None, arg1]
    x[0] = [None, arg2]
    x[0][0] = [None, arg3]
x[0][0][0] = x
|
lst = []
try:
while 1:
result = input()
        if result.isupper():
L=result.split()
for i in L:
if i not in lst:
lst.append(i)
else:
continue
continue
else:
print(len(lst))
except EOFError:
print(len(lst))
|
import pandas as pd
from sklearn.naive_bayes import MultinomialNB
from sklearn.preprocessing import LabelEncoder
from sklearn.metrics import confusion_matrix
df = pd.read_csv('bbc.csv')
df.repertoire.value_counts()
'''
Encode the categories of the target variable
'''
le = LabelEncoder()
df['repertoire'] = le.fit_transform(df.repertoire)
df.repertoire.value_counts()
y = df.repertoire
'''
Vectorize the text with TfidfVectorizer - TF-IDF
'''
from sklearn.feature_extraction.text import TfidfVectorizer
vectorizer = TfidfVectorizer(stop_words ='english')
X = vectorizer.fit_transform(df.contenu)
'''
Train a Multinomial Naive Bayes model
'''
clf = MultinomialNB(alpha = 0.1)
clf.fit(X,y)
yhat = clf.predict(X)
'''
The classification is almost perfect
'''
clf.score(X,y)
confusion_matrix(yhat, y)
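'''
A minimal sketch (not in the original script) of a held-out evaluation, since the
score above is computed on the training data and is therefore optimistic.
'''
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)
clf_holdout = MultinomialNB(alpha=0.1)
clf_holdout.fit(X_train, y_train)
print(clf_holdout.score(X_test, y_test))
print(confusion_matrix(clf_holdout.predict(X_test), y_test))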
|
import pygame,sys,random
pygame.init()
screen = pygame.display.set_mode([600,600])
screen.fill([255,255,255])
for i in range(50):
width = random.randint(0,250)
height = random.randint(0,250)
top = random.randint(0,250)
left = random.randint(0,250)
pygame.draw.rect(screen,[0,0,0],[left,top,width,height],1)
pygame.display.flip()
running = True
while running:
for event in pygame.event.get():
if event.type == pygame.QUIT:
running = False
pygame.quit()
|
# Listcomps
symbols = '$¢£¥€¤'
codes = [ord(symbol) for symbol in symbols]
codesAsString = ", ".join(str(code) for code in codes)
print("The code points for the string '$¢£¥€¤' are", codesAsString)
# Tuples as records
traveler_ids = [('USA', '31195855'), ('BRA', 'CE342567'), ('ESP', 'XDA205856')]
for passport in sorted(traveler_ids):
print('%s/%s' % passport)
# Using listcomps to nest lists
board = [['_'] * 3 for _ in range(3)]
print("\nUsing a listcomp the original board is", board)
board[0][2] = 'x'
print("Now the board is", board)
board = [['_'] * 3] * 3
print("\nUsing multiplication the board is", board)
board[0][2] = 'x'
print("Now the board is", board)
|
#coding:gb2312
# tuple practice exercise
fruits=("apple","banana","peach","strawberry","orange\n\n")
print("Original items :")
for fruit in fruits:
print(fruit)
#fruits[0]=watermelon
#print(fruits) # modifying tuple elements is not allowed
fruits=("litchi","banana","peach","pear","orange") #修改了其中两种水果
print("Modified items :")
for fruit in fruits:
print(fruit)
|
import numpy as np
from . IonSpeciesScalarQuantity import IonSpeciesScalarQuantity
from . OtherScalarQuantity import OtherScalarQuantity
class OtherIonSpeciesScalarQuantity(IonSpeciesScalarQuantity):
def __init__(self, name, data, description, grid, output, momentumgrid=None):
"""
Constructor.
"""
attr = {'description': description}
super().__init__(name=name, data=data, grid=grid, attr=attr, output=output)
self.time = grid.t[1:]
def __repr__(self):
return self.__str__()
def __str__(self):
"""
Convert this object to a string.
"""
s = '({}) Other ion species scalar quantity of size NI x NT = {} x {}\n'.format(self.name, *self.data.shape)
for i in range(len(self.ions.Z)):
s += " {:2s} (Z = {:3d})\n".format(*self.ions[i])
return s
def __getitem__(self, name):
"""
Direct access to data.
"""
idx = self.ions.getIndex(name)
return OtherScalarQuantity(name='{}_{}'.format(self.name, name), data=self.data[:,idx], description=self.description, grid=self.grid, output=self.output)
|
from django.contrib import admin
from students.models import Student
admin.site.register(Student)
|
data = []
total_yeses = 0
with open("input.txt") as f:
# Use the same input from day 4
current_record = []
for line in f:
if line != "\n":
current_record = current_record + line.split(" ")
else:
current_record = [n.strip() for n in current_record]
data.append(current_record)
current_record = []
# Don't forget the last one!! Doh
current_record = [n.strip() for n in current_record]
data.append(current_record)
# So now we have a list (data) of lists. Each list is one group of passengers' responses
for group in data:
this_group = ""
num_in_group = len(group)
#print(group)
#print("There are " + str(num_in_group) + " in this group")
# Find the longest string in the group
longest = max(group, key=len)
group_yeses = 0
# Loop through the longest string to check whether each letter is also in the other strings
for letter in longest:
letter_ok = True
for response in group:
if letter not in response:
letter_ok = False
# If the letter is OK by now, count it as a yes
if letter_ok:
group_yeses += 1
total_yeses += group_yeses
print(total_yeses)
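# Equivalent calculation using set intersection (a sketch for comparison with the loop
# above); it should print the same value as total_yeses for inputs where each response
# lists a question at most once, as in the original puzzle input.
total_yeses_sets = sum(len(set.intersection(*map(set, group))) for group in data if group)
print(total_yeses_sets)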
|
import tkinter as tk
import app_main.main_ui as win
import app_main.make_widgets as mkw
import app_main.service as s1
import app_sub1.service_Student_Member as s2
import app_sub1.service_Video as s3
def main():
root = tk.Tk()
app = win.AppWindow(root)
root.geometry('%dx%d+%d+%d' % (400, 200, 10, 10))
    ser1=s1.service() # functionality that must run regardless of UI events
ser2=s2.StudentMemberDao()
ser3=s3.video_service(app)
mkw.make(app,ser2,ser3)
app.mainloop()
|
import pymysql
# load the pymysql module when the project runs
pymysql.install_as_MySQLdb()
|
import pymysql.cursors
# create the connection
config = {
'user':'root',
'password':'Bg1234',
'host':'192.168.15.211',
'port':3306,
'database':'bgdb'}
conn = pymysql.connect(**config)
# create a cursor
cur = conn.cursor()
# execute the query SQL
sql = "select * from student"
cur.execute(sql)
# fetch the query results
result_set = cur.fetchall()
if result_set:
for row in result_set:
print ("%s, %s, %s, %s, %s, %s" % (row[0],row[1],row[2],row[3],row[4],row[5]))
# close the cursor and the connection
cur.close()
conn.close()
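# A sketch (not in the original script) of the same query written as a parameterized
# statement; the "id" column name is an assumption about the student table.
#
#   conn = pymysql.connect(**config)
#   cur = conn.cursor()
#   cur.execute("select * from student where id = %s", (1,))
#   print(cur.fetchone())
#   cur.close()
#   conn.close()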
|
from tfcgp.problem import Problem
from tfcgp.config import Config
from tfcgp.evolver import Evolver
from tfcgp.learn_evo import LearnEvolver
from tfcgp.ga import GA
from sklearn import datasets
import numpy as np
import tensorflow as tf
tf.logging.set_verbosity(tf.logging.FATAL)
c = Config()
c.update("cfg/test.yaml")
data = datasets.load_iris()
p = Problem(data.data, data.target)
def test_creation():
e = Evolver(p, c)
assert e.max_fit == 0.0
def test_mutate():
e = Evolver(p, c)
child = e.mutate(e.best)
assert np.any(child.genes != e.best.genes)
assert len(child.genes) == len(e.best.genes)
def test_improvement():
e = Evolver(p, c)
e.run(5)
assert e.max_fit > 0.0
def test_lamarckian():
p = Problem(data.data, data.target, lamarckian=True)
e = Evolver(p, c)
e.run(5)
assert e.max_fit > 0.0
def test_learn_evo():
e = LearnEvolver(p, c)
e.run(5)
assert e.max_fit > 0.0
def test_ga():
e = GA(p, c)
e.run(5)
assert e.max_fit > 0.0
|
i = [73,67,38,33]
for a in i:
if a >37 and a%5!=0:
p = a%5
q = (a-p)+5
print(q)
else:
print(a)
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
from typing import List, Tuple
import pytest
from examples.finite_mtenv_bandit import FiniteMTBanditEnv # noqa: E402
from tests.utils.utils import validate_mtenv
def get_valid_n_tasks_and_arms() -> List[Tuple[int, int]]:
    return [(1, 2), (10, 20), (100, 200)]
def get_invalid_n_tasks_and_arms() -> List[Tuple[int, int]]:
    return [(-1, 2), (0, 3), (1, -2), (3, 0)]
@pytest.mark.parametrize("n_tasks, n_arms", get_valid_n_tasks_and_arms())
def test_mtenv_bandit_with_valid_input(n_tasks, n_arms):
env = FiniteMTBanditEnv(n_tasks=n_tasks, n_arms=n_arms)
validate_mtenv(env=env)
@pytest.mark.parametrize("n_tasks, n_arms", get_invalid_n_tasks_and_arms())
def test_mtenv_bandit_with_invalid_input(n_tasks, n_arms):
with pytest.raises(Exception):
env = FiniteMTBanditEnv(n_tasks=n_tasks, n_arms=n_arms)
validate_mtenv(env=env)
|
import gzip, pickle
from tempfile import TemporaryFile
def convert(path):
"""
Builds a file containing a numpy array with a pkl file specified by path.
"""
    with gzip.open(path, 'rb') as f:
data, label = pickle.load(f)
    # The original snippet ended here with an unused file handle. Below is an
    # assumed, minimal completion that pickles the loaded data to "./data".
    with open("./data", 'wb') as f_data:
        pickle.dump(data, f_data)
    return data, label
|
import torch
import torch.nn as nn
import torch.optim as optim
import torchvision.transforms as transforms
from torch.autograd import Variable
import torch.backends.cudnn as cudnn
from torch.nn.functional import pairwise_distance, cosine_similarity
import argparse
import os
import sys
import numpy as np
from tqdm import tqdm
try:
from .eval_metrics import evaluate
from .logger import Logger
from .model import DeepSpeakerModel, TripletMarginLoss, distance
from .DeepSpeakerDataset import DeepSpeakerDataset
from .VoxcelebTestset import VoxcelebTestset
from .voxceleb_wav_reader import read_voxceleb_structure
from . import constants as c
from .audio_processing import totensor, truncatedinput, read_npy, mk_MFB, mk_mel, mk_if
except (ImportError, ValueError):
from eval_metrics import evaluate
from logger import Logger
from model import DeepSpeakerModel, TripletMarginLoss, distance
from DeepSpeakerDataset import DeepSpeakerDataset
from VoxcelebTestset import VoxcelebTestset
from voxceleb_wav_reader import read_voxceleb_structure
import constants as c
from audio_processing import totensor, truncatedinput, read_npy, mk_MFB, mk_mel, mk_if
# Training settings
parser = argparse.ArgumentParser(description='PyTorch Speaker Recognition')
# Model options
parser.add_argument('--dataroot', type=str, default='audio/voxceleb1',
help='path to dataset')
parser.add_argument('--test-pairs-path', type=str, default='audio/voxceleb1/veri_test.txt',
help='path to pairs file')
parser.add_argument('--log-dir', default='./data/pytorch_speaker_logs',
help='folder to output model checkpoints')
parser.add_argument('--resume', default=None, type=str, metavar='PATH',
help='path to latest checkpoint (default: none)')
parser.add_argument('--start-epoch', default=1, type=int, metavar='N',
help='manual epoch number (useful on restarts)')
parser.add_argument('--epochs', type=int, default=25, metavar='E',
help='number of epochs to train (default: 25)')
# Training options
parser.add_argument('--embedding-size', type=int, default=512, metavar='ES',
help='Dimensionality of the embedding')
parser.add_argument('--batch-size', type=int, default=32, metavar='BS',
help='input batch size for training (default: 32)')
parser.add_argument('--test-batch-size', type=int, default=64, metavar='BST',
help='input batch size for testing (default: 64)')
parser.add_argument('--n-triplets', type=int, default=1000000, metavar='N',
help='how many triplets will generate from the dataset')
parser.add_argument('--margin', type=float, default=0.1, metavar='MARGIN',
                    help='the margin value for the triplet loss function (default: 0.1)')
parser.add_argument('--min-softmax-epoch', type=int, default=10, metavar='MINEPOCH',
help='minimum epoch for initial parameter using softmax (default: 10)')
parser.add_argument('--loss-ratio', type=float, default=1.0, metavar='LOSSRATIO',
                    help='the ratio between softmax loss and triplet loss (default: 1.0)')
parser.add_argument('--lr', type=float, default=0.1, metavar='LR',
                    help='learning rate (default: 0.1)')
parser.add_argument('--lr-decay', default=1e-4, type=float, metavar='LRD',
                    help='learning rate decay ratio (default: 1e-4)')
parser.add_argument('--wd', default=0.0, type=float,
metavar='W', help='weight decay (default: 0.0)')
parser.add_argument('--optimizer', default='sgd', type=str,
metavar='OPT', help='The optimizer to use (default: sgd)')
parser.add_argument('--distance', default='CosineSimilarity', type=str,
help='CosineSimilarity is default to match research paper (also accepts pairwise_distance)')
# Device options
parser.add_argument('--no-cuda', action='store_true', default=False,
                    help='disables CUDA training')
parser.add_argument('--gpu-id', default='3', type=str,
help='id(s) for CUDA_VISIBLE_DEVICES')
parser.add_argument('--seed', type=int, default=0, metavar='S',
help='random seed (default: 0)')
parser.add_argument('--log-interval', type=int, default=1, metavar='LI',
help='how many batches to wait before logging training status')
# Testing Options
parser.add_argument('--test-only', action='store_true', default=False,
help='whether to skip training and do testing only')
parser.add_argument('--sample', action='store_true', default=False,
                    help='whether to use a smaller dataset for debugging')
# Add Preprocessing Step
parser.add_argument('--makemfb', action='store_true', default=False,
help='need to make mfb file')
parser.add_argument('--makemel', action='store_true', default=False,
help='need to make mel file')
parser.add_argument('--makeif', action='store_true', default=False,
help='need to make if file')
args = parser.parse_args()
# set the device to use by setting CUDA_VISIBLE_DEVICES env variable in
# order to prevent any memory allocation on unused GPUs
os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_id
args.cuda = not args.no_cuda and torch.cuda.is_available()
np.random.seed(args.seed)
if not os.path.exists(args.log_dir):
os.makedirs(args.log_dir)
if args.cuda:
cudnn.benchmark = True
kwargs = {'num_workers': 0, 'pin_memory': True} if args.cuda else {}
print('==> Reading wav files')
voxceleb_dev, voxceleb_test = read_voxceleb_structure(args.dataroot, sample=args.sample)
# generate_test_pair = not args.test_pairs_path
# if generate_test_pair:
# args.test_pairs_path = os.path.join(args.dataroot, 'test_pairs.csv')
# voxceleb = read_voxceleb_structure(args.dataroot, generate_test_pair=generate_test_pair)
# Setup Workers
MAX_THREAD_COUNT = 5
# num_threads = min(MAX_THREAD_COUNT, os.cpu_count())
import multiprocessing
num_threads = min(MAX_THREAD_COUNT, multiprocessing.cpu_count())
def parallel_function(f, sequence, num_threads=None):
from multiprocessing import Pool
pool = Pool(processes=num_threads)
result = pool.map(f, sequence)
cleaned = [x for x in result if x is not None]
pool.close()
pool.join()
return cleaned
num_features = c.NUM_FEATURES
if args.makemfb:
print('==> Started MFB')
num_features = c.FILTER_BANK
print('==> Started converting wav to npy')
parallel_function(mk_MFB, [datum['file_path'] for datum in voxceleb_test], num_threads)
print('===> Converting test set is done')
if not args.test_only:
parallel_function(mk_MFB, [datum['file_path'] for datum in voxceleb_dev], num_threads)
print('===> Converting dev set is done')
print("==> Complete converting")
elif args.makemel:
print('==> Started MEL')
num_features = c.MEL_FEATURES
print('==> Started converting wav to npy')
parallel_function(mk_mel, [datum['file_path'] for datum in voxceleb_test], num_threads)
print('===> Converting test set is done')
if not args.test_only:
parallel_function(mk_mel, [datum['file_path'] for datum in voxceleb_dev], num_threads)
print('===> Converting dev set is done')
print("==> Complete converting")
elif args.makeif:
print('==> Started IF')
num_features = c.IF_FEATURES
print('==> Started converting wav to npy')
parallel_function(mk_if, [datum['file_path'] for datum in voxceleb_test], num_threads)
print('===> Converting test set is done')
if not args.test_only:
parallel_function(mk_if, [datum['file_path'] for datum in voxceleb_dev], num_threads)
print('===> Converting dev set is done')
print("==> Complete converting")
# Data
transform_train = transforms.Compose([
totensor(permute=False),
truncatedinput(c.NUM_FRAMES),
])
transform_test = transforms.Compose([
totensor(permute=False),
truncatedinput(c.NUM_FRAMES),
])
file_loader = read_npy
train_dir = DeepSpeakerDataset(voxceleb=voxceleb_dev,
dir=args.dataroot,
n_triplets=args.n_triplets,
loader=file_loader,
transform=transform_train)
test_dir = VoxcelebTestset(dir=args.dataroot,
pairs_path=args.test_pairs_path,
loader=file_loader,
transform=transform_test)
# del voxceleb
del voxceleb_dev
del voxceleb_test
# create logger
LOG_DIR = args.log_dir + '/run-optim_{}-n{}-lr{}-wd{}-m{}-embeddings{}-msceleb-alpha10-num_classes{}-num_features{}-num_frames{}'\
.format(args.optimizer, args.n_triplets, args.lr, args.wd,
args.margin,args.embedding_size,len(train_dir.classes), num_features, c.NUM_FRAMES)
logger = Logger(LOG_DIR)
def main():
# Views the training images and displays the distance on anchor-negative and anchor-positive
test_display_triplet_distance = False
# print the experiment configuration
print('\nparsed options:\n{}\n'.format(vars(args)))
print('\nNumber of Classes:\n{}\n'.format(len(train_dir.classes)))
# instantiate model and initialize weightsNUM_FEATURES
# TODO(xin): IMPORTANT load num_classes from checkpoint
model = DeepSpeakerModel(embedding_size=args.embedding_size,
num_classes=len(train_dir.classes),
feature_dim=num_features,
frame_dim=c.NUM_FRAMES)
if args.cuda:
model.cuda()
from torchsummary import summary
summary(model, (1, c.NUM_FRAMES, c.NUM_FEATURES))
# # More detailed information on model
# print(model)
optimizer = create_optimizer(model, args.lr)
# optionally resume from a checkpoint
if args.resume:
if os.path.isfile(args.resume):
print('=> loading checkpoint {}'.format(args.resume))
checkpoint = torch.load(args.resume)
args.start_epoch = checkpoint['epoch']
model.load_state_dict(checkpoint['state_dict'])
optimizer.load_state_dict(checkpoint['optimizer'])
else:
print('=> no checkpoint found at {}'.format(args.resume))
start = args.start_epoch
end = start + args.epochs
train_loader = torch.utils.data.DataLoader(train_dir, batch_size=args.batch_size, shuffle=False, **kwargs)
test_loader = torch.utils.data.DataLoader(test_dir, batch_size=args.test_batch_size, shuffle=False, **kwargs)
for epoch in range(start, end):
if args.test_only:
test(test_loader, model, epoch)
return
train(train_loader, model, optimizer, epoch)
test(test_loader, model, epoch)
def train(train_loader, model, optimizer, epoch):
# switch to train mode
model.train()
labels, distances = [], []
pbar = tqdm(enumerate(train_loader))
for batch_idx, (data_a, data_p, data_n, label_p, label_n) in pbar:
#print("on training{}".format(epoch))
if args.cuda:
data_a, data_p, data_n = data_a.cuda(), data_p.cuda(), data_n.cuda()
# compute output
out_a, out_p, out_n = model(data_a), model(data_p), model(data_n)
if epoch > args.min_softmax_epoch:
triplet_loss = TripletMarginLoss(args.margin).forward(out_a, out_p, out_n)
loss = triplet_loss
# compute gradient and update weights
optimizer.zero_grad()
loss.backward()
optimizer.step()
logger.log_value('selected_triplet_loss', triplet_loss.data.item()).step()
#logger.log_value('selected_cross_entropy_loss', cross_entropy_loss.data.item()).step()
logger.log_value('selected_total_loss', loss.data.item()).step()
if batch_idx % args.log_interval == 0:
pbar.set_description(
'Train Epoch: {:3d} [{:8d}/{:8d} ({:3.0f}%)]\tLoss: {:.6f}'.format(
# epoch, batch_idx * len(data_a), len(train_loader.dataset),
epoch, batch_idx * len(data_a), len(train_loader) * len(data_a),
100. * batch_idx / len(train_loader),
loss.data.item()))
dists = distance(out_a, out_n, args.distance)
distances.append(dists.data.cpu().numpy())
labels.append(np.zeros(dists.size(0)))
dists = distance(out_a, out_p, args.distance)
distances.append(dists.data.cpu().numpy())
labels.append(np.ones(dists.size(0)))
else:
# Choose the hard negatives
d_p = distance(out_a, out_p, args.distance)
d_n = distance(out_a, out_n, args.distance)
all = (d_n - d_p < args.margin).cpu().data.numpy().flatten()
# log loss value for mini batch.
            total_correct = np.where(all == 0)
            logger.log_value('Minibatch Train Accuracy', len(total_correct[0]))
total_dist = (d_n - d_p).cpu().data.numpy().flatten()
logger.log_value('Minibatch Train distance', np.mean(total_dist))
hard_triplets = np.where(all == 1)
if len(hard_triplets[0]) == 0:
continue
if args.cuda:
out_selected_a = Variable(torch.from_numpy(out_a.cpu().data.numpy()[hard_triplets]).cuda())
out_selected_p = Variable(torch.from_numpy(out_p.cpu().data.numpy()[hard_triplets]).cuda())
out_selected_n = Variable(torch.from_numpy(out_n.cpu().data.numpy()[hard_triplets]).cuda())
selected_data_a = Variable(torch.from_numpy(data_a.cpu().data.numpy()[hard_triplets]).cuda())
selected_data_p = Variable(torch.from_numpy(data_p.cpu().data.numpy()[hard_triplets]).cuda())
selected_data_n = Variable(torch.from_numpy(data_n.cpu().data.numpy()[hard_triplets]).cuda())
else:
out_selected_a = Variable(torch.from_numpy(out_a.data.numpy()[hard_triplets]))
out_selected_p = Variable(torch.from_numpy(out_p.data.numpy()[hard_triplets]))
out_selected_n = Variable(torch.from_numpy(out_n.data.numpy()[hard_triplets]))
selected_data_a = Variable(torch.from_numpy(data_a.data.numpy()[hard_triplets]))
selected_data_p = Variable(torch.from_numpy(data_p.data.numpy()[hard_triplets]))
selected_data_n = Variable(torch.from_numpy(data_n.data.numpy()[hard_triplets]))
selected_label_p = torch.from_numpy(label_p.cpu().numpy()[hard_triplets])
selected_label_n= torch.from_numpy(label_n.cpu().numpy()[hard_triplets])
triplet_loss = TripletMarginLoss(args.margin).forward(out_selected_a, out_selected_p, out_selected_n)
cls_a = model.forward_classifier(selected_data_a)
cls_p = model.forward_classifier(selected_data_p)
cls_n = model.forward_classifier(selected_data_n)
criterion = nn.CrossEntropyLoss()
predicted_labels = torch.cat([cls_a,cls_p,cls_n])
if args.cuda:
true_labels = torch.cat([Variable(selected_label_p.cuda()),Variable(selected_label_p.cuda()),Variable(selected_label_n.cuda())])
cross_entropy_loss = criterion(predicted_labels.cuda(),true_labels.cuda())
else:
true_labels = torch.cat([Variable(selected_label_p),Variable(selected_label_p),Variable(selected_label_n)])
cross_entropy_loss = criterion(predicted_labels,true_labels)
loss = cross_entropy_loss + triplet_loss * args.loss_ratio
# compute gradient and update weights
optimizer.zero_grad()
loss.backward()
optimizer.step()
# log loss value for hard selected sample
logger.log_value('selected_triplet_loss', triplet_loss.data).step()
logger.log_value('selected_cross_entropy_loss', cross_entropy_loss.data).step()
logger.log_value('selected_total_loss', loss.data).step()
if batch_idx % args.log_interval == 0:
pbar.set_description(
'Train Epoch: {:3d} [{:8d}/{:8d} ({:3.0f}%)]\tLoss: {:.6f} \t Number of Selected Triplets: {:4d}'.format(
# epoch, batch_idx * len(data_a), len(train_loader.dataset),
epoch, batch_idx * len(data_a), len(train_loader) * len(data_a),
100. * batch_idx / len(train_loader),
loss.data,len(hard_triplets[0])))
dists = distance(out_selected_a, out_selected_n, args.distance)
distances.append(dists.data.cpu().numpy())
labels.append(np.zeros(dists.size(0)))
dists = distance(out_selected_a, out_selected_p, args.distance)
distances.append(dists.data.cpu().numpy())
labels.append(np.ones(dists.size(0)))
#accuracy for hard selected sample, not all sample.
labels = np.array([sublabel for label in labels for sublabel in label])
distances = np.array([subdist for dist in distances for subdist in dist])
tpr, fpr, accuracy = evaluate(distances, labels)
print('\33[91mTrain set: Accuracy: {:.8f}\n\33[0m'.format(np.mean(accuracy)))
logger.log_value('Train Accuracy', np.mean(accuracy))
# do checkpointing
torch.save({'epoch': epoch + 1, 'state_dict': model.state_dict(),
'optimizer': optimizer.state_dict()},
'{}/checkpoint_{}.pth'.format(LOG_DIR, epoch))
def test(test_loader, model, epoch):
# switch to evaluate mode
model.eval()
labels, distances = [], []
pbar = tqdm(enumerate(test_loader))
for batch_idx, (data_a, data_p, label) in pbar:
current_sample = data_a.size(0)
data_a = data_a.resize_(args.test_input_per_file *current_sample, 1, data_a.size(2), data_a.size(3))
data_p = data_p.resize_(args.test_input_per_file *current_sample, 1, data_a.size(2), data_a.size(3))
if args.cuda:
data_a, data_p = data_a.cuda(), data_p.cuda()
data_a, data_p, label = Variable(data_a, volatile=True), \
Variable(data_p, volatile=True), Variable(label)
# compute output
out_a, out_p = model(data_a), model(data_p)
dists = distance(out_a,out_p, args.distance)
dists = dists.data.cpu().numpy()
dists = dists.reshape(current_sample,args.test_input_per_file).mean(axis=1)
distances.append(dists)
labels.append(label.data.cpu().numpy())
if batch_idx % args.log_interval == 0:
pbar.set_description('Test Epoch: {} [{}/{} ({:.0f}%)]'.format(
# epoch, batch_idx * len(data_a), len(test_loader.dataset),
epoch, batch_idx * len(data_a), len(test_loader) * len(data_a),
100. * batch_idx / len(test_loader)))
labels = np.array([sublabel for label in labels for sublabel in label])
distances = np.array([subdist for dist in distances for subdist in dist])
tpr, fpr, accuracy = evaluate(distances, labels)
print('\33[91mTest set: Accuracy: {:.8f}\n\33[0m'.format(np.mean(accuracy)))
logger.log_value('Test Accuracy', np.mean(accuracy))
def create_optimizer(model, new_lr):
# setup optimizer
if args.optimizer == 'sgd':
optimizer = optim.SGD(model.parameters(), lr=new_lr,
momentum=0.99, dampening=0.9,
weight_decay=args.wd)
elif args.optimizer == 'adam':
optimizer = optim.Adam(model.parameters(), lr=new_lr,
weight_decay=args.wd)
elif args.optimizer == 'adagrad':
optimizer = optim.Adagrad(model.parameters(),
lr=new_lr,
lr_decay=args.lr_decay,
weight_decay=args.wd)
return optimizer
if __name__ == '__main__':
main()
|
# To pass env vars to Python scripts run by Publik in services which remove custom env vars:
# https://unix.stackexchange.com/questions/44370/how-to-make-unix-service-see-environment-variables
# So we hardcode the values in the file below when the container starts
import sys
sys.path.insert(0, "/home")
from pyenv import *
# Databases
DATABASES['default']['ENGINE'] = 'tenant_schemas.postgresql_backend'
DATABASES['default']['NAME'] = DB_AUTHENTIC_NAME
DATABASES['default']['USER'] = DB_AUTHENTIC_USER
DATABASES['default']['PASSWORD'] = DB_AUTHENTIC_PASS
DATABASES['default']['HOST'] = 'db'
DATABASES['default']['PORT'] = DB_PORT
BROKER_URL = 'amqp://{user}:{password}@rabbitmq:{port}//'.format(
user=RABBITMQ_DEFAULT_USER,
password=RABBITMQ_DEFAULT_PASS,
port=RABBITMQ_PORT,
)
# Zone
LANGUAGE_CODE = 'fr-fr'
TIME_ZONE = 'Europe/Paris'
# Email configuration
ADMINS = (
(ERROR_MAIL_AUTHOR, ERROR_MAIL_ADDR),
)
EMAIL_SUBJECT_PREFIX = '[authentic] '
SERVER_EMAIL = ERROR_MAIL_ADDR
DEFAULT_FROM_EMAIL = ERROR_MAIL_ADDR
# SMTP configuration
EMAIL_HOST = SMTP_HOST
EMAIL_HOST_USER = SMTP_USER
EMAIL_HOST_PASSWORD = SMTP_PASS
EMAIL_PORT = SMTP_PORT
# HTTPS Security
CSRF_COOKIE_SECURE = True
SESSION_COOKIE_SECURE = True
# Idp
# SAML 2.0 IDP
#A2_IDP_SAML2_ENABLE = False
# CAS 1.0 / 2.0 IDP
#A2_IDP_CAS_ENABLE = False
# OpenID 1.0 / 2.0 IDP
#A2_IDP_OPENID_ENABLE = False
# Authentifications
#A2_AUTH_PASSWORD_ENABLE = True
#A2_SSLAUTH_ENABLE = False
CACHES = {
'default': {
'BACKEND': 'hobo.multitenant.cache.TenantCache',
'REAL_BACKEND': 'django.core.cache.backends.memcached.MemcachedCache',
'LOCATION': '127.0.0.1:11211',
}
}
# Role provisionning via local RabbitMQ
HOBO_ROLE_EXPORT = True
LOGGING = LOGGING_FROM_PYENV
|
import glob
import numpy as np
import argparse
import os
"""
Last modified on 04/22
related to v3.1
"""
parser = argparse.ArgumentParser()
parser.add_argument('-s', '--samples', help='samples per small dataset file', type=int, default=300)
args = parser.parse_args()
single_sample_num = args.samples
bigdatafilepath = 'big_data'
if not os.path.exists(bigdatafilepath):
os.mkdir(bigdatafilepath)
datafilepath = 'data'
os.chdir(datafilepath)
datafiles = glob.glob('dataset_vae_*.bin')
print(len(datafiles))
os.chdir('..')
big_data_set = np.memmap(os.path.join(bigdatafilepath, 'dataset_vae.bin'),
dtype='float32', mode='w+', shape=(single_sample_num * len(datafiles), 121, 160, 5))
# input('Enter...')
for i, singleFile in enumerate(datafiles):
_path = os.path.join(datafilepath, singleFile)
# single_dataset = np.load(_path)
# print(single_dataset[0, 0, 0, 0])
# del single_dataset
single_dataset = np.memmap(_path, dtype='float32', mode='r', shape=(single_sample_num, 121, 160, 5))
# single_dataset = single_dataset['arr_0']
# big_data_set.append(single_dataset)
# print(single_dataset[0, 0, 0, 0])
# print('{}-th single Enter...'.format(i))
# input()
big_data_set[i*single_sample_num: (i+1)*single_sample_num, :, :, :] = single_dataset
# print('after {} Enter'.format(i))
# input()
# big_data_set = np.concatenate(big_data_set)
# print(big_data_set)
# print(big_data_set.shape)
# input('press Enter to continue...')
# np.savez(os.path.join(bigdatafilepath, 'dataset_vae.npz'),
# big_data_set)
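# A minimal sketch (not in the original script) that flushes the memmap to disk and
# reads it back read-only to confirm the expected shape.
big_data_set.flush()
check = np.memmap(os.path.join(bigdatafilepath, 'dataset_vae.bin'), dtype='float32',
                  mode='r', shape=(single_sample_num * len(datafiles), 121, 160, 5))
print(check.shape)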
|
# -*- coding: utf-8 -*-
"""
Created on Thu Jun 7 20:11:35 2018
@author: user
Loop that sums the even numbers
"""
a=int(input())
b=int(input())
i=a
total=0  # avoid shadowing the built-in sum()
while i <= b:
    if (i % 2 == 0):
        total=total+i
    i=i+1
print(total)
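# Equivalent closed-form (a sketch, not in the original): the even numbers in [a, b]
# form an arithmetic series, so their sum is (first + last) * count / 2.
first = a if a % 2 == 0 else a + 1
last = b if b % 2 == 0 else b - 1
print((first + last) * ((last - first) // 2 + 1) // 2 if first <= last else 0)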
|
import tensorflow.keras as keras
model = keras.Sequential()
model.add(keras.layers.Dense(3))
model.add(keras.layers.Dense(5))
model.add(keras.layers.Dense(2))
model = keras.Sequential()
model.add(keras.layers.Dense(128))
model.add(keras.layers.Dense(100))
model.add(keras.layers.Dense(60))
model.add(keras.layers.Dense(20))
model.add(keras.layers.Dense(5))
model.add(keras.layers.Dense(1))
|
from __future__ import absolute_import, division, print_function, unicode_literals
from metaflow import FlowSpec,Parameter, step, batch, retry,catch,S3
import pandas as pd
import random
import numpy as np
import os
import torch
import tensorflow as tf
from transformers import BertTokenizer
from torch.utils.data import TensorDataset, random_split
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler
from transformers import BertForSequenceClassification, AdamW, BertConfig
from transformers import get_linear_schedule_with_warmup
import boto3
import logging
import smart_open
class Train(FlowSpec):
@step
def start(self):
import torch
# Load the dataset into a pandas dataframe.
with S3() as s3:
df = pd.read_csv(smart_open.smart_open('s3://sentstorage/scrape/labelledtweets.tsv'),delimiter='\t', header=None, names=['label','tweet'])
#df = pd.read_csv("t.tsv", delimiter='\t', header=None, names=['label','tweet'])
self.tweets = df.tweet.values
self.labels = df.label.values
# Load the BERT tokenizer.
self.tokenizer = BertTokenizer.from_pretrained('bert-base-uncased', do_lower_case=True)
self.max_len = 0
# For every sentence...
for self.tweet in self.tweets:
# Tokenize the text and add `[CLS]` and `[SEP]` tokens.
self.input_ids = self.tokenizer.encode(self.tweet, add_special_tokens=True)
# Update the maximum sentence length.
self.max_len = max(self.max_len, len(self.input_ids))
# Tokenize all of the sentences and map the tokens to thier word IDs.
self.input_ids2 = []
self.attention_masks2 = []
# For every sentence...
for self.tweet in self.tweets:
self.encoded_dict = self.tokenizer.encode_plus(
self.tweet, # Sentence to encode.
add_special_tokens = True, # Add '[CLS]' and '[SEP]'
max_length = 128, # Pad & truncate all sentences.
pad_to_max_length = True,
return_attention_mask = True, # Construct attn. masks.
return_tensors = 'pt', # Return pytorch tensors.
)
# Add the encoded sentence to the list.
self.input_ids2.append(self.encoded_dict['input_ids'])
# And its attention mask (simply differentiates padding from non-padding).
self.attention_masks2.append(self.encoded_dict['attention_mask'])
# Convert the lists into tensors.
self.input_ids2 = torch.cat(self.input_ids2, dim=0)
self.attention_masks2 = torch.cat(self.attention_masks2, dim=0)
self.labels = torch.tensor(self.labels)
# Combine the training inputs into a TensorDataset.
self.dataset = TensorDataset(self.input_ids2, self.attention_masks2, self.labels)
# Create a 90-10 train-validation split.
# Calculate the number of samples to include in each set.
self.train_size = int(0.9 * len(self.dataset))
self.val_size = len(self.dataset) - self.train_size
# Divide the dataset by randomly selecting samples.
self.train_dataset, self.val_dataset = random_split(self.dataset, [self.train_size, self.val_size])
# The DataLoader needs to know our batch size for training, so we specify it
# here. For fine-tuning BERT on a specific task, the authors recommend a batch
# size of 16 or 32.
self.batch_size = 32
# Create the DataLoaders for our training and validation sets.
# We'll take training samples in random order.
self.train_dataloader = DataLoader(
self.train_dataset, # The training samples.
sampler = RandomSampler(self.train_dataset), # Select batches randomly
batch_size = self.batch_size # Trains with this batch size.
)
# For validation the order doesn't matter, so we'll just read them sequentially.
self.validation_dataloader = DataLoader(
self.val_dataset, # The validation samples.
sampler = SequentialSampler(self.val_dataset), # Pull out batches sequentially.
batch_size = self.batch_size # Evaluate with this batch size.
)
# Load BertForSequenceClassification, the pretrained BERT model with a single
# linear classification layer on top.
self.model = BertForSequenceClassification.from_pretrained(
"bert-base-uncased", # Use the 12-layer BERT model, with an uncased vocab.
num_labels = 3,
output_attentions = False, # Whether the model returns attentions weights.
output_hidden_states = False, # Whether the model returns all hidden-states.
)
self.optimizer = AdamW(self.model.parameters(),
lr = 5e-5, # args.learning_rate - default is 5e-5, our notebook had 2e-5
eps = 1e-8 # args.adam_epsilon - default is 1e-8.
)
# Number of training epochs. The BERT authors recommend between 2 and 4.
self.epochs = 2
# Total number of training steps is [number of batches] x [number of epochs].
# (Note that this is not the same as the number of training samples).
self.total_steps = len(self.train_dataloader) * self.epochs
# Create the learning rate scheduler.
"""self.scheduler = get_linear_schedule_with_warmup(self.optimizer,
num_warmup_steps = 0,
num_training_steps = self.total_steps)"""
# Set the seed value all over the place to make this reproducible.
self.seed_val = 42
random.seed(self.seed_val)
np.random.seed(self.seed_val)
torch.manual_seed(self.seed_val)
for self.epoch_i in range(0, self.epochs):
# Reset the total loss for this epoch.
self.total_train_loss = 0
self.model.train()
# For each batch of training data...
for self.step, self.batch in enumerate(self.train_dataloader):
self.b_input_ids = self.batch[0]
self.b_input_mask = self.batch[1]
self.b_labels = self.batch[2]
self.model.zero_grad()
self.loss, self.logits = self.model(self.b_input_ids,
token_type_ids=None,
attention_mask=self.b_input_mask,
labels=self.b_labels)
# Accumulate the training loss over all of the batches so that we can
# calculate the average loss at the end. `loss` is a Tensor containing a
# single value; the `.item()` function just returns the Python value
# from the tensor.
self.total_train_loss += self.loss.item()
# Perform a backward pass to calculate the gradients.
self.loss.backward()
# Clip the norm of the gradients to 1.0.
# This is to help prevent the "exploding gradients" problem.
torch.nn.utils.clip_grad_norm_(self.model.parameters(), 1.0)
# Update parameters and take a step using the computed gradient.
# The optimizer dictates the "update rule"--how the parameters are
# modified based on their gradients, the learning rate, etc.
self.optimizer.step()
# Update the learning rate.
#self.scheduler.step()
# Save a trained model, configuration and tokenizer using `save_pretrained()`.
# They can then be reloaded using `from_pretrained()`
self.model_to_save = self.model.module if hasattr(self.model, 'module') else self.model # Take care of distributed/parallel training
with S3(s3root='s3://sentstorage/model') as s3:
self.model_to_save.save_pretrained('s3://sentstorage/model')
self.tokenizer.save_pretrained('s3://sentstorage/model')
self.next(self.end)
@catch(print_exception=False)
@step
def end(self):
print("Saved model to bucket")
if __name__ == '__main__':
Train()
|
"""obc URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
from django.conf.urls import include, url
from django.contrib.staticfiles.urls import static
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
from obc import settings
#home
from home.views import regulamin, rodo, polityka, Home
from accounts.views import profil, Edycja, rejestracja, activate
from listy.views import lista_miast, profil_view, subkategoria, kategoria
from contact.views import add_or_change_contact
urlpatterns = [
path('grappelli/', include('grappelli.urls')),
path('admin/', admin.site.urls),
#home
path('', Home.as_view(), name="homepage"),
path('regulamin/', regulamin, name='regulamin'),
path('polityka-prywatnosci/', polityka, name='polityka'),
path('rodo/', rodo, name='rodo'),
    # search
url(r'^search/', include('haystack.urls')),
    # auth + password reset
path('accounts/', include('django.contrib.auth.urls')),
path('rejestracja/', rejestracja, name="rejestracja"),
path('edycja/', Edycja, name='edycja'),
path('profil/', profil, name="profil"),
    # account activation
path('verification/', include('verify_email.urls')),
path('activate/<uidb64>/<token>/', activate, name='activate'),
    # lists
path('genres/', lista_miast, name="lista_miast"),
path('kategoria/<int:id>/', kategoria, name='kategoria'),
path('kategoria/<int:id>/', subkategoria, name='subkategoria'),
path('profil_view/<int:id>', profil_view, name='profil_view'),
    path('miasta/', lista_miast.as_view(), name="miasta"),  # first page of cities
path('<int:id>', profil_view, name='profil_view'),
#postman_menu
path('messages/', include('postman.urls'), name='postman'),
#contact
path('nowy/', add_or_change_contact, name="nowy"),
]
urlpatterns += staticfiles_urlpatterns()
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
|
"""
This file contains functions for (Quantized) Neural Networks
"""
__author__ = "Tibor Schneider"
__email__ = "sctibor@student.ethz.ch"
__version__ = "0.1.0"
__date__ = "2020/01/23"
__license__ = "Apache 2.0"
__copyright__ = """
Copyright (C) 2020 ETH Zurich. All rights reserved.
Author: Tibor Schneider, ETH Zurich
SPDX-License-Identifier: Apache-2.0
Licensed under the Apache License, Version 2.0 (the License); you may
not use this file except in compliance with the License.
You may obtain a copy of the License at
www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an AS IS BASIS, WITHOUT
WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import numpy as np
def batch_norm(x, scale, bias):
"""
Applies BatchNorm with scale and bias obtained from convert.batch_norm
Parameters:
- x: np.array(shape: [D, ...])
- scale: np.array(shape: [D])
- bias: np.array(shape: [D])
Returns: np.array, same shape as x, same dtype as x
"""
assert scale.shape == bias.shape
assert len(scale.shape) == 1
assert scale.shape[0] == x.shape[0]
y = np.zeros(x.shape, dtype=x.dtype)
for k in range(x.shape[0]):
y[k, :] = x[k, :] * scale[k] + bias[k]
return y
def apply_factor_offset(x, factor, offset=None, clip_balanced=True):
"""
Scales x according to the factor and offset.
Factor and Offset should be obtained from convert.div_factor or convert.div_factor_batch_norm
Rounding is always done by flooring towards zero (like c integer division)
    Parameters:
    - x: np.array(dtype=int)
    - factor: int or np.array(dtype=int)
    - offset: int or np.array(dtype=int), defaults to zero
    - clip_balanced: if False, clip from -128 to 127, if True, clip from -127 to 127
    Returns: y: np.array(dtype=int), same shape as x
    """
if not isinstance(factor, np.ndarray):
factor = np.ones((1, ), dtype=type(factor)) * factor
if offset is None:
offset = np.zeros(factor.shape, dtype=int)
if isinstance(offset, int):
offset = np.ones(factor.shape, dtype=int) * offset
assert offset.shape == factor.shape
assert len(factor.shape) == 1
y = np.zeros(x.shape, dtype=x.dtype)
if factor.shape[0] == 1:
y = (x + offset) / factor
else:
for k in range(factor.shape[0]):
y[k] = (x[k] + offset[k]) / factor[k]
    y = y.astype(int)
if clip_balanced:
return np.clip(y, -127, 127)
return np.clip(y, -128, 127)
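# Worked example for apply_factor_offset (added for illustration; the numbers
# are assumptions, not taken from any original test data): with factor=4 and
# offset=0, the input [-10, 3, 513] becomes [-2.5, 0.75, 128.25] after the
# division, truncates toward zero to [-2, 0, 128], and balanced clipping then
# yields [-2, 0, 127].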
def relu(x, threshold=0):
"""
Applies ReLU operation: max(x, threshold)
Parameters:
x: np.array(size=[D, ...])
threshold: either float or np.array(size=[D])
"""
# convert threshold to an np.ndarray of shape (1, )
if isinstance(threshold, float):
threshold = np.array(threshold)
assert len(threshold.shape) == 1
# if the shape of the threshold is (1, ), then convert it to shape(D, )
if threshold.shape[0] == 1:
threshold = (np.ones((x.shape[0], )) * threshold).astype(x.dtype)
assert threshold.shape[0] == x.shape[0]
y = np.zeros(x.shape, dtype=x.dtype)
for k in range(x.shape[0]):
y[k] = np.maximum(x[k], threshold[k])
return y
def pool(x, shape, reduction="sum"):
"""
Applies pooling
Parameters:
- x: np.array(size=[K, T])
- shape: tuple of same dimensionality as x
- reduction: str, either "sum", "mean" or "max"
Returns: np.array
"""
assert len(x.shape) == len(shape)
assert len(x.shape) == 2
do_round = False
if reduction == "sum":
func = np.sum
elif reduction == "mean":
func = np.mean
if x.dtype == int:
do_round = True
elif reduction == "max":
func = np.max
else:
raise TypeError("Parameter \"reduction\" must be either \"sum\", \"mean\" or \"max\"!")
out_shape = tuple(d // s for d, s in zip(x.shape, shape))
y = np.zeros(out_shape, dtype=x.dtype)
for k in range(y.shape[0]):
for t in range(y.shape[1]):
y[k, t] = func(x[k * shape[0]:(k + 1) * shape[0], t * shape[1]:(t + 1) * shape[1]])
if do_round:
        y = y.astype(int)
return y
def conv_time(x, w):
"""
Applies a Convolution in Time, where all channels are convolved with the same filter, with mode=same
Used in Layer 1
Parameters:
- x: np.array(shape: [CH, T])
- w: np.array(shape: [K, T'])
Returns: np.array(shape: [K, CH, T]), same dtype as x
"""
assert len(x.shape) == 2
assert len(w.shape) == 2
# determine padding
if w.shape[1] % 2 == 0: # even
padding = (w.shape[1] // 2 - 1, w.shape[1] // 2)
else: #odd
padding = ((w.shape[1] - 1) // 2, (w.shape[1] - 1) // 2)
y = np.zeros((w.shape[0], x.shape[0], x.shape[1]), dtype=x.dtype)
x = np.pad(x, ((0, 0), padding))
for k in range(w.shape[0]):
for ch in range(x.shape[0]):
y[k, ch, :] = np.convolve(x[ch, :], w[k, :], mode="valid")
return y
def depthwise_conv_space(x, w):
"""
    Applies a Depthwise Convolution in Space, where each filter is applied at all time steps, with mode=valid
Used in Layer 2
Parameters:
- x: np.array(shape: [K1, CH, T])
- w: np.array(shape: [K2, CH])
Returns: np.array(shape: [K2, T]), same dtype as x
"""
assert len(w.shape) == 2
assert len(x.shape) == 3
assert x.shape[1] == w.shape[1]
assert w.shape[0] % x.shape[0] == 0 # K2 must be divisible by K1
D = w.shape[0] // x.shape[0]
y = np.zeros((w.shape[0], x.shape[2]), dtype=x.dtype)
for k in range(w.shape[0]):
for t in range(x.shape[2]):
y[k, t] = np.convolve(x[k // D, :, t], w[k], mode="valid")
return y
def depthwise_conv_time(x, w):
"""
    Applies a Depthwise Convolution in Time, where each channel has its own filter, with mode=same
Used in Layer 3
Parameters:
- x: np.array(shape: [K, T])
- w: np.array(shape: [K, T'])
Returns: np.array(shape: [K, T]), same dtype as x
"""
assert len(x.shape) == 2
assert len(w.shape) == 2
assert x.shape[0] == w.shape[0]
# determine padding
if w.shape[1] % 2 == 0: # even
padding = (w.shape[1] // 2 - 1, w.shape[1] // 2)
else: #odd
padding = ((w.shape[1] - 1) // 2, (w.shape[1] - 1) // 2)
y = np.zeros(x.shape, dtype=x.dtype)
x = np.pad(x, ((0, 0), padding))
for k in range(x.shape[0]):
y[k] = np.convolve(x[k], w[k], mode="valid")
return y
def pointwise_conv(x, w):
"""
Applies a pointwise convolution.
Used in Layer4
Parameters:
- x: np.array(shape: [K, T])
- w: np.array(shape: [K, K])
Returns: np.array(shape: [K, T]), same dtype as x
"""
assert len(x.shape) == 2
assert len(w.shape) == 2
assert x.shape[0] == w.shape[0]
assert x.shape[0] == w.shape[1]
y = np.zeros(x.shape, dtype=x.dtype)
for k_outer in range(w.shape[0]):
for k_inner in range(w.shape[1]):
y[k_outer] += x[k_inner] * w[k_outer, k_inner]
return y
def linear(x, w, b):
"""
Applies a set of dot products, corresponding to a linear (FC) layer
Used in layer 5
Parameters:
- x: np.array(shape: [K])
- w: np.array(shape: [N, K])
- b: np.array(shape: [N])
Returns: np.array(shape: [N]), same dtype as x
"""
assert len(w.shape) == 2
assert len(x.shape) == 1
assert len(b.shape) == 1
assert w.shape[1] == x.shape[0]
assert b.shape[0] == w.shape[0]
y = np.zeros((w.shape[0], ), dtype=x.dtype)
for n in range(w.shape[0]):
y[n] = np.dot(x, w[n]) + b[n]
return y
def quantize(x, scale_factor, num_levels=255):
"""
Quantizes the input linearly (without offset) with the given number of levels.
The quantization levels will be:
        np.linspace(-scale_factor, scale_factor, num_levels)
The output will contain only quantized values (not the integer representation)
Parameters:
- x: np.array(dtype=float), original vector
- scale_factor: float, the output will be quantized to range [-s, s]
- num_levels: int, number of quantization levels
Returns: np.array(dtype=float), where all values are within the quantized grid
"""
x_q = quantize_to_int(x, scale_factor, num_levels)
return dequantize(x_q, scale_factor, num_levels)
def quantize_to_int(x, scale_factor, num_levels=255):
"""
Quantizes the input linearly (without offset) with the given number of levels.
The quantization levels will be:
        np.linspace(-scale_factor, scale_factor, num_levels)
The output values will be one of:
[-(num_levels-1)/2, ..., -1, 0, 1, ..., (num_levels-1)/2]
As an example, num_levels = 255, the output range will be int8_t without -128
[-127, -126, ..., -1, 0, 1, ..., 126, 127]
The value will be floored towards zero, just like integer division in C
Parameters:
- x: np.array(dtype=float), original vector
- scale_factor: float, the output will be quantized to range [-s, s]
- num_levels: int, number of quantization levels, must be odd
Returns: np.array(dtype=int), where all values will be in the integer representation
"""
# num_levels must be odd!
assert num_levels % 2
x = x / scale_factor
x = np.clip(x, -1, 1)
x = x * (num_levels - 1) / 2
# x = x.round()
    x = x.astype(int)
return x
def dequantize(x, scale_factor, num_levels=255):
"""
Reverse operation of quantize_to_int
Parameters:
- x: np.array(dtype=int), quantized vector in integer representation
    - scale_factor: float, the input will be mapped back to the range [-s, s]
    - num_levels: int, number of quantization levels, must be odd
Returns: np.array(dtype=float), in float representation
"""
assert num_levels % 2
x = x / ((num_levels - 1) / 2)
x = x * scale_factor
return x
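if __name__ == "__main__":
    # Minimal self-check sketch (added for illustration; the sample values and
    # tolerance are assumptions, not part of the original test suite). It
    # round-trips a random float vector through quantize_to_int/dequantize and
    # checks that the reconstruction error stays below one quantization step
    # (truncation toward zero, so the bound is a full step rather than half).
    np.random.seed(0)
    x = np.random.uniform(-1.0, 1.0, size=16)
    scale = 1.0
    x_int = quantize_to_int(x, scale, num_levels=255)
    x_hat = dequantize(x_int, scale, num_levels=255)
    step = 2 * scale / (255 - 1)
    assert np.all(np.abs(x - x_hat) < step), "round-trip error exceeds one step"
    print("quantization round-trip OK, max abs error:", np.abs(x - x_hat).max())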
|
from django.contrib import admin
from .models import Post, Comment, UserProfile, Notification, ThreadModel
admin.site.register(Post)
admin.site.register(Comment)
admin.site.register(UserProfile)
admin.site.register(Notification)
admin.site.register(ThreadModel)
|
import pgoapi
from pgoapi.utilities import f2i, get_cell_ids
from model.inventory import Inventory
class PgoWrapper(object):
def __init__(self, auth_type, username, password, messager):
self.logged_in = False
self.api = pgoapi.PGoApi()
self.last_result = None
self.inventory = None
self.username = username
self.password = password
self.auth_type = auth_type
self.messager = messager
def heartbeat(self, lat, lon, process_inventory=False):
req = self.api.create_request()
self.api.set_position(lat, lon, 10)
req.get_player()
req.get_hatched_eggs()
req.get_inventory()
req.check_awarded_badges()
if process_inventory:
self.process_inventory(req.call())
else:
req.call()
def process_inventory(self, result):
inventory = Inventory()
for item in result['responses']['GET_INVENTORY']['inventory_delta'].get('inventory_items', []):
data = item['inventory_item_data']
if 'pokemon_data' in data:
if not data['pokemon_data'].get('is_egg', False):
inventory.add_pokemon(data['pokemon_data'])
else:
inventory.add_egg(data['pokemon_data'])
elif 'item' in data:
inventory.add_item(data['item'])
elif 'pokemon_family' in data:
inventory.add_candy(data['pokemon_family'])
inventory.sort_pokemon()
self.messager.send_message(inventory.release_duplicates(self.api))
self.messager.send_message(inventory.empty_bag(self.api))
self.inventory = inventory
def login(self, lat, lon, alt=0):
if self.api.login(self.auth_type, self.username, self.password, lat, lon, alt):
self.logged_in = True
else:
self.logged_in = False
def get_last_result(self, type):
return self.last_result.get(type, [])
def refresh(self, lat, lng):
self.api.set_position(lat, lng, 10)
cell_ids = get_cell_ids(lat, lng)
timestamps = [0,] * len(cell_ids)
objects = self.api.get_map_objects(latitude=f2i(lat),
longitude=f2i(lng),
since_timestamp_ms=timestamps,
cell_id=cell_ids)
self.last_result = dict()
for cell in objects['responses']['GET_MAP_OBJECTS'].get('map_cells', []):
for fort in cell.get('forts', []):
if fort.get('type', 0) == 1:
self.last_result.setdefault('fort', []).append(fort)
else:
if fort.get('owned_by_team', 1) != 1:
self.last_result.setdefault('gym', []).append(fort)
for pokemon in cell.get('catchable_pokemons', []):
self.last_result.setdefault('pokemon', []).append(pokemon)
def spin_fort(self, fort, position):
if fort is None:
return
response_dict = self.api.fort_details(fort_id=fort['id'],
latitude=fort['latitude'],
longitude=fort['longitude'])
self.messager.send_message(response_dict)
if 'name' not in response_dict['responses']['FORT_DETAILS']:
return
name = response_dict['responses']['FORT_DETAILS']['name']
response_dict = self.api.fort_search(fort_id=fort['id'],
fort_latitude=fort['latitude'],
fort_longitude=fort['longitude'],
player_latitude=f2i(position[0]),
player_longitude=f2i(position[1]))
response_dict['name'] = name
self.messager.send_message(response_dict)
def catch_pokemon(self, pokemon, position):
if pokemon is None:
return
encounter_id = pokemon['encounter_id']
spawnpoint_id = pokemon['spawn_point_id']
player_latitude = position[0]
player_longitude = position[1]
response_dict = self.api.encounter(encounter_id=encounter_id, spawn_point_id=spawnpoint_id,
player_latitude=player_latitude, player_longitude=player_longitude)
encounter = response_dict['responses']['ENCOUNTER']
pokeball = self.inventory.get_pokeball(encounter['capture_probability'])
self.messager.send_message(response_dict)
status = 0
while status != 1 and status != 3:
response_dict = self.api.catch_pokemon(encounter_id=encounter_id,
pokeball=pokeball(),
normalized_reticle_size=1.950,
spawn_point_id=spawnpoint_id,
hit_pokemon=1,
spin_modifier=1,
normalized_hit_position=1)
status = response_dict['responses']['CATCH_POKEMON']['status']
self.messager.send_message(response_dict)
def __getattr__(self, item):
return getattr(self.api, item)
|
''' Does a simple linear regression plot
depends on statsmodels and pandas and patsy
See http://pandas.pydata.org/pandas-docs/stable/visualization.html for more details on pandas plotting
See http://statsmodels.sourceforge.net/stable/index.html for details on statsmodels
Regression http://pandas.pydata.org/pandas-docs/dev/computation.html
time series http://pandas.pydata.org/pandas-docs/stable/timeseries.html
This is for reference not actual use.
'''
import pandas as pd
from pandas import DataFrame, Series
import matplotlib.pyplot as plt
import statsmodels
from datetime import datetime
def insert_trendline(df, model):
df['regr'] = df.index*model.beta[0] + model.beta[1]
return df
def labl(axis):
a = axis
a.set_ylabel('European MobilePhone Subscriptions (millions)')
a.set_xlabel('Year')
a.set_title('European Mobile Phone Trend')
return a
df = pd.read_csv(csv_path)
start_year = datetime(1999,1,1)
end_year = datetime(2009,1,1)
years = pd.date_range(start_year,end = end_year, freq='A')
#using a data column as an index
col_ind = 'years'
ds = df.set_index(col_ind)
ds = ds.reset_index()
column_name = 'measured'
model = pd.ols(y=ds[column_name], x=ds.ix[:,'index'])
model.beta #the linear regression model
model.beta.plot() # plots the trend line alone
plt.show()
column_list = ['regr', column_name]
ds[column_list].plot()
ext = DataFrame(ds, index = range(22))
R = insert_trendline(ext, model)
R = insert_trendline(ext, model).set_index('index')
norm_factor = 1000
data = R[column_list]/norm_factor
A = data.plot()
labl(A)
plt.show()
|
A = [1,3,4,5,7,6,4,5,10,1]
print(A)
# Boundary case
if A[0]>=A[1]:
print(A[0])
for i in range(1,len(A)-1):
if (A[i]>=A[i-1]) and (A[i]>=A[i+1]):
print(A[i])
# Boundary case
if A[len(A)-1]>=A[len(A)-2]:
print(A[len(A)-1])
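# A function-based restatement of the same idea (an illustrative sketch, not
# part of the original exercise): collect every local peak, treating each end
# of the list as a peak when it is >= its single neighbour.
def find_peaks(values):
    peaks = []
    for i, v in enumerate(values):
        left_ok = (i == 0) or (v >= values[i - 1])
        right_ok = (i == len(values) - 1) or (v >= values[i + 1])
        if left_ok and right_ok:
            peaks.append(v)
    return peaks

print(find_peaks(A))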
|
from django.conf.urls import url
from django.urls import include
from drf_yasg import openapi
from drf_yasg.views import get_schema_view
from rest_framework import permissions
schema_view = get_schema_view(
openapi.Info(
title="Car Management API",
default_version='v1',
description="Car hire management system.",
),
public=True,
permission_classes=(permissions.AllowAny,),
)
urlpatterns = [
url(r'^swagger(?P<format>\.json|\.yaml)$', schema_view.without_ui(cache_timeout=0), name='schema-json'),
url(r'^swagger/$', schema_view.with_ui('swagger', cache_timeout=0), name='schema-swagger-ui'),
]
urlpatterns.append(url(r'api/customers', include('api.customers.urls')))
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.7 on 2017-11-21 18:29
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Alumno',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('nombre', models.CharField(max_length=80)),
('direccion', models.CharField(max_length=40)),
('telefono', models.CharField(max_length=40)),
('correo', models.EmailField(max_length=70)),
],
),
migrations.CreateModel(
name='Grado',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('alumno', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='asignacionc.Alumno')),
],
),
migrations.CreateModel(
name='Materia',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('nom_materia', models.CharField(max_length=60)),
('descripcion', models.CharField(max_length=120)),
('alumnos', models.ManyToManyField(through='asignacionc.Grado', to='asignacionc.Alumno')),
],
),
migrations.AddField(
model_name='grado',
name='materia',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='asignacionc.Materia'),
),
]
|
"""app_enquetes URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import include, path
urlpatterns = [
path('', include('polls.urls')),
path('admin/', admin.site.urls),
]
'''
The include() function lets you reference other URLconfs. Whenever Django encounters include(), it chops off whatever part of the URL matched up to that point and sends the remaining string to the included URLconf for further processing.
The idea behind include() is to make it easy to plug in URLs. Since polls has its own URLconf (polls/urls.py), it can be placed under "/polls/", or under "/fun_polls/", or under "/content/polls/", or any other path prefix, and the application will still work.
When should you use include()? You should always use include() when you include other URL patterns. admin.site.urls is the only exception to this.
'''
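# Illustrative sketch only (the view names below are assumptions, not part of
# this project): a minimal polls/urls.py that the include('polls.urls') line
# above would pull in. Because only the unmatched remainder of the URL is
# handed over, the same file works whether it is mounted at '', '/polls/' or
# '/fun_polls/'.
#
#   from django.urls import path
#   from . import views
#
#   urlpatterns = [
#       path('', views.index, name='index'),
#       path('<int:question_id>/', views.detail, name='detail'),
#   ]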
|
# This program prints a 2D array using numpy
from data import functional
try:
row = int(input("enter the number of rows :"))
column = int(input("enter the number of columns :"))
functional.array(row, column) # calling the method and passing two values
except ValueError:
print("Input only accepts decimal numbers")
|
import keras
import numpy as np
import matplotlib.pyplot as plt
from keras.datasets import cifar10
from IPython.display import clear_output
from keras import *
from keras.layers import *
# PLOTTER
class PlotLosses(keras.callbacks.Callback):
def on_train_begin(self, logs={}):
self.i = 0
self.x = []
self.losses = []
self.val_losses = []
self.fig = plt.figure()
self.logs = []
def on_epoch_end(self, epoch, logs={}):
self.logs.append(logs)
self.x.append(self.i)
self.losses.append(logs.get('loss'))
self.val_losses.append(logs.get('val_loss'))
self.i += 1
clear_output(wait=True)
plt.plot(self.x, self.losses, label="loss")
plt.plot(self.x, self.val_losses, label="val_loss")
plt.legend()
plt.show()
plot_losses = PlotLosses()
# Load CIFAR
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
# (1) Flatten & scale
def flatten_and_scale(example):
return example.flatten() / 255.0
train_x = []
test_x = []
for example in x_train:
train_x.append( flatten_and_scale(example) )
for example in x_test:
test_x.append( flatten_and_scale(example) )
train_x = np.array(train_x)
test_x = np.array(test_x)
# (2) To One-Hot
number_of_classes = len( set(y_train.flatten()) )
train_y = keras.utils.to_categorical(y_train.flatten(), number_of_classes)
test_y = keras.utils.to_categorical(y_test.flatten(), number_of_classes)
# (4) NN
from keras.models import Sequential
from keras.layers import Dense, Dropout
model = Sequential()
model.add(Dense(3072, input_dim=len(train_x[0]), activation='relu'))
model.add(Dropout(0.3))
model.add(Dense(1024, activation='relu'))
model.add(Dropout(0.3))
model.add(Dense(512, activation='relu'))
model.add(Dropout(0.3))
model.add(Dense(128, activation='relu'))
model.add(Dropout(0.3))
model.add(Dense(64, activation='relu'))
model.add(Dense(number_of_classes, activation='softmax'))
model.compile(loss='categorical_crossentropy',
optimizer='adam',
metrics=['accuracy'])
model.fit(train_x,
train_y,
validation_data=(test_x, test_y),
epochs=3500,
batch_size=32000,
callbacks=[plot_losses])
# (5) All evaluation
from sklearn.metrics import classification_report
def print_eval(x, y, title):
yHat = model.predict_classes(x, batch_size=128, verbose=1)
report = classification_report(np.argmax(y, axis=1), yHat)
print("\n\nREPORT ", title, "\n", report)
print_eval(train_x, train_y, "Train")
print_eval(test_x, test_y, "Test")
# not good at all, let's add convolution
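# A minimal convolutional sketch of that idea (illustrative only; the layer
# sizes, epochs and batch size are assumptions, not a tuned setup). It reuses
# the original unflattened images, scaled to [0, 1], since Conv2D expects the
# (32, 32, 3) shape rather than the flattened vectors used above.
conv_model = Sequential()
conv_model.add(Conv2D(32, (3, 3), activation='relu', input_shape=x_train.shape[1:]))
conv_model.add(MaxPooling2D((2, 2)))
conv_model.add(Conv2D(64, (3, 3), activation='relu'))
conv_model.add(MaxPooling2D((2, 2)))
conv_model.add(Flatten())
conv_model.add(Dense(128, activation='relu'))
conv_model.add(Dense(number_of_classes, activation='softmax'))
conv_model.compile(loss='categorical_crossentropy',
                   optimizer='adam',
                   metrics=['accuracy'])
conv_model.fit(x_train / 255.0,
               train_y,
               validation_data=(x_test / 255.0, test_y),
               epochs=10,
               batch_size=128,
               callbacks=[plot_losses])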
|
import os
import h5py
import argparse
import numpy as np
from utils import vtk_plot
parser = argparse.ArgumentParser()
parser.add_argument("--model_id", type=str, default='07200224_3dunet')
opt = parser.parse_args()
print(opt)
data_dir = 'data/test'
input_files = [f for f in os.listdir('data/test') if f.endswith('_input.h5')]
gt_files = [f.replace('_input', '_gt') for f in input_files]
pred_files = [f.replace('_input', '') for f in input_files]
# model_id = '07192114_3dunet'
# model_id = '07200224_3dunet'
# model_id = '07192006'
model_id = opt.model_id
test_dir = os.path.join('./test', model_id)
fig_dir = os.path.join('figs', model_id)
os.makedirs(fig_dir, exist_ok=True)
for input_fn, gt_fn, pred_fn in zip(input_files, gt_files, pred_files):
# print(input_fn)
input_path = os.path.join('data/test', input_fn)
gt_path = os.path.join('data/test', gt_fn)
pred_path = os.path.join(test_dir, pred_fn)
input = h5py.File(input_path, 'r').get('data')[()]
gt = h5py.File(gt_path, 'r').get('data')[()]
pred = h5py.File(pred_path, 'r').get('data')[()][0,0,:,:,:]
print(np.sum(np.where(input > 1)), np.max(input))
print(np.sum(np.where(pred > 1)), np.max(pred))
threshold = 0.0
save_input_path = os.path.join(fig_dir, input_fn.replace('.h5', '.png'))
save_gt_path = os.path.join(fig_dir, gt_fn.replace('.h5', '.png'))
save_pred_path = os.path.join(fig_dir, pred_fn.replace('.h5', '_pred.png'))
vtk_plot(input, threshold, save_path=save_input_path)
vtk_plot(gt, threshold, save_path=save_gt_path)
vtk_plot(pred, threshold, save_path=save_pred_path)
|
# Generated by Django 3.1.7 on 2021-04-01 03:42
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('blog', '0004_auto_20210330_1844'),
]
operations = [
migrations.AlterField(
model_name='entry',
name='photo',
field=models.ImageField(height_field=200, null=True, upload_to='photos', verbose_name='photo', width_field=300),
),
]
|
def reverse_words(string):
return ' '.join(a[::-1] for a in string.split(' '))
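# Example (illustrative): reverse_words("hello world") returns "olleh dlrow".
# Splitting on a single space (rather than bare str.split()) preserves runs of
# spaces, e.g. "ab  cd" becomes "ba  dc".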
|
# -*- coding: utf-8 -*-
# @Time : 2020/5/24
# @Author : J
# @File : 图像的基本操作.py
# @Software: PyCharm
import numpy as np
import cv2 as cv
from matplotlib import pyplot as plt
img = cv.imread("../image.jpg")
# px = img[100,100]  # access a pixel value by its row and column coordinates
# print(px)
#
# blue = img[100,100,0]  # access the pixel value of a specific channel
# print(blue)
#
#
# img[100,100] = [0,0,0]  # modify a pixel value
#
#
# img.item(10,10,2)  # (10,10,2) is the coordinate; read/modify a single pixel
# img.itemset((10,10,2),100)
# print(img.shape)
# print(img.size)
# print(img.dtype)
# Region of interest (ROI)
eyes = img[285:345,335:395]  # the position will vary with the image used
img[273:333,100:160] = eyes
b,g,r = cv.split(img)
img = cv.merge((b,g,r))
cv.namedWindow("image",cv.WINDOW_NORMAL)#可调整窗口大小
cv.imshow("image",img)
cv.waitKey(0)
cv.destroyAllWindows()
# BLUE = [255,0,0]
# img1 = cv.imread('../image.png')
# # add a border (padding) around the image
# replicate = cv.copyMakeBorder(img1,10,10,10,10,cv.BORDER_REPLICATE)
# reflect = cv.copyMakeBorder(img1,10,10,10,10,cv.BORDER_REFLECT)
# reflect101 = cv.copyMakeBorder(img1,10,10,10,10,cv.BORDER_REFLECT_101)
# wrap = cv.copyMakeBorder(img1,10,10,10,10,cv.BORDER_WRAP)
# constant= cv.copyMakeBorder(img1,10,10,10,10,cv.BORDER_CONSTANT,value=BLUE)
# plt.subplot(231),plt.imshow(img1,'gray'),plt.title('ORIGINAL')
# plt.subplot(232),plt.imshow(replicate,'gray'),plt.title('REPLICATE')
# plt.subplot(233),plt.imshow(reflect,'gray'),plt.title('REFLECT')
# plt.subplot(234),plt.imshow(reflect101,'gray'),plt.title('REFLECT_101')
# plt.subplot(235),plt.imshow(wrap,'gray'),plt.title('WRAP')
# plt.subplot(236),plt.imshow(constant,'gray'),plt.title('CONSTANT')
# plt.show()
|
num1 = int(input("Enter num 1 : "))
num2 = int(input("Enter num 2 : "))
num3 = int(input("Enter num 3 : "))
print("Result of ",num1 ,"+" ,num2 ,"+" ,num3, "= ",num1+num2+num3)
print("2 X Num1 =",num1*2)
|
from odoo_gateway import Session
from simple_timer import Timer
session = Session(['-c', '/Users/eneldoserrata/PycharmProjects/marcos_odoo/.openerp_serverrc', '-d', 'rim'])
cr = session.cr
lot_ids = [l.id for l in session.models.stock_production_lot.search([])]
count = 0
total_time = 0.0
moves = set()
skua_ab = {}
for lot_id in session.models.stock_production_lot.search([]):
timer = Timer()
for quant in session.models.stock_quant.search([('lot_id', '=', lot_id.id)]):
moves |= {move.id for move in quant.history_ids}
try:
from_qc_skua_cost = 0.00
for move_id in session.models.stock_move.search([("id", "in", sorted(list(moves)))]):
if move_id.location_id.usage == "internal" and move_id.location_id.usage == "supplier":
move_id.with_context({"from_qc": False}).product_price_update_before_done()
elif move_id.location_id.usage == "internal" and move_id.location_id.usage == "internal":
move_id.with_context({"from_qc": False}).product_price_update_before_done()
elif move_id.location_id.usage == "internal" and move_id.location_id.usage == "production":
from_qc_skua_cost = move_id.product_id.standard_price
context = {"from_qc": "skua", "from_qc_skua_cost": from_qc_skua_cost}
move_id.with_context(context).product_price_update_before_done()
elif move_id.location_id.usage == "production" and move_id.location_id.usage == "internal":
if from_qc_skua_cost == 0.00:
from_qc_skua_cost = move_id.product_id.standard_price
context = {"from_qc": "skub", "from_qc_skua_cost": from_qc_skua_cost}
move_id.with_context(context).product_price_update_before_done()
elif move_id.location_id.usage == "internal" and move_id.location_id.usage == "customer":
move_id.with_context({"from_qc": False}).product_price_update_before_done()
elif move_id.location_id.usage == "customer" and move_id.location_id.usage == "internal":
move_id.with_context({"from_qc": False}).product_price_update_before_done()
from_qc_skua_cost = 0.00
# make_qc(self, cr, uid, 8, 'U-PLT-A-255-65-R18', {'canfix': True, 'location_id': 7, 'uid': 1, 'name': 'Administrator', 'default_printer': 4}, context=None):
except:
print "except"
current_time = timer.duration
total_time += current_time
print "id: {}, Serial: {}, take: {}, total time: {}".format(lot_id.id, lot_id.name, timer.duration, total_time)
cr.commit()
moves = set()
|
def getFriends():
friends = []
f = open("friends.txt", "r")
for line in f:
if line != "\n":
line = line.rstrip()
friends.append(line)
f.close()
return friends
def addFriend(name):
f = open("friends.txt", "a")
f.write(name + "\n")
f.close()
def deleteFriend(name):
friends = getFriends()
f = open("friends.txt", "w")
if name in friends:
friends.remove(name)
for friend in friends:
f.write(friend + "\n")
f.close()
|
import matplotlib.pyplot as plt
speaker_results = open(r'F:\Projects\Active Projects\Project Intern_IITB\Desktop\Vowel_opt_V3_MA.csv', 'r')
sr = speaker_results.read()
# print sr
list_data = sr.split('\n')
# print list_data
list_data.pop(0)
list_data.pop(-1)
list_data.pop(-1)
# print list_data
data = []
for j in list_data:
data.append((j.split(',')))
name = []
precision = []
recall = []
no_of_files =[]
precision_rouge = []
recall_rouge = []
for j in range(len(data)):
name.append(data[j][0])
precision.append(data[j][10])
recall.append(data[j][11])
precision_rouge.append(data[j][10])
# recall_rouge.append(data[j][11])
# no_of_files.append(data[j][3])
#
precision_rouge.sort()
for j in range(len(precision_rouge)):
recall_rouge.append(recall[precision.index(precision_rouge[j])])
# recall_rouge.sort()
# for j in range(len(recall_rouge)):
# precision_rouge.append(precision[recall.index(recall_rouge[j])])
axis_p = []
axis_r = []
for j in range(len(name)):
axis_p.append(j)
axis_r.append(j)
plt.scatter(axis_p,precision_rouge,color='red',label='Precision')
plt.scatter(axis_r,recall_rouge,color='blue',label='Recall')
for j in range(len(axis_p)):
plt.vlines(axis_p[j],0,precision_rouge[j],colors='black')
for j in range(len(axis_r)):
plt.vlines(axis_r[j],0,recall_rouge[j],colors='black')
plt.xlim(-0.5,len(axis_r)+0.5)
plt.ylim(0,1)
plt.grid()
plt.xlabel('Version No')
plt.ylabel('Precision and Recall')
plt.hlines(0.8,0,len(axis_r))
plt.hlines(0.9,0,len(axis_r))
plt.legend(loc="upper left", bbox_to_anchor=(1,1))
plt.show()
|
from helpers import assert_raises
# Recursion + Python
# =============================
# A recursive function has one or more base cases, inputs for which the
# function produces a result trivially, and one or more recursive cases, for
# which the function recurs on simpler inputs.
# Like functional languages, Python allows recursion.
def fact(n):
return 1 if n == 0 else n * fact(n - 1)
assert fact(5) == 120
def fib(n):
return n if n in (0, 1) else fib(n-1) + fib(n-2)
assert fib(10) == 55
# Recursion is useful because it is simple to reason about (using induction).
# Algorithms written in this style are often more concise, and tend to specify
# _what_ values they need, rather than _how_ to generate them, usually relying
# on the interpreter itself for efficiency.
#
# In that sense, using recursion in Python is often ill-advised. Guido, our
# BDFL, does not really care for purity in programs, and trades it gladly
# for a language that is pleasant to use. To that end, Guido has actually
# set some limits on recursion that make it unpleasant to use.
#
# The Call Stack
# --------------
#
# When a function is called, the computer must "remember" the place it was
# called from, the return address, so that it can return to that location with
# the result once the call is complete. Typically, this information is saved on
# the call stack, a simple list of return locations in order of the times that
# the call locations they describe were reached.
#
# Naturally, this can lead to memory errors if, for instance, some process
# begins infinite recursion.
#
# Consequently, Python sets a maximum limit on the interpreter stack to prevent
# overflows on the C stack that might crash Python.
#
# (NOTE: The highest possible limit is platform dependent and setting a
# too-high limit can lead to a crash.)
import sys
assert sys.getrecursionlimit() == 1000
# NOTE: This is 996 because the stack has to actually call this
# If it were outside of this test, 999 would work
assert_raises(None, lambda: fact(996))
assert_raises(RuntimeError, lambda: fact(1000))
# There are a number of ways to get around this issue.
# One way is to simply use `reduce`:
import operator
def fact_r(n):
return reduce(operator.mul, xrange(1, n + 1), 1)
assert fact_r(10) == fact(10)
assert_raises(None, lambda: fact_r(1000))
# Note, fib is multiple recursive (it makes multiple calls to itself) which
# makes it difficult to translate. At least for now, I am expressing it as a
# bottom-up variant rather than top-down.
# > Single recursion is often much more efficient than multiple recursion, and
# > can generally be replaced by an iterative computation, running in linear
# > time and requiring constant space. Multiple recursion, by contrast, may
# > require exponential time and space, and is more fundamentally recursive,
# > not being able to be replaced by iteration without an explicit stack.
# > Multiple recursion can sometimes be converted to single recursion, e.g. by
# > passing two successive values as parameters. This is more naturally framed
# > as corecursion, building up from the initial values and tracking two
# > successive values at each step.
# Indirect recursion is when multiple functions call each other in a loop.
# See: Hanoi
# Structural versus generative recursion
# Structural recursion decomposes arguments into structural components and then process those components
# The argument to each recursive call is the content of a field of the original input
# These can be shown to terminate using structural induction
# Generative recursion generate an entirely new piece of data and recur on it.
# gcd is an example; it just generates a new number (the size of the structure is the same, but it is conceptually "smaller").
# It requires a predicate to terminate clearly.
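# Two tiny sketches of the notions above (added for illustration, not part of
# the original notes). is_even/is_odd call each other, i.e. indirect (mutual)
# recursion; gcd_gen recurs on a freshly generated pair rather than on a
# structural piece of its input, i.e. generative recursion. (It is named
# gcd_gen so it does not clash with the tail-recursive gcd defined further down.)
def is_even(n):
    return True if n == 0 else is_odd(n - 1)

def is_odd(n):
    return False if n == 0 else is_even(n - 1)

assert is_even(10) and is_odd(7)

def gcd_gen(x, y):
    return x if y == 0 else gcd_gen(y, x % y)

assert gcd_gen(2000, 900) == 100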
def fib_r(n):
def f((a, b), _):
return b, a + b
return reduce(f, xrange(n), (0, 1))[-2]
assert fib_r(10) == fib(10)
assert_raises(None, lambda: fib_r(1000))
# Guido really doesn't like `reduce` though:
#
# So now reduce(). This is actually the one I've always hated most, because,
# apart from a few examples involving + or *, almost every time I see a
# reduce() call with a non-trivial function argument, I need to grab pen and
# paper to diagram what's actually being fed into that function before I
# understand what the reduce() is supposed to do. So in my mind, the
# applicability of reduce() is pretty much limited to associative operators,
# and in all other cases it's better to write out the accumulation loop
# explicitly. [1]
#
# Guido, for what it's worth, is very pragmatic:
#
# Third, I don't believe in recursion as the basis of all programming. This
# is a fundamental belief of certain computer scientists, especially those
# who love Scheme and like to teach programming by starting with a "cons"
# cell and recursion. But to me, seeing recursion as the basis of everything
# else is just a nice theoretical approach to fundamental mathematics
# (turtles all the way down), not a day-to-day tool.
#
# For practical purposes, Python-style lists (which are flexible arrays, not
# linked lists), and sequences in general, are much more useful to start
# exploring the wonderful world of programming than recursion. They are some
# of the most important tools for experienced Python programmers, too.
#
# Using a linked list to represent a sequence of value is distinctly
# unpythonic, and in most cases very inefficient. Most of Python's library is
# written with sequences and iterators as fundamental building blocks
# (and dictionaries, of course), not linked lists, so you'd be locking
# yourself out of a lot of pre-defined functionality by not using lists or
# sequences.
#
# If we take Guido's advice, we can choose to build our loop as a `while` or
# `for`:
def fact_while(n):
r = 1
while n > 0:
r *= n
n -= 1
return r
assert fact_while(10) == fact(10)
assert_raises(None, lambda: fact_while(1000))
def fib_while(n):
a, b = 0, 1
while n > 0:
a, b = b, a + b
n -= 1
return a
assert fib_while(10) == fib(10)
assert_raises(None, lambda: fib_while(1000))
def fact_for(n):
rv = 1
for x in xrange(1, n + 1):
rv *= x
return rv
assert fact_for(10) == fact(10)
assert_raises(None, lambda: fact_for(1000))
def fib_for(n):
a, b = 0, 1
for _ in xrange(n):
a, b = b, a + b
return a
assert fib_for(10) == fib(10)
assert_raises(None, lambda: fib_for(1000))
# These both work, but both are rather low level and express things in a
# primitive way. Python emphasizes readability; can we do better?
# The classical Pythonic solution is to turn everything into generators:
def gen_fact():
rv, x = 1, 1
while True:
yield rv
x += 1
rv *= x
def gen_fib():
a, b = 0, 1
while True:
a, b = b, a + b
yield a
# These are fine, except that they can be a small pain to actually use.
# They also are a very different way of thinking, which may or may not be good.
from itertools import izip
def gen_i(gen, n):
for _, x in izip(xrange(n), gen):
pass
return x
fact_gen = lambda n: gen_i(gen_fact(), n)
fib_gen = lambda n: gen_i(gen_fib(), n)
assert fact_gen(10) == fact(10)
assert_raises(None, lambda: fact_gen(1000))
assert fib_gen(10) == fib(10)
assert_raises(None, lambda: fib_gen(1000))
# Another commonly proposed solution to Python's recursion problem is tail call
# optimization.
#
# A tail call is a subroutine call that happens inside another procedure as its
# final action.
def fact_tr(n, rv=1):
return rv if n == 0 else fact_tr(n - 1, n * rv)
def fib_tr(n, a=0, b=1):
return a if n == 0 else fib_tr(n-1, b, a + b)
# Tail calls are significant because they can be implemented without adding a
# new stack frame to the call stack, making them as efficient as goto
# statements.
# Notably however, Python does _not_ do tail call optimization.
assert fact_tr(10) == fact(10)
assert_raises(RuntimeError, lambda: fact_tr(1000))
assert fib_tr(10) == fib(10)
assert_raises(RuntimeError, lambda: fib_tr(1000))
# Guido thinks tail call optimization is bad for a number of reasons...
# Nevertheless, if we want, it is quite possible to implement tail call
# optimization ourselves, although with Guido's caveats.
def tail_recursive(func):
"""
Create a tail recursive function.
The new function will not lengthen the call stack and so avoids issues
with Python's recursion limit.
To gain this functionality, the decorated function must be a generator.
This makes recursive calls lazy.
NOTE: This is not a decorator. The function must be able to call itself
directly. Usage should look something like:
def _fact(n, r=1):
yield r if n == 0 else _fact(n - 1, n * r)
fact = tail_recursive(_fact)
"""
def wrapped(*args, **kwargs):
g = func(*args, **kwargs)
try:
while True:
g = next(g)
except TypeError: # g is not an iterator
return g
return wrapped
# This implementation is very similar to Paul Butler's method[2], but it
# achieves laziness using generators rather than lambdas.
def _gcd(x, y):
yield x if y == 0 else _gcd(y, x % y)
gcd = tail_recursive(_gcd)
assert gcd(2000, 900) == 100
def _fact(n, r=1):
yield r if n == 0 else _fact(n - 1, n * r)
fact_tr2 = tail_recursive(_fact)
assert fact_tr2(10) == fact(10)
assert_raises(None, lambda: fact_tr2(1000))
def _fib(n, a=0, b=1):
yield a if n == 0 else _fib(n-1, b, a + b)
fib_tr2 = tail_recursive(_fib)
assert fib_tr2(10) == fib(10)
assert_raises(None, lambda: fib_tr2(1000))
# Ultimately, I only consider this a modest improvement as it still requires
# that we convert beautiful recursive functions into their tail-recursive
# counterparts. In many cases, the original idea we want to express has no
# direct translation.
#
# We also cannot memoize these implementations easily. Fortunately, the
# tail-recursive style seems to force us to redesign our algorithms into
# bottom-up implementations which mostly avoid the need.
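# For contrast, a sketch of memoizing the plain recursive fib (illustrative
# only). The cache removes the repeated work of multiple recursion, but each
# call still consumes a stack frame, so the recursion limit still applies.
def fib_memo(n, _cache={0: 0, 1: 1}):
    if n not in _cache:
        _cache[n] = fib_memo(n - 1) + fib_memo(n - 2)
    return _cache[n]

assert fib_memo(10) == fib(10)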
# Another way of doing things is continuation passing style (CPS).
# CPS is a general method for turning any recursive function into a
# tail-recursive variant.
#
# This works especially well with Towers of Hanoi (which would otherwise be
# difficult to write in tail-recursive style).
id = lambda x: x
def cps(f):
def wrapped(*args):
return tail_recursive(f)(id, *args)
return wrapped
def _hanoi(f, n):
def g(x):
yield f(2 * x + 1)
yield f(1) if n == 1 else _hanoi(g, n-1)
hanoi = cps(_hanoi)
assert hanoi(1) == 1
assert hanoi(4) == 15
def _fact_cps(f, n):
def g(x):
yield f(x * n)
yield f(1) if n == 0 else _fact_cps(g, n - 1)
fact_cps = cps(_fact_cps)
def _fib_cps(f, n):
def g((a, b)):
yield f((b, a + b))
yield f((0, 1)) if n == 0 else _fib_cps(g, n - 1)
fib_cps = cps(_fib_cps)
assert fact_cps(10) == fact(10)
assert_raises(None, lambda: fact_cps(1000))
assert fib_cps(10)[0] == fib(10)
assert_raises(None, lambda: fib_cps(1000)[0])
# We can also do some crazy stuff by just directing the call stack ourselves.
# This is kind of pointless; as it is not really memory efficient anyway, but
# it can be useful for mechanically translating recursive algorithms into
# iterative ones.
def _fact_x(call_stack, rv, f):
if len(call_stack) == 0:
yield rv
else:
n = call_stack.pop()
if n == 1:
yield _fact_x(call_stack, rv, f)
else:
call_stack.append(n - 1)
yield _fact_x(call_stack, f(rv, n), f)
fact_x = lambda n: tail_recursive(_fact_x)([n], 1, operator.mul)
assert fact_x(10) == fact(10)
assert_raises(None, lambda: fact_x(1000))
def _fib_x(call_stack, rv, f):
if len(call_stack) == 0:
yield rv
else:
n = call_stack.pop()
if n in (0, 1):
yield _fib_x(call_stack, f(rv, n), f)
else:
call_stack.append(n - 1)
call_stack.append(n - 2)
yield _fib_x(call_stack, rv, f)
fib_x = lambda n: tail_recursive(_fib_x)([n], 0, operator.add)
assert fib_x(10) == fib(10)
# NOTE: This is NOT bottom up; this is a pure translation. It takes a while.
# assert_raises(None, lambda: fib_x(1000))
# We can also look at more interesting examples, like the Ackerman function
def ackermann(x, y):
    if x == 0:
        return y + 1
    return ackermann(x - 1, 1 if y == 0 else ackermann(x, y - 1))
def _ackermann(call_stack, rv):
if len(call_stack) == 0:
yield rv
else:
m, n = call_stack.pop()
if m == 0:
yield _ackermann(call_stack, n + 1)
elif n == 0:
call_stack.append((m - 1, 1))
yield _ackermann(call_stack, rv)
else:
# EEK
yield _ackermann(call_stack, rv)
# ---
# The job of the recursive cases can be seen as breaking down complex inputs
# into simpler ones.
# NOTE: interesting idea on complexity
# - Algebraic data types
# - Inductively defined data (nats, linked list, bnf)
# ---
# [1]: http://www.artima.com/weblogs/viewpost.jsp?thread=98196
# [2]: http://paulbutler.org/archives/tail-recursion-in-python/
# [3]: http://neopythonic.blogspot.com/2009/04/tail-recursion-elimination.html
# [4]: http://docs.python.org/2/library/sys.html#sys.setrecursionlimit
# [5]: http://www.cis.upenn.edu/~cis39903/static/10-rec-to-iter.pdf
|
from django.shortcuts import render, redirect
from django.http import HttpResponse, Http404
from .models import Profile, Neighborhood, Follow, Business, Post
from .forms import ProfileForm, NeighborhoodForm, PostBusinessForm, PostMessageForm
from django.contrib.auth.decorators import login_required
from wsgiref.util import FileWrapper
import mimetypes
from django.conf import settings
import os
# Create your views here.
#-----------------Landing page--------------#
@login_required(login_url='/accounts/login')
def index(request):
# images = Image.get_images()
current_user = request.user
title = 'WatchApp | Home'
hoods = Neighborhood.get_neighborhoods
est = Follow.objects.get(user=current_user)
business = Business.get_business_by_estate(est.estate)
# posts = Post.get_posts_by_estate(est.estate)
return render(request, 'index.html', {"est": est,"title": title,"user": current_user,"hoods":hoods, "business": business})
# {"posts":posts, })
#---------------Profile-----------------#
@login_required(login_url='/accounts/login')
def create_profile(request):
'''
View function to view details of a hood
'''
current_user = request.user
if request.method == 'POST':
form = ProfileForm(request.POST, request.FILES)
        if form.is_valid():
post = form.save(commit=False)
post.user = current_user
post.save()
return redirect(index)
else:
form = ProfileForm()
return render(request, 'create-profile.html', {"form":form})
@login_required(login_url='/accounts/login')
def follow(request,hood_id):
'''
View function to allow user move to a different neighborhood_name
'''
current_user = request.user
estate = Neighborhood.objects.get(id=hood_id)
following = Follow(user=current_user, estate=estate)
# check_if_exist = len(Follow.objects.all().filter(user=current_user))
# if check_if_exist > 0:
check_if_exists = Follow.objects.filter(user=current_user).exists()
if check_if_exists == True:
Follow.objects.all().filter(user=current_user).delete()
Follow.objects.update_or_create(user=current_user, estate=estate)
# following.save()
else:
following.save()
return redirect(index)
@login_required(login_url='/accounts/login')
def unfollow(request,id):
'''
View function unfollow other users
'''
current_user = request.user
estate = Neighborhood.objects.get(id=id)
    Follow.objects.filter(user=current_user, estate=estate).delete()
return redirect(index)
#-------------------- Hood View Functions--------------------#
def view_neighborhoods(request):
# images = Image.get_images()
current_user = request.user
title = 'Timeline'
hoods = Neighborhood.get_neighborhoods
return render(request, 'estates.html', {"title": title, "user": current_user, "hoods":hoods })
@login_required(login_url='/accounts/login')
def create_hood(request):
'''
View function to create and update the profile of the user
'''
current_user = request.user
if request.method == 'POST':
form = NeighborhoodForm(request.POST, request.FILES)
        if form.is_valid():
k = form.save(commit=False)
k.user = current_user
k.save()
return redirect(index)
else:
form = NeighborhoodForm()
return render(request, 'new-hood.html', {"form":form})
@login_required(login_url='/accounts/login')
def hood_details(request, hood_id):
'''
View function to view details of a hood
'''
if len(Follow.objects.all().filter(user=request.user))>0:
details = Neighborhood.get_specific_hood(hood_id)
exists = Follow.objects.all().get(user=request.user)
else:
details = Neighborhood.get_specific_hood(hood_id)
exists = 0
return render(request, 'hood-details.html',{"exists": exists,"details":details})
@login_required(login_url='/accounts/login')
def profile(request):
'''
View function to display the profile of the logged in user when they click on the user icon
'''
current_user = request.user # get the id of the current
try:
single_profile = Profile.objects.get(user=current_user.id)
title = f'{current_user.username}\'s'
info = Profile.objects.filter(user=current_user)
pics = Image.objects.filter(user=request.user.id).all()
except:
title = f'{current_user.username}'
# pics = Image.objects.filter(user=request.user.id).all()
info = Profile.objects.filter(user=7)
return render(request, 'my-profile.html', {"title": title, "current_user": current_user, "info": info, })
#-------------------Businesses------------#
@login_required(login_url='/accounts/login')
def create_business(request):
'''
View function to post a message
'''
current_user = request.user
est = Follow.objects.get(user=current_user)
if request.method == 'POST':
form = PostBusinessForm(request.POST, request.FILES)
        if form.is_valid():
post = form.save(commit=False)
post.user = current_user
post.estate = est.estate
post.save()
return redirect(index)
else:
form = PostBusinessForm()
return render(request, 'new-business.html', {"form":form})
@login_required(login_url='/accounts/login')
def business_details(request, business_id):
'''
View function to view details of a hood
'''
details = Business.get_specific_business(business_id)
return render(request, 'business-details.html',{"details":details})
@login_required(login_url='/accounts/login')
def new_comment(request, hood_id):
form = PostMessageForm(request.POST, request.FILES)
return render(request, 'new-message.html', {"form":form})
def search_results(request):
if 'photos' in request.GET and request.GET['photos']:
search_term = request.GET.get('photos')
searched_photo = Images.search_by_title(search_term)
photos = Images.objects.filter(name=searched_photo).all()
message = f"{search_term}"
return render(request, 'searched.html', {"message": message, "photos": searched_photo})
else:
message = 'Try Again'
return render(request, 'searched.html', {"message": message})
|
'''******************************************
ATHON
Introduction to the Python Language Program
Subject: Programming Logic
Professor: Francisco Tesifom Munhoz
Date: First Semester 2021
*********************************************
Activity: List 2 (Ex 3)
Author: Yuri Pellini
Date: 19 May 2021
Comments:
******************************************'''
# Input
X = float(input("Enter the length of one side of the triangle in cm: "))
Y = float(input("Enter the second value: "))
Z = float(input("Enter the last value: "))
# Output
if (X == Y and Y == Z):
    print("This is an equilateral triangle")
else:
    if (X == Y or Y == Z or X == Z):
        print("This is an isosceles triangle")
    else:
        print("This is a scalene triangle")
|
# Generated by Django 2.2.4 on 2019-08-05 14:45
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('blog', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Tag',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=50)),
('slug', models.SlugField(unique=True)),
],
),
migrations.RenameField(
model_name='post',
old_name='titel',
new_name='title',
),
]
|
"""
Copyright 1999 Illinois Institute of Technology
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL ILLINOIS INSTITUTE OF TECHNOLOGY BE LIABLE FOR ANY
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
Except as contained in this notice, the name of Illinois Institute
of Technology shall not be used in advertising or otherwise to promote
the sale, use or other dealings in this Software without prior written
authorization from Illinois Institute of Technology.
"""
from .pyqt_utils import *
from ..utils.image_processor import *
class EQ_FittingTab(QWidget):
"""
Fitting Tabs : left or right
Display fitting graph and providing options
"""
def __init__(self, parent, side):
QWidget.__init__(self)
self.parent = parent
self.side = side
self.syncUI = False
self.editableVars = {}
self.initUI()
self.setAllToolTips()
self.setConnections()
def initUI(self):
"""
Initial all GUIs including : 4 plots and result table
"""
self.setContentsMargins(0, 0, 0, 0)
self.fittingTabLayout = QGridLayout(self)
self.fitSettingsGrp = QGroupBox("Settings")
self.fitSettingLayout = QGridLayout(self.fitSettingsGrp)
self.fixSigmaC = QCheckBox("Fixed Sigma C :")
# self.fixSigmaC.setChecked(True)
self.sigmaCSpinBx = QDoubleSpinBox()
self.sigmaCSpinBx.setEnabled(False)
self.sigmaCSpinBx.setMinimum(-100)
self.sigmaCSpinBx.setDecimals(6)
self.sigmaCSpinBx.setMaximum(100)
self.sigmaCSpinBx.setKeyboardTracking(False)
self.sigmaCSpinBx.setValue(1.)
self.sigmaCSpinBx.setObjectName('sigmaCSpinBx')
self.editableVars[self.sigmaCSpinBx.objectName()] = None
self.fixSigmaD = QCheckBox("Fixed Sigma D :")
self.sigmaDSpinBx = QDoubleSpinBox()
self.sigmaDSpinBx.setEnabled(False)
self.sigmaDSpinBx.setMinimum(-100)
self.sigmaDSpinBx.setMaximum(100)
self.sigmaDSpinBx.setDecimals(6)
self.sigmaDSpinBx.setKeyboardTracking(False)
self.sigmaDSpinBx.setValue(1.)
self.sigmaDSpinBx.setObjectName('sigmaDSpinBx')
self.editableVars[self.sigmaDSpinBx.objectName()] = None
self.fixSigmaS = QCheckBox("Fixed Sigma S :")
# self.fixSigmaS.setChecked(True)
self.sigmaSSpinBx = QDoubleSpinBox()
self.sigmaSSpinBx.setEnabled(False)
self.sigmaSSpinBx.setMinimum(-100)
self.sigmaSSpinBx.setMaximum(100)
self.sigmaSSpinBx.setDecimals(6)
self.sigmaSSpinBx.setKeyboardTracking(False)
self.sigmaSSpinBx.setValue(0.0001)
self.sigmaSSpinBx.setObjectName('sigmaSSpinBx')
self.editableVars[self.sigmaSSpinBx.objectName()] = None
self.fixGamma = QCheckBox("Fixed gamma :")
self.gammaSpinBx = QDoubleSpinBox()
self.gammaSpinBx.setEnabled(False)
self.gammaSpinBx.setMinimum(-100)
self.gammaSpinBx.setMaximum(100)
self.gammaSpinBx.setDecimals(6)
self.gammaSpinBx.setKeyboardTracking(False)
self.gammaSpinBx.setValue(1)
self.gammaSpinBx.setObjectName('gammaSpinBx')
self.editableVars[self.gammaSpinBx.objectName()] = None
self.fitSettingLayout.addWidget(self.fixSigmaC, 0, 0, 1, 1)
self.fitSettingLayout.addWidget(self.sigmaCSpinBx, 0, 1, 1, 1)
self.fitSettingLayout.addWidget(self.fixSigmaD, 1, 0, 1, 1)
self.fitSettingLayout.addWidget(self.sigmaDSpinBx, 1, 1, 1, 1)
self.fitSettingLayout.addWidget(self.fixSigmaS, 2, 0, 1, 1)
self.fitSettingLayout.addWidget(self.sigmaSSpinBx, 2, 1, 1, 1)
self.fitSettingLayout.addWidget(self.fixGamma, 3, 0, 1, 1)
self.fitSettingLayout.addWidget(self.gammaSpinBx, 3, 1, 1, 1)
self.fitSettingLayout.setRowMinimumHeight(0, 10)
self.skeletalGrp = QGroupBox("Skeletal Muscle (Z line)")
self.skeletalGrp.setEnabled(False)
self.skeletalLayout = QGridLayout(self.skeletalGrp)
self.fixedZline = QCheckBox("Fixed Center : ")
self.zlineSpnBx = QDoubleSpinBox()
self.zlineSpnBx.setObjectName('zlineSpnBx')
self.zlineSpnBx.setEnabled(False)
self.editableVars[self.zlineSpnBx.objectName()] = None
self.zlineSpnBx.setDecimals(0)
self.zlineSpnBx.setRange(0, 500)
self.zlineSpnBx.setKeyboardTracking(False)
self.fixedIntZ = QCheckBox("Fixed Intensity : ")
self.intZSpnBx = QDoubleSpinBox()
self.intZSpnBx.setObjectName('intZSpnBx')
self.intZSpnBx.setEnabled(False)
self.editableVars[self.intZSpnBx.objectName()] = None
self.intZSpnBx.setDecimals(3)
self.intZSpnBx.setRange(0, 10000000)
self.intZSpnBx.setKeyboardTracking(False)
self.fixedSigZ = QCheckBox("Fixed Sigma : ")
self.sigZSpnBx = QDoubleSpinBox()
self.sigZSpnBx.setObjectName('sigZSpnBx')
self.sigZSpnBx.setEnabled(False)
self.editableVars[self.sigZSpnBx.objectName()] = None
self.sigZSpnBx.setDecimals(6)
self.sigZSpnBx.setRange(-100, 100)
self.sigZSpnBx.setKeyboardTracking(False)
self.fixedGammaZ = QCheckBox("Fixed Gamma : ")
self.gammaZSpnBx = QDoubleSpinBox()
self.gammaZSpnBx.setObjectName('gammaZSpnBx')
self.editableVars[self.gammaZSpnBx.objectName()] = None
self.gammaZSpnBx.setDecimals(6)
self.gammaZSpnBx.setRange(-100, 100)
self.gammaZSpnBx.setKeyboardTracking(False)
self.skeletalLayout.addWidget(self.fixedZline, 0, 0, 1, 1)
self.skeletalLayout.addWidget(self.zlineSpnBx, 0, 1, 1, 1)
self.skeletalLayout.addWidget(self.fixedIntZ, 1, 0, 1, 1)
self.skeletalLayout.addWidget(self.intZSpnBx, 1, 1, 1, 1)
self.skeletalLayout.addWidget(self.fixedSigZ, 2, 0, 1, 1)
self.skeletalLayout.addWidget(self.sigZSpnBx, 2, 1, 1, 1)
self.skeletalLayout.addWidget(self.fixedGammaZ, 3, 0, 1, 1)
self.skeletalLayout.addWidget(self.gammaZSpnBx, 3, 1, 1, 1)
self.extraPeakGrp = QGroupBox("Extra Peak")
self.extraPeakGrp.setEnabled(False)
self.extraPeakLayout = QGridLayout(self.extraPeakGrp)
self.fixedZlineEP = QCheckBox("Fixed Center : ")
self.zlineSpnBxEP = QDoubleSpinBox()
self.zlineSpnBxEP.setObjectName('zlineSpnBx')
self.zlineSpnBxEP.setEnabled(False)
self.editableVars[self.zlineSpnBxEP.objectName()] = None
self.zlineSpnBxEP.setDecimals(0)
self.zlineSpnBxEP.setRange(0, 500)
self.zlineSpnBxEP.setKeyboardTracking(False)
self.fixedIntZEP = QCheckBox("Fixed Intensity : ")
self.intZSpnBxEP = QDoubleSpinBox()
self.intZSpnBxEP.setObjectName('intZSpnBx')
self.intZSpnBxEP.setEnabled(False)
self.editableVars[self.intZSpnBxEP.objectName()] = None
self.intZSpnBxEP.setDecimals(3)
self.intZSpnBxEP.setRange(0, 10000000)
self.intZSpnBxEP.setKeyboardTracking(False)
self.fixedSigZEP = QCheckBox("Fixed Sigma : ")
self.sigZSpnBxEP = QDoubleSpinBox()
self.sigZSpnBxEP.setObjectName('sigZSpnBx')
self.sigZSpnBxEP.setEnabled(False)
self.editableVars[self.sigZSpnBxEP.objectName()] = None
self.sigZSpnBxEP.setDecimals(6)
self.sigZSpnBxEP.setRange(-100, 100)
self.sigZSpnBxEP.setKeyboardTracking(False)
self.fixedGammaZEP = QCheckBox("Fixed Gamma : ")
self.gammaZSpnBxEP = QDoubleSpinBox()
self.gammaZSpnBxEP.setObjectName('gammaZSpnBx')
self.editableVars[self.gammaZSpnBxEP.objectName()] = None
self.gammaZSpnBxEP.setDecimals(6)
self.gammaZSpnBxEP.setRange(-100, 100)
self.gammaZSpnBxEP.setKeyboardTracking(False)
self.extraPeakLayout.addWidget(self.fixedZlineEP, 0, 0, 1, 1)
self.extraPeakLayout.addWidget(self.zlineSpnBxEP, 0, 1, 1, 1)
self.extraPeakLayout.addWidget(self.fixedIntZEP, 1, 0, 1, 1)
self.extraPeakLayout.addWidget(self.intZSpnBxEP, 1, 1, 1, 1)
self.extraPeakLayout.addWidget(self.fixedSigZEP, 2, 0, 1, 1)
self.extraPeakLayout.addWidget(self.sigZSpnBxEP, 2, 1, 1, 1)
self.extraPeakLayout.addWidget(self.fixedGammaZEP, 3, 0, 1, 1)
self.extraPeakLayout.addWidget(self.gammaZSpnBxEP, 3, 1, 1, 1)
# self.fittingTabLayout.addSpacing(10)
self.fittingTabLayout.addWidget(self.fitSettingsGrp, 0, 0)
# self.fittingTabLayout.addSpacing()
self.fittingTabLayout.addWidget(self.skeletalGrp, 1, 0)
# self.fittingTabLayout.addSpacing()
self.fittingTabLayout.addWidget(self.extraPeakGrp, 2, 0)
def setAllToolTips(self):
"""
Set Tooltips for widgets
"""
self.skeletalGrp.setToolTip("Fit model with the skeletal peaks")
self.sigmaCSpinBx.setToolTip("Select the constant sigma C for fitting model")
def setConnections(self):
"""
Set connection for interactive widgets
"""
self.sigmaCSpinBx.editingFinished.connect(lambda: self.fixedFittingParams('sigmaC', self.sigmaCSpinBx))
self.sigmaDSpinBx.editingFinished.connect(lambda: self.fixedFittingParams('sigmaD', self.sigmaDSpinBx))
self.sigmaSSpinBx.editingFinished.connect(lambda: self.fixedFittingParams('sigmaS', self.sigmaSSpinBx))
self.fixSigmaC.stateChanged.connect(self.fixSigmaCChecked)
self.fixSigmaD.stateChanged.connect(self.fixedParamChecked)
self.fixSigmaS.stateChanged.connect(self.fixSigmaSChecked)
self.fixGamma.stateChanged.connect(self.fixedParamChecked)
self.gammaSpinBx.editingFinished.connect(lambda: self.fixedFittingParams('gamma', self.gammaSpinBx))
self.fixedIntZ.stateChanged.connect(self.skeletalChecked)
self.fixedZline.stateChanged.connect(self.skeletalChecked)
self.fixedSigZ.stateChanged.connect(self.skeletalChecked)
self.fixedGammaZ.stateChanged.connect(self.skeletalChecked)
self.sigZSpnBx.editingFinished.connect(lambda: self.skeletalChanged('sigZ', self.sigZSpnBx))
self.intZSpnBx.editingFinished.connect(lambda: self.skeletalChanged('intZ', self.intZSpnBx))
self.zlineSpnBx.editingFinished.connect(lambda: self.skeletalChanged('zline', self.zlineSpnBx))
self.gammaZSpnBx.editingFinished.connect(lambda: self.skeletalChanged('gammaZ', self.gammaZSpnBx))
self.fixedIntZEP.stateChanged.connect(self.skeletalChecked)
self.fixedZlineEP.stateChanged.connect(self.skeletalChecked)
self.fixedSigZEP.stateChanged.connect(self.skeletalChecked)
self.fixedGammaZEP.stateChanged.connect(self.skeletalChecked)
self.sigZSpnBxEP.editingFinished.connect(lambda: self.skeletalChanged('sigZ_EP', self.sigZSpnBxEP))
self.intZSpnBxEP.editingFinished.connect(lambda: self.skeletalChanged('intZ_EP', self.intZSpnBxEP))
self.zlineSpnBxEP.editingFinished.connect(lambda: self.skeletalChanged('zline_EP', self.zlineSpnBxEP))
self.gammaZSpnBxEP.editingFinished.connect(lambda: self.skeletalChanged('gammaZ_EP', self.gammaZSpnBxEP))
def syncSpinBoxes(self, info):
"""
Synchronize spin boxes
"""
self.syncUI = True
side = self.side
self.fixSigmaC.setChecked(side+'_fix_sigmac' in info)
self.fixSigmaD.setChecked(side+'_fix_sigmad' in info)
self.fixSigmaS.setChecked(side+'_fix_sigmas' in info)
self.sigmaDSpinBx.setEnabled(side+'_fix_sigmad' in info)
self.sigmaSSpinBx.setEnabled(side+'_fix_sigmas' in info)
if 'fit_results' in info:
fit_result = info['fit_results']
self.sigmaCSpinBx.setValue(fit_result[side+'_sigmac'])
self.sigmaDSpinBx.setValue(fit_result[side+'_sigmad'])
self.sigmaSSpinBx.setValue(fit_result[side+'_sigmas'])
self.skeletalGrp.setEnabled(fit_result['isSkeletal'])
self.extraPeakGrp.setEnabled(fit_result['isExtraPeak'])
self.sigmaCSpinBx.setValue(fit_result[side+'_sigmac'])
self.gammaSpinBx.setValue(fit_result[side+'_gamma'])
if fit_result['isSkeletal']:
self.zlineSpnBx.setValue(fit_result[side+'_zline'])
self.sigZSpnBx.setValue(fit_result[side+'_sigmaz'])
self.intZSpnBx.setValue(fit_result[side+'_intz'])
self.gammaZSpnBx.setValue(fit_result[side+'_gammaz'])
if fit_result['isExtraPeak']:
self.zlineSpnBxEP.setValue(fit_result[side+'_zline_EP'])
self.sigZSpnBxEP.setValue(fit_result[side+'_sigmaz_EP'])
self.intZSpnBxEP.setValue(fit_result[side+'_intz_EP'])
self.gammaZSpnBxEP.setValue(fit_result[side+'_gammaz_EP'])
self.syncUI = False
def initSpinBoxes(self, info):
"""
Initialize spin box
"""
self.syncUI = True
side = self.side
if 'fit_results' in info:
fit_result = info['fit_results']
self.sigmaCSpinBx.setValue(fit_result[side+'_sigmac'])
if side+'_fix_sigmac' in info:
self.fixSigmaC.setChecked(True)
self.sigmaCSpinBx.setEnabled(True)
else:
self.fixSigmaC.setChecked(False)
self.sigmaCSpinBx.setEnabled(False)
self.sigmaDSpinBx.setValue(fit_result[side+'_sigmad'])
if side+'_fix_sigmad' in info:
self.fixSigmaD.setChecked(True)
self.sigmaDSpinBx.setEnabled(True)
else:
self.fixSigmaD.setChecked(False)
self.sigmaDSpinBx.setEnabled(False)
self.sigmaSSpinBx.setValue(fit_result[side+'_sigmas'])
if side+'_fix_sigmas' in info:
self.fixSigmaS.setChecked(True)
self.sigmaSSpinBx.setEnabled(True)
else:
self.fixSigmaS.setChecked(False)
self.sigmaSSpinBx.setEnabled(False)
self.gammaSpinBx.setValue(fit_result[side+'_gamma'])
# self.nPeakSpnBx.setValue(len(fit_result['areas']))
# self.modelSelect.setCurrentIndex(self.modelSelect.findText(fit_result["model"]))
self.skeletalGrp.setEnabled(fit_result['isSkeletal'])
self.extraPeakGrp.setEnabled(fit_result['isExtraPeak'])
if fit_result['isSkeletal']:
self.zlineSpnBx.setValue(fit_result[side+'_zline'])
self.sigZSpnBx.setValue(fit_result[side+'_sigmaz'])
self.intZSpnBx.setValue(fit_result[side+'_intz'])
if fit_result['isExtraPeak']:
self.zlineSpnBxEP.setValue(fit_result[side+'_zline_EP'])
self.sigZSpnBxEP.setValue(fit_result[side+'_sigmaz_EP'])
self.intZSpnBxEP.setValue(fit_result[side+'_intz_EP'])
self.gammaSpinBx.setEnabled(self.fixGamma.isChecked())
self.gammaSpinBx.setHidden(fit_result['model'] != 'Voigt')
self.fixGamma.setHidden(fit_result['model'] != 'Voigt')
self.fixedGammaZ.setHidden(fit_result['model'] != 'Voigt')
self.gammaZSpnBx.setEnabled(self.fixedGammaZ.isChecked())
self.gammaZSpnBx.setHidden(fit_result['model'] != 'Voigt')
self.fixedGammaZEP.setHidden(fit_result['model'] != 'Voigt')
self.gammaZSpnBxEP.setEnabled(self.fixedGammaZEP.isChecked())
self.gammaZSpnBxEP.setHidden(fit_result['model'] != 'Voigt')
if side+'_fix_sigmac' in info:
self.fixSigmaC.setChecked(True)
self.sigmaCSpinBx.setEnabled(True)
self.sigmaCSpinBx.setValue(info[side + '_fix_sigmac'])
if side+'_fix_sigmad' in info:
self.fixSigmaD.setChecked(True)
self.sigmaDSpinBx.setEnabled(True)
self.sigmaDSpinBx.setValue(info[side + '_fix_sigmad'])
if side+'_fix_sigmas' in info:
self.fixSigmaS.setChecked(True)
self.sigmaSSpinBx.setEnabled(True)
self.sigmaSSpinBx.setValue(info[side+'_fix_sigmas'])
if side+'_fix_gamma' in info:
self.fixGamma.setChecked(True)
self.gammaSpinBx.setEnabled(True)
self.gammaSpinBx.setValue(info[side+'_fix_gamma'])
if side+'_fix_zline' in info:
self.fixedZline.setChecked(True)
self.zlineSpnBx.setEnabled(True)
self.zlineSpnBx.setValue(info[side + '_fix_zline'])
if side+'_fix_intz' in info:
self.fixedIntZ.setChecked(True)
self.intZSpnBx.setEnabled(True)
self.intZSpnBx.setValue(info[side + '_fix_intz'])
if side+'_fix_gammaz' in info:
self.fixedGammaZ.setChecked(True)
self.gammaZSpnBx.setEnabled(True)
            self.gammaZSpnBx.setValue(info[side + '_fix_gammaz'])
if side+'_fix_sigz' in info:
self.fixedSigZ.setChecked(True)
self.sigZSpnBx.setEnabled(True)
self.sigZSpnBx.setValue(info[side+'_fix_sigz'])
if side+'_fix_zline_EP' in info:
self.fixedZlineEP.setChecked(True)
self.zlineSpnBxEP.setEnabled(True)
self.zlineSpnBxEP.setValue(info[side + '_fix_zline_EP'])
if side+'_fix_intz_EP' in info:
self.fixedIntZEP.setChecked(True)
self.intZSpnBxEP.setEnabled(True)
self.intZSpnBxEP.setValue(info[side + '_fix_intz_EP'])
if side+'_fix_gammaz_EP' in info:
self.fixedGammaZEP.setChecked(True)
self.gammaZSpnBxEP.setEnabled(True)
            self.gammaZSpnBxEP.setValue(info[side + '_fix_gammaz_EP'])
if side+'_fix_sigz_EP' in info:
self.fixedSigZEP.setChecked(True)
self.sigZSpnBxEP.setEnabled(True)
self.sigZSpnBxEP.setValue(info[side+'_fix_sigz_EP'])
self.syncUI = False
def fixSigmaCChecked(self):
"""
Triggered when fix sigma C is checked
"""
side = self.side
parent = self.parent
if self.fixSigmaC.isChecked():
if side+'_sigmac' in parent.bioImg.info:
del parent.bioImg.info[side+'_sigmac']
else:
if side+'_fix_sigmac' in parent.bioImg.info:
del parent.bioImg.info[side+'_fix_sigmac']
self.sigmaCSpinBx.setEnabled(self.fixSigmaC.isChecked())
def fixSigmaSChecked(self):
"""
Triggered when fix sigma S is checked
"""
side = self.side
parent = self.parent
if self.fixSigmaS.isChecked():
if side+'_sigmas' in parent.bioImg.info:
del parent.bioImg.info[side+'_sigmas']
else:
if side+'_fix_sigmas' in parent.bioImg.info:
del parent.bioImg.info[side+'_fix_sigmas']
self.sigmaSSpinBx.setEnabled(self.fixSigmaS.isChecked())
def fixedParamChecked(self):
"""
Enable/Disable spinboxes
"""
bioImg = self.parent.bioImg
if self.syncUI or bioImg is None:
return
self.sigmaDSpinBx.setEnabled(self.fixSigmaD.isChecked())
self.sigmaSSpinBx.setEnabled(self.fixSigmaS.isChecked())
self.gammaSpinBx.setEnabled(self.fixGamma.isChecked())
def fixedFittingParams(self, name, elem):
"""
        Fixed value changed. Remove fit_results from the info dict so they are re-calculated
"""
# self.parent.refreshAllFittingParams()
self.log_changes(name, elem, prefix='(fitting)')
def skeletalChecked(self):
"""
Enable/Disable spinboxes
"""
if self.parent.bioImg is None or self.syncUI:
return
self.zlineSpnBx.setEnabled(self.fixedZline.isChecked())
self.sigZSpnBx.setEnabled(self.fixedSigZ.isChecked())
self.intZSpnBx.setEnabled(self.fixedIntZ.isChecked())
self.gammaZSpnBx.setEnabled(self.fixedGammaZ.isChecked())
if self.parent.extraPeakChkBx.isChecked():
self.zlineSpnBxEP.setEnabled(self.fixedZlineEP.isChecked())
self.sigZSpnBxEP.setEnabled(self.fixedSigZEP.isChecked())
self.intZSpnBxEP.setEnabled(self.fixedIntZEP.isChecked())
self.gammaZSpnBxEP.setEnabled(self.fixedGammaZEP.isChecked())
def skeletalChanged(self, name, elem):
"""
        Reset all Z-line related parameters and re-process the image
"""
bioImg = self.parent.bioImg
if bioImg is None or self.syncUI:
return
self.log_changes(name, elem, prefix='(skeletal)')
def hideGamma(self, flag):
"""
Hide gamma settings if model is not Voigt
"""
self.gammaSpinBx.setHidden(flag)
self.fixGamma.setHidden(flag)
self.gammaZSpnBx.setHidden(flag)
self.fixedGammaZ.setHidden(flag)
self.gammaZSpnBxEP.setHidden(flag)
self.fixedGammaZEP.setHidden(flag)
def getFittingSettings(self, first_run=False):
"""
Get All settings that are necessary for EquatorImage to process
:return:
"""
settings = {}
side = self.side
if first_run: # To check the boxes when no cache and first img
self.fixSigmaS.setChecked(True)
self.sigmaSSpinBx.setEnabled(True)
self.fixSigmaC.setChecked(True)
self.sigmaCSpinBx.setEnabled(True)
# get all locked parameters
if self.fixSigmaC.isChecked():
settings[side+'_fix_sigmac'] = self.sigmaCSpinBx.value()
else:
settings[side + '_sigmac'] = self.sigmaCSpinBx.value()
if self.fixSigmaD.isChecked():
settings[side+'_fix_sigmad'] = self.sigmaDSpinBx.value()
else:
settings[side + '_sigmad'] = self.sigmaDSpinBx.value()
if self.fixSigmaS.isChecked():
settings[side+'_fix_sigmas'] = self.sigmaSSpinBx.value()
else:
settings[side + '_sigmas'] = self.sigmaSSpinBx.value()
if not self.gammaSpinBx.isHidden():
if self.fixGamma.isChecked():
settings[side+'_fix_gamma'] = self.gammaSpinBx.value()
else:
settings[side + '_gamma'] = self.gammaSpinBx.value()
if self.parent.skeletalChkBx.isChecked():
if self.fixedIntZ.isChecked():
settings[side+'_fix_intz'] = self.intZSpnBx.value()
if self.fixedSigZ.isChecked():
settings[side+'_fix_sigz'] = self.sigZSpnBx.value()
if self.fixedZline.isChecked():
settings[side+'_fix_zline'] = self.zlineSpnBx.value()
if self.fixedGammaZ.isChecked():
settings[side + '_fix_gammaz'] = self.gammaZSpnBx.value()
if self.parent.extraPeakChkBx.isChecked():
if self.fixedIntZEP.isChecked():
settings[side+'_fix_intz_EP'] = self.intZSpnBxEP.value()
if self.fixedSigZEP.isChecked():
settings[side+'_fix_sigz_EP'] = self.sigZSpnBxEP.value()
if self.fixedZlineEP.isChecked():
settings[side+'_fix_zline_EP'] = self.zlineSpnBxEP.value()
if self.fixedGammaZEP.isChecked():
settings[side + '_fix_gammaz_EP'] = self.gammaZSpnBxEP.value()
return settings
def init_logging(self):
"""
Initialize the logging
"""
for objName in self.editableVars:
self.editableVars[objName] = self.findChild(QAbstractSpinBox, objName).value()
#print(self.side, self.editableVars)
def write_log(self, msg):
"""
Write the log
"""
if hasattr(self.parent.__class__, 'write_log') and \
callable(getattr(self.parent.__class__, 'write_log')):
self.parent.write_log(self.side + ' ' + msg)
def log_changes(self, name, obj, prefix=''):
"""
        Log a change to a tracked variable when its value differs from the stored one
"""
newValue = obj.value()
varName = obj.objectName()
if self.editableVars[varName] == newValue:
return
self.write_log(f'{prefix}{name}Changed: {self.editableVars[varName]} -> {newValue}')
self.editableVars[varName] = newValue
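# Hedged illustration (not part of the original widget): the dict returned by
# getFittingSettings() uses side-prefixed keys, where the '_fix_' infix marks parameters
# locked to the spin-box value and plain keys carry initial guesses for free parameters.
# The values below are made-up numbers for a hypothetical 'left' side.
def _example_fitting_settings():
    return {
        'left_fix_sigmac': 1.0,    # sigma C checkbox ticked, so locked during fitting
        'left_sigmad': 5.0,        # sigma D free, value is only a starting point
        'left_fix_zline': 120.0,   # skeletal Z-line centre locked
        'left_fix_intz_EP': 50.0,  # extra-peak intensity locked
    }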
|
total_cost = 1 + 3 * 4
print(total_cost)
total_cost = 1 + (3 * 4)
print(total_cost)
print("BODMAS Rule")
|
#!/usr/bin/env python3
# Copyright (c) 2021 Mahdi Biparva, mahdi.biparva@gmail.com
# miTorch: Medical Imaging with PyTorch
# Deep Learning Package for 3D medical imaging in PyTorch
# Implemented by Mahdi Biparva, April 2021
# Brain Imaging Lab, Sunnybrook Research Institute (SRI)
import sys
import torch
import numbers
import collections
import numpy as np
import utils.k_space_motion as ks_motion
from data.utils_ext import _is_tensor_image_volume
if sys.version_info < (3, 3):
Sequence = collections.Sequence
Iterable = collections.Iterable
else:
Sequence = collections.abc.Sequence
Iterable = collections.abc.Iterable
def crop(volume, k, i, j, d, h, w):
"""
Args:
volume (torch.tensor): Image volume to be cropped. Size is (C, T, H, W)
k (int): k in (k,i,j) i.e coordinates of the back upper left corner.
i (int): i in (k,i,j) i.e coordinates of the back upper left corner.
j (int): j in (k,i,j) i.e coordinates of the back upper left corner.
d (int): Depth of the cropped region.
h (int): Height of the cropped region.
w (int): Width of the cropped region.
"""
assert _is_tensor_image_volume(volume)
return volume[..., k:k + d, i:i + h, j:j + w]
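# Hedged usage sketch (not part of the original module, and it assumes
# _is_tensor_image_volume accepts any 4D torch tensor): crop() simply slices the last
# three axes, so a (C, D, H, W) tensor keeps its channel axis. The tensor is random and
# only used to illustrate the resulting shape.
def _demo_crop():
    vol = torch.rand(1, 8, 64, 64)
    patch = crop(vol, 2, 10, 10, 4, 32, 32)
    assert patch.shape == (1, 4, 32, 32)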
def resize(volume, target_size, interpolation_mode, min_side=True, ignore_depth=False):
r"""
Resize the image volume using the target size and interpolation mode.
It uses the torch.nn.functional.interpolate function.
Args:
volume (torch.tensor): the image volume
        target_size (int or Tuple[int, int, int]): the target (depth, height, width); an
            int scales the selected side to that length, preserving the aspect ratio
        min_side (bool): when target_size is an int, match the minimum side (True) or the
            maximum side (False)
interpolation_mode (str): algorithm used for upsampling:
``'nearest'`` | ``'linear'`` | ``'bilinear'`` | ``'bicubic'`` |
``'trilinear'`` | ``'area'``. Default: ``'nearest'``
ignore_depth (bool): Ignore resizing in the depth dimension when size is int
Returns:
volume (torch.tensor): Resized volume. Size is (C, T, H, W)
"""
assert isinstance(target_size, int) or len(target_size) == 3, "target size must be int or " \
"tuple (depth, height, width)"
    assert isinstance(min_side, bool), "min_side must be bool"
    assert isinstance(ignore_depth, bool), "ignore_depth must be bool"
    if isinstance(target_size, Sequence) and len(target_size) == 3 and ignore_depth:
        print('warning: ignore_depth only applies when target_size is an int')
if isinstance(target_size, int):
_, d, h, w = volume.shape
dim_min = min(d, h, w) if min_side else max(d, h, w)
if dim_min == target_size:
return volume
if dim_min == w:
ow = target_size
oh = int(target_size * h / w)
od = int(target_size * d / w) if not ignore_depth else d
elif dim_min == h:
oh = target_size
ow = int(target_size * w / h)
od = int(target_size * d / h) if not ignore_depth else d
else:
od = target_size
ow = int(target_size * w / d)
oh = int(target_size * h / d)
target_size = (od, oh, ow)
if interpolation_mode == 'nearest':
return torch.nn.functional.interpolate(
volume.unsqueeze(dim=0),
size=target_size,
mode=interpolation_mode,
).squeeze(dim=0)
else:
return torch.nn.functional.interpolate(
volume.unsqueeze(dim=0),
size=target_size,
mode=interpolation_mode,
align_corners=False
).squeeze(dim=0)
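# Hedged worked example (not part of the original module): with target_size=64 and
# min_side=True, a (1, 32, 128, 256) volume has its smallest side (d=32) mapped to 64, and
# the other sides scale by the same factor, mirroring the integer arithmetic in resize().
def _demo_resize_target_math():
    d, h, w = 32, 128, 256
    target = 64
    od, oh, ow = target, int(target * h / d), int(target * w / d)
    assert (od, oh, ow) == (64, 256, 512)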
def resized_crop(volume, k, i, j, d, h, w, size, interpolation_mode="bilinear"):
"""
Do spatial cropping and resizing to the image volume
Args:
volume (torch.tensor): Image volume to be cropped. Size is (C, T, H, W)
k (int): k in (k,i,j) i.e coordinates of the back upper left corner.
i (int): i in (k,i,j) i.e coordinates of the back upper left corner.
j (int): j in (k,i,j) i.e coordinates of the back upper left corner.
d (int): Depth of the cropped region.
h (int): Height of the cropped region.
w (int): Width of the cropped region.
        size (int or tuple(int, int, int)): target size passed to ``resize`` (depth, height, width)
interpolation_mode (str): algorithm used for upsampling:
``'nearest'`` | ``'linear'`` | ``'bilinear'`` | ``'bicubic'`` |
``'trilinear'`` | ``'area'``. Default: ``'nearest'``
Returns:
volume (torch.tensor): Resized and cropped volume. Size is (C, T, H, W)
"""
assert _is_tensor_image_volume(volume), "volume should be a 4D torch.tensor"
volume = crop(volume, k, i, j, d, h, w)
volume = resize(volume, size, interpolation_mode)
return volume
# noinspection PyTypeChecker
def center_crop(volume, crop_size):
assert _is_tensor_image_volume(volume), "volume should be a 4D torch.tensor"
if not isinstance(crop_size, (numbers.Number, tuple)):
raise TypeError('Got inappropriate crop_size arg')
if isinstance(crop_size, Sequence) and not len(crop_size) == 3:
raise ValueError("crop_size must be an int or 3 element tuple, not a " +
"{} element tuple".format(len(crop_size)))
if isinstance(crop_size, numbers.Number):
crop_size = tuple([int(crop_size)]*3)
d, h, w = volume.shape[1:]
td, th, tw = crop_size
assert d >= td and h >= th and w >= tw, "depth, height and width must not be smaller than crop_size"
k = int(round((d - td) / 2.0))
i = int(round((h - th) / 2.0))
j = int(round((w - tw) / 2.0))
return crop(volume, k, i, j, td, th, tw)
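# Hedged worked example (not part of the original module): for a (1, 10, 64, 64) volume and
# crop_size 8, the centre offsets are k=1, i=28, j=28, matching the round((dim - crop) / 2)
# arithmetic used in center_crop() above.
def _demo_center_crop_offsets():
    d, h, w = 10, 64, 64
    td, th, tw = 8, 8, 8
    k = int(round((d - td) / 2.0))
    i = int(round((h - th) / 2.0))
    j = int(round((w - tw) / 2.0))
    assert (k, i, j) == (1, 28, 28)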
def to_tensor(volume):
"""
    Ensure the tensor has float data type and permute the dimensions of the volume tensor
    Args:
        volume (torch.tensor, dtype=torch.float32): Size is (T, H, W, C)
    Return:
        volume (torch.tensor, dtype=torch.float): Size is (C, T, H, W)
"""
_is_tensor_image_volume(volume)
if not volume.dtype == torch.float32:
raise TypeError("volume tensor should have data type torch.float32. Got %s" % str(volume.dtype))
return volume.permute(3, 0, 1, 2)
def normalize(volume, mean, std, inplace=False):
"""
Args:
volume (torch.tensor): Image volume to be normalized. Size is (C, T, H, W)
mean (tuple): pixel RGB mean. Size is (3)
std (tuple): pixel standard deviation. Size is (3)
inplace (bool): inplace operation
Returns:
normalized volume (torch.tensor): Size is (C, T, H, W)
"""
assert _is_tensor_image_volume(volume), "volume should be a 4D torch.tensor"
if not inplace:
volume = volume.clone()
mean = torch.as_tensor(mean, dtype=volume.dtype, device=volume.device)
std = torch.as_tensor(std, dtype=volume.dtype, device=volume.device)
volume.sub_(mean[:, None, None, None]).div_(std[:, None, None, None])
return volume
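# Hedged usage sketch (not part of the original module): mean/std are per-channel and are
# broadcast over (T, H, W) via the [:, None, None, None] indexing in normalize() above.
# Random three-channel data, purely to illustrate shapes.
def _demo_normalize():
    vol = torch.rand(3, 4, 8, 8)
    out = normalize(vol, mean=(0.5, 0.5, 0.5), std=(0.25, 0.25, 0.25))
    assert out.shape == vol.shape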
def normalize_minmax(volume, max_div, inplace=False):
"""
Args:
volume (torch.tensor): Image volume to be normalized. Size is (C, T, H, W)
        max_div (bool): whether to divide by the per-channel maximum so values are scaled to at most one.
inplace (bool): inplace operation
Returns:
normalized volume (torch.tensor): Size is (C, T, H, W)
"""
assert _is_tensor_image_volume(volume), "volume should be a 4D torch.tensor"
if not inplace:
volume = volume.clone()
volume_reshaped = volume.reshape(volume.size(0), -1)
minimum = volume_reshaped.min(1)[0]
volume.sub_(minimum[:, None, None, None])
maximum = volume_reshaped.max(1)[0] - minimum
maximum[maximum < 1.0] = 1.0
if max_div and not (maximum == 1.0).all():
volume.div_(maximum[:, None, None, None])
return volume
def flip(volume, dim=3):
"""
Args:
volume (torch.tensor): Image volume to be normalized. Size is (C, T, H, W)
dim (int): the axis to flip the volume over it.
Returns:
flipped volume (torch.tensor): Size is (C, T, H, W)
"""
assert _is_tensor_image_volume(volume), "volume should be a 4D torch.tensor"
return volume.flip(dim)
def pad(volume, padding, fill=0, padding_mode='constant'):
r"""Pad the given Tensor volume on all sides with specified padding mode and fill value.
Args:
volume (Torch Tensor): Volume to be padded.
        padding (int or tuple): Padding on each border. A single int pads all borders
            equally. Tuples follow torch.nn.functional.pad and are consumed from the last
            dimension backwards: length 2 pads (W_left, W_right), length 4 pads
            (W_left, W_right, H_top, H_bottom) and length 6 pads
            (W_left, W_right, H_top, H_bottom, D_front, D_back).
fill: Pixel fill value for constant fill. Default is 0. If a tuple of
length 3, it is used to fill R, G, B channels respectively.
This value is only used when the padding_mode is constant
padding_mode: Type of padding. Should be: 'constant', 'reflect', 'replicate' or 'circular'. Default is constant.
check torch.nn.functional.pad for further details ### Deprecated - check np.pad
Returns:
Torch Tensor: Padded volume.
"""
_is_tensor_image_volume(volume)
if not isinstance(padding, (numbers.Number, tuple)):
raise TypeError('Got inappropriate padding arg')
if not isinstance(fill, (numbers.Number, str, tuple)):
raise TypeError('Got inappropriate fill arg')
if not isinstance(padding_mode, str):
raise TypeError('Got inappropriate padding_mode arg')
if isinstance(padding, Sequence) and len(padding) not in [2, 4, 6]:
raise ValueError("Padding must be an int or a 2, 4, or 6 element tuple, not a " +
"{} element tuple".format(len(padding)))
assert padding_mode in ['constant', 'reflect', 'replicate', 'circular'], \
'Padding mode should be either constant, reflect, replicate or circular'
if isinstance(padding, int):
padding = [padding]*6
return torch.nn.functional.pad(volume, padding, mode=padding_mode, value=fill)
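# Hedged usage sketch (not part of the original module): torch.nn.functional.pad consumes
# the padding tuple from the last dimension backwards, so for a (C, D, H, W) volume a
# 6-element tuple is (W_left, W_right, H_top, H_bottom, D_front, D_back); a single int is
# expanded to all six entries by pad() above.
def _demo_pad():
    vol = torch.zeros(1, 4, 8, 8)
    out = pad(vol, (1, 1, 2, 2, 3, 3))
    assert out.shape == (1, 4 + 6, 8 + 4, 8 + 2)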
# For more information check: https://github.com/scikit-image/scikit-image/blob/master/skimage/exposure/exposure.py
def scale_tensor_intensity(volume, input_range, output_range):
def within_range(x):
return volume.min().item() <= x <= volume.max().item()
assert isinstance(volume, torch.Tensor), 'only accept torch tensors'
assert isinstance(input_range, (tuple, list)), 'input_range must be either tuple or list'
assert isinstance(output_range, (tuple, list)), 'output_range must be either tuple or list'
assert len(input_range) == 2, 'len of input_range must be two'
assert len(output_range) == 2, 'len of output_range must be two'
assert all(map(within_range, input_range)), 'input_range values must be in [0, 1]'
assert all(map(within_range, output_range)), 'output_range values must be in [0, 1]'
in_lower, in_upper = tuple(map(float, input_range))
out_lower, out_upper = tuple(map(float, output_range))
volume = volume.clamp(in_lower, in_upper)
if in_lower == in_upper:
return volume.clamp(out_lower, out_upper)
else:
volume = (volume - in_lower) / (in_upper - in_lower)
return volume * (out_upper - out_lower) + out_lower
def gamma_correction(volume, gamma):
assert isinstance(gamma, float), 'gamma must be float'
assert 0 < gamma, 'gamma must be greater than zero'
in_lower, in_upper = volume.min().item(), volume.max().item()
vol_range = in_upper - in_lower
volume = (volume - in_lower) / vol_range
volume = volume ** gamma
return volume * vol_range + in_lower
def log_correction(volume, inverse):
assert isinstance(inverse, bool), 'inverse must be bool'
in_lower, in_upper = volume.min().item(), volume.max().item()
vol_range = in_upper - in_lower
volume = (volume - in_lower) / vol_range
if inverse:
volume = (2 ** volume - 1)
else:
volume = np.log2(1 + volume)
return volume * vol_range + in_lower
def sigmoid_correction(volume, inverse, gain, cutoff):
assert isinstance(inverse, bool), 'inverse must be bool'
in_lower, in_upper = volume.min().item(), volume.max().item()
vol_range = in_upper - in_lower
volume = (volume - in_lower) / vol_range
if inverse:
volume = 1 - 1 / (1 + np.exp(gain * (cutoff - volume)))
else:
volume = 1 / (1 + np.exp(gain * (cutoff - volume)))
return volume * vol_range + in_lower
def histogram(volume, num_bins=256, is_normalized=False):
hist, bin_edges = np.histogram(volume.numpy().flatten(), bins=num_bins)
bin_centers = (bin_edges[:-1] + bin_edges[1:]) / 2
if is_normalized:
hist = hist / np.sum(hist)
return hist, bin_centers
def cumulative_distribution(volume, num_bins=256):
hist, bin_centers = histogram(volume, num_bins)
img_cdf = hist.cumsum()
img_cdf = img_cdf / img_cdf[-1]
return img_cdf, bin_centers
def equalize_hist(volume, num_bins=256):
in_lower, in_upper = volume.min().item(), volume.max().item()
vol_range = in_upper - in_lower
cdf, bin_centers = cumulative_distribution(volume, num_bins)
volume_int = np.interp(volume.numpy().flatten(), bin_centers, cdf)
volume_int = volume_int.reshape(volume.shape)
volume_int = torch.from_numpy(volume_int)
return volume_int * vol_range + in_lower
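# Hedged usage sketch (not part of the original module): equalize_hist() maps intensities
# through the empirical CDF (np.interp against the histogram bin centres) and then rescales
# back to the original intensity range, so the output keeps the input shape and roughly the
# input range. Random data, CPU tensor as required by the .numpy() calls above.
def _demo_equalize_hist():
    vol = torch.rand(1, 4, 16, 16)
    out = equalize_hist(vol, num_bins=64)
    assert out.shape == vol.shape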
# For more information check: https://github.com/dipy/dipy/blob/master/dipy/sims/voxel.py
def additive_noise(volume, sigma, noise_type='rician', out_of_bound_mode='normalize'):
assert out_of_bound_mode in ('normalize', 'clamp',), 'undefined out_of_bound_mode'
noise_function = {
'gaussian': lambda x, n1, n2: x + n1,
'rician': lambda x, n1, n2: np.sqrt((x + n1) ** 2 + n2 ** 2),
'rayleigh': lambda x, n1, n2: x + np.sqrt(n1 ** 2 + n2 ** 2),
}
in_lower, in_upper = volume.min().item(), volume.max().item()
vol_range = in_upper - in_lower
volume = (volume - in_lower) / vol_range
sigma = volume.std() * sigma
noise_one = np.random.normal(0, sigma, size=volume.shape)
noise_two = np.random.normal(0, sigma, size=volume.shape)
volume = noise_function[noise_type](volume, noise_one, noise_two)
if out_of_bound_mode == 'normalize':
noise_in_lower, noise_in_upper = volume.min().item(), volume.max().item()
noise_vol_range = noise_in_upper - noise_in_lower
volume = (volume - noise_in_lower) / noise_vol_range
elif out_of_bound_mode == 'clamp':
volume = volume.clamp(0, 1)
else:
raise NotImplementedError
volume = volume * vol_range + in_lower
return volume
def one_hot(labels: torch.Tensor, num_classes: int, dtype: torch.dtype, dim: int,
ignore_background=True):
"""
    This converts a categorical annotation tensor to a one-hot annotation tensor.
It is adapted from MONAI at the link below:
Reference:
https://github.com/Project-MONAI/MONAI/blob/09f39dcb84092b07cda480c99644f9f7f8cceab6/monai/networks/utils.py#L24
Args:
labels: the input label tensor to convert
num_classes: number of classes
dtype: dtype to return the output tensor
dim: where to put the new dimension for labels
        ignore_background: drops the first channel (background). Assumes index 0 is background.
Returns: one-hot tensor
"""
assert labels.dim() > 0, "labels should have dim of 1 or more."
shape_tensor = list(labels.shape)
    assert shape_tensor[dim] == 1, "labels should have a channel dimension of length one."
shape_tensor[dim] = num_classes
labels_one_hot = torch.zeros(size=shape_tensor, dtype=dtype, device=labels.device)
labels = labels_one_hot.scatter_(dim=dim, index=labels.long(), value=1)
if ignore_background:
keep_ind = torch.tensor(range(1, num_classes)) # always assumes index 0 is background
labels = labels.index_select(dim=dim, index=keep_ind)
return labels
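# Hedged usage sketch (not part of the original module): a categorical label volume with a
# singleton channel at dim=0 becomes one channel per class; with ignore_background=True the
# background channel (index 0) is dropped, leaving num_classes - 1 channels.
def _demo_one_hot():
    labels = torch.randint(0, 3, size=(1, 2, 4, 4))
    out = one_hot(labels, num_classes=3, dtype=torch.float32, dim=0, ignore_background=True)
    assert out.shape == (2, 2, 4, 4)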
def k_space_motion_artifact(volume, time, **kwargs):
"""
Args:
volume (torch.Tensor): Volume to be transformed and resampled. Must be 4D
with a channel dimension i.e. (C, D, H, W).
time (float): Time at which the motion occurs during scanning. Should be between [0.5, 1), where 0
represents the beginning of the scan and 1 represents the end. Time >= 0.5 assures that the
most prominent object in the image is in the original position of the image so that ground truth
annotations don't need to be adjusted.
Returns:
volume (torch.Tensor): Motion-artifacted image. Shape is the same as the input.
"""
assert _is_tensor_image_volume(volume), 'volume should be a 4D torch.Tensor with a channel dimension'
    assert isinstance(time, float), 'time must be a float in [0.5, 1.0).'
    assert 0.5 <= time < 1.0, 'time must be a float in [0.5, 1.0).'
return ks_motion.apply_motion_from_affine_params(volume, time, **kwargs)
|
#!/usr/bin/python
import os, sys, getopt, ConfigParser, logging, commands
def usage(supported):
print "\n"
print "glite-wn-info [-c <configfile>] [-h] [-v] -n <name>"
print "glite-wn-info [--config <configfile>] [--help] [--verbose] --name <name>"
print "\t-c <configfile>"
print "\t\tSpecify a configuration file. Default search path is"
print "\t\t/etc/glite-wn-info.conf, $GLITE_LOCATION/etc/glite-wn-info.conf"
print "\t\tand /opt/glite/etc/glite-wn-info.conf."
print "\t-h\tPrint this help."
print "\t-v\tBe verbose."
print "\t-n <name>\tA supported value for the WN to return."
print "\t\t\tOne of:"
for v in supported:
print "\t\t\t -n %s" % v
print "\nExample: Return the GlueSubClusterUniqueId for this node"
print "\tglite-wn-info -n GlueSubClusterUniqueId\n"
def main():
# Set Up Defaults
logging.basicConfig()
log = logging.getLogger("glite-wn-info")
supported = ['GlueSubClusterUniqueId']
defconfig = ['/etc/glite-wn-info.conf']
if os.environ.get('GLITE_LOCATION'):
defconfig.append('%s/etc/glite-wn-info.conf' % os.environ['GLITE_LOCATION'])
defconfig.append('/opt/glite/etc/glite-wn-info.conf')
    # Loop over the options
try:
opts, args = getopt.getopt(sys.argv[1:], "c:hvn:", ["config=","help","verbose","name="])
except getopt.GetoptError, err:
log.exception(str(err))
usage(supported)
sys.exit(2)
for o,a in opts:
if o in ("-h","--help"):
usage(supported)
sys.exit()
elif o in ("-v","--verbose"):
log.setLevel(logging.DEBUG)
elif o in ("-c","--config"):
conf = a
elif o in ("-n","--name"):
name = a
else:
assert False, "unhandled option"
# Check the -n flag is set correctly.
try:
name
supported.index(name)
except:
log.exception("A predefined name must be specified to be retrived with the -n flag.")
usage(supported)
sys.exit(2)
# Decide which configuration file to use.
try:
conf
config = conf
log.debug("Using specified configuration file %s." % config)
except:
log.debug("Using default search path for configuration file of")
log.debug(defconfig)
for f in defconfig:
log.debug("Looking for file %s" % f)
if os.path.isfile(f):
config = f
log.debug("%s configuration file found and will be used" % config)
break
else:
log.debug("%s configuration file not present" % f)
try:
config
except:
log.exception("No configuration file could be found")
usage(supported)
sys.exit(2)
    # Open the configuration file.
try:
cfg = ConfigParser.ConfigParser()
cfg.readfp(open(config))
except:
log.exception("Error opening or parsing %s" % config)
sys.exit(2)
try:
dynamic = cfg.get('dynamic',name) ;
except:
log.debug("No dynamic value set for '%s' in configuration file." % name)
else:
log.debug("Running dynamic command '%s'" % dynamic)
(stat,out) = commands.getstatusoutput(dynamic)
if stat:
log.debug("Dynamic command '%s' failed with non-zero return code" % dynamic)
else:
log.debug("Dynamic value found, '%s', returning it" % out)
print out
sys.exit(0)
try:
static = cfg.get('static',name) ;
except:
log.exception("No static value set for '%s'." % name)
sys.exit(2)
else:
log.debug("Static value present in configuration for '%s'" % name)
log.debug("Value to be returned is '%s'" % static)
print static
sys.exit(0)
# Start the program.
if __name__ == "__main__":
main()
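# Hedged example configuration (the section/option layout below is inferred from the
# cfg.get('dynamic', name) and cfg.get('static', name) calls above, not taken from any
# shipped glite-wn-info.conf). Dynamic entries are shell commands whose output is returned;
# static entries are returned verbatim when no dynamic value succeeds.
#
#   [dynamic]
#   GlueSubClusterUniqueId = hostname -f
#
#   [static]
#   GlueSubClusterUniqueId = my-subcluster.example.org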
|
from __future__ import division
import numpy as np
import scipy.spatial as spatial
import sys
import time
import os
import pandas as pd
from joblib import Memory, Parallel, delayed
# from https://pypi.python.org/pypi/bintrees/2.0.2
from bintrees import FastAVLTree, FastBinaryTree
# from https://github.com/juhuntenburg/brainsurfacescripts
from vtk_rw import read_vtk, write_vtk
from graphs import graph_from_mesh
from simplification import add_neighbours, find_voronoi_seeds, competetive_fast_marching
from utils import log, tupler
import pdb
'''
Maps vertices from a simplified mesh to the closest corresponding
vertices of the original mesh using a KDTree. These vertices are
then used as seeds for a Voronoi tessellation of the complex mesh which is
implemented as a competitive fast marching in a balanced binary (AVL) tree.
A mapping is created which associates each vertex of the complex mesh
with the closest vertex of the simple mesh.
-----------------------------------------------------------------------------
Binary search trees: https://en.wikipedia.org/wiki/Binary_search_tree
Balanced binary trees: https://en.wikipedia.org/wiki/AVL_tree,
Using them as heaps: http://samueldotj.com/blog/?s=binary+tree
Implementation used: https://pypi.python.org/pypi/bintrees/2.0.2 (cython version)
'''
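# Hedged sketch (not part of the original pipeline, which uses find_voronoi_seeds from
# simplification): the nearest-neighbour step described above can be approximated with a
# KDTree query from the simplified vertices into the complex mesh; the returned indices
# would then serve as Voronoi seeds. Duplicate handling and the inaccuracy bookkeeping of
# the real implementation are omitted here.
def nearest_vertex_sketch(simple_v, complex_v):
    tree = spatial.cKDTree(complex_v)
    dist, idx = tree.query(simple_v, k=1)
    return idx, dist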
# main function for looping over subject and hemispheres
def create_mapping((sub, hemi)):
print sub, hemi
complex_file = '/scr/ilz3/myelinconnect/struct/surf_%s/orig/mid_surface/%s_%s_mid.vtk'
simple_file = '/scr/ilz3/myelinconnect/groupavg/indv_space/%s/lowres_%s_d_def.vtk'# version d
log_file = '/scr/ilz3/myelinconnect/all_data_on_simple_surf/labels/logs/log_worker_%s.txt'%(str(os.getpid()))
seed_file = '/scr/ilz3/myelinconnect/all_data_on_simple_surf/seeds/%s_%s_highres2lowres_seeds.npy'
label_file = '/scr/ilz3/myelinconnect/all_data_on_simple_surf/labels/%s_%s_highres2lowres_labels.npy'
surf_label_file = '/scr/ilz3/myelinconnect/all_data_on_simple_surf/labels/%s_%s_highres2lowres_labels.vtk'
# load the meshes
log(log_file, 'Processing %s %s'%(sub, hemi))
log(log_file, '...loading data', logtime=False)
complex_v, complex_f, complex_d = read_vtk(complex_file%(hemi, sub, hemi))
simple_v, simple_f, simple_d = read_vtk(simple_file%(sub, hemi))
# find those points on the individuals complex mesh that correspond best
# to the simplified group mesh in subject space
log(log_file, '...finding unique voronoi seeds')
    try:
        voronoi_seed_idx = np.load(seed_file%(sub, hemi))
        voronoi_seed_coord = complex_v[voronoi_seed_idx]
        inaccuracy = None
    except IOError:
        voronoi_seed_idx, inaccuracy, log_file = find_voronoi_seeds(simple_v,
                                                                    simple_f,
                                                                    complex_v,
                                                                    complex_f,
                                                                    log_file=log_file)
        np.save(seed_file%(sub, hemi), voronoi_seed_idx)
        # find coordinates of those points in the highres mesh
        voronoi_seed_coord = complex_v[voronoi_seed_idx]
    # double check differences (only possible when the seeds were computed in this run)
    if inaccuracy is not None:
        log(log_file, '...checking unique vs nearest mapping')
        dist = np.linalg.norm((voronoi_seed_coord - simple_v), axis=1)
        if np.mean(dist) - np.mean(inaccuracy[:, 0]) > 0.1:
            log(log_file, 'Unique seeds very far from nearest seeds! %f'%(np.mean(dist)-np.mean(inaccuracy[:,0])))
# convert highres mesh into graph containing edge length
log(log_file, '...creating graph')
complex_graph = graph_from_mesh(complex_v, complex_f, edge_length=True)
# find the actual labels
log(log_file, '...competetive fast marching')
labels = competetive_fast_marching(complex_v, complex_graph, voronoi_seed_idx)
# write out labelling file and surface with labels
log(log_file, '...saving data')
np.save(label_file%(sub, hemi), labels)
write_vtk(surf_label_file%(sub, hemi), complex_v, complex_f,
data=labels[:,1, np.newaxis])
log(log_file, 'Finished %s %s'%(sub, hemi))
return log_file
#create_mapping(('BP4T','rh'))
if __name__ == "__main__":
#cachedir = '/scr/ilz3/myelinconnect/working_dir/complex_to_simple/'
#memory = Memory(cachedir=cachedir)
subjects = pd.read_csv('/scr/ilz3/myelinconnect/subjects.csv')
subjects=list(subjects['DB'])
subjects.remove('KSMT')
hemis = ['rh', 'lh']
Parallel(n_jobs=16)(delayed(create_mapping)(i)
for i in tupler(subjects, hemis))
|
"""
Use matplotlib.pyplot.plot to draw the graphs of f(x) = (e^(-x/10))*sin(πx) and g(x) = x*e^(-x/3) over the interval [0, 10] on the same figure. Include the x axis, the y axis, and a legend labelling each function's curve. Save the plot to a file plot.jpg (JPEG).
"""
import os
import matplotlib.pyplot as plt
import numpy as np
import utils
def f_func(x):
func_name = "$\\exp(-x/10) * \\sin(\pi x)$"
func = np.exp(-x / 10) * np.sin(np.pi * x)
return func_name, func
def g_func(x):
func_name = "$x * \\exp(-x/3)$"
func = x * np.exp(-x / 3)
return func_name, func
def plot_multi_functions(functions, output_path="./Ex5_Output/plot.jpg"):
for func_name, (x,y) in functions.items():
print("Plot Func name : {}".format(func_name))
plt.plot(x, y, label=func_name)
plt.legend()
plt.title("Plot multi functions")
plt.xlabel("x")
plt.ylabel("y")
# Save figure to output path
dir_path = output_path[:output_path.rfind("/")]
utils.mkdirs(dir_path)
output_path = os.path.abspath(output_path)
print("Save file to {} done".format(output_path))
plt.savefig(output_path, dpi=200)
plt.show()
if __name__ == '__main__':
functions = {}
x = np.arange(0, 10, 0.1)
func_name, y = f_func(x)
functions.update({func_name: (x, y)})
func_name, y = g_func(x)
functions.update({func_name: (x, y)})
output_path = "./Ex5_Output/plot.jpg"
plot_multi_functions(functions, output_path)
|
import sys
reload(sys)
sys.path.append('./plugins/')
sys.setdefaultencoding('utf-8')
import bayes
import controller
import os
import sae
import web
import jieba
web.config.debug = True
urls = (
'/', 'Index'
)
app_root = os.path.dirname(__file__)
templates_root = os.path.join(app_root, 'templates')
render = web.template.render(templates_root)
class Index:
def GET(self):
return render.index()
def POST(self):
data = web.input()
words = data.queryword;
words_cut = controller.CutWords(words)
combine = controller.GetCategories(words_cut)
#combine = bayes.GetCategories(words_cut)
return render.queryword(combine)
app = web.application(urls, globals()).wsgifunc()
application = sae.create_wsgi_app(app)
|
import unittest
from katas.kyu_6.dashatize_it import dashatize
class DashatizeTestCase(unittest.TestCase):
def test_equal_1(self):
self.assertEqual(dashatize(274), '2-7-4')
def test_equal_2(self):
self.assertEqual(dashatize(5311), '5-3-1-1')
def test_equal_3(self):
self.assertEqual(dashatize(86320), '86-3-20')
def test_equal_4(self):
self.assertEqual(dashatize(974302), '9-7-4-3-02')
def test_equal_5(self):
self.assertEqual(dashatize(None), 'None')
def test_equal_6(self):
self.assertEqual(dashatize(0), '0')
def test_equal_7(self):
self.assertEqual(dashatize(-1), '1')
def test_equal_8(self):
self.assertEqual(dashatize(-28369), '28-3-6-9')
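# Hedged sketch (not the module under test): one implementation consistent with the
# assertions above: wrap each odd digit in dashes, collapse repeated dashes, strip the
# ends, treat None specially, and ignore the sign.
import re
def _dashatize_sketch(num):
    if num is None:
        return 'None'
    dashed = ''.join('-{}-'.format(d) if d in '13579' else d for d in str(abs(num)))
    return re.sub('-+', '-', dashed).strip('-')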
|
def calcCost(weight):
return (weight // 3) - 2
def recursiveFuelCost(fuel):
extraFuel = calcCost(fuel)
return 0 if extraFuel <= 0 else extraFuel + recursiveFuelCost(extraFuel)
def CalculateFuelCost(moduleArr):
modulesTotal = 0
additionalFuelTotal = 0
for mass in moduleArr:
moduleFuel = calcCost(mass)
modulesTotal += moduleFuel
additionalFuelTotal += recursiveFuelCost(moduleFuel)
return modulesTotal, additionalFuelTotal
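# Hedged check (not part of the original script), using the well-known example mass 1969:
# the direct fuel is 654 and the fuel-for-fuel chain adds 216 + 70 + 21 + 5 = 312,
# for a total of 966.
def _demo_fuel_example():
    modulesTotal, additionalFuel = CalculateFuelCost([1969])
    assert modulesTotal == 654
    assert additionalFuel == 312
    assert modulesTotal + additionalFuel == 966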
if __name__ == '__main__':
with open('input.txt') as f:
moduleArr = [int(val) for val in f]
modulesTotal, additionalFuel = CalculateFuelCost(moduleArr)
print('part 1: ' + str(modulesTotal))
print('part 2: ' + str(modulesTotal + additionalFuel))
|
# -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html
import json
from OwhatLab.utils.mysqlUtil import MysqlClient
class OwhatLabPipeline(object):
def process_item(self, item, spider):
return item
class OwhatLabUserPipeline(object):
def __init__(self):
#self.mysqlClient = MysqlClient(host='10.8.26.106', user='scrapy', password='Scrapy_123', database='test_kuaishou')
#self.mysqlClient = MysqlClient(host='10.8.26.23', user='hive', password='Hive_123', database='test_kuaishou')
#self.mysqlClient = MysqlClient(host='127.0.0.1', user='root', password='',
#database='test_owhatlab')
self.mysqlClient = MysqlClient(host='10.8.26.23', user='hive', password='Hive_123',
database='test_OwhatLab')
#print('connect mysql success!')
    # This method is triggered whenever the spider yields an item; the item argument is the
    # yielded object, so the scraped data can be processed here (further formatting, saving, etc.).
def process_item(self, item, spider):
#print(json.dumps(dict(item)))
self.mysqlClient.insertOneInfoRecord(item)
#print('insert success!')
return item
    # Triggered when the spider finishes; close files, database connections, etc. here.
def close_spider(self, spider):
self.mysqlClient.close()
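# Hedged illustration (belongs in the project's settings.py, not this file, and the dotted
# path below is an assumption about the project layout): the pipeline only runs once it is
# registered in ITEM_PIPELINES, for example:
#
#   ITEM_PIPELINES = {
#       'OwhatLab.pipelines.OwhatLabUserPipeline': 300,
#   }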
|
#!/usr/bin/python3
''' Establishing connection to database '''
from database_py.core import Getter
with Getter('test', 'test').credentials() as start:
start
|
from rply import LexerGenerator
from globs import *
lg = LexerGenerator()
lg.add('FLOAT', r'-?\d+\.\d+')
lg.add('INTEGER', r'-?\d+')
lg.add('NULL', r'null(?!\w)')
lg.add('STRING', '(""".*?""")|(".*?")|(\'.*?\')')
lg.add('PRINT', r'print(?!\w)')
lg.add('BOOLEAN', rf"{TRUE}(?!\w)|{FALSE}(?!\w)")
lg.add('IF', r'if(?!\w)')
lg.add('ELSE', r'else(?!\w)')
lg.add('AND', r"and(?!\w)")
lg.add('OR', r"or(?!\w)")
lg.add('NOT', r"not(?!\w)")
lg.add('WHILE', r'while(?!\w)')
lg.add('END', r'end(?!\w)')
lg.add('IDENTIFIER', "[a-zA-Z_][a-zA-Z0-9_]*")
lg.add('PLUS', r'\+')
lg.add('==', '==')
lg.add('COLON', ':')
lg.add('!=', '!=')
lg.add('>=', '>=')
lg.add('<=', '<=')
lg.add('>', '>')
lg.add('<', '<')
lg.add('=', '=')
lg.add('[', r'\[')
lg.add(']', r'\]')
lg.add('{', r'\{')
lg.add('}', r'\}')
lg.add(',', ',')
lg.add('DOT', r'\.')
lg.add('MINUS', '-')
lg.add('MUL', r'\*')
lg.add('DIV', '/')
lg.add('MOD', '%')
lg.add('(', r'\(')
lg.add(')', r'\)')
lg.add('NEWLINE', '\n')
lg.ignore(r'[ \t\r\f\v]+')
lg.ignore(r'#.*\n')
lexer = lg.build()
# with open('test.txt', 'r', encoding='utf-8') as f:
# text = f.read()
# tokens = lexer.lex(text)
# for token in tokens:
# print(token)
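# Hedged usage sketch (not part of the original module): lexing a short inline program
# instead of test.txt; rply's lexer.lex() returns an iterator of Token objects.
if __name__ == '__main__':
    demo_source = 'x = 1 + 2\nprint x\n'
    for token in lexer.lex(demo_source):
        print(token)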
|
import os,sys
from scipy.ndimage import zoom
import json
from .io import readImage,mkdir
from .seg import rgbToSeg
import numpy as np
import shutil,json
from imageio import imwrite
def readTileVolume(fns, z0p, z1p, y0p, y1p, x0p, x1p, tile_sz, tile_type = np.uint8,\
tile_st = [0, 0], tile_ratio = 1, tile_resize_mode = 1, tile_seg = False, tile_bd='reflect', tile_blank = '', volume_sz = None):
if not isinstance(tile_sz, (list,)):
tile_sz = [tile_sz, tile_sz]
if not isinstance(tile_ratio, (list,)):
tile_ratio = [tile_ratio, tile_ratio]
# [row,col]
# no padding at the boundary
# st: starting index 0 or 1
bd = None
if volume_sz is not None:
bd = [max(-z0p,0), max(0,z1p-volume_sz[0]),\
max(-y0p,0), max(0,y1p-volume_sz[1]),\
max(-x0p,0), max(0,x1p-volume_sz[2])]
z0, y0, x0 = max(z0p,0), max(y0p,0), max(x0p,0)
z1, y1, x1 = min(z1p,volume_sz[0]), min(y1p,volume_sz[1]), min(x1p,volume_sz[2])
else:
z0, y0, x0, z1, y1, x1 = z0p, y0p, x0p, z1p, y1p, x1p
result = np.zeros((z1 - z0, y1 - y0, x1 - x0), tile_type)
c0 = x0 // tile_sz[1] # floor
c1 = (x1 + tile_sz[1]-1) // tile_sz[1] # ceil
r0 = y0 // tile_sz[0]
r1 = (y1 + tile_sz[0]-1) // tile_sz[0]
z1 = min(len(fns)-1, z1)
for z in range(z0, z1):
pattern = fns[z]
for row in range(r0, r1):
for column in range(c0, c1):
if '%' in pattern:
filename = pattern % (row + tile_st[0], column + tile_st[1])
elif '{' in pattern:
filename = pattern.format(row=row + tile_st[0], column=column + tile_st[1])
else:
filename = pattern
if os.path.exists(filename):
patch = readImage(filename)
if tile_seg:
patch = rgbToSeg(patch)
if tile_ratio[0] != 1:
patch = zoom(patch, tile_ratio, order = tile_resize_mode)
# exception: last tile may not have the right size
psz = patch.shape
xp0 = column * tile_sz[1]
xp1 = min(xp0+psz[1], (column+1)*tile_sz[1])
yp0 = row * tile_sz[0]
yp1 = min(yp0+psz[0], (row+1)*tile_sz[0])
x0a = max(x0, xp0)
x1a = min(x1, xp1)
y0a = max(y0, yp0)
y1a = min(y1, yp1)
try:
result[z-z0, y0a-y0 : y1a-y0, x0a-x0 : x1a-x0] = \
patch[y0a-yp0 : y1a-yp0, x0a-xp0 : x1a-xp0]
except:
import pdb; pdb.set_trace()
else:
print('Non-exist: %s'%filename)
# blank case
if tile_blank != '':
blank_st = 0
blank_lt = result.shape[0]-1
while blank_st<= blank_lt and not np.any(result[blank_st]>0):
blank_st += 1
if blank_st == blank_lt+1:
print('!! This volume is all 0 !!')
else:
result[:blank_st] = result[blank_st:blank_st+1]
while blank_lt >= blank_st and not np.any(result[blank_lt]>0):
blank_lt -= 1
result[blank_lt:] = result[blank_lt-1:blank_lt]
for z in range(blank_st+1, blank_lt):
if not np.any(result[z]>0):
result[z] = result[z-1]
# boundary case
if bd is not None and max(bd)>0:
result = np.pad(result,
((bd[0], bd[1]),
(bd[2], bd[3]),
(bd[4], bd[5])),'reflect')
return result
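# Hedged worked example (not part of the original module): the tile bookkeeping above maps a
# requested window to tile rows/columns with floor and ceiling division. For 1024-px tiles
# and an x-range of [1000, 3000) the columns touched are 0, 1 and 2.
def _demo_tile_index_math():
    tile_w = 1024
    x0, x1 = 1000, 3000
    c0 = x0 // tile_w                 # first column (floor)
    c1 = (x1 + tile_w - 1) // tile_w  # one past the last column (ceil)
    assert (c0, c1) == (0, 3)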
def writeTileInfo(sz, numT, imN, tsz=1024, tile_st=[0,0],zPad=[0,0], im_id=None, outName=None,st=0,ndim=1,rsz=1,dt='uint8'):
# one tile for each section
# st: starting index
if im_id is None:
        im_id = list(range(zPad[0]+st, st, -1)) + list(range(st, sz[0]+st)) + list(range(sz[0]-2+st, sz[0]-zPad[1]-2+st, -1))
else: # st=0
if zPad[0]>0:
im_id = [im_id[x] for x in range(zPad[0],0,-1)]+im_id
if zPad[1]>0:
im_id += [im_id[x] for x in range(sz[0]-2,sz[0]-zPad[1]-2,-1)]
sec=[imN(x) for x in im_id]
out={'image':sec, 'depth':sz[0]+sum(zPad), 'height':sz[1], 'width':sz[2], "tile_st":tile_st,
'dtype':dt, 'n_columns':numT[1], 'n_rows':numT[0], "tile_size":tsz, 'ndim':ndim, 'tile_ratio':rsz}
if outName is None:
return out
else:
with open(outName,'w') as fid:
json.dump(out, fid)
|
from sqlalchemy import Column, Integer, String, Float, ForeignKey, DateTime, Table, Boolean
from sqlalchemy.orm import relationship
from settings.database import Base
from utils.models import DateAware
# from sales.models import Sale
# Create your model here.
class User(DateAware):
__tablename__ = 'users'
username = Column(String(100), unique=True, nullable=False)
password = Column(String(500), nullable=False)
is_superuser = Column(Boolean, default=False, nullable=False)
is_manager = Column(Boolean, default=False, nullable=False)
is_seller = Column(Boolean, default=True, nullable=False)
is_costumer = Column(Boolean, default=False, nullable=False)
is_active = Column(Boolean, default=True, nullable=False)
manager = relationship("Manager", uselist=False, back_populates="user")
seller = relationship("Seller", uselist=False, back_populates="user")
customer = relationship("Costumer", uselist=False, back_populates="user")
thumbnail = Column(String(256))
def __init__(self, username, password, is_superuser=False, is_manager=False, is_seller=True, is_costumer=False):
self.username = username
self.password = password
self.is_superuser = is_superuser
self.is_manager = is_manager
self.is_seller = is_seller
self.is_costumer = is_costumer
def __repr__(self):
return f'<User {self.username}>'
class Manager(DateAware):
__tablename__ = 'managers'
user_id = Column(Integer, ForeignKey('users.id'), nullable=False)
user = relationship("User", back_populates="manager")
def __init__(self, user_id):
self.user_id = user_id
class Seller(DateAware):
__tablename__ = 'sellers'
user_id = Column(Integer, ForeignKey('users.id'), nullable=False)
user = relationship("User", back_populates="seller")
# sale = relationship("Sale")
def __init__(self, user_id):
self.user_id = user_id
class Costumer(DateAware):
__tablename__ = 'costumers'
user_id = Column(Integer, ForeignKey('users.id'), nullable=False)
user = relationship("User", back_populates="customer")
def __init__(self, user_id):
self.user_id = user_id
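# Hedged sketch (not part of the original models): role flags are plain keyword arguments on
# User.__init__, so a manager account can be built directly; persisting it would additionally
# need a session from settings.database, which is project-specific and not shown here. This
# assumes the DateAware base imposes no required constructor arguments of its own.
def _demo_manager_user():
    user = User('alice', 'hashed-password', is_manager=True, is_seller=False)
    assert user.is_manager and not user.is_seller
    return user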
|
apart = [[101, 102, 103, 104, 105], [201, 202, 203, 204], [301, 302, 303, 304], [401, 402, 403, 404], [501]]
# Post a flyer on every unit, no matter whether the 2-D list shrinks or grows
# Nested for loops => iterate directly over the lists instead of using range
floor = 1  # floor number
for i in apart:
    for j in i:
        print("Flyer posted on unit %d" % j)
    print("All units on floor %d done\n" % floor)
floor += 1
|
from open_pension_crawler.OpenPensionCrawlSpiderBase import OpenPensionCrawlSpiderBase
class YlInvestSpider(OpenPensionCrawlSpiderBase):
name = 'yl-invest'
allowed_domains = ['yl-invest.co.il']
start_urls = ['http://www.yl-invest.co.il']
file_prefix = 'yl_'
    regex = r'[0-9]{9}_(b|g|p|m)[0-9]{4}_(01|02|03|04)[0-9]{2}\.(xlsx|xls)'
|