from animal import Animal
class Lion(Animal):
def __init__(self, name, age, habitat, heath_level, hapiness_level, melena):
super().__init__(name, age, habitat, heath_level, hapiness_level)
self.melena = melena
def eating(self, eat):
super().eating()
if eat < 5:
self.heath_level += 5
self.hapiness_level += 5
elif eat <= 20:
self.heath_level += 10
self.hapiness_level += 15
else:
self.heath_level += 20
self.hapiness_level += 25
return self
def display_info(self):
return super().display_info() + ('and I have a beautiful mane' if self.melena else 'and I have no mane')
"""
leon = Lion('simba', 5,'sabana', 50, 30, False)
print (leon.heath_level)
print (leon.hapiness_level)
leon.eating(10)
print (leon.heath_level)
print (leon.hapiness_level)
"""
|
# The following program is the solution to the Binary Search Tree Practice quiz of the Data Structures & Algorithms class by Grow with Google/Udacity
class Node(object):
def __init__(self, value):
self.value = value
self.left = None
self.right = None
class BST(object):
def __init__(self, root):
self.root = Node(root)
self.found = False
def insert(self, root, new_val):
    if root is None:
        return
    if new_val < root.value:
        if root.left is None:
            root.left = Node(new_val)
        else:
            self.insert(root.left, new_val)
    elif new_val > root.value:
        if root.right is None:
            root.right = Node(new_val)
        else:
            self.insert(root.right, new_val)
def search(self, find_val):
self.preorder_search(self.root,find_val)
answer = self.found
self.found = False
return answer
def preorder_search(self, root, find_val):
"""Helper method - use this to create a
recursive search solution."""
if root is None:
return
if root.value == find_val:
self.found = True
self.preorder_search(root.left,find_val)
self.preorder_search(root.right,find_val)
# Set up tree
tree = BST(4)
# Insert elements
tree.insert(tree.root,2)
tree.insert(tree.root,1)
tree.insert(tree.root,3)
tree.insert(tree.root,5)
# Check search
# Should be True
print(tree.search(4))
# Should be False
print(tree.search(6))
|
import os
from os.path import join, dirname
from dotenv import load_dotenv
from datetime import datetime
from functools import partial
from threading import Thread
from bokeh.models import ColumnDataSource
from bokeh.plotting import figure, curdoc
from tornado import gen
from proton.reactor import Container
from amqp.WeatherRecordReceiver import WeatherRecordReceiver
from dto.WeatherRecordDto import WeatherRecordDto
dotenv_path = join(dirname(__file__), '.env')
load_dotenv(dotenv_path)
data_source = ColumnDataSource({
'timestamps': [],
'temperature_values': [],
'humidity_values': [],
'air_pressure_values': [],
})
plot = figure(
title = "Dashboard for weather station #" + os.getenv('WEATHER_STATION_ID'),
plot_width = 1280,
tools="xpan,xwheel_zoom,xbox_zoom,reset",
x_axis_type = 'datetime'
)
plot.x_range.follow = "end"
plot.circle(x = 'timestamps', y = 'temperature_values', color = 'red', source = data_source)
plot.line(x = 'timestamps', y = 'temperature_values', color = 'red', source = data_source)
plot.circle(x = 'timestamps', y = 'humidity_values', color = 'blue', source = data_source)
plot.line(x = 'timestamps', y = 'humidity_values', color = 'blue', source = data_source)
plot.circle(x = 'timestamps', y = 'air_pressure_values', color = 'purple', source = data_source)
plot.line(x = 'timestamps', y = 'air_pressure_values', color = 'purple', source = data_source)
@gen.coroutine
def update_data(dto: WeatherRecordDto):
data_source.stream({
'timestamps': [dto.timestamp],
'temperature_values': [dto.temperature],
'humidity_values': [dto.humidity],
'air_pressure_values': [dto.air_pressure],
})
document = curdoc()
def handle_weather_record(dto: WeatherRecordDto):
document.add_next_tick_callback(partial(update_data, dto = dto))
def run_amqp_receiver():
print('starting the AMQP receiver')
amqp_receiver = Container(
WeatherRecordReceiver(
url = os.getenv('AMQP_URL'),
queue = os.getenv('AMQP_QUEUE'),
username = os.getenv('AMQP_USERNAME'),
password = os.getenv('AMQP_PASSWORD'),
container_id = os.getenv('AMQP_CONTAINER_ID'),
weather_station_id = int(os.getenv('WEATHER_STATION_ID')),
record_received_callback = handle_weather_record
)
)
try:
amqp_receiver.run()
except KeyboardInterrupt:
print('stopping the AMQP receiver')
print('stopped the AMQP receiver')
document.add_root(plot)
document.title = "weather station dashboard"
amqp_receiver_thread = Thread(target = run_amqp_receiver)
amqp_receiver_thread.start()
|
class Codec:
def encode(self, longUrl):
"""Encodes a URL to a shortened URL.
"""
num = 0
for c in longUrl:
num = num*128 + ord(c)
return hex(num)
def decode(self, shortUrl):
"""Decodes a shortened URL to its original URL.
:type shortUrl: str
:rtype: str
"""
num = int(shortUrl, 16)
ret_s = ""
print(shortUrl)
while num != 0:
c = num %128
ret_s = str(chr(c)) + ret_s
num //= 128  # integer division; '/' would yield a float in Python 3
return ret_s
# Your Codec object will be instantiated and called as such:
# codec = Codec()
# codec.decode(codec.encode(url))
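# A quick round-trip sketch (hypothetical URL, ASCII only) to check that the
# base-128 hex encoding above inverts cleanly:
_codec = Codec()
_short = _codec.encode("https://example.com/a")
assert _codec.decode(_short) == "https://example.com/a"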
|
# -*- coding: utf-8 -*-
from django.core.urlresolvers import reverse
from admin_tools.menu import items, Menu
class CustomMenu(Menu):
def __init__(self, **kwargs):
super(CustomMenu, self).__init__(**kwargs)
self.children.append(
items.MenuItem(title=u'Main', url=reverse('admin:index'))
)
self.children.append(
items.MenuItem(
title=u'Catalog',
children=[
items.MenuItem(title=u'Categories', url='/admin/catalog/category/'),
items.MenuItem(title=u'Brands', url='/admin/catalog/brand/'),
items.MenuItem(title=u'Items', url='/admin/catalog/item/'),
items.MenuItem(title=u'User reviews', url='/admin/catalog/userreview/'),
]
),
)
self.children.append(
items.MenuItem(
title=u'Commerce',
children=[
items.MenuItem(title=u'Orders', url='/admin/comm/order/'),
items.MenuItem(title=u'Baskets', url='/admin/comm/basket/'),
]
),
)
self.children.append(
items.MenuItem(
title=u'References',
children=[
items.MenuItem(title=u'Countries', url='/admin/accounts/country/'),
items.MenuItem(title=u'States', url='/admin/accounts/state/'), ]
)
)
self.children.append(
items.MenuItem(
title=u'Users',
children=[
items.MenuItem(title=u'Users', url='/admin/auth/user/'),
items.MenuItem(title=u'Groups', url='/admin/auth/group/'),
items.MenuItem(title=u'Addresses', url='/admin/accounts/address/'),
]
)
)
|
import sys
def sum(n, array):
arraysum = 0
for i in range(n):
print i
arraysum += array[i]
print arraysum
def main():
input1 = [45, 368, 527, 525, 209, 206, 174, 625, 1216, 206, 554, 930, 1101, 239, 591, 371, 971, 659, 620, 903, 424, 488, 590, 514, 807, 252, 394, 898, 557, 137, 165, 592, 495, 682, 1107, 695, 878, 1272, 19]
sum(39, input1)
if __name__ == '__main__':
main()
|
import pandas as pd
import datetime
def read_tweets():
"""
读取twitter生成user列表
:return: user列表
"""
user_tweet_list = []
with open('Tweets/R_DeodorantCancer.txt') as f2:
for line, column in enumerate(f2):
column = column.replace('\n', '')
user_t_id, tweet_id, content, time = column.split('\t')[:]
user_tweet_list.append(user_t_id)
user_tweet_list = list(map(int, user_tweet_list))
user_tweet_list = set(user_tweet_list) # 去除重复用户id
print("tweet_read_complete")
return user_tweet_list
def read_links(tweet_user_list):
"""
抽取出
:param tweet_user_list: 上一个方法生成的list
:return:
"""
reader = pd.read_csv('links.csv', header=None, names=['user_id', 'following_id'], iterator=True)
loop = True
chunkSize = 500000
chunks = []
while loop:
try:
chunk = reader.get_chunk(chunkSize)
chunks.append(chunk)
except StopIteration:
loop = False
print("Iteration is stopped.")
df = pd.concat(chunks, ignore_index=True)
usr_l = []
usr_r = []
user_index = []
for user_id in tweet_user_list:
# usr_l = df[df['user_id'] == user_id].index.tolist()
# usr_r = df[df['following_id'] == user_id].index.tolist()
a = df[(df.user_id == user_id) | (df.following_id == user_id)]
a.to_csv('deodorant_link.csv', mode='a+', header=None, index=False)
# for usr_l_index in usr_l:
# for usr_r_index in usr_r:
# if usr_l_index == usr_r_index:
# user_index.append(usr_l_index)
# return user_index
if __name__ == "__main__":
start = datetime.datetime.now()
user_id_from_twitter = read_tweets() # build the user list
read_links(user_id_from_twitter)
print('process complete')
end = datetime.datetime.now()
print(end - start)
|
# coding=utf-8
import pymysql
from tkinter import *
class loginPage(object):
def __init__(self, master, info='Welcome to the registration page'):
self.master = master
self.mainlabel = Label(master, text=info, justify=CENTER)
self.mainlabel.grid(row=0, columnspan=3)
self.user = Label(master, text='Username:', borderwidth=3)
self.user.grid(row=1, sticky=W)
self.pwd = Label(master, text='Password:', borderwidth=3)
self.pwd.grid(row=2, sticky=W)
self.userEntry = Entry(master)
self.userEntry.grid(row=1, column=1, columnspan=3)
self.userEntry.focus_set()
self.pwdEntry = Entry(master, show='*')
self.pwdEntry.grid(row=2, column=1, columnspan=3)
self.loginButton = Button(master, text='Register', borderwidth=2, command=self.login)
self.loginButton.grid(row=3, column=1)
self.clearButton = Button(master, text='Clear', borderwidth=2, command=self.clear)
self.clearButton.grid(row=3, column=2)
def login(self):
self.username = self.userEntry.get().strip()
self.passwd = self.pwdEntry.get().strip()
db = pymysql.connect("localhost", "root", "12345678", "yule")
cursor = db.cursor()
# sql = "INSERT INTO user(name, pwd) VALUES ('%s', '%s')" % (self.username, self.passwd)
sql="CREATE USER '%s'@'localhost' IDENTIFIED BY '%s';" % (self.username, self.passwd)
print(sql)
try:
cursor.execute(sql)
print("User created successfully!")
cursor.execute("GRANT GRANT OPTION ON *.* TO '%s'@'localhost';" % (self.username))
cursor.execute("GRANT SELECT, INSERT, UPDATE, DELETE, CREATE, DROP, RELOAD, SHUTDOWN, PROCESS, FILE, REFERENCES, INDEX, ALTER, SHOW DATABASES, SUPER, CREATE TEMPORARY TABLES, LOCK TABLES, EXECUTE, REPLICATION SLAVE, REPLICATION CLIENT, CREATE VIEW, SHOW VIEW, CREATE ROUTINE, ALTER ROUTINE, CREATE USER, EVENT, TRIGGER ON *.* TO '%s'@'localhost';" % (self.username))
db.commit()
from tkinter import messagebox
messagebox.showinfo(title='Registration successful', message='Registration successful')
root.destroy()
except:
print("User creation failed!")
db.rollback()
db.close()
def clear(self):
self.userEntry.delete(0, END)
self.pwdEntry.delete(0, END)
if __name__ == '__main__':
root = Tk()
root.title("Register")
root.geometry('250x120+650+330')
myLogin = loginPage(root)
mainloop()
|
# -*- coding: utf-8 -*-
"""
Created on Mon Mar 22 23:27:36 2021
@author: mtran
"""
from Patient import *
import matplotlib.pyplot as plt
if __name__ == "__main__":
# Enter the file path to the patient here
foldername = "../../histopathology_dataset/"
# Enter the patients number here
patient_id = "8863"
patch_size_px = (50, 50, 3)
patch_size_mm = (50, 50)
# Generate a patient wholeslide image
patient = Patient(foldername, patient_id, patch_size_mm, patch_size_px)
out_image = patient.generate_wholeslide_image(class_vis = False).astype("uint8")
plt.imshow(out_image)
plt.show()
out_image_classes = patient.generate_wholeslide_image(class_vis = True).astype("uint8")
plt.imshow(out_image_classes)
plt.show()
plt.imsave("Images/" + patient_id + ".png", out_image)
|
import pandas as pd
def truncate_cell_line_names(data, index=True, separator='_', preserve_str_location=0):
'''
Truncate cell line names such as 'CAL120_breast' to 'CAL120'.
Split the input string by separator and keep the preserve_str_location-th part.
Input is a DataFrame, List, or Index. (Todo)
If the input is a DataFrame, an option chooses whether to truncate the index (True) or the columns (False).
Return the same type as the input.
The cell line names have to be in the index or columns; otherwise it will try to truncate the index.
@ Parameters:
index: boolean, True when the cell line names are in the row index, False when they are in the column names
separator: the names will be split on this separator
preserve_str_location: after splitting, which part is the name to keep.
@ Returns:
Same data type.
'''
if isinstance(data,pd.DataFrame) or isinstance(data, pd.Series):
if index or isinstance(data, pd.Series):
data.index = [x[preserve_str_location] for x in data.index.str.split(separator)]
return data
else:
data.columns = [x[preserve_str_location] for x in data.columns.str.split(separator)]
return data
elif isinstance(data,pd.Index):
''' Todo: check if this is passed as Ref or Copy.
'''
return [x[preserve_str_location] for x in data.str.split(separator)]
elif isinstance(data,list):
return [x.split(separator)[preserve_str_location] for x in data]
else:
raise TypeError
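# A minimal usage sketch (hypothetical data) for truncate_cell_line_names, showing
# the behaviour for a DataFrame index and for a plain list:
def _truncate_cell_line_names_example():
    df = pd.DataFrame({'value': [1, 2]}, index=['CAL120_breast', 'MCF7_breast'])
    print(truncate_cell_line_names(df).index.tolist())   # ['CAL120', 'MCF7']
    print(truncate_cell_line_names(['CAL120_breast']))   # ['CAL120']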
def transform_underscore(data, underscore_to_minus=True, target='columns'):
"""
This is for unify the symbols, change all the - or _ in the index or columns.
@ Parameter:
data: DataFrame;
underscore_to_minusminus: Change _ to - , if False, viseversa.
target: Do it on index, or columns, or both.
@ Returns:
Same type as given.
"""
if isinstance(data,pd.DataFrame):
if target in ['columns','column','col']:
if underscore_to_minus:
data.columns = data.columns.str.replace('_','-')
else:
data.columns = data.columns.str.replace('-','_')
elif target in ['index','idx']:
    if underscore_to_minus:
        data.index = data.index.str.replace('_','-')
    else:
        data.index = data.index.str.replace('-','_')
return data
elif isinstance(data,list):
print('To do')
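# A minimal usage sketch (hypothetical frame) for transform_underscore: rename
# 'CAL120_breast' style column labels to 'CAL120-breast':
def _transform_underscore_example():
    df = pd.DataFrame({'CAL120_breast': [1], 'MCF7_breast': [2]})
    print(transform_underscore(df).columns.tolist())   # ['CAL120-breast', 'MCF7-breast']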
def transform_invalid_char_in_df(df, which_col=None, inv_chars=[' ','/','\\'], to_char='_'):
"""Strip the strs, and replace the invalid chars into underscore"""
if not isinstance(df,pd.DataFrame):
df = pd.DataFrame(df)
if which_col is None:
which_col = df.columns
if isinstance(which_col,str):
which_col = [which_col]
for each_col in which_col:
df[each_col] = df[each_col].str.strip()
for each_inv in inv_chars:
df[each_col] = df[each_col].str.replace(each_inv,to_char)
return df
def _check_match(A, B, print_right_here=1):
"""list like A B
等长吗,等集合吗,等顺序吗
"""
rst = [False,False,False]
if len(A)==len(B):
rst[0]=True
if set(A)==set(B):
rst[1]=True
if rst[0] and rst[1]:
A = list(A)
B = list(B)
if A==B:
rst[2]=True
if print_right_here:
if rst[2]:
print("Excat match.")
elif rst[1] and rst[0]:
print("Not same sequence.")
elif (not rst[0]) and rst[1]:
print("Duplicated elements.")
elif not rst[1]:
print("Different elements.")
return tuple(rst)
def check_dataset_matched(A, B):
""""""
print('\n>>> Check data sets index and cols matched:')
if isinstance(A,pd.DataFrame) and isinstance(B,pd.DataFrame):
print(">> Check matching: two DataFrames. ")
print("> Indexs:")
idxs = _check_match(A.index,B.index)
print("> Columns:")
cols = _check_match(A.columns,B.columns)
elif isinstance(A,pd.DataFrame) and isinstance(B,pd.Series):
print('>> Check matching: DataFrame -> Series. ')
idxs = _check_match(A.index,B.index)
cols = (1,1,1)
elif isinstance(A,pd.Series) and isinstance(B,pd.DataFrame):
print('>> Check matching: Series -> DataFrame. ')
idxs = _check_match(A.index,B.index)
cols = (1,1,1)
elif isinstance(A,pd.Series) and isinstance(B,pd.Series):
print('>> Check matching: Series -> Series. ')
idxs = _check_match(A.index,B.index)
cols = (1,1,1)
else:
raise TypeError("now only support pandas things.")
return all(idxs) and all(cols) # all()/any() reduce an iterable of booleans
|
# -*- coding: utf-8 -*-
"""
Created on Mon May 25 13:25:00 2020
@author: logam
"""
from scipy import ndimage
import matplotlib.pyplot as plt
import numpy as np
import kernel_function as kf
import cv2
import random
original = cv2.imread('test2.png')#
img = cv2.imread('test2.png', cv2.IMREAD_GRAYSCALE)#
plt.figure(dpi=700)
#kf.test()
canny = cv2.Canny(img,50,250)
dims = canny.shape
#result_pic = np.zeros_like(img)
result_pic = 0 * np.ones((dims[0],dims[1],3), np.uint8)
def edge_seg(row_cur, col_cur, row_start, col_start, color):
# set pixel in current picture to black so it won't be used again
canny[row_cur, col_cur] = 0
result_pic[row_cur,col_cur] = color
for coord in kf.get_eight_neighbours(row_cur, col_cur, dims):
#pass, if we got to the start again (break?!)
if (coord[0] == row_start and coord[1] == col_start):
break
elif canny[coord[0], coord[1]] == 255:
edge_seg(coord[0], coord[1], row_start, col_start, color)
else:
pass
"""
def edge_seg(row_cur, col_cur, row_start, col_start, color):
#set pixel in current picture black so it wont be used again
canny[row_cur, col_cur] = 0
start = True
while ((row_cur != row_start and col_cur != col_start) or start == True):
for coord in kf.get_eight_neighbours(row_cur, col_cur, dims):
if (canny[coord[0]][[coord[1]]] == 255):
canny[coord[0]][[coord[1]]] = 0
result_pic[row_cur, col_cur] = color
row_cur = coord[0]
col_cur = coord[1]
cv2.imshow("progress",result_pic)
cv2.waitKey(1)
start = False
"""
def random_color():
rgbl=[255,0,0]
random.shuffle(rgbl)
return tuple(rgbl)
for row in range (dims[0]):
for col in range (dims[1]):
cur_val = canny[row][col]
if cur_val == 255:
#print(cur_val)
#whiteFrame[row,col] = (225, 50, 90)
edge_seg(row, col, row, col, random_color())
result_pic2 = ndimage.binary_fill_holes(result_pic[:,:,0]).astype(int)
plt.subplot(231),plt.imshow(original),plt.title('Original')
plt.xticks([]), plt.yticks([])
plt.subplot(232),plt.imshow(result_pic),plt.title('Connected Edges')
plt.xticks([]), plt.yticks([])
plt.subplot(233),plt.imshow(result_pic2, cmap = plt.get_cmap("gray")),plt.title('Filled Regions')
plt.xticks([]), plt.yticks([])
|
#
# @lc app=leetcode.cn id=7 lang=python3
#
# [7] 整数反转
#
# @lc code=start
class Solution:
def reverse(self, x: int) -> int:
# 1032/1032 cases passed (32 ms)
# Your runtime beats 92.87 % of python3 submissions
# Your memory usage beats 16.12 % of python3 submissions (15 MB)
if x >= 0:
res = int(str(x)[::-1])
else:
res = -int(str(-x)[::-1])
if res >= 2 ** 31 or res < -2**31:
return 0
return res
# @lc code=end
|
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from config import config
app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = 'mysql://'+config.DBUSER+':'+config.DBPASSWD+'@'+config.DBHOST+'/'+config.DBNAME
db = SQLAlchemy(app)
event_organizers_table = db.Table('event_organizers',
db.Column('organizer_id',db.Integer, db.ForeignKey("members.id")),
db.Column('event_id',db.Integer, db.ForeignKey("events.id"))
)
class Member(db.Model):
__tablename__ = 'members'
id = db.Column(db.Integer,primary_key=True)
name = db.Column(db.String(length=256, convert_unicode=True))
is_organizer = db.Column(db.Boolean,default=False)
gender = db.Column(db.String(length=16, convert_unicode=True))
age = db.Column(db.Integer)
dob = db.Column(db.String(length=256, convert_unicode=True))
id_proof = db.Column(db.String(length=256, convert_unicode=True))
id_proof_number = db.Column(db.String(length=256, convert_unicode=True))
email = db.Column(db.String(length=256),unique=True,nullable=False)
fb_profile = db.Column(db.String(length=512, convert_unicode=True))
contact = db.Column(db.String(length=60, convert_unicode=True))
emergency_contact = db.Column(db.String(length=60, convert_unicode=True))
emergency_contact_person = db.Column(db.String(length=256, convert_unicode=True))
bloodgroup = db.Column(db.String(length=60))
swimming_ability = db.Column(db.Unicode(256))
bike_type = db.Column(db.String(length=60, convert_unicode=True))
bike_model = db.Column(db.String(length=256, convert_unicode=True))
bike_registration_number = db.Column(db.String(length=256))
camera_model = db.Column(db.String(length=256, convert_unicode=True))
cycle_type = db.Column(db.String(length=60, convert_unicode=True))
cycle_model = db.Column(db.String(length=256, convert_unicode=True))
tshirt_size = db.Column(db.String(length=16, convert_unicode=True))
def __init__(self,email,name=None,fb_profile=None):
self.email=email
self.name=name
self.fb_profile=fb_profile
def __repr__(self):
return self.email
class Event(db.Model):
__tablename__ = 'events'
id = db.Column(db.Integer,primary_key=True)
name = db.Column(db.String(length=256, convert_unicode=True),nullable=False)
category = db.Column(db.String(length=256, convert_unicode=True))
start_date = db.Column(db.Date)
end_date = db.Column(db.Date)
location = db.Column(db.String(length=256, convert_unicode=True))
trek_difficulty = db.Column(db.String(length=60, convert_unicode=True))
swimming_batch_level = db.Column(db.String(length=60, convert_unicode=True))
primary_organizer_id = db.Column(db.Integer,db.ForeignKey("members.id"))
organizers = db.relationship("Member", secondary = event_organizers_table, backref="organized_events")
#trek = relationship("Trek", uselist=False, backref="event")
primary_organizer = db.relationship("Member", backref="events_as_primary_organizer" )
def __repr__(self):
return self.name
class CoastalCleanupZone(db.Model):
__tablename__ = "coastal_cleanup_zones"
id = db.Column(db.Integer,primary_key=True)
event_id = db.Column(db.Integer,db.ForeignKey("events.id"))
zone_name = db.Column(db.String(length=256, convert_unicode=True))
zone_lead1 = db.Column(db.Integer,db.ForeignKey("members.id"))
zone_lead2 = db.Column(db.Integer,db.ForeignKey("members.id"))
zone_lead3 = db.Column(db.Integer,db.ForeignKey("members.id"))
zone_lead4 = db.Column(db.Integer,db.ForeignKey("members.id"))
zone_greenlead1 = db.Column(db.Integer,db.ForeignKey("members.id"))
zone_greenlead2 = db.Column(db.Integer,db.ForeignKey("members.id"))
zone_greenlead3 = db.Column(db.Integer,db.ForeignKey("members.id"))
zone_greenlead4 = db.Column(db.Integer,db.ForeignKey("members.id"))
zone_headcount = db.Column(db.Integer,db.ForeignKey("members.id"))
class Worksheet(db.Model):
__tablename__ = 'worksheets'
id = db.Column(db.Integer,primary_key=True)
gdata_resourceId = db.Column(db.String(length=256, convert_unicode=True),nullable=False)
name = db.Column(db.String(length=256, convert_unicode=True))
filename = db.Column(db.String(length=256, convert_unicode=True))
event_id = db.Column(db.Integer,db.ForeignKey("events.id"))
event = db.relationship("Event", backref="worksheets" )
class Registration(db.Model):
__tablename__ = 'registrations'
id = db.Column(db.Integer,primary_key=True)
datetime = db.Column(db.String(length=256, convert_unicode=True))
member_id = db.Column(db.Integer,db.ForeignKey("members.id"),nullable=False)
event_id = db.Column(db.Integer,db.ForeignKey("events.id"),nullable=False)
joining_at = db.Column(db.Unicode(256))
volunteer_for = db.Column(db.String(length=256, convert_unicode=True))
selected = db.Column(db.String(length=60, convert_unicode=True))
payment_status = db.Column(db.String(length=60, convert_unicode=True))
bring_along = db.Column(db.String(length=60, convert_unicode=True))
biketrip_joining_as = db.Column(db.String(length=60, convert_unicode=True))
photographer = db.Column(db.Boolean)
dropout = db.Column(db.Boolean)
triathlon_event = db.Column(db.String(length=60, convert_unicode=True))
swimming_batch = db.Column(db.String(length=256, convert_unicode=True))
prev_experience = db.Column(db.UnicodeText)
final_organizer_comments = db.Column(db.UnicodeText)
member = db.relationship("Member", backref="registrations")
event = db.relationship("Event", backref="registrations" )
if __name__ == '__main__':
db.create_all()
|
#!/usr/bin/env python3
"""\
Quickly show test inputs and outputs.
Usage:
show_test_cases.py [<cases>]... [options]
Arguments:
<cases>
The test cases to show. By default, all cases will be shown.
Options:
-l --load
Only show test cases for the load() function.
-d --dump
Only show test cases for the dump() function.
-s --success
Only show test cases that are meant to be successfully loaded/dumped.
-e --error
Only show test cases that are meant to trigger errors
-o --output
Show the expected output for each case. The expected inputs,
respectively, are always shown.
"""
import docopt
from pathlib import Path
from textwrap import indent
ROOT_DIR = Path(__file__).parent
import sys; sys.path.append(str(ROOT_DIR / 'api'))
import nestedtext_official_tests as official
def show_file(input_output, load_dump, is_success, path):
success_error = "success" if is_success else "error"
print(f" {load_dump.title()} {success_error} ({input_output}):")
print(indent(path.read_text(), ' │'))
if __name__ == '__main__':
args = docopt.docopt(__doc__)
cases = official.load_test_cases(args['<cases>'])
for case in cases:
if args['--success'] and not case.is_success_case(): continue
if args['--error'] and not case.is_error_case(): continue
print(case.dir)
if not args['--dump'] and case.is_load_case():
show_file(
'input', 'load',
case.is_load_success(),
case['load']['in']['path'],
)
if args['--output']:
key = 'out' if case.is_load_success() else 'err'
show_file(
'output', 'load',
case.is_load_success(),
case['load'][key]['path'],
)
if not args['--load'] and case.is_dump_case():
show_file(
'input', 'dump',
case.is_dump_success(),
case['dump']['in']['path'],
)
if args['--output']:
key = 'out' if case.is_dump_success() else 'err'
show_file(
'output', 'dump',
case.is_dump_success(),
case['dump'][key]['path'],
)
|
from mailsend import Mailsend
|
import os
import static.train_model as train
# A function to do it
def gender_predictor_mnb(a, cv, clf):
test_name = [a]
vector = cv.transform(test_name).toarray()
if clf.predict(vector) == 0:
return "Female"
else:
return "Male"
def gender_predictor_dt(a, dv, dclf):
# Build Features and Transform them
test_name = [a]
transform_dv =dv.transform(train.features(test_name))
vector = transform_dv.toarray()
if dclf.predict(vector) == 0:
return "Female"
else:
return "Male"
|
#!/usr/bin/env python
# Lists all flavours available
import os
import pyrax
# Credentials
pyrax.set_setting("identity_type", "rackspace")
creds_file = os.path.expanduser("~/.rackspace_cloud_credentials")
pyrax.set_credential_file(creds_file)
cs = pyrax.cloudservers
# Gets flavours
flvs = cs.flavors.list()
# Prints out each flavour
for flv in flvs:
print "Name:", flv.name
print " ID:", flv.id
print " RAM:", flv.ram
print " Disk:", flv.disk
print " VCPUs:", flv.vcpus
print
|
import datetime
import tempfile
import pathlib
### Configuration
GOAL = 4500
### Calculations
data = GC.seasonMetrics()
# Keep only runs. Don't use GC filters cause that makes it HELLA slow
distances = [x for i, x in enumerate(data["Distance"]) if data["Sport"][i] == "Run"]
today_distance = sum(distances)
today = datetime.date.today()
first_day = datetime.date(year=today.year, month=1, day=1)
last_day = datetime.date(year=today.year, month=12, day=31)
days_passed = (today - first_day).days + 1
days_to_go = (last_day - today).days
total_days = (last_day - first_day).days + 1
km_per_day = today_distance / days_passed
last_distance_estimate = km_per_day * total_days
today_goal = GOAL / total_days * days_passed
### Create html file
temp_file = tempfile.NamedTemporaryFile(
mode="w+t", prefix="GC_", suffix=".html", delete=False
)
temp_file.write("<html><body>")
temp_file.write("<p>")
temp_file.write("You have run {:.1f} km so far.".format(today_distance))
temp_file.write("</p>")
temp_file.write("<p>")
temp_file.write("That is an average of {:.1f} km per day.".format(km_per_day))
temp_file.write("</p>")
temp_file.write("<p>")
if today_goal < today_distance:
temp_file.write(
"You are {:.1f} km ahead of your {:d} km goal.".format(
today_distance - today_goal, GOAL
)
)
else:
temp_file.write(
"You are {:.1f} km BEHIND on your {:d} km goal.".format(
today_goal - today_distance, GOAL
)
)
temp_file.write("</p>")
temp_file.write("<p>")
temp_file.write(
"At this rate, you will end up at {:.1f} km.".format(last_distance_estimate)
)
temp_file.write("</p>")
temp_file.write("</body></html>")
temp_file.close()
### Output
print("DEBUG: File saved at", pathlib.Path(temp_file.name).as_uri())
GC.webpage(pathlib.Path(temp_file.name).as_uri())
|
#!/usr/bin/python3
"""Platform for light integration."""
from datetime import timedelta
import logging
# Import the device class from the component that you want to support
from homeassistant.components.light import (
ATTR_BRIGHTNESS,
ATTR_COLOR_TEMP,
ATTR_HS_COLOR,
PLATFORM_SCHEMA,
SUPPORT_BRIGHTNESS,
SUPPORT_COLOR,
SUPPORT_COLOR_TEMP,
LightEntity,
)
from homeassistant.const import ATTR_ATTRIBUTION
from homeassistant.util import color as colorutil
from .const import ATTRIBUTION, DOMAIN
from .sengledapi.sengledapi import SengledApi
# Added to support a quicker update time. Is this too fast?
SCAN_INTERVAL = timedelta(seconds=30)
ON = "1"
OFF = "0"
_LOGGER = logging.getLogger(__name__)
async def async_setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the Sengled Light platform."""
_LOGGER.debug("""Creating new Sengled light component""")
# Add devices
add_entities(
[
SengledBulb(light)
for light in await hass.data[DOMAIN][
"sengledapi_account"
].discover_devices()
],
True,
)
class SengledBulb(LightEntity):
"""Representation of a Sengled Bulb."""
def __init__(self, light):
"""Initialize a Sengled Bulb."""
self._light = light
self._name = light._friendly_name
self._state = light._state
self._brightness = light._brightness
self._avaliable = light._avaliable
self._device_mac = light._device_mac
self._device_model = light._device_model
self._color_temperature = light._color_temperature
self._color = light._color
self._device_rssi = light._device_rssi
self._rgb_color_r = light._rgb_color_r
self._rgb_color_g = light._rgb_color_g
self._rgb_color_b = light._rgb_color_b
self._alarm_status = light._alarm_status
self._wifi_device = light._wifi_device
self._support_color = light._support_color
self._support_color_temp = light._support_color_temp
self._support_brightness = light._support_brightness
@property
def name(self):
"""Return the display name of this light."""
return self._name
@property
def unique_id(self):
return self._device_mac
@property
def available(self):
"""Return the connection status of this light"""
return self._avaliable
@property
def device_state_attributes(self):
"""Return device attributes of the entity."""
if self._device_model == "E13-N11":
return {
ATTR_ATTRIBUTION: ATTRIBUTION,
"state": self._state,
"available": self._avaliable,
"device model": self._device_model,
"rssi": self._device_rssi,
"mac": self._device_mac,
"alarm status ": self._alarm_status,
}
else:
return {
ATTR_ATTRIBUTION: ATTRIBUTION,
"state": self._state,
"available": self._avaliable,
"device model": self._device_model,
"rssi": self._device_rssi,
"mac": self._device_mac,
}
@property
def color_temp(self):
"""Return the color_temp of the light."""
if self._color_temperature is None:
return colorutil.color_temperature_kelvin_to_mired(2000)
else:
return colorutil.color_temperature_kelvin_to_mired(self._color_temperature)
@property
def hs_color(self):
"""Return the hs_color of the light."""
if self._wifi_device:
a, b, c = self._color.split(":")
return colorutil.color_RGB_to_hs(int(a), int(b), int(c))
else:
return colorutil.color_RGB_to_hs(
int(self._rgb_color_r), int(self._rgb_color_g), int(self._rgb_color_b)
)
@property
def brightness(self):
"""Return the brightness of the light."""
return self._brightness
@property
def is_on(self):
"""Return true if light is on."""
return self._state
@property
def supported_features(self):
"""Flags Supported Features"""
features = ""
if self._support_brightness:
features = SUPPORT_BRIGHTNESS
if self._support_color_temp and self._support_brightness:
features = SUPPORT_BRIGHTNESS | SUPPORT_COLOR_TEMP
if (
self._support_brightness
and self._support_color_temp
and self._support_color
):
features = SUPPORT_BRIGHTNESS | SUPPORT_COLOR_TEMP | SUPPORT_COLOR
return features
async def async_turn_on(self, **kwargs):
"""Turn on or control the light."""
if (
ATTR_BRIGHTNESS not in kwargs
and ATTR_HS_COLOR not in kwargs
and ATTR_COLOR_TEMP not in kwargs
):
await self._light.async_toggle(ON)
if ATTR_BRIGHTNESS in kwargs:
await self._light.async_set_brightness(kwargs[ATTR_BRIGHTNESS])
if ATTR_HS_COLOR in kwargs:
hs = kwargs.get(ATTR_HS_COLOR)
color = colorutil.color_hs_to_RGB(hs[0], hs[1])
await self._light.async_set_color(color)
if ATTR_COLOR_TEMP in kwargs:
color_temp = colorutil.color_temperature_mired_to_kelvin(
kwargs[ATTR_COLOR_TEMP]
)
await self._light.async_color_temperature(color_temp)
async def async_turn_off(self, **kwargs):
"""Instruct the light to turn off."""
await self._light.async_toggle(OFF)
async def async_update(self):
"""Fetch new state data for this light.
This is the only method that should fetch new data for Home Assistant.
"""
await self._light.async_update()
self._state = self._light.is_on()
self._avaliable = self._light._avaliable
self._state = self._light._state
self._brightness = self._light._brightness
self._color_temperature = self._light._color_temperature
self._color = self._light._color
self._rgb_color_r = self._light._rgb_color_r
self._rgb_color_g = self._light._rgb_color_g
self._rgb_color_b = self._light._rgb_color_b
self._device_rssi = self._light._device_rssi
self._alarm_status = self._light._alarm_status
@property
def device_info(self):
"""Return the device info."""
return {
"name": self._name,
"identifiers": {(DOMAIN, self._device_mac)},
"model": self._device_model,
"manufacturer": "Sengled",
}
|
# Generated by Django 2.1.7 on 2019-06-28 08:38
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('contact', '0001_initial'),
]
operations = [
migrations.RenameField(
model_name='contact',
old_name='hone',
new_name='phone',
),
]
|
import tensorflow as tf
def create_unet(in_shape=[256, 256, 3], out_channels=1, depth=5, training=True, name='UNET'):
'''
Creates a UNET model.
# Params:
in_shape = [batch_size, height, width, channels]
depth = number of downsample blocks
training = whether or not model is training (for b.n. and dropout)
name = name to use for scope of model and all variables
# Returns:
model logits
'''
with tf.name_scope(name):
inputs = tf.placeholder(tf.float32, shape=[None]+in_shape, name='input')
net = inputs
# unet down
down_outputs = []
for i in range(depth):
with tf.name_scope('down_'+str(i)):
# downsample image if not the input
if i > 0:
net = tf.contrib.layers.max_pool2d(net, kernel_size=(3, 3), padding='SAME')
# conv block 1
net = tf.contrib.layers.conv2d(net,num_outputs=32*(2**i), kernel_size=(3, 3), activation_fn=None)
net = tf.nn.elu(net, name='act_1')
net = tf.contrib.layers.batch_norm(net, is_training=training)
# conv block 2
net = tf.contrib.layers.conv2d(net,num_outputs=32*(2**i), kernel_size=(3, 3), activation_fn=None)
net = tf.nn.elu(net, name='act_2')
net = tf.contrib.layers.batch_norm(net, is_training=training)
# save the output
down_outputs.append(net)
# start up with the last output
net = down_outputs.pop()
# unet up
for i in range(depth-1):
with tf.name_scope('up_'+str(i)):
# upsample the output
bat, h, w, chan = net.get_shape().as_list()
net = tf.contrib.layers.conv2d_transpose(net, chan, (2, 2), stride=(2, 2), padding='same')
# tf.keras.layers.UpSampling2D(net, size=(2, 2))
# concatenate and dropout
net = tf.concat([net, down_outputs.pop()], axis=-1, name='concat')
net = tf.contrib.layers.dropout(net, keep_prob=0.9, is_training=training)
# conv block 1
net = tf.contrib.layers.conv2d(net,num_outputs=32*(2**(depth-(i+2))), kernel_size=(3, 3), activation_fn=None)
net = tf.nn.elu(net, name='act_1')
# conv block 2
net = tf.contrib.layers.conv2d(net,num_outputs=32*(2**(depth-(i+2))), kernel_size=(3, 3), activation_fn=None)
net = tf.nn.elu(net, name='act_2')
# final output layer, out_channels classes
net = tf.contrib.layers.conv2d(net, num_outputs=out_channels, kernel_size=(1, 1), activation_fn=None)
# net = tf.nn.sigmoid(net)
return inputs, net
if __name__ == '__main__':
# test the compilation of the model
inputs, net = create_unet()
|
import unittest
import numpy.testing as testing
import numpy as np
import tempfile
import shutil
import os
import healsparse
from healsparse.fits_shim import HealSparseFits
class FitsShimTestCase(unittest.TestCase):
def test_read_header(self):
"""
Test reading a fits header
"""
self.test_dir = tempfile.mkdtemp(dir='./', prefix='TestHealSparse-')
filename = os.path.join(self.test_dir, 'test_array.fits')
data0 = np.zeros(10, dtype=np.int32)
data1 = np.zeros(10, dtype=np.float64)
header = healsparse.fits_shim._make_header({'AA': 0,
'BB': 1.0,
'CC': 'test'})
self.write_testfile(filename, data0, data1, header)
with HealSparseFits(filename) as fits:
exts = [0, 1, 'COV', 'SPARSE']
for ext in exts:
header_test = fits.read_ext_header(ext)
for key in header:
self.assertEqual(header_test[key], header[key])
def test_read_dtype(self):
"""
Test reading a dtype
"""
self.test_dir = tempfile.mkdtemp(dir='./', prefix='TestHealSparse-')
header = None
for t in ['array', 'recarray', 'widemask']:
filename = os.path.join(self.test_dir, 'test_%s.fits' % (t))
data0 = np.zeros(10, dtype=np.int32)
if t == 'array':
dtype = np.dtype(np.float64)
elif t == 'recarray':
dtype = np.dtype([('a', 'f4'),
('b', 'i4')])
else:
dtype = np.dtype(healsparse.WIDE_MASK)
data1 = np.zeros(10, dtype=dtype)
self.write_testfile(filename, data0, data1, header)
with HealSparseFits(filename) as fits:
exts = [1, 'SPARSE']
for ext in exts:
dtype_test = fits.get_ext_dtype(ext)
if t == 'recarray':
# We need to allow for byte-order swapping
var_in = np.zeros(1, dtype=dtype)
var_out = np.zeros(1, dtype=dtype_test)
self.assertEqual(len(dtype_test), len(dtype))
for n in dtype.names:
self.assertEqual(var_in[n][0].__class__,
var_out[n][0].__class__)
else:
self.assertEqual(dtype_test, dtype)
def test_read_data(self):
"""
Test reading data
"""
self.test_dir = tempfile.mkdtemp(dir='./', prefix='TestHealSparse-')
header = None
for t in ['array', 'recarray', 'widemask']:
filename = os.path.join(self.test_dir, 'test_%s.fits' % (t))
data0 = np.ones(10, dtype=np.int32)
if t == 'array':
dtype = np.dtype(np.float64)
elif t == 'recarray':
dtype = np.dtype([('a', 'f4'),
('b', 'i4')])
else:
dtype = np.dtype(healsparse.WIDE_MASK)
data1 = np.ones(10, dtype=dtype)
self.write_testfile(filename, data0, data1, header)
with HealSparseFits(filename) as fits:
exts = [0, 'COV', 1, 'SPARSE']
for ext in exts:
data_test = fits.read_ext_data(ext)
data_sub = fits.read_ext_data(ext, row_range=[2, 5])
if ext == 0 or ext == 'COV':
testing.assert_array_almost_equal(data_test, data0)
testing.assert_array_almost_equal(data_sub, data0[2: 5])
else:
if t == 'recarray':
for n in dtype.names:
testing.assert_array_almost_equal(data_test[n], data1[n])
testing.assert_array_almost_equal(data_sub[n], data1[n][2: 5])
else:
testing.assert_array_almost_equal(data_test, data1)
testing.assert_array_almost_equal(data_sub, data1[2: 5])
def test_is_image(self):
"""
Test if an extension is an image.
"""
self.test_dir = tempfile.mkdtemp(dir='./', prefix='TestHealSparse-')
filename = os.path.join(self.test_dir, 'test_array.fits')
data0 = np.zeros(10, dtype=np.int32)
data1 = np.zeros(10, dtype=[('a', 'f8'), ('b', 'i4')])
header = None
self.write_testfile(filename, data0, data1, header)
with HealSparseFits(filename) as fits:
exts = [0, 1, 'COV', 'SPARSE']
is_images = [True, False, True, False]
for i, ext in enumerate(exts):
self.assertEqual(fits.ext_is_image(ext), is_images[i])
def test_append(self):
"""
Test if we can append
"""
self.test_dir = tempfile.mkdtemp(dir='./', prefix='TestHealSparse-')
header = None
if healsparse.fits_shim.use_fitsio:
# Test appending to image and recarray
for t in ['array', 'recarray', 'widemask']:
filename = os.path.join(self.test_dir, 'test_%s.fits' % (t))
data0 = np.zeros(10, dtype=np.int32)
if t == 'array':
dtype = np.float64
elif t == 'recarray':
dtype = np.dtype([('a', 'f4'),
('b', 'i4')])
else:
dtype = healsparse.WIDE_MASK
data1 = np.zeros(10, dtype=dtype)
self.write_testfile(filename, data0, data1, header)
with HealSparseFits(filename, mode='rw') as fits:
extra_data1 = np.ones(10, dtype=dtype)
fits.append_extension(1, extra_data1)
full_data = np.append(data1, extra_data1)
with HealSparseFits(filename) as fits:
data_test = fits.read_ext_data(1)
if t == 'recarray':
for n in dtype.names:
testing.assert_array_almost_equal(data_test[n], full_data[n])
else:
testing.assert_array_almost_equal(data_test, full_data)
else:
# Test that we get an error with astropy.io.fits
filename = os.path.join(self.test_dir, 'test_array.fits')
data0 = np.zeros(10, dtype=np.int32)
data1 = np.zeros(10, dtype=np.float64)
self.write_testfile(filename, data0, data1, header)
self.assertRaises(RuntimeError, HealSparseFits, filename, mode='rw')
def write_testfile(self, filename, data0, data1, header):
"""
Write a testfile, using astropy.io.fits only. This is in place
until we get full compression support working in both.
"""
_header = healsparse.fits_shim._make_header(header)
_header['EXTNAME'] = 'COV'
healsparse.fits_shim.fits.writeto(filename, data0,
header=_header)
_header['EXTNAME'] = 'SPARSE'
healsparse.fits_shim.fits.append(filename, data1,
header=_header, overwrite=False)
def write_testfile_unused(self, filename, data0, data1, header):
"""
Write a testfile.
"""
if healsparse.fits_shim.use_fitsio:
healsparse.fits_shim.fitsio.write(filename, data0,
header=header, extname='COV')
healsparse.fits_shim.fitsio.write(filename, data1,
header=header, extname='SPARSE')
else:
_header = healsparse.fits_shim._make_header(header)
_header['EXTNAME'] = 'COV'
healsparse.fits_shim.fits.writeto(filename, data0,
header=_header)
_header['EXTNAME'] = 'SPARSE'
healsparse.fits_shim.fits.append(filename, data1,
header=_header, overwrite=False)
def setUp(self):
self.test_dir = None
def tearDown(self):
if self.test_dir is not None:
if os.path.exists(self.test_dir):
shutil.rmtree(self.test_dir, True)
if __name__ == '__main__':
unittest.main()
|
# Generated by Django 3.0.8 on 2020-08-09 16:09
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('gallery', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Image',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(default='', max_length=100)),
('image', models.FileField(default='', upload_to='')),
('description', models.CharField(max_length=200)),
('datetime', models.DateTimeField(default='')),
('user', models.ForeignKey(default='', on_delete=django.db.models.deletion.CASCADE, related_name='my_images', to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='Category',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('category', models.CharField(max_length=500)),
('image', models.ForeignKey(default='', on_delete=django.db.models.deletion.CASCADE, related_name='image_category', to='gallery.Image')),
],
),
]
|
from flask import Flask, jsonify, request
import sqlalchemy as db
from sqlalchemy.orm import sessionmaker, scoped_session
from sqlalchemy import create_engine
from sqlalchemy.ext.declarative import declarative_base
import dotenv
from flask_jwt_extended import JWTManager
from .config import Config
from apispec.ext.marshmallow import MarshmallowPlugin
from apispec import APISpec
from flask_apispec.extension import FlaskApiSpec
import logging
from celery import Celery
dotenv.load_dotenv()
app = Flask(__name__)
app.config["JSONIFY_PRETTYPRINT_REGULAR"] = True
app.config.from_object(Config)
client = app.test_client()
engine = create_engine(Config.SQLALCHEMY_DATABASE_URI)
# engine = create_engine('sqlite:///db.sqlite')
session = scoped_session(sessionmaker(autocommit=False, autoflush=False, bind=engine))
Base = declarative_base()
Base.query = session.query_property()
jwt = JWTManager()
docs = FlaskApiSpec()
app.config.update(
{
"APISPEC_SPEC": APISpec(
title="videoblog",
version="v1",
openapi_version="2.0",
plugins=[MarshmallowPlugin()],
),
"APISPEC_SWAGGER_URL": "/swagger/",
}
)
from .models import *
Base.metadata.create_all(bind=engine)
def setup_logger():
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
formatter = logging.Formatter("%(asctime)s:%(name)s:%(levelname)s:%(message)s")
file_handler = logging.FileHandler("log/api.log")
file_handler.setFormatter(formatter)
logger.addHandler(file_handler)
return logger
logger = setup_logger()
celery = Celery(
__name__, broker="amqp://async_python:12345@localhost:5672", backend="rpc://"
)
celery.conf.task_routes = {"pipeline.*": {"queue": "pipeline"}}
@app.teardown_appcontext
def shutdown_session(exception=None):
session.remove()
from .main.views import videos
from .users.views import users
app.register_blueprint(videos)
app.register_blueprint(users)
docs.init_app(app)
jwt.init_app(app)
|
from ABC.Instruction import Instruction
from ABC.NodeAST import NodeAST
from ST.Type import getTypeString
class Primitive(Instruction):
def __init__(self, type, value, line, column):
self.type = type
self.value = value
self.line = line
self.column = column
self.array = False
self.arrayDim = 0
self.arraySize = 0
self.ReportSymbol = None
def interpreter(self, tree, table):
return self.value
def getNode(self):
node = NodeAST("PRIMITIVO")
node.addValueChild(getTypeString(self.type))
node.addValueChild(str(self.value))
return node
|
'''
This is a tic tac toe game for 2 players
player 1 = X
player 2 = O
type start() to start the game
The game splits the board into 9 squares, and players enter which square they'd like to mark next
7 | 8 | 9
---------
4 | 5 | 6
---------
1 | 2 | 3
'''
board = dict.fromkeys([1, 2, 3, 4, 5, 6, 7, 8, 9], ' ')
def print_board():
print('| ' + board[7] + ' | ' + board[8] + ' | ' + board[9] + ' |')
print('-------------')
print('| ' + board[4] + ' | ' + board[5] + ' | ' + board[6] + ' |')
print('-------------')
print('| ' + board[1] + ' | ' + board[2] + ' | ' + board[3] + ' |')
print('-------------')
def start():
print("*****The game splits the board into 9 squares, and players enter which square they'd like to mark next. "
"\nFirst player to go is X and second player is O"
"\n7 | 8 | 9"
"\n---------"
"\n4 | 5 | 6"
"\n---------"
"\n1 | 2 | 3*****")
print('')
print('***GAME START***')
for moves in range(1, 10):
win = False
player_input = int(input('Please enter your next move: '))
if player_input > 9 or player_input < 1:
#restarting the game if player enters invalid numbers
print('Please enter a number between 1 and 9. Restarting the game...')
restart()
else:
if moves % 2 != 0:
board[player_input] = 'X'
print_board()
else:
board[player_input] = 'O'
print_board()
if moves >= 5:
winning = [[board[1], board[2], board[3]], [board[4], board[5], board[6]], [board[7], board[8], board[9]],
[board[1], board[4], board[7]], [board[2], board[5], board[8]], [board[3], board[6], board[9]],
[board[1], board[5], board[9]], [board[3], board[5], board[7]]]
for items in winning:
if set(items) == {'X'}:
print('Player X won')
win = True
break
elif set(items) == {'O'}:
print('Player O won')
win = True
break
if win:
break
else:
print('It is a tie!')
play_again = input("Want to play again? (Y/N): ")
if play_again == "y" or play_again == "Y":
restart()
#restart function clears the board and runs the start() function again
def restart():
global board
board = dict.fromkeys([1, 2, 3, 4, 5, 6, 7, 8, 9], ' ')
start()
start()
|
import os
import random
def load_dataset(path_dataset):
"""Load dataset into memory from text file"""
dataset = []
with open(path_dataset) as f:
words, tags = [], []
# Each line of the file corresponds to one word and tag
for line in f:
if line != '\n':
line = line.strip('\n')
if len(line.split()) > 1:
word = line.split()[0]
tag = line.split()[-1]
else:
continue
try:
if len(word) > 0 and len(tag) > 0:
word, tag = str(word), str(tag)
words.append(word)
tags.append(tag)
except Exception as e:
print("An exception was raised, skipping a word: {}".format(e))
else:
if len(words) > 0:
assert len(words) == len(tags)
dataset.append((words, tags))
words, tags = [], []
return dataset
def save_dataset(dataset, save_dir):
"""Write sentences.txt and tags.txt files in save_dir from dataset
Args:
dataset: ([(["a", "cat"], ["O", "O"]), ...])
save_dir: (string)
"""
# Create directory if it doesn't exist
print('Saving in {}...'.format(save_dir))
if not os.path.exists(save_dir):
os.makedirs(save_dir)
# Export the dataset
with open(os.path.join(save_dir, 'sentences.txt'), 'w') as file_sentences, \
open(os.path.join(save_dir, 'tags.txt'), 'w') as file_tags:
for words, tags in dataset:
file_sentences.write('{}\n'.format(' '.join(words)))
file_tags.write('{}\n'.format(' '.join(tags)))
print('- done.')
def build_tags(data_dir, tags_file):
"""Build tags from dataset"""
data_types = ['train', 'val', 'test']
tags = set()
for data_type in data_types:
tags_path = os.path.join(data_dir, data_type, 'tags.txt')
with open(tags_path, 'r') as file:
for line in file:
tag_seq = filter(len, line.strip().split(' '))
tags.update(list(tag_seq))
tags = sorted(tags)
with open(tags_file, 'w') as file:
file.write('\n'.join(tags))
return tags
if __name__ == "__main__":
path_train = "data/train_bio"
path_test = "data/test_bio"
msg = "{} or {} file not found. Make sure you have downloaded the right dataset"\
.format(path_train, path_test)
assert os.path.isfile(path_train) and os.path.isfile(path_test), msg
# Load the dataset into memory
print('Loading MSRA dataset into memory...')
dataset_train_val = load_dataset(path_train)
dataset_test = load_dataset(path_test)
print('- done.')
# Make a list that decides the order in which we go over the data
order = list(range(len(dataset_train_val)))
random.seed(2020)
random.shuffle(order)
# Split the dataset into train, val(split with shuffle) and test
total_train_len = len(dataset_train_val)
split_val_len = int(total_train_len * 0.05)
val_dataset = [dataset_train_val[idx] for idx in order[:split_val_len]]
train_dataset = [dataset_train_val[idx] for idx in order[split_val_len:]]
test_dataset = dataset_test
save_dataset(train_dataset, 'data/train')
save_dataset(val_dataset, 'data/val')
save_dataset(test_dataset, 'data/test')
# Build tags from dataset
build_tags('data', 'data/tags.txt')
|
# Make School OOP Coding Challenge Python Problem 7
import sys
# Zookeeper class contains a name instance variable
class Zookeeper(object):
# Initializer
def __init__(self, name):
self.name = name
# Takes a list of animals and a food item, prints a message, and
# feeds and puts to sleep all the animals
def feedAnimals(self, animals, food):
print "%s is feeding %s to %s animals" % (self.name, food, len(animals))
# Iterate over animals list
for animal in animals:
animal.eat(food)
animal.sleep()
# Animal class
class Animal(object):
# Initializer
def __init__(self, name, food):
self.name = name
self.favoriteFood = food
# Sleep
def sleep(self):
print "%s sleeps for 8 hours" % self.name
# Eat
def eat(self, food):
print "%s eats %s" % (self.name, food)
if self.favoriteFood == food:
print "YUM! %s wants more %s" % (self.name, food)
# Tiger class
class Tiger(Animal):
def __init__(self, name):
super(Tiger, self).__init__(name, "meat")
# Bear class
class Bear(Animal):
def __init__(self, name):
super(Bear, self).__init__(name, "fish")
def sleep(self):
print "%s hibernates for 4 months" % self.name
# Unicorn class
class Unicorn(Animal): # implement Initializer and override sleep
def __init__(self, name):
super(Unicorn, self).__init__(name, "marshmallows")
def sleep(self):
print "%s sleeps in a cloud" % self.name
# Giraffe class
class Giraffe(Animal): # implement Initializer and override eat
def __init__(self, name):
super(Giraffe, self).__init__(name, "leaves")
# call the superclass's eat to reuse code.
def eat(self, food):
super(Giraffe, self).eat(food)
if self.favoriteFood != food:
print "YUCK! %s spits out %s" % (self.name, food)
# Bee class
class Bee(Animal): # initializer and override eat and sleep
def __init__(self, name):
super(Bee, self).__init__(name, "pollen")
def eat(self, food):
super(Bee, self).eat(food)
if self.favoriteFood != food:
print "YUCK! %s spits out %s" % (self.name, food)
def sleep(self):
print "%s never sleeps" % self.name
# Tests
def test():
def getline():
# Read line from stdin and strip whitespace
return sys.stdin.readline().strip()
# Get number of animals
animalCount = int(getline())
animals = []
# Iterate through the input for each animal
for count in range(animalCount):
# Get the animal's info
species = getline()
name = getline()
animal = None
# Check the species and create respective object
if species == "tiger":
animal = Tiger(name)
elif species == "bear":
animal = Bear(name)
elif species == "unicorn":
animal = Unicorn(name)
elif species == "giraffe":
animal = Giraffe(name)
elif species == "bee":
animal = Bee(name)
else:
# Create a generic Animal
animal = Animal(name, "kibble")
# Add the animal to the list of animals
animals.append(animal)
# Get the zookeeper's name and food to feed the animals
name = getline()
food = getline()
# Create a Zookeeper object and test instance methods
zookeeper = Zookeeper(name)
zookeeper.feedAnimals(animals, food)
# main
if __name__ == "__main__":
test()
|
# Set
# A set is an unordered sequence with no duplicate elements.
# Its basic uses are membership testing and removing duplicate elements.
# Use braces { } or the set() function to create a set. Note: an empty set must be created with set(), not { }, because { } creates an empty dictionary.
student = {'Tom', 'Jim', 'Mary', 'Tom', 'Jack', 'Rose'}
print(student)  # prints the set; duplicate elements are removed automatically
if 'Rose' in student:
    print("Rose is in the set")
else:
    print("Rose is not in the set")
# Sets support set operations
a = set('abracadabra')
b = set('alacazam')
print(a)
print(b)
print(a - b)  # difference: elements in a but not in b
print(a | b)  # union: elements in a or b
print(a & b)  # intersection: elements in both a and b
print(a ^ b)  # symmetric difference: elements in exactly one of a and b
|
"""
---TASK DETAILS---
--- Day 1: No Time for a Taxicab ---
You're airdropped near Easter Bunny Headquarters in a city somewhere.
"Near", unfortunately, is as close as you can get.
The Document indicates that you should start at the given coordinates (where you just landed) and face North.
Then, follow the provided sequence: either turn left (L) or right (R) 90 degrees.
Then walk forward the given number of blocks, ending at a new intersection.
Given that you can only walk on the street grid of the city, how far is the shortest path to the destination?
For example:
Following R2, L3 leaves you 2 blocks East and 3 blocks North, or 5 blocks away.
R2, R2, R2 leaves you 2 blocks due South of your starting position, which is 2 blocks away.
R5, L5, R5, R3 leaves you 12 blocks away.
How many blocks away is Easter Bunny HQ?
--- Part Two ---
Then, you notice the instructions continue on the back of the Recruiting Document.
Easter Bunny HQ is actually at the first location you visit twice.
For example, if your instructions are R8, R4, R4, R8, the first location you visit twice is 4 blocks away, due East.
How many blocks away is the first location you visit twice?
"""
x = 0
y = 0
facing = 0
visited = []
firstDuplicateDistance = ""
def move(a):
global x
global y
a = int(a)
if facing == 0:
x += a
elif facing == 1:
y += a
elif facing == 2:
x -= a
else:
y -= a
def changeDir(idx):
    global facing
    j = directArr[idx]
    if j[0] == "L":
        facing += 1
    elif j[0] == "R":
        facing -= 1
    facing %= 4
    move(j[1:])
directions = input("Enter directions: ")
directArr = directions.split(', ')
# Part 1
for i in range(len(directArr)):
changeDir(i)
print("Part 1 = " + str((abs(x)) + (abs(y))))
# # Part 2
# for i in visited:
# if (visited[i] == (str(x) + ":" + str(y))) and (firstDuplicateDistance == ""):
# firstDuplicateDistance = (str(abs(x) + abs(y)))
# else:
# visited.append(str(x) + ":" + str(y))
#
# print(firstDuplicateDistance)
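# Part 2 above is left commented out and does not track intermediate blocks; the
# sketch below is a minimal, self-contained take on it (it reuses the same
# "R2, L3, ..." style input, walks block by block, and stops at the first
# location visited twice).
def first_repeat_distance(direction_string):
    px, py, heading = 0, 0, 0              # heading: 0=N, 1=E, 2=S, 3=W
    dx = [0, 1, 0, -1]
    dy = [1, 0, -1, 0]
    seen = {(0, 0)}
    for step in direction_string.split(', '):
        heading = (heading + (1 if step[0] == 'R' else -1)) % 4
        for _ in range(int(step[1:])):
            px += dx[heading]
            py += dy[heading]
            if (px, py) in seen:
                return abs(px) + abs(py)
            seen.add((px, py))
    return None

print("Part 2 =", first_repeat_distance(directions))   # e.g. "R8, R4, R4, R8" -> 4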
|
import json
import logging
from django.contrib.auth.decorators import login_required
from django.http import HttpResponse
from django.shortcuts import render, redirect
from .forms import InventoryCreationForm, InventoryEditFromAdmin, ToolDistributionForm, ToolDistributionFromAdmin
from .handler import InventoryManager
from thm.decorators import is_superuser
logger = logging.getLogger(__name__)
# Create your views here.
@login_required
@is_superuser
def createInventory(request):
"""View to create inventory"""
user = request.user
im = InventoryManager()
if request.method == "GET":
inventory_form = InventoryCreationForm()
return render(request, 'createinventory.html', locals())
elif request.method == "POST":
inventory_form = InventoryCreationForm(request.POST, request.FILES)
if inventory_form.is_valid():
inventory_form.save()
inventories = im.getAllInvetories()
return render(request, 'inventorylist.html', locals())
if inventory_form.errors:
logger.warning("Form has errors, %s", inventory_form.errors)
return render(request, 'createinventory.html', locals())
@login_required
@is_superuser
def viewInventory(request, inventory_id=None):
"""View to show the invetories
"""
user = request.user
im = InventoryManager()
inventory_edit=False
if request.method == "POST" and user.is_superuser:
inventory = im.getInventoryDetails(inventory_id)
inventory_form = InventoryEditFromAdmin(request.POST, request.FILES, instance=inventory)
if inventory_form.is_valid():
inventory_form.save()
inventories = im.getAllInvetories()
return render(request, 'inventorylist.html', locals())
elif inventory_id is not None and user.is_superuser:
inventory = im.getInventoryDetails(inventory_id)
inventory_form = InventoryEditFromAdmin(instance=inventory)
inventory_edit=True
return render(request, 'viewinventory.html', locals())
else:
inventories = im.getAllInvetories()
return render(request, 'inventorylist.html', locals())
@login_required
@is_superuser
def distributeTool(request, tool_id=None):
"""View to show the Tool Distribution
"""
user = request.user
im = InventoryManager()
distribution_edit = False
if request.method == "POST" and user.is_superuser:
tool_distribution_form = ToolDistributionForm(request.POST)
if tool_distribution_form.is_valid():
tool_distribution = tool_distribution_form.save(commit=False)
tool_distribution.save()
tools_selected = request.POST.getlist("tools")
for tool_id in tools_selected:
inventory = im.getInventoryDetails(inventory_id=tool_id)
tool_distribution.tools.add(inventory)
# tool_distribution.save()
tool_distribution_form.save_m2m()
tools = im.getAllTools()
return render(request, 'distributionlist.html', locals())
if tool_distribution_form.errors:
logger.warning("Form has errors: %s", tool_distribution_form.errors)
else:
tool_distribution_form = ToolDistributionForm()
return render(request, 'toolsdistribution.html', locals())
@login_required
@is_superuser
def viewToolDistribution(request, tool_id=None):
"""View to show the tools distributed
"""
user = request.user
im = InventoryManager()
tools_edit=False
if request.method == "POST" and user.is_superuser:
tool = im.getToolDetails(tool_id)
tool_distribution_form = ToolDistributionFromAdmin(request.POST, instance=tool)
if tool_distribution_form.is_valid():
tool_distribution = tool_distribution_form.save(commit=False)
tool_distribution.save()
tools_selected = request.POST.getlist("tools")
for tool_id in tools_selected:
inventory = im.getInventoryDetails(inventory_id=tool_id)
tool_distribution.tools.add(inventory)
tool_distribution_form.save_m2m()
tools = im.getAllTools()
return render(request, 'distributionlist.html', locals())
elif tool_id is not None and user.is_superuser:
tool = im.getToolDetails(tool_id)
tool_distribution_form = ToolDistributionFromAdmin(instance=tool)
tools = im.getAllTools(tool_handyman=tool)
tool_distributed=True
return render(request, 'viewToolDistribution.html', locals())
else:
tools = im.getAllTools()
return render(request, 'distributionlist.html', locals())
|
#!/usr/bin/env python
from experiment.artifacts import ExperimentArtifacts
from experiment.experiment import Experiment
from experiment.model import ProjectModel
from utils.arg_parser import TrainArgParser
from utils.logger import logger
if __name__ == '__main__':
logger.info(f"Begin train.py")
arg_parser = TrainArgParser()
args = arg_parser.get_arguments()
run_tag = f"{args.project_name}-{args.run_tag}"
dataset_path = args.input_dir
model_name = dataset_path.stem
model = ProjectModel()
artifacts_handler = ExperimentArtifacts(
run_tag=run_tag,
model_name=model_name,
base_path=args.output_dir,
)
experiment = Experiment(
model=model,
input_dir=dataset_path,
artifacts_handler=artifacts_handler,
)
experiment.run()
logger.info(f"End train.py")
|
# -*- python -*-
# Create an instance of the Dragon, have it:
# - walk() three times,
# - run() twice,
# - fly() twice,
# - displayHealth().
# When the Dragon's displayHealth() function is called, it should say 'this is a dragon!' before it displays the default information.
# You can achieve this by calling the parent's displayHealth() function.
import Animal
class Dragon(Animal.Animal):
def __init__( self, name ):
super( Dragon, self ).__init__( name )
self.health = 170
def fly( self ):
self.health -= 10
def displayHealth( self ):
print "This is a dragon!",
super( Dragon, self ).displayHealth()
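# A minimal usage sketch for the exercise described above. It assumes the Animal
# base class defines walk(), run() and displayHealth(); the dragon name "Norbert"
# is just an illustrative value.
dragon = Dragon( "Norbert" )
dragon.walk()
dragon.walk()
dragon.walk()
dragon.run()
dragon.run()
dragon.fly()
dragon.fly()
dragon.displayHealth()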
|
"""JOB_PORTAL URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.urls import path,include
from django.conf import settings
#for receiving images
from django.conf.urls.static import static
from . import views
urlpatterns = [
path('', views.index,name="index"),
path('reqruiter_login/', views.reqruiter_login,name="reqruiter_login"),
path('user_login/', views.user_login,name="user_login"),
path('user_signup/', views.user_signup,name="user_signup"),
path('reqruiter_signup/', views.reqruiter_signup,name="reqruiter_signup"),
path('dashboard/', views.dashboard,name="dashboard"),
path('Rdashboard/', views.Rdashboard,name="Rdashboard"),
path('add_job/', views.add_job,name="add_job"),
path('job_list/', views.job_list,name="job_list"),
path('edit_job/<int:pid>/', views.edit_job,name="edit_job"),
path('edit_studentprofile/<int:pid>/', views.edit_studentprofile,name="edit_studentprofile"),
path('edit_reqruiterprofile/<int:pid>/', views.edit_reqruiterprofile,name="edit_reqruiterprofile"),
path('delete_job/<int:pid>/', views.delete_job,name="delete_job"),
path('delete_candidate/<int:pid>/', views.delete_candidate,name="delete_candidate"),
path('delete_myjob/<int:pid>/', views.delete_myjob,name="delete_myjob"),
path('user_logout/', views.user_logout,name="user_logout"),
path('reqruiter_logout/', views.reqruiter_logout,name="reqruiter_logout"),
path('change_password/', views.change_password,name="change_password"),
path('change_password2/', views.change_password2,name="change_password2"),
path('latest_job/', views.latest_job,name="latest_job"),
# path('latest_job/', latest_job,name="latest_job"),
path('apply/<pid>/', views.apply,name="apply"),
path('view_job/', views.view_job,name="view_job"),
path('candidate_apply/', views.candidate_apply,name="candidate_apply"),
path('contact/', views.contact,name="contact"),
path('searchk/', views.searchk,name="searchk"),
]+static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)  # for receiving images
|
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_squared_error
import xgboost as xgb
data = pd.read_csv('Data/train.csv', sep=',', header=0)
GRADES = ['Ex', 'Gd', 'TA', 'Fa', 'Po']
data['LotFrontage'].fillna(0, inplace=True)
data['LotShape'] = np.where(data['LotShape'] == 'Reg', 1, 0)
data['CentralAir'] = np.where(data['CentralAir'] == 'Y', 1, 0)
data['GarageCars'] = np.where(data['GarageCars'] > 2, 1, 0)
data['TotRmsAbvGrd'] = np.where(data['TotRmsAbvGrd'] > 6, 1, 0)
data['Electrical'] = np.where(data['Electrical'] == 'SBrkr', 1, 0)
neighborhoods = ['Crawfor', 'ClearCr', 'Somerst', 'Veenker', 'Timber', 'StoneBr', 'NridgHt', 'NoRidge']
data['Neighborhood'] = np.where(data['Neighborhood'].isin(neighborhoods), 1, 0)
data['MasVnrArea'].fillna(0, inplace=True)
data['BldgType'] = np.where(data['BldgType'] == '1Fam', 1, 0)
data['HouseStyle'] = np.where((data['HouseStyle'] == '2.5Fin') | (data['HouseStyle'] == '2Story'), 1, 0)
data['HalfBath'] = np.where(data['HalfBath'] == 1, 1, 0)
data['FullBath'] = np.where(data['FullBath'] > 1, 1, 0)
data['LandSlope'] = np.where((data['LandSlope'] == 'Sev') | (data['LandSlope'] == 'Mod'), 1, 0)
data['1stFlrSF'] = np.log(data['1stFlrSF'])
data['2ndFlrSF'] = np.log1p(data['2ndFlrSF'])
data['ExterQual'] = np.where(data['ExterQual'].isin(GRADES[0:2]), 1, 0)
data['KitchenQual'] = np.where(data['KitchenQual'].isin(GRADES[0:2]), 1, 0)
data['BsmtQual'] = np.where(data['BsmtQual'].isin(GRADES[0:1]), 1, 0)
data['BsmtFullBath'] = np.where(data['BsmtFullBath'] > 0, 1, 0)
data['BedroomAbvGr'] = np.where(data['BedroomAbvGr'] > 1, 1, 0)
data['Foundation'] = np.where(data['Foundation'] == 'PConc', 1, 0)
data['YearBuilt'] = np.log(data['YearBuilt'])
data['BsmtFinType1'] = np.where(data['BsmtFinType1'] == 'GLQ', 1, 0)
data['HeatingQC'] = np.where(data['HeatingQC'].isin(GRADES[0:1]), 1, 0)
data['GarageQual'] = np.where(data['GarageQual'].isin(GRADES[0:3]), 1, 0)
config = ['CulDSac', 'FR3']
data['LotConfig'] = np.where(data['LotConfig'].isin(config), 1, 0)
data['BsmtFinSF1'] = np.log1p(data['BsmtFinSF1'])
data['LandContour'] = np.where(data['LandContour'] == 'HLS', 1, 0)
data['SaleCondition'] = np.where((data['SaleCondition'] == 'Partial') | (data['SaleCondition'] == 'Normal'), 1, 0)
data['Functional'] = np.where(data['Functional'] == 'Typ', 1, 0)
conditions = ['PosA', 'PosN', 'RRNn', 'RRNe']
data['Condition1'] = np.where(data['Condition1'].isin(conditions), 1, 0)
sale_types = ['New', 'Con', 'CWD', 'ConLI']
data['SaleType'] = np.where(data['SaleType'].isin(sale_types), 1, 0)
zonnings = ['FV', 'RL']
data['MSZoning'] = np.where(data['MSZoning'].isin(zonnings), 1, 0)
classes = [60, 120, 75, 20]
data['MSSubClass'] = np.where(data['MSSubClass'].isin(classes), 1, 0)
X = data[['LotArea', 'OverallQual', 'Fireplaces', 'OpenPorchSF', 'LotShape', 'CentralAir', 'GarageCars',
'GrLivArea', 'Electrical', 'LotFrontage', 'Neighborhood', 'MasVnrArea', 'OverallCond', 'BldgType',
'HouseStyle', 'HalfBath', 'FullBath', 'LandSlope', '1stFlrSF', '2ndFlrSF', 'ExterQual', 'KitchenQual',
'BsmtQual', 'BsmtFullBath', 'BedroomAbvGr', 'Foundation', 'YearBuilt', 'BsmtFinType1', 'HeatingQC',
'GarageQual', 'LotConfig', 'BsmtFinSF1', 'LandContour', 'SaleCondition', 'Functional', 'Condition1',
'SaleType', 'MSZoning', 'MSSubClass']]
y = data['SalePrice'].values
print(X.head())
X_train, X_test, y_train, y_test = train_test_split(
X,
y,
test_size=0.2,
random_state=21
)
mdl = LinearRegression(fit_intercept=True)
mdl = mdl.fit(X_train, y_train)
test_prediction = mdl.predict(X_test)
train_prediction = mdl.predict(X_train)
print('Error on a train set: ', np.sqrt(mean_squared_error(np.log(y_train), np.log(train_prediction))))
print('Error on a test set: ', np.sqrt(mean_squared_error(np.log(y_test), np.log(test_prediction))))
train_errors = np.abs(y_train - train_prediction)
percentile99 = np.percentile(train_errors, 99)
is_big_error = np.where(train_errors > percentile99, 1, 0)
# delete outliers
inline_mask = train_errors <= percentile99
X_train, y_train = X_train[inline_mask], y_train[inline_mask]
# X_train.to_csv('X_train.csv')
# y_train.to_csv('y_train.csv')
test_errors = np.abs(y_test - test_prediction)
percentile99 = np.percentile(test_errors, 99)
is_big_error = np.where(test_errors > percentile99, 1, 0)
inline_mask = test_errors <= percentile99
X_test, y_test = X_test[inline_mask], y_test[inline_mask]
mdl = LinearRegression(fit_intercept=True)
mdl = mdl.fit(X_train, y_train)
test_prediction = mdl.predict(X_test)
train_prediction = mdl.predict(X_train)
print('Error on a train set: ', np.sqrt(mean_squared_error(np.log(y_train), np.log(train_prediction))))
print('Error on a test set: ', np.sqrt(mean_squared_error(np.log(y_test), np.log(test_prediction))))
X_train['SalePrice'] = y_train
X_test['SalePrice'] = y_test
# X_train.to_csv('final_train.csv', index=False)
# X_test.to_csv('final_test.csv', index=False)
# drop the SalePrice column attached above before building the DMatrices, otherwise the target leaks into the features
dtrain = xgb.DMatrix(X_train.drop(columns=['SalePrice']), y_train)
dtest = xgb.DMatrix(X_test.drop(columns=['SalePrice']), y_test)
params = {
'objective': 'reg:squarederror',
'learning_rate': 0.24,
'subsample': 0.75,
'colsample_bytree': 0.8,
'colsample_bylevel': 0.85,
'lambda': 0.95,
'gamma': 1.5,
'max_depth': 3,
'min_child_weight': 2,
'eval_metric': 'rmse',
'seed': 21
}
mdl = xgb.train(
params=params,
dtrain=dtrain,
num_boost_round=500,
early_stopping_rounds=20,
evals=[(dtrain, 'Train'), (dtest, 'Test')]
)
cv_results = xgb.cv(
params=params,
dtrain=dtrain,
num_boost_round=500,
early_stopping_rounds=20,
nfold=4,
verbose_eval=True
)
|
print j,g
print i,h
from skimage import img_as_ubyte
from skimage import io
import matplotlib.pyplot as plt
print out.min(), imgr.min(), out.max(), img.max(), type(out[0,0]), type(imgr[0,0])
print type(op), op.shape, type(op[0,0])
print op.max(), op.min()
boole= imgr < 188
ac=img_as_ubyte(boole)
plt.imshow(boole,cmap='gray')
print 'lol', ac.shape, type(ac), ac.shape, type(ac[1,1])
io.imsave(r'C:\Users\vbhv\Desktop\abc\Band,,,_thresh_without_sobel.jpg',ac)
|
#
# This file is part of LUNA.
#
# Copyright (c) 2020 Great Scott Gadgets <info@greatscottgadgets.com>
# SPDX-License-Identifier: BSD-3-Clause
""" iCEBreaker Platform definitions.
The iCEBreaker Bitsy is a non-core board. To use it, you'll need to set your LUNA_PLATFORM variable:
> export LUNA_PLATFORM="luna.gateware.platform.icebreaker:IceBreakerBitsyPlatform"
The full size iCEBreaker does not have an explicit USB port. Instead, you'll need to connect a USB breakout.
The full iCEBreaker is an -unsupported- platform! To use it, you'll need to set your LUNA_PLATFORM variable:
> export LUNA_PLATFORM="luna.gateware.platform.icebreaker:IceBreakerPlatform"
"""
import os
import logging
import subprocess
from amaranth import *
from amaranth.build import *
from amaranth.vendor.lattice_ice40 import LatticeICE40Platform
from amaranth_boards.resources import *
from amaranth_boards.icebreaker import ICEBreakerPlatform as _IceBreakerPlatform
from amaranth_boards.icebreaker_bitsy import ICEBreakerBitsyPlatform as _IceBreakerBitsyPlatform
from .core import LUNAPlatform
class IceBreakerDomainGenerator(Elaboratable):
""" Creates clock domains for the iCEBreaker. """
def elaborate(self, platform):
m = Module()
# Create our domains...
m.domains.sync = ClockDomain()
m.domains.usb = ClockDomain()
m.domains.usb_io = ClockDomain()
m.domains.fast = ClockDomain()
# ... ensure our clock is never instantiated with a Global buffer.
platform.lookup(platform.default_clk).attrs['GLOBAL'] = False
# ... create our 48 MHz IO and 12 MHz USB clocks...
clk48 = Signal()
clk12 = Signal()
m.submodules.pll = Instance("SB_PLL40_2F_PAD",
i_PACKAGEPIN = platform.request(platform.default_clk, dir="i"),
i_RESETB = Const(1),
i_BYPASS = Const(0),
o_PLLOUTGLOBALA = clk48,
o_PLLOUTGLOBALB = clk12,
# Create a 48 MHz PLL clock...
p_FEEDBACK_PATH = "SIMPLE",
p_PLLOUT_SELECT_PORTA = "GENCLK",
p_PLLOUT_SELECT_PORTB = "SHIFTREG_0deg",
p_DIVR = 0,
p_DIVF = 63,
p_DIVQ = 4,
p_FILTER_RANGE = 1,
)
# ... and constrain them to their new frequencies.
platform.add_clock_constraint(clk48, 48e6)
platform.add_clock_constraint(clk12, 12e6)
# We'll use our 48MHz clock for everything _except_ the usb domain...
m.d.comb += [
ClockSignal("usb_io") .eq(clk48),
ClockSignal("fast") .eq(clk48),
ClockSignal("sync") .eq(clk48),
ClockSignal("usb") .eq(clk12)
]
return m
class IceBreakerPlatform(_IceBreakerPlatform, LUNAPlatform):
name = "iCEBreaker"
clock_domain_generator = IceBreakerDomainGenerator
default_usb_connection = "usb_pmod_1a"
additional_resources = [
# iCEBreaker official pmod, in 1A and 1B.
DirectUSBResource("usb_pmod_1a", 0, d_p="47", d_n="45", pullup="4",
attrs=Attrs(IO_STANDARD="SB_LVCMOS")),
DirectUSBResource("usb_pmod_1b", 0, d_p="34", d_n="31", pullup="38",
attrs=Attrs(IO_STANDARD="SB_LVCMOS")),
# Other USB layouts.
DirectUSBResource("tnt_usb", 0, d_p="31", d_n="34", pullup="38",
attrs=Attrs(IO_STANDARD="SB_LVCMOS")),
DirectUSBResource("keckmann_usb", 0, d_p="43", d_n="38", pullup="34",
attrs=Attrs(IO_STANDARD="SB_LVCMOS")),
]
def __init__(self, *args, **kwargs):
logging.warning("This platform is not officially supported, and thus not tested. Your results may vary.")
logging.warning("Note also that this platform does not use the iCEBreaker's main USB port!")
logging.warning("You'll need to connect a cable or pmod. See the platform file for more info.")
super().__init__(*args, **kwargs)
self.add_resources(self.additional_resources)
class IceBreakerBitsyPlatform(_IceBreakerBitsyPlatform, LUNAPlatform):
name = "iCEBreaker Bitsy"
clock_domain_generator = IceBreakerDomainGenerator
default_usb_connection = "usb"
def toolchain_program(self, products, name):
dfu_util = os.environ.get("DFU_UTIL", "dfu-util")
with products.extract("{}.bin".format(name)) as bitstream_filename:
subprocess.check_call([dfu_util, "-d", "1209:6146", "-a", "0", "-R", "-D", bitstream_filename])
|
#5 List of scholarship recipients
name = ['Войкин Владимир','Разенко Виктория','Ложкина Юлия','Калинина Татьяна', 'Тишина Дарья', 'Самостроенко Алена']
x = [[2,4,3,3],[5,5,4,5],[3,5,4,4],[4,4,4,5],[4,3,4,4],[4,5,4,5]]
w = ['Russian language', 'Mathematics', 'Physics', 'History']
y = 0
print('Scholarship recipients:')
for i in range(len(name)):
k = 0
for j in range(len(x[i])):
if (x[i][j] == 5) or (x[i][j] == 4):
k+=1
if k == 4:
y+=1
print('Count:', y, 'students')
#6 List of students falling behind
print('Falling behind in at least one subject:')
for i in range(len(name)):
k = 0
for j in range(len(x[i])):
if x[i][j] == 2:
k+=1
if k >= 1:
print(name[i])
#4 List of students on the increased scholarship
print('Receive the increased scholarship: ')
for i in range(len(name)):
k = 0
for j in range(len(x[i])):
if x[i][j] == 5:
k+=1
if k == 4:
print(name[i])
#1 Group roster with grades
for i in range(len(name)):
summ = 0
for j in range(len(x[i])):
summ += x[i][j]
print(name[i],':',x[i])
#2 Average grade of each student
for i in range(len(name)):
summ = 0
for j in range(len(x[i])):
summ += x[i][j]
srd = summ/4
print(name[i], ':', 'Average grade:', srd)
#3 Group average grade per subject
for q in range(len(x[i])):
sr=0
for u in range(len(x)):
sr+=x[u][q]
print(w[q],':',round(sr/6,2))
|
from django.contrib import admin
from . models import ClienteBD
admin.site.register(ClienteBD)
# Register your models here.
|
#!/usr/bin/env python3
# Advent of code Year 2019 Day 4 solution
# Author = seven
# Date = December 2019
with open((__file__.rstrip("code.py") + "input.txt"), 'r') as input_file:
input = input_file.read()
start, end = tuple([int(i) for i in input.split('-')])
def int_to_list(num):
return [int(d) for d in str(num)]
def list_to_int(num_as_list):
return int(''.join(map(str, num_as_list)))
hits = []
n = int_to_list(start)
pos = 0
while True:
# check if end hit
as_int = list_to_int(n)
if as_int > end:
break
# check if valid
prev = n[0]
is_increasing = True
has_double = False
for digit in range(1, 6):
if n[digit] == prev:
has_double = True
if n[digit] < prev:
is_increasing = False
break
prev = n[digit]
if is_increasing and has_double:
hits.append(as_int)
# find pos to increase
if all([9 == digit_to_right for digit_to_right in n[pos:]]):
# increase self and set all to the right to self
n[pos] += 1
for right_digit_pos in range(pos + 1, len(n)):
n[right_digit_pos] = n[pos]
else:
as_int += 1
n = int_to_list(as_int)
print("Part One : " + str(len(hits)))
filtered_hit_num = 0
# Remove larger groups of matching digits
for num_as_int in hits:
n = int_to_list(num_as_int)
count = 1
has_valid_double = False
valid_double = None
prev = n[0]
for digit in range(1, 6):
if n[digit] == prev:
if has_valid_double:
if valid_double == n[digit]:
has_valid_double = False
count += 1
else:
count = 1
elif count < 2 or not valid_double == n[digit]:
has_valid_double = True
valid_double = n[digit]
count = 2
prev = n[digit]
if has_valid_double:
filtered_hit_num += 1
print("Part Two : " + str(filtered_hit_num))
|
from datetime import datetime, timedelta
from os import environ
from pytz import utc
from rq.decorators import job
from qmk_redis import redis
from qmk_storage import list_objects, delete
# Configuration
STORAGE_TIME_HOURS = int(environ.get('S3_STORAGE_TIME', 24))
@job('default', connection=redis)
def cleanup_storage():
storage_time = timedelta(hours=STORAGE_TIME_HOURS)
now = datetime.now(utc)
files = list_objects()
if files:
i = 0
for file in files:
file_age = now - file['LastModified']
if 'qmk_api_tasks_test_compile' in file['Key'] or file_age > storage_time:
i += 1
print('Deleting #%s: %s (Age: %s)' % (i, file['Key'], file_age))
delete(file['Key'])
return True
if __name__ == '__main__':
cleanup_storage()
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 11:21
# @Author : Administrator
# @Site :
# @File : collections_overview
# @Software: PyCharm
from collections import *
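# Illustrative sketch of a few commonly used collections types (not tied to any
# project code; the sample values below are made up).
from collections import Counter, defaultdict, namedtuple, deque
word_counts = Counter(["spam", "eggs", "spam"])   # Counter({'spam': 2, 'eggs': 1})
groups = defaultdict(list)                        # missing keys start as empty lists
groups["evens"].append(2)
Point = namedtuple("Point", ["x", "y"])           # lightweight immutable record
p = Point(1, 2)
recent = deque(maxlen=3)                          # fixed-size FIFO buffer
recent.extend([1, 2, 3, 4])                       # keeps only the last three: 2, 3, 4
print(word_counts, dict(groups), p, list(recent))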
|
from __future__ import unicode_literals
from codecs import open as codecs_open
from setuptools import setup, find_packages
with codecs_open('README.md', encoding='utf-8') as f:
long_description = f.read()
setup(name='tilereduce',
version='0.0.1',
description="Run tile-reduce map jobs in Python ",
long_description=long_description,
classifiers=[],
keywords='',
author="Jacob Wasserman",
author_email='jwasserman@gmail.com',
url='https://github.com/jwass/tilereduce',
license='MIT',
packages=find_packages(exclude=['ez_setup', 'examples', 'tests']),
include_package_data=True,
zip_safe=False,
install_requires=[
'mapbox-vector-tile>=0.1.0',
],
extras_require={
'test': ['pytest'],
},
)
|
#!/usr/bin/python
########################################
# Patch/descriptor extraction utility. #
# #
# Author: ilja.kuzborskij@idiap.ch #
########################################
from argparse import ArgumentParser
from glob import glob
from os.path import splitext, join, basename, exists
from os import makedirs, remove
from common import init_logging, get_logger
import random
import math
from itertools import product
from PIL import Image
import numpy as np
from h5py import File as HDF5File
from h5py import special_dtype
from common import get_desc_name
import CaffeExtractorPlus, NewCaffeExtractor
def get_arguments():
log = get_logger()
parser = ArgumentParser(description='Patch/descriptor extraction utility.')
parser.add_argument("--patches", dest="patches", type=int, default=1000,
help="Number of patches to extract per image.")
parser.add_argument("--patch-size", dest="patch_size", type=int, default=16,
help="Size of the patch.")
parser.add_argument("--image-dim", dest="image_dim", type=int,
help="Size of the largest image dimension.")
parser.add_argument("--levels", dest="levels", type=int, default=3,
help="Number of hierarchical levels to extract patches from. Procedure starts from <patch-size> and divides it by 2 at each level.")
parser.add_argument("--descriptor", dest="descriptor", default='DECAF',
choices=['DECAF'],
help="Type of feature descriptor.")
parser.add_argument("--input-dir", dest="input_dir",
help="Directory with JPEG images.")
parser.add_argument("--output-dir", dest="output_dir",
help="Directory to put HDF5 files to.")
parser.add_argument("--num-train-images", dest="num_train_images", type=int,
help="Number of train images.")
parser.add_argument("--num-test-images", dest="num_test_images", type=int,
help="Number of test images.")
parser.add_argument("--split", dest="split", type=int,
help="Split to extract.")
parser.add_argument("--oversample", dest="oversample", action='store_true',
help="Add patch flipping.")
parser.add_argument("--decaf-oversample", dest="decaf_oversample", action='store_true',
help="Caffe oversampling. Flip X, Y, etc.")
parser.add_argument("--layer-name", dest="layer_name",
help="Decaf layer name.")
parser.add_argument("--network-data-dir", dest="network_data_dir",
help="Directory holding the network weights.")
parser.add_argument("--patch-method", dest="patch_method",
help="What method to use to extract patches.")
args = parser.parse_args()
if not args.input_dir:
log.error('input-dir option is required, but not present.')
exit()
if not args.output_dir:
log.error('output-dir option is required, but not present.')
exit()
if not args.image_dim:
log.error('image-dim option is required, but not present.')
exit()
if not args.num_train_images:
log.error('num_train_images option is required, but not present.')
exit()
if not args.num_test_images:
log.error('num_test_images option is required, but not present.')
exit()
return args
class Dataset:
##################################################################################
# Class responsible for storing descriptors and their metadata to the HDF5 file. #
# The process of storing is incremental by calling append(). #
##################################################################################
def __init__(self, input_dir, output_dir, num_files, patches, feature_type,
patch_dim=128, patch_type='uint8', pos_type='uint16'):
self.log = get_logger()
output_subdir = output_dir
try:
makedirs(output_subdir)
except:
pass
output_filename = join(output_subdir, basename(input_dir.strip('/')) + '.hdf5')
self.log.debug('Saving extracted descriptors to %s', output_filename)
if exists(output_filename):
self.mode = 'appending'
self.log.warn('File "%s" already exists. Trying to continue.', output_filename)
self.hfile = HDF5File(output_filename, 'a', compression='gzip', fillvalue=0.0)
self.patches6 = self.hfile['patches6']
self.patches7 = self.hfile['patches7']
self.positions = self.hfile['positions']
self.image_index = self.hfile['image_index']
self.keys = self.hfile['keys']
self.key_set = set(self.keys)
else:
self.mode = 'creating'
dt = special_dtype(vlen=bytes)
patches += 10 #for safety
self.hfile = HDF5File(output_filename, 'w', compression='gzip', compression_opts=9, fillvalue=0.0)
self.patches6 = self.hfile.create_dataset('patches6', (num_files * patches, patch_dim), dtype=patch_type, chunks=True)
self.patches7 = self.hfile.create_dataset('patches7', (num_files * patches, patch_dim), dtype=patch_type, chunks=True)
self.positions = self.hfile.create_dataset('positions', (num_files * patches, 2), dtype=pos_type, chunks=True)
self.image_index = self.hfile.create_dataset('image_index', (num_files, 2), dtype='uint64') # Start, End positions of an image
self.keys = self.hfile.create_dataset('keys', (num_files, ), dtype=dt)
self.key_set = set()
self.patches6.attrs['cursor'] = 0
self.patches6.attrs['feature_type'] = feature_type
self.patches6.attrs['n_patches'] = patches
self.output_filename = output_filename
def __exit__(self, type, value, traceback):
self.hfile.close()
def __contains__(self, key):
return key in self.key_set
def append(self, key, patches6, patches7, pos):
num_patches = patches6.shape[0]
assert patches6.shape[0]==patches7.shape[0]
num_keys = len(self.key_set)
assert(num_patches == pos.shape[0])
start = self.patches6.attrs['cursor']
end = self.patches6.attrs['cursor'] + num_patches
self.patches6[start:end, :] = patches6
self.patches7[start:end, :] = patches7
self.positions[start:end, :] = pos
self.image_index[num_keys, 0] = start
self.image_index[num_keys, 1] = end
self.keys[num_keys] = key
self.key_set.add(key)
self.patches6.attrs['cursor'] += num_patches
def close(self):
self.hfile.close()
def extract_decaf(input_dir, output_dir, network_data_dir, files, num_patches, patch_size, image_dim, levels, oversample, layer_name, decaf_oversample, extraction_method):
log = get_logger()
BATCH_SIZE = 16
#ex = DecafExtractor.DecafExtractor(layer_name)
#ex = CaffeExtractorPlus.CaffeExtractorPlus(
#network_data_dir + 'hybridCNN_iter_700000_upgraded.caffemodel',
#network_data_dir + 'hybridCNN_deploy_no_relu_upgraded.prototxt',
#network_data_dir + 'hybrid_mean.npy')
ex = NewCaffeExtractor.NewCaffeExtractor()
#import pdb; pdb.set_trace()
ex.set_parameters(patch_size, num_patches, levels, image_dim, BATCH_SIZE)
if oversample:
log.info('Extracting with mirror combinations (X, Y, X-Y, Y-X)')
ex.enable_data_augmentation()
ds = Dataset(input_dir, output_dir, len(files),
num_patches * ex.get_number_of_features_per_image(),
'CAFFE', patch_dim=ex.get_descriptor_size(),
patch_type='float32', pos_type='uint16')
for f in files:
if f in ds:
log.info('Skipping <%s>. Already in the dataset.', basename(f))
continue
try:
features = ex.extract_image(f)
except:
features = None
if features is not None and features.cursor > 0:
(patches6, patches7, positions) = features.get()
ds.append(f, patches6, patches7, positions)
if __name__ == '__main__':
init_logging()
log = get_logger()
args = get_arguments()
# Determining image files to extract from
files = [ f for f in glob( join(args.input_dir, '*') )
if splitext(f.lower())[1] in ['.jpg', '.jpeg'] ]
# Determining which function to use for extraction
if args.descriptor == 'DECAF':
extract = extract_decaf
else:
raise ValueError('Only DECAF descriptor is supported.')
if args.split >= 0:
train_ix = (0, args.num_train_images)
test_ix = (args.num_train_images, args.num_train_images + args.num_test_images)
# Compiling directory name that will hold extracted descriptors
tag = get_desc_name(dict(descriptor = args.descriptor,
patches_per_image = args.patches,
patch_size = args.patch_size,
levels = args.levels,
image_dim = args.image_dim,
num_train_images = args.num_train_images,
num_test_images = args.num_test_images,
oversample=args.oversample,
decaf_layer=args.layer_name,
decaf_oversample=args.decaf_oversample
))
# Extracting split
random.shuffle(files)
train_files = files[train_ix[0]:train_ix[1]]
test_files = files[test_ix[0]:test_ix[1]]
train_files = train_files[:args.num_train_images]
test_files = test_files[:args.num_test_images]
# Checking for train/test file overlap
assert(len(set(train_files).intersection(set(test_files))) == 0)
log.info('Extracting from training files...')
output_dirname = join(args.output_dir, 'train', 'split_%d' % args.split)
extract(args.input_dir, output_dirname, args.network_data_dir, train_files, args.patches, args.patch_size, args.image_dim, args.levels, args.oversample, args.layer_name, args.decaf_oversample, args.patch_method)
log.info('Extracting from testing files...')
output_dirname = join(args.output_dir, 'test', 'split_%d' % args.split)
extract(args.input_dir, output_dirname, args.network_data_dir, test_files, args.patches, args.patch_size, args.image_dim, args.levels, args.oversample, args.layer_name, args.decaf_oversample, args.patch_method)
else:
log.info('Extracting all files...')
output_dirname = args.output_dir
extract(args.input_dir, output_dirname, args.network_data_dir, files, args.patches, args.patch_size, args.image_dim, args.levels, args.oversample, args.layer_name, args.decaf_oversample, args.patch_method)
|
# cleaning data in python
# full-stack data analysis
# data analysis is more than just fitting models: 1. understand, 2. tidy/reshape, 3. clean, 4. combine
# steps of data cleaning:
# look at your data, tidy/reshape your data, clean and prepare your data, then run the analysis
# this course focuses on cleaning data
# end goal: producing a data set that can be used to fit a model
# common data problems (a small illustration follows the CSV load below):
# inconsistent column names, missing data, outliers, duplicate rows, untidy data, data that still needs processing, column types that signal unexpected values
import pandas as pd
df = pd.read_csv('E:\csvdhf5xlsxurlallfiles/all_medalists.csv')
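# Illustrative sketch of the "common data problems" listed above, on a tiny
# hand-made DataFrame (the values are made up and unrelated to the medals data).
example = pd.DataFrame({'city': ['Athens', 'Paris', 'Paris', None],
                        'medals': [10, 7, 7, 3]})
print(example.isnull().sum())            # missing values per column
print(example.duplicated().sum())        # number of fully duplicated rows
print(example.drop_duplicates().shape)   # shape after removing duplicates
print(example.dtypes)                    # column types can signal unexpected values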
print(df)
print(df.head())
print(df.tail())
print(df.head(10))
print(df.tail(10))
print(df.columns)  # .columns and .shape are attributes, not methods, so they don't need parentheses
print(df.shape)
print(df.info())
print(df[['Medal']])
print(df.iloc[29213])#pandas series
print(df.iloc[[29213]])#pandas dataframe
#Exploratory data analysis
#frequency counts
print(df['Sport'].value_counts(dropna=False))
print(df['City'].value_counts(dropna=True))
print(df['City'].value_counts(dropna=False).head())
print(df.describe())#describe() only with numeric data
print(df['City'].describe())
#visual Exploratory data analysis
# bar plots are used for discrete counts, histograms for continuous data
import matplotlib.pyplot as plt
print(df.describe())
print(df.info())
df.Edition.plot('hist')
df.hist('Edition')
plt.show()
print(df.Edition>1980)
print(df[df.Edition>1980])
# Boxplots: visualize basic summary statistics - outliers, min/max, 25th/50th/75th percentiles
df.boxplot(column='Edition')
plt.show()
df['Edition'].plot(kind='hist',rot=70, logx=True, logy=True)
plt.show()
df.boxplot(column='Edition', rot=70)
plt.show()
|
# -*- coding: utf-8 -*-
from django.shortcuts import get_object_or_404, redirect
from annoying.decorators import render_to
from django.contrib.auth.decorators import login_required
from Aluno.views.utils import aluno_exist
from Avaliacao.Questao.models import QuestaoDeAvaliacao
@aluno_exist
@login_required
@render_to('avaliacao/questao/gabarito.html')
def gabaritoQuestao(request,questao_id):
aluno = request.user.aluno_set.get()
questaoAvaliacao = get_object_or_404(QuestaoDeAvaliacao,pk=questao_id)
questao = questaoAvaliacao.questao
# in case the student tries to access the answer key before the assessment has finished
if not questaoAvaliacao.avaliacao.terminada or \
not questaoAvaliacao.avaliacao.aluno.pk == aluno.pk:
return redirect('/')
# the student can only see the answer key for the questions they answered
if not questaoAvaliacao.avaliacao.aluno.pk == aluno.pk:
return redirect('/')
return locals()
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
GNU AFFERO GENERAL PUBLIC LICENSE
Version 3, 19 November 2007
"""
import Explicator.explicator as e
def typemsg(msg):
"""
Analyzes the type of the message and detects whether it is a question or an exclamation.
If it is a question, the message is forwarded to explicator.
:param msg: message sent to interceptor, already lower-cased
:return: a boolean, True if the msg is a question, False if it is not recognized as a question
"""
# Our set of keywords that may appear in a question
mot_cle = {"pourquoi", "quand", "quoi", "comment", "ça va", "et toi", "qu'est-ce", "où", "informations", "qui"}
if mot_cle.intersection(msg.split(" ")):
e.explication(msg)
return True
return False
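# Minimal usage sketch (assumes the Explicator package imported above is available;
# the sample sentences are illustrative). Note that multi-word keys such as "ça va"
# can never match a plain split(" "), so they are effectively ignored.
if __name__ == "__main__":
    print(typemsg("pourquoi le ciel est-il bleu ?"))   # True: contains the keyword "pourquoi"
    print(typemsg("bonjour tout le monde"))            # False: no question keyword found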
|
import json
import unittest
import pyyoutube
import responses
class ApiChannelSectionTest(unittest.TestCase):
BASE_PATH = "testdata/apidata/channel_sections/"
BASE_URL = "https://www.googleapis.com/youtube/v3/channelSections"
with open(BASE_PATH + "channel_sections_by_id.json", "rb") as f:
CHANNEL_SECTIONS_BY_ID = json.loads(f.read().decode("utf-8"))
with open(BASE_PATH + "channel_sections_by_ids.json", "rb") as f:
CHANNEL_SECTIONS_BY_IDS = json.loads(f.read().decode("utf-8"))
with open(BASE_PATH + "channel_sections_by_channel.json", "rb") as f:
CHANNEL_SECTIONS_BY_CHANNEL = json.loads(f.read().decode("utf-8"))
def setUp(self) -> None:
self.api = pyyoutube.Api(api_key="api key")
def testGetChannelSectionsById(self) -> None:
section_id = "UCa-vrCLQHviTOVnEKDOdetQ.nGzAI5pLbMY"
section_ids = [
"UC_x5XG1OV2P6uZZ5FSM9Ttw.npYvuMz0_es",
"UC_x5XG1OV2P6uZZ5FSM9Ttw.9_wU0qhEPR8",
]
with responses.RequestsMock() as m:
m.add("GET", self.BASE_URL, json=self.CHANNEL_SECTIONS_BY_ID)
m.add("GET", self.BASE_URL, json=self.CHANNEL_SECTIONS_BY_IDS)
section_res = self.api.get_channel_sections_by_id(
section_id=section_id,
)
self.assertEqual(section_res.kind, "youtube#channelSectionListResponse")
self.assertEqual(len(section_res.items), 1)
self.assertEqual(section_res.items[0].id, section_id)
section_multi_res = self.api.get_channel_sections_by_id(
section_id=section_ids, parts=["id", "snippet"], return_json=True
)
self.assertEqual(len(section_multi_res["items"]), 2)
self.assertIn(section_multi_res["items"][1]["id"], section_ids)
def testGetChannelSectionsByChannel(self) -> None:
channel_id = "UCa-vrCLQHviTOVnEKDOdetQ"
with responses.RequestsMock() as m:
m.add("GET", self.BASE_URL, json=self.CHANNEL_SECTIONS_BY_CHANNEL)
section_by_channel = self.api.get_channel_sections_by_channel(
channel_id=channel_id,
)
self.assertEqual(len(section_by_channel.items), 3)
self.assertEqual(
section_by_channel.items[0].id, "UCa-vrCLQHviTOVnEKDOdetQ.jNQXAC9IVRw"
)
section_by_me = self.api.get_channel_sections_by_channel(
mine=True,
return_json=True,
)
self.assertEqual(
section_by_me["items"][2]["id"], "UCa-vrCLQHviTOVnEKDOdetQ.nGzAI5pLbMY"
)
|
import sys
sys.path.insert(0, '/home/graphicsminer/Projects/image-captioning/data-prepare/coco/PythonAPI')
import matplotlib
matplotlib.use('Agg')
from pycocotools.coco import COCO
import matplotlib.image as mpimg
import matplotlib.pylab as plt
import scipy.misc
from LayerProvider import *
from NeuralNet import *
import numpy as np
from Utils import LoadList, LoadH5
from Trainer import Trainer
from MainLoop import *
from PrepareCOCOData import VGG_preprocess
import glob
import scipy
import pdb
from coco_utils import *
def LoadVGG_Attend(net):
data_path = '../../data/pretrained/vgg16.npy'
#data_path = '/home/kien/PycharmProjects/data/vgg16.npy'
data_dict = np.load(data_path).item()
net.layer_opts['updatable'] = False
net.layer_opts['border_mode'] = 1
W = data_dict['conv1_1'][0]
W = np.transpose(W, (3, 2, 0, 1))
b = data_dict['conv1_1'][1]
b = b.reshape(1, 64, 1, 1)
net.layer_opts['filter_shape'] = W.shape
net.content['conv1_1'] = ConvLayer(net, net.content['input_img'])
net.content['conv1_1'].W.set_value(W)
net.content['conv1_1'].b.set_value(b)
net.content['relu1_1'] = ReLULayer(net, net.content['conv1_1'])
W = data_dict['conv1_2'][0]
W = np.transpose(W, (3, 2, 0, 1))
b = data_dict['conv1_2'][1]
b = b.reshape(1, 64, 1, 1)
net.layer_opts['filter_shape'] = W.shape
net.content['conv1_2'] = ConvLayer(net, net.content['relu1_1'])
net.content['conv1_2'].W.set_value(W)
net.content['conv1_2'].b.set_value(b)
net.content['relu1_2'] = ReLULayer(net, net.content['conv1_2'])
net.layer_opts['pool_mode'] = 'max'
net.content['pool1'] = Pool2DLayer(net, net.content['relu1_2'])
W = data_dict['conv2_1'][0]
W = np.transpose(W, (3, 2, 0, 1))
b = data_dict['conv2_1'][1]
b = b.reshape(1, 128, 1, 1)
net.layer_opts['filter_shape'] = W.shape
net.content['conv2_1'] = ConvLayer(net, net.content['pool1'])
net.content['conv2_1'].W.set_value(W)
net.content['conv2_1'].b.set_value(b)
net.content['relu2_1'] = ReLULayer(net, net.content['conv2_1'])
W = data_dict['conv2_2'][0]
W = np.transpose(W, (3, 2, 0, 1))
b = data_dict['conv2_2'][1]
b = b.reshape(1, 128, 1, 1)
net.layer_opts['filter_shape'] = W.shape
net.content['conv2_2'] = ConvLayer(net, net.content['relu2_1'])
net.content['conv2_2'].W.set_value(W)
net.content['conv2_2'].b.set_value(b)
net.content['relu2_2'] = ReLULayer(net, net.content['conv2_2'])
net.content['pool2'] = Pool2DLayer(net, net.content['relu2_2'])
W = data_dict['conv3_1'][0]
W = np.transpose(W, (3, 2, 0, 1))
b = data_dict['conv3_1'][1]
b = b.reshape(1, 256, 1, 1)
net.layer_opts['filter_shape'] = W.shape
net.content['conv3_1'] = ConvLayer(net, net.content['pool2'])
net.content['conv3_1'].W.set_value(W)
net.content['conv3_1'].b.set_value(b)
net.content['relu3_1'] = ReLULayer(net, net.content['conv3_1'])
W = data_dict['conv3_2'][0]
W = np.transpose(W, (3, 2, 0, 1))
b = data_dict['conv3_2'][1]
b = b.reshape(1, 256, 1, 1)
net.layer_opts['filter_shape'] = W.shape
net.content['conv3_2'] = ConvLayer(net, net.content['relu3_1'])
net.content['conv3_2'].W.set_value(W)
net.content['conv3_2'].b.set_value(b)
net.content['relu3_2'] = ReLULayer(net, net.content['conv3_2'])
W = data_dict['conv3_3'][0]
W = np.transpose(W, (3, 2, 0, 1))
b = data_dict['conv3_3'][1]
b = b.reshape(1, 256, 1, 1)
net.layer_opts['filter_shape'] = W.shape
net.content['conv3_3'] = ConvLayer(net, net.content['relu3_2'])
net.content['conv3_3'].W.set_value(W)
net.content['conv3_3'].b.set_value(b)
net.content['relu3_3'] = ReLULayer(net, net.content['conv3_3'])
net.content['pool3'] = Pool2DLayer(net, net.content['relu3_3'])
W = data_dict['conv4_1'][0]
W = np.transpose(W, (3, 2, 0, 1))
b = data_dict['conv4_1'][1]
b = b.reshape(1, 512, 1, 1)
net.layer_opts['filter_shape'] = W.shape
net.content['conv4_1'] = ConvLayer(net, net.content['pool3'])
net.content['conv4_1'].W.set_value(W)
net.content['conv4_1'].b.set_value(b)
net.content['relu4_1'] = ReLULayer(net, net.content['conv4_1'])
W = data_dict['conv4_2'][0]
W = np.transpose(W, (3, 2, 0, 1))
b = data_dict['conv4_2'][1]
b = b.reshape(1, 512, 1, 1)
net.layer_opts['filter_shape'] = W.shape
net.content['conv4_2'] = ConvLayer(net, net.content['relu4_1'])
net.content['conv4_2'].W.set_value(W)
net.content['conv4_2'].b.set_value(b)
net.content['relu4_2'] = ReLULayer(net, net.content['conv4_2'])
W = data_dict['conv4_3'][0]
W = np.transpose(W, (3, 2, 0, 1))
b = data_dict['conv4_3'][1]
b = b.reshape(1, 512, 1, 1)
net.layer_opts['filter_shape'] = W.shape
net.content['conv4_3'] = ConvLayer(net, net.content['relu4_2'])
net.content['conv4_3'].W.set_value(W)
net.content['conv4_3'].b.set_value(b)
net.content['relu4_3'] = ReLULayer(net, net.content['conv4_3'])
# after max pooling of the 4th convolution block -> 14*14*512 image-feature-region with respect to a 224*224 input image
net.content['pool4'] = Pool2DLayer(net, net.content['relu4_3'])
W = data_dict['conv5_1'][0]
W = np.transpose(W, (3, 2, 0, 1))
b = data_dict['conv5_1'][1]
b = b.reshape(1, 512, 1, 1)
net.layer_opts['filter_shape'] = W.shape
net.content['conv5_1'] = ConvLayer(net, net.content['pool4'])
net.content['conv5_1'].W.set_value(W)
net.content['conv5_1'].b.set_value(b)
net.content['relu5_1'] = ReLULayer(net, net.content['conv5_1'])
W = data_dict['conv5_2'][0]
W = np.transpose(W, (3, 2, 0, 1))
b = data_dict['conv5_2'][1]
b = b.reshape(1, 512, 1, 1)
net.layer_opts['filter_shape'] = W.shape
net.content['conv5_2'] = ConvLayer(net, net.content['relu5_1'])
net.content['conv5_2'].W.set_value(W)
net.content['conv5_2'].b.set_value(b)
net.content['relu5_2'] = ReLULayer(net, net.content['conv5_2'])
W = data_dict['conv5_3'][0]
W = np.transpose(W, (3, 2, 0, 1))
b = data_dict['conv5_3'][1]
b = b.reshape(1, 512, 1, 1)
net.layer_opts['filter_shape'] = W.shape
net.content['conv5_3'] = ConvLayer(net, net.content['relu5_2'])
net.content['conv5_3'].W.set_value(W)
net.content['conv5_3'].b.set_value(b)
net.content['relu5_3'] = ReLULayer(net, net.content['conv5_3'])
net.content['pool5'] = Pool2DLayer(net, net.content['relu5_3'])
net.layer_opts['num_fc_node'] = 4096
net.content['fc6'] = FCLayer(net, net.content['pool5'], (1, 512, 7, 7))
W = data_dict['fc6'][0]
W = np.reshape(W,(7,7,512,4096))
W = np.transpose(W,(2,0,1,3))
W = np.reshape(W,(7*7*512,4096))
# W = np.transpose(W)
# W = np.reshape(W, (4096, 25088, 1, 1))
b = data_dict['fc6'][1]
b = b.reshape(1,4096)
net.content['fc6'].W.set_value(W)
net.content['fc6'].b.set_value(b)
net.content['fc7'] = FCLayer(net, net.content['fc6'], (1, 4096, 1, 1))
W = data_dict['fc7'][0]
# W = np.transpose(W)
# W = np.reshape(W, (4096, 4096, 1, 1))
b = data_dict['fc7'][1]
b = b.reshape(1,4096)
net.content['fc7'].W.set_value(W)
net.content['fc7'].b.set_value(b)
return net
def train_Attend_224():
trained_path = '../../data/trained_model/'
cap_data_path = "../../data/mscoco/MSCOCO_processed/MSCOCO_224_capdata_train_%d.h5"
img_data_path = "../../data/mscoco/MSCOCO_processed/MSCOCO_224_imgdata_train_%d.h5"
val_cap_data_path = "../../data/mscoco/MSCOCO_processed/MSCOCO_224_capdata_val_%d.h5"
val_img_data_path = "../../data/mscoco/MSCOCO_processed/MSCOCO_224_imgdata_val_%d.h5"
fourth_cv_mv = "../../data/mscoco/MSCOCO_processed/4thconvo_meanvar.dat"
[relu_mean, relu_std] = LoadList(fourth_cv_mv)
relu_mean = theano.shared(relu_mean.astype(theano.config.floatX))
relu_std = theano.shared(relu_std.astype(theano.config.floatX))
# LSTM params
n_word = 1004
max_len = 40
memory = theano.sandbox.cuda.cuda_ndarray.cuda_ndarray.mem_info()
#print('Memory: %.2f avail before putting train data to shared' % (memory[0]/1024./1024/1024))
#create net
net = ShowTellNet()
net = LoadVGG_Attend(net)
#net.name = "ShowAttendTellCOCO_Re7e-4"
net.name = "ShowAttendTellBugFind4"
snapshot_list = glob.glob(trained_path + net.name + '*.dat')
num_big_epoch = 5000
big_batch_size = np.asarray([2000], dtype=theano.config.floatX)
if(len(snapshot_list) == 0):
# Trainer params
trainer = Trainer()
trainer.opts['batch_size'] = 20
trainer.opts['save'] = False
trainer.opts['save_freq'] = 10
#trainer.opts['num_sample'] = num_sample
#trainer.opts['num_val_sample'] = num_val_sample
trainer.opts['validation'] = False
trainer.opts['num_epoch'] = 1
trainer.opts['dzdw_norm_thres'] = 1
trainer.opts['dzdb_norm_thres'] = 0.01
#self.opts['update_rule_name'] = 'ADAM'
net.layer_opts['updatable'] = True
# Setting params
net.net_opts['l1_learning_rate'] = np.asarray(0.005, theano.config.floatX)
net.reset_opts['min_lr'] = np.asarray(0.005, dtype=theano.config.floatX)
net.reset_opts['max_lr'] = net.net_opts['l1_learning_rate']
#Constructing LSTM_ATTEND network from image_feature_region step-by-step
# step 1: net.content['pool4'] reshape to (N, 196, 512) tensor - image_feature_region
# step 2: using (N, 196, 512) image_feature_region tensor as input to compute h0, c0 - initial state memory of LSTM_ATTEND
# step 4: construct DeepOutLayer from h_t, z_t output from LSTM_ATTEND layer
# step 5: using DeepOutLayer output to compute output vector (instead of h_t), then negative log likelihood calculated by SoftMaxLogLoss Layer
#pdb.set_trace()
feature_shape = net.content['relu5_3'].output.shape
new_shape = (feature_shape[0], feature_shape[1], T.prod(feature_shape[2:]))
#pdb.set_trace()
net.content['relu5_3_norm'] = NormLayer(net, net.content['relu5_3'], relu_mean, relu_std)
net.content['4th_convol_feature_region'] = ReshapeLayer(net, net.content['relu5_3'], new_shape)
net.layer_opts['num_region'] = 196
net.content['average_feature_region'] = AverageLayer(net, net.content['4th_convol_feature_region'], 2)
net.layer_opts['num_lstm_node'] = 512
input_shape_h0 = (1, 512)
output_shape_h0 = (1 , net.layer_opts['num_lstm_node'])
n_hidden_h0 = 512
net.layer_opts['num_fc_node'] = n_hidden_h0
net.content['h0_hidden_layer'] = FCLayer(net, net.content['average_feature_region'], input_shape_h0, T.tanh)
net.layer_opts['num_fc_node'] = output_shape_h0[1]
hidden_shape = (input_shape_h0[1], n_hidden_h0)
net.content['h0_initial'] = FCLayer(net, net.content['h0_hidden_layer'], hidden_shape)
out_shape = net.content['h0_initial'].output.shape
net.content['h0_initial'].output = net.content['h0_initial'].output.reshape((-1, out_shape[0], out_shape[1]))
net.layer_opts['num_fc_node'] = n_hidden_h0
net.content['c0_hidden_layer'] = FCLayer(net, net.content['average_feature_region'], input_shape_h0, T.tanh)
net.layer_opts['num_fc_node'] = output_shape_h0[1]
net.content['c0_initial'] = FCLayer(net, net.content['c0_hidden_layer'], hidden_shape)
out_shape = net.content['c0_initial'].output.shape
net.content['c0_initial'].output = net.content['c0_initial'].output.reshape((-1, out_shape[0], out_shape[1]))
#Word Embedding Layer
net.layer_opts['num_emb'] = 400
net.content['we'] = WordEmbLayer(net, net.content['input_sen'],
(trainer.opts['batch_size'], max_len-1, n_word, 1))
# we_out = net.content['we'].output.eval({net.input[1]: Y.eval()})
net.layer_opts['num_lstm_node'] = 512 #
net.layer_opts['context_dim'] = 1024
net.layer_opts['num_dimension_feature'] = 512
net.layer_opts['num_region'] = 196
net.content['4th_convol_feature_region'].output = T.transpose(net.content['4th_convol_feature_region'].output, (0,2,1))
#pdb.set_trace()
net.content['lstm_attend'] = LSTM_Attend(net, net.content['we'],
(trainer.opts['batch_size'], max_len - 1, net.layer_opts['num_emb'], 1),
net.content['4th_convol_feature_region'].output,
initial_h0 = net.content['h0_initial'].output, initial_c0 = net.content['c0_initial'].output)
net.layer_opts['num_deep_out_node'] = 512 #300
net.layer_opts["n_word"] = n_word
net.content['deep_out_layer'] = DeepOutputLayer(net, net.content['we'], net.content['lstm_attend'])
net.layer_opts['num_affine_node'] = n_word
#net.content['affine'] = AffineLayer(net, net.content['lstm_attend'],
# (1, max_len - 1, net.layer_opts['num_dimension_feature'], 1))
net.layer_opts['l2_term'] = 0.000014
net.content['l2'] = L2WeightDecay(net, net.content['deep_out_layer'])
net.layer_opts['softmax_norm_dim'] = 2
net.content['smloss'] = SoftmaxLogLoss(net, net.content['deep_out_layer'])
net.content['cost'] = AggregateSumLoss([net.content['l2'], net.content['smloss']])
# SHOW TELL CODE
#net.layer_opts['num_emb'] = 512
#num_cnn_features = 4096
#net.content['dim_swap'] = SwapDim(net, net.content['fc7'], 1, 2)
#net.content['iwe'] = WordEmbLayer(net, net.content['dim_swap'],
# (trainer.opts['batch_size'], 1, num_cnn_features, 1))
#net.content['we'] = WordEmbLayer(net, net.content['input_sen'],
# (trainer.opts['batch_size'], max_len - 1, n_word, 1))
#net.content['cat'] = Concat(net, net.content['iwe'], net.content['we'], 1)
#net.layer_opts['num_lstm_node'] = 1024
#net.content['lstm_attend'] = LSTM(net, net.content['cat'],
# (trainer.opts['batch_size'], max_len - 1, net.layer_opts['num_emb'], 1))
#net.layer_opts['num_affine_node'] = n_word
#net.content['affine'] = AffineLayer(net, net.content['lstm_attend'],
# (trainer.opts['batch_size'],
# max_len - 1,
# net.layer_opts['num_lstm_node'],
# 1))
#net.content['lstm_r'] = LSTMRemove(net, net.content['affine'], 1)
#net.layer_opts['softmax_norm_dim'] = 2
#net.layer_opts['l2_term'] = 0.000001
#net.content['l2'] = L2WeightDecay(net, net.content['lstm_r'])
#net.content['smloss'] = SoftmaxLogLoss(net, net.content['lstm_r'])
#net.content['cost'] = AggregateSumLoss([net.content['l2'], net.content['smloss']])
net.InitLR(0.2)
memory = theano.sandbox.cuda.cuda_ndarray.cuda_ndarray.mem_info()
print('Memory: %.2f avail before initialize params' % (memory[0]/1024./1024/1024))
trainer.InitParams(net)
print("Done init params")
train_update_rule = trainer.InitUpdateRule(net)
print("Done init update rule")
#additional_output = ['affine', 'l2']
additional_output = ['deep_out_layer', 'l2']
# net.InitValFunction([val_X, val_Y[:,:-1,:,:]], val_Y[:,1:,:,:],
# additional_output, val_weight, net.content['lstm_attend'].output_z)
e = 0
last_big_e = 0
else:
snapshot_list = sorted(snapshot_list)
print('Loading latest snapshot at %s' % snapshot_list[-1])
e = 0
[net, trainer, last_big_e] = LoadList(snapshot_list[-1])
net.InitLR(0.2)
memory = theano.sandbox.cuda.cuda_ndarray.cuda_ndarray.mem_info()
print('Memory: %.2f avail before initialize params' % (memory[0]/1024./1024/1024))
trainer.InitParams(net)
print("Done init params")
train_update_rule = trainer.InitUpdateRule(net)
print("Done init update rule")
additional_output = ['deep_out_layer', 'l2']
for big_e in range(last_big_e+1, num_big_epoch):
# Load train data
h_list = range(11)
np.random.shuffle(h_list)
for h in [0]:
#break
#if (not ('train_X' in locals())):
train_X = LoadH5(img_data_path % h)
dict_key = train_X.keys()[0]
train_X = train_X[dict_key]
num_sample = train_X.shape[0]
# train_Y has the shape of (num_sample, 5, max_len, n_word, 1)
train_Y = LoadH5(cap_data_path % h)
dict_key = train_Y.keys()[0]
train_Y = train_Y[dict_key]
Y_shape = train_Y.shape
# For debugging
#train_X = train_X[0:100,:,:,:]
#train_Y = train_Y[0:100,:,:,:,:]
#num_sample = 100
train_Y = train_Y.reshape(5*num_sample, Y_shape[2], Y_shape[3], 1)
random_caption_idx = net.net_opts['rng'].randint(0,5,num_sample) + np.asarray([i*5 for i in range(num_sample)])
# Each image has 5 captions, pick one at random
train_Y = train_Y[random_caption_idx, :, :, :]
#train_Y = train_Y[:, 0, :, :, :]
train_Y = train_Y.astype(theano.config.floatX)
# Create weight from train_Y
train_weight = np.copy(train_Y)
train_weight = train_weight[:,1:,:,:]
weight_shape = train_weight.shape
train_weight = (train_weight[:, :, 0, 0] == 0).reshape(weight_shape[0], weight_shape[1], 1, 1)
train_weight = np.repeat(train_weight, weight_shape[2], 2)
train_weight = np.repeat(train_weight, weight_shape[3], 3)
train_weight = train_weight.astype(theano.config.floatX)
num_big_batch_iteration = np.ceil(np.asarray(num_sample, dtype=theano.config.floatX)/big_batch_size)
for j in [0]:
#for j in range(0, num_big_batch_iteration):
big_batch_range = np.arange(j*big_batch_size, (j+1)*big_batch_size)
if ((j+1)*big_batch_size > num_sample):
big_batch_range = np.arange(j * big_batch_size, num_sample)
if (net.train_function == None):
trainer.opts['num_sample'] = big_batch_range.shape[0]
big_batch_range = np.asarray(big_batch_range, dtype=np.uint32)
np.random.shuffle(big_batch_range)
memory = theano.sandbox.cuda.cuda_ndarray.cuda_ndarray.mem_info()
print('Memory: %.2f avail before putting train data to shared' % (memory[0]/1024./1024/1024))
train_Xj = theano.shared(train_X[big_batch_range, :, :, :])
train_Yj = theano.shared(train_Y[big_batch_range, :, :, :])
hash_weight = np.asarray([1.3**t for t in range(max_len)])
hash_value = np.sum(np.argmax(train_Yj[0,:,:,0].eval(), axis=1)*hash_weight)
print(hash_value)
#pdb.set_trace()
train_weightj = theano.shared(train_weight[big_batch_range, :, :, :])
memory = theano.sandbox.cuda.cuda_ndarray.cuda_ndarray.mem_info()
print('Memory: %.2f avail after' % (memory[0]/1024./1024/1024))
val_Xtest = train_Xj.eval()[0:2,:,:,:]
val_Ytest = train_Yj.eval()[0:2,:-1,:,:]
z_m1_dummy = np.zeros((1, 2, net.content['lstm_attend'].Z_shape[0]), dtype=theano.config.floatX)
#pdb.set_trace()
do0 = net.content['deep_out_layer'].output.eval({ \
net.input[0]: val_Xtest, \
net.input[1]: val_Ytest, \
net.content['lstm_attend'].z_m1_sym: z_m1_dummy \
})
net.InitTrainFunction(train_update_rule, [train_Xj, train_Yj[:,:-1,:,:]], train_Yj[:,1:,:,:],
additional_output, train_weightj)
print("Done init train function")
print("start training")
trainer.opts['validation'] = False
trainer.opts['train'] = True
main_loop = SGDRMainLoop(net, trained_path)
main_loop.run(net, trainer, e)
#train_Xj = None
#train_Yj = None
#train_weightj = None
#net.train_function = None
print('Finished iteration %d, h5 %d, of big epoch %d' % (j, h, big_e))
plt.figure()
plt.plot(trainer.all_i[-1000:])
plt.savefig('ST_all_i_last1000_4.png')
plt.close()
plt.figure()
plt.plot(trainer.all_i)
plt.savefig('ST_all_i_4.png')
plt.close()
if (big_e%trainer.opts['save_freq']==0):
net1 = net.NNCopy()
#SaveList([net1, trainer, big_e], '../../data/trained_model/%s_e-%05d.dat' % (net.name, big_e))
# Validating frequency is the same with save freq
#if (big_e % (trainer.opts['save_freq'] * 2) == 0):
# for h in range(6):
# val_X = LoadH5(val_img_data_path % h)
# dict_key = val_X.keys()[0]
# val_X = val_X[dict_key]
# num_val_sample = val_X.shape[0]
#
# # val_Y has the shape of (num_val_sample, 5, max_len, n_word, 1)
# val_Y = LoadH5(val_cap_data_path % h)
#
# dict_key = val_Y.keys()[0]
# val_Y = val_Y[dict_key]
# Y_shape = val_Y.shape
# val_Y = val_Y.reshape(5*num_val_sample, Y_shape[2], Y_shape[3], 1)
# random_caption_idx = net.net_opts['rng'].randint(0,5,num_val_sample) + np.asarray([i*5 for i in range(num_val_sample)])
# # Each image has 5 captions, pick one at random
# val_Y = val_Y[random_caption_idx, :, :, :]
# val_Y = val_Y.astype(theano.config.floatX)
# # Create weight from val_Y
# val_weight = np.copy(val_Y)
# val_weight = val_weight[:,1:,:,:]
# weight_shape = val_weight.shape
# val_weight = (val_weight[:, :, 0, 0] == 0).reshape(weight_shape[0], weight_shape[1], 1, 1)
# val_weight = np.repeat(val_weight, weight_shape[2], 2)
# val_weight = np.repeat(val_weight, weight_shape[3], 3)
# val_weight = val_weight.astype(theano.config.floatX)
# num_big_batch_iteration = np.ceil(np.asarray(num_val_sample, dtype=theano.config.floatX)/big_batch_size)
# for j in range(0, num_big_batch_iteration):
# big_batch_range = np.arange(j*big_batch_size, (j+1)*big_batch_size)
#
# if ((j+1)*big_batch_size > num_val_sample):
# big_batch_range = np.arange(j * big_batch_size, num_val_sample)
#
# trainer.opts['num_val_sample'] = big_batch_range.shape[0]
# big_batch_range = np.asarray(big_batch_range, dtype=np.uint32)
# memory = theano.sandbox.cuda.cuda_ndarray.cuda_ndarray.mem_info()
# print('Memory: %.2f avail before putting val data to shared' % (memory[0]/1024./1024/1024))
# val_Xj = theano.shared(val_X[big_batch_range, :, :, :])
# val_Yj = theano.shared(val_Y[big_batch_range, :, :, :])
#
# hash_weight = np.asarray([1.3**t for t in range(max_len)])
# hash_value = np.sum(np.argmax(val_Yj[0,:,:,0].eval(), axis=1)*hash_weight)
# print(hash_value)
# val_weightj = theano.shared(val_weight[big_batch_range, :, :, :])
# memory = theano.sandbox.cuda.cuda_ndarray.cuda_ndarray.mem_info()
# print('Memory: %.2f avail after' % (memory[0]/1024./1024/1024))
# net.InitValFunction([val_Xj, val_Yj[:,:-1,:,:]], val_Yj[:,1:,:,:],
# additional_output, val_weightj)
# print("Done init val function")
# print("start validating")
# trainer.opts['validation'] = True
# trainer.opts['train'] = False
# main_loop = SGDRMainLoop(net, trained_path)
# main_loop.run(net, trainer, e)
#
# val_Xj = None
# val_Yj = None
# val_weightj = None
# net.val_function = None
# print('Finished validating at iteration %d, h5 %d, of big epoch %d' % (j, h, big_e))
def InferMSCOCO(ID):
"""
Use a trained Show, Attend and Tell model to generate a caption for an image.
The image is loaded from MSCOCO (with its ground-truth captions).
:type ID: int
:param ID: id of the image in MSCOCO validation set
"""
max_len = 40
n_word = 1004
w = 224
h = 224
val_img_path = '../../data/mscoco/val2014/'
val_json_path = '/home/graphicsminer/data/mscoco/annotations/captions_val2014.json'
trained_path = '../../data/trained_model/'
vocab_path = '../../data/mscoco/MSCOCO_processed/vocab.dat'
file_type = '*.jpg'
coco = COCO(val_json_path)
img_keys = sorted(coco.imgs.keys())
img_key = img_keys[ID]
print("Infering caption for MSCOCO image with ID %d in validation set" % ID)
img_info = coco.loadImgs([img_key])
file_name = val_img_path + img_info[0]['file_name']
file_url = img_info[0]['flickr_url']
anns = coco.loadAnns(coco.getAnnIds([img_key]))
# Preprocess image
X = mpimg.imread(file_name)
X = scipy.misc.imresize(X, [224, 224], 'bicubic')
X = np.reshape(X, (1,224,224,3))
# Change RGB to BGR
X = X[:,:,:,[2,1,0]]
X = np.transpose(X, (0, 3,2,1))
X = VGG_preprocess(X)
X = X.astype(theano.config.floatX)
# Look up the <START> and <END> token IDs
vocab = LoadList(vocab_path)
vocab = vocab[0]
start_token = u'<START>'
end_token = u'<END>'
start_ID = np.where(vocab == start_token)
end_ID = np.where(vocab == end_token)
    # Generate the first word of the sentence
X_sen0 = np.zeros((1, 1, n_word, 1), dtype=theano.config.floatX)
X_sen0[0,0,start_ID,0] = 1.0
net = ShowTellNet()
net = LoadVGG_Attend(net)
net.name = "ShowAttendTellCOCO_Affine"
snapshot_list = glob.glob(trained_path + net.name + '*.dat')
    if(len(snapshot_list) > 0):
print('Loading neural network snapshot from %s' % snapshot_list[-1])
[net, trainer, last_big_e] = LoadList(snapshot_list[-1])
# Generate z[-1]
z_m1 = np.zeros((1, 1, net.content['lstm_attend'].Z_shape[0]), dtype=theano.config.floatX)
#pdb.set_trace()
deep_out0 = net.content['deep_out_layer'].output.eval({ \
net.input[0]: X, \
net.input[1]: X_sen0, \
net.content['lstm_attend'].z_m1_sym: z_m1 \
})
#pdb.set_trace()
def four_cv_mv_cal():
"""
    Calculate the mean and variance of the 4th convolutional layer of VGG on the MSCOCO train dataset
"""
net = ShowTellNet()
net = LoadVGG_Attend(net)
net.name = "dummy"
train_data_path = '../../data/mscoco/MSCOCO_processed/MSCOCO_224_imgdata_train_%d.h5'
big_batch_size = 100
all_relu = np.zeros((1, 512, 1, 1), dtype=np.float64)
all_num_sample = 0
for h in range(11):
        train_X = LoadH5(train_data_path % h)
dict_keys = train_X.keys()
train_X = train_X[dict_keys[0]]
num_sample = train_X.shape[0]
num_big_batch_iteration = int(np.ceil(np.asarray(num_sample, dtype=theano.config.floatX)/big_batch_size))
for j in range(0, num_big_batch_iteration):
big_batch_range = np.arange(j*big_batch_size, (j+1)*big_batch_size)
if ((j+1)*big_batch_size > num_sample):
big_batch_range = np.arange(j * big_batch_size, num_sample)
X = train_X[big_batch_range, :, :, :]
relu5_3 = net.content['relu5_3'].output.eval({ \
net.input[0]: X \
})
relu5_3 = np.sum(relu5_3, axis=0, keepdims=True)
relu5_3 = np.sum(relu5_3, axis=2, keepdims=True)
relu5_3 = np.sum(relu5_3, axis=3, keepdims=True)
all_relu = all_relu + relu5_3.astype(np.float64)
all_num_sample += num_sample
print('Calculate mean at h=%d and j=%d' % (h, j))
mean_relu = all_relu / np.asarray((14.0*14.0*all_num_sample), dtype=np.float64)
all_relu = np.zeros((1, 512, 1, 1), dtype=np.float64)
for h in range(11):
train_X = LoadH5(train_data_path % h)
dict_keys = train_X.keys()
train_X = train_X[dict_keys[0]]
num_sample = train_X.shape[0]
num_big_batch_iteration = int(np.ceil(np.asarray(num_sample, dtype=theano.config.floatX)/big_batch_size))
for j in range(0, num_big_batch_iteration):
big_batch_range = np.arange(j*big_batch_size, (j+1)*big_batch_size)
if ((j+1)*big_batch_size > num_sample):
big_batch_range = np.arange(j * big_batch_size, num_sample)
X = train_X[big_batch_range, :, :, :]
relu5_3 = net.content['relu5_3'].output.eval({ \
net.input[0]: X \
})
relu5_3 = relu5_3.astype(np.float64) - mean_relu
relu5_3 = np.sum(relu5_3, axis=0, keepdims=True)
relu5_3 = np.sum(relu5_3, axis=2, keepdims=True)
relu5_3 = np.sum(relu5_3, axis=3, keepdims=True)
relu5_3 = relu5_3**2
all_relu = all_relu + relu5_3.astype(np.float64)
print('Calculate std at h=%d and j=%d' % (h, j))
all_num_sample += num_sample
var_relu = all_relu / np.asarray((14.0*14.0*all_num_sample), dtype=np.float64)
std_relu = np.sqrt(var_relu)
pdb.set_trace()
print('Saving mean and std')
SaveList([mean_relu, std_relu], '../../data/mscoco/MSCOCO_processed/4thconvo_meanvar.dat')
if __name__=='__main__':
#import matplotlib
#matplotlib.use('Agg')
#four_cv_mv_cal()
train_Attend_224()
#InferMSCOCO(1)
|
import unittest
# self.assertEqual(a, b)
# self.assertNotEqual(a, b)
# self.assertTrue(x)
# self.assertFalse(x)
# self.assertRaises(TypeError)
# assertIs(a, b)
# assertIsNot(a, b)
# assertIsNone(x)
# assertIsNotNone(x)
# assertIn(a, b)
# assertNotIn(a, b)
# assertListEqual(a, b)
# assertTupleEqual(a, b)
# assertDictEqual(a, b)
discover = unittest.defaultTestLoader.discover(
"./test_case",
pattern="test_*.py",
top_level_dir=None
)
if __name__ == "__main__":
runner = unittest.TextTestRunner()
runner.run(discover)
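# A hypothetical example of a module that the discovery above would pick up
# (it would live at ./test_case/test_example.py; the assertions are purely
# illustrative):
#
#   import unittest
#
#   class ExampleTest(unittest.TestCase):
#       def test_addition(self):
#           self.assertEqual(1 + 1, 2)
#
#       def test_membership(self):
#           self.assertIn("a", "abc")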
|
def build_map(fileLocation):
mapFile = open(fileLocation, 'r')
linesOfFile = mapFile.readlines()
for index in range(len(linesOfFile)):
linesOfFile[index] = linesOfFile[index].strip()
linesOfFile[index] = linesOfFile[index].split()
#removing the bad lines
nuList = []
for line in linesOfFile:
        if "#" not in line and line != []:
nuList.append(line)
for line in nuList:
assert (len(line) == 2)
assert (len(line[0]) == 1)
assert (len(line[1]) == 1)
subsitutionDict = {}
for line in nuList:
subsitutionDict[line[0]] = line[1]
return subsitutionDict
fileLocation = input()
print(build_map(fileLocation))
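# A hypothetical substitution file that build_map() accepts: comment lines are
# skipped, and every remaining line must hold exactly two single-character
# tokens.
#
#   # simple cipher
#   a z
#   b y
#   c x
#
# For the file above build_map() returns {'a': 'z', 'b': 'y', 'c': 'x'}.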
|
# coding: utf-8
# In[86]:
import pandas as pd
df_ori = pd.read_csv('data\\dw_cl_jl_high_value.csv',delimiter='|')
df_user = pd.read_csv('data\\all_user_7in8not.csv')
data=pd.merge(df_user,df_ori,how='left',left_on='USER_ID_7in8not',right_on='USER_ID')
data.head()
# In[87]:
category_cols=['CITY_DESC','DESC_1ST','SYSTEM_FLAG','PRODUCT_TYPE','RH_TYPE',
'FLUX_RELEASE','DX_FLUX','DX_EFF_DATE','SUB_PURCH_MODE_TYPE','PAYMENT']
data_cate=data[category_cols]
# ##DX_EFF_DATE
# In[88]:
import time
import numpy as np
def f(x):
if str(x)=='00000000':
return x
elif ' ' in str(x):
ymd = str(x).split(' ')[0].strip()
else:
ymd = str(x).strip()
return time.strftime('%Y%m%d',time.strptime(ymd,'%Y/%m/%d'))
data_cate['DX_EFF_DATE'] = data['DX_EFF_DATE'].fillna('00000000')
data_cate['DX_EFF_DATE'] = data['DX_EFF_DATE'].map(lambda x: str(x).replace('nan','00000000')).map(f)
#
# In[89]:
data_cate['CITY_DESC']=data['CITY_DESC'].fillna('未知').astype('str')
data_cate['CITY_DESC']=data['CITY_DESC'].map(lambda x:str(x).replace('主城未知','未知').replace('nan','未知'))
data_cate['CITY_DESC'].value_counts()
#data_cate['DX_EFF_DATE'].value_counts()
# In[90]:
def fun(x):
if('停'in str(x)):
return '停机'
if('拆机' in str(x)):
return '拆机'
else:
return x
data_cate['DESC_1ST'] = data['DESC_1ST'].fillna('未知').astype('str')
data_cate['DESC_1ST'] = data['DESC_1ST'].map(fun)
print(data_cate['DESC_1ST'].value_counts())
print("\r===========================================\r")
#eal_release():
data_cate['FLUX_RELEASE']=data['FLUX_RELEASE'].fillna('未释放').astype('str')
data_cate['FLUX_RELEASE'] = data['FLUX_RELEASE'].map(lambda x:str(x).replace('nan','未释放').replace('CBSS','').replace('BSS',''))
data_cate['FLUX_RELEASE'].value_counts()
# In[91]:
data_cate['RH_TYPE']=data['RH_TYPE'].fillna('非融合').astype('str')
print(data_cate['RH_TYPE'].value_counts())
data['SYSTEM_FLAG'].value_counts()
# In[92]:
data_cate['SUB_PURCH_MODE_TYPE'] =data_cate['SUB_PURCH_MODE_TYPE'].fillna('其他')
data_cate['SUB_PURCH_MODE_TYPE'].value_counts()
data_cate['PAYMENT'] =data_cate['PAYMENT'].fillna('其他')
data_cate['PAYMENT'].value_counts()
# In[93]:
data_cate['DX_FLUX'].value_counts()
dx_flux_dic={
'1.5GB本地流量':1.5,
'1.5G本地流量':1.5,
'100G本地流量':100,
'10GB本地流量':10,
'10G本地流量':10,
    '15G本地流量':15,
'1GB本地流量':1,
'1G本地流量':1,
'20GB本地+5GB全国流量,流量有效期1个月':25,
'20GB本地流量+3GB全国流量':23,
'20G本地流量':20,
'2GB本地流量':2,
'2GB本地流量,流量有效期1个月':2,
'2G本地流量':2,
'300分钟+300MB':3.3,
'300分钟本地拨打国内、1GB本地流量':4,
'300分钟本地市话+300M本地流量':3.3,
'3GB本地流量':3,
'3G本地流量':3,
'40BG本地流量':40,
'40GB本地流量':40,
'40GB本地流量,流量有效期1个月':40,
'40G本地流量':40,
'40G全国流量':40,
'4G本地流量':4,
'500M本地流量':0.5,
'5GB本地流量':5,
'5G本地流量':5,
'600分钟+600MB':6.6,
'600分钟本地拨打国内、2GB本地流量':8,
'600分钟本地市话+600M本地流量':6.6,
'60G本地流量':60,
'6G本地流量':6,
'800M本地流量':0.8,
'8GB本地流量':8,
'8G本地流量':8,
'本地流量':80,
'不限量':80
}
data_cate['DX_FLUX']=data['DX_FLUX'].map(dx_flux_dic).fillna(0)
data_cate['DX_FLUX'].value_counts()
# In[94]:
conti_cols=['INNET_DATE','PRODUCT_FEE' #,'TOTAL_FLUX_THIS','FEE_THIS',,'CALLING_DURA_THIS','CHARGE_FLUX_08','CHARGE_VOICE_08','INNER_ROAM_FEE_08'
,'PROD_IN_VOICE','PROD_IN_FLUX','CALL_NUM',
'IS_TERM','TERM_END_MONTH','IS_LH','LH_END_MONTH'
,'DX_FEE','DX_END_DATE','DX_HIGH_SPEED','ARPU_AVG','FEE_LAST1','FEE_LAST2','FEE_LAST3'
,'FEE_LAST4','FEE_LAST5','CALLING_DURA_AVG_3'
,'CALLING_DURA_LAST1','CALLING_DURA_LAST2','CALLING_DURA_LAST3'
,'CALLING_DURA_LAST4','CALLING_DURA_LAST5','AVG_FLUX_3'
,'TOTAL_FLUX_LAST1','TOTAL_FLUX_LAST2','TOTAL_FLUX_LAST3'
,'TOTAL_FLUX_LAST4','TOTAL_FLUX_LAST5','EX_FLUX_FEE_THIS'
,'EX_FLUX_FEE_LAST1','EX_FLUX_FEE_LAST2','EX_FLUX_FEE_LAST3'
,'EX_FLUX_FEE_LAST4','EX_FLUX_FEE_LAST5','PKG_FLUX_FEE_THIS'
,'PKG_FLUX_FEE_LAST1','PKG_FLUX_FEE_LAST2','PKG_FLUX_FEE_LAST3'
,'PKG_FLUX_FEE_LAST4','PKG_FLUX_FEE_LAST5','VOICE_FEE_THIS'
,'VOICE_FEE_LAST1','VOICE_FEE_LAST2','VOICE_FEE_LAST3','VOICE_FEE_LAST4'
,'VOICE_FEE_LAST5','CHARGE_FLUX_07'
,'CHARGE_VOICE_07','HAS_FK','HAS_ADSL','WXBD','STBD','OPPOSE_LINK'
,'CDR_KF_OUT','CDR_KF_IN','IS_YH','YH_NUM'
,'INNER_ROAM_FEE_07','INNER_ROAM_FEE_06','IS_FP_PRINT','PRINT_CNT'
,'IS_TS','TS_CNT','IS_VIDEO','IS_CHANGE'
,'IS_GWLS'
,'RELEASE_FLAG','OVER_FLUX_FEE_AVG','YY_FLAG','BUILDING_INFO'
,'OVER_VOICE_FEE_AVG','IS_ZK']
data_conti= data[conti_cols]
# In[95]:
dummy_cols=['CITY_DESC','DESC_1ST','SYSTEM_FLAG','PRODUCT_TYPE','RH_TYPE','FLUX_RELEASE','SUB_PURCH_MODE_TYPE','PAYMENT']
frm_dummy=data_cate[dummy_cols]
frm_dummy=pd.get_dummies(frm_dummy)
# In[96]:
scatter_cols=frm_dummy.columns.values.tolist()
ori_binary_cols=['HAS_FK','HAS_ADSL','WXBD','STBD','OPPOSE_LINK','IS_YH','IS_TERM','IS_FP_PRINT',
'IS_TS','IS_LH','IS_VIDEO','IS_CHANGE','RELEASE_FLAG','IS_ZK','YY_FLAG']
conti_cols=['INNET_DATE','PRODUCT_FEE' #,'TOTAL_FLUX_THIS','FEE_THIS',,'CALLING_DURA_THIS','CHARGE_FLUX_08','CHARGE_VOICE_08','INNER_ROAM_FEE_08'
,'PROD_IN_VOICE','PROD_IN_FLUX','CALL_NUM','TERM_END_MONTH','LH_END_MONTH'
,'DX_FEE','DX_END_DATE','DX_HIGH_SPEED','ARPU_AVG','FEE_LAST1','FEE_LAST2','FEE_LAST3'
,'FEE_LAST4','FEE_LAST5','CALLING_DURA_AVG_3','CALLING_DURA_LAST1','CALLING_DURA_LAST2','CALLING_DURA_LAST3'
,'CALLING_DURA_LAST4','CALLING_DURA_LAST5','AVG_FLUX_3','TOTAL_FLUX_LAST1','TOTAL_FLUX_LAST2','TOTAL_FLUX_LAST3'
,'TOTAL_FLUX_LAST4','TOTAL_FLUX_LAST5','EX_FLUX_FEE_LAST1','EX_FLUX_FEE_LAST2','EX_FLUX_FEE_LAST3' #,'EX_FLUX_FEE_THIS'
,'EX_FLUX_FEE_LAST4','EX_FLUX_FEE_LAST5','PKG_FLUX_FEE_LAST1','PKG_FLUX_FEE_LAST2','PKG_FLUX_FEE_LAST3' #'PKG_FLUX_FEE_THIS',
,'PKG_FLUX_FEE_LAST4','PKG_FLUX_FEE_LAST5','VOICE_FEE_LAST1','VOICE_FEE_LAST2','VOICE_FEE_LAST3','VOICE_FEE_LAST4' #'VOICE_FEE_THIS',
,'VOICE_FEE_LAST5','CHARGE_FLUX_07','CHARGE_VOICE_07','CDR_KF_OUT','CDR_KF_IN','YH_NUM','INNER_ROAM_FEE_07','INNER_ROAM_FEE_06','PRINT_CNT'
,'TS_CNT','OVER_FLUX_FEE_AVG','BUILDING_INFO','OVER_VOICE_FEE_AVG','DX_HIGH_SPEED','DX_FLUX','DX_EFF_DATE']
#for i in ori_binary_cols:
# scatter_cols.append( i)
frm_scatter=frm_dummy
frm_scatter[ori_binary_cols]=data[ori_binary_cols]
frm_scatter=frm_scatter.fillna(0)
frm_conti=data[conti_cols]
frm_conti=frm_conti.fillna(0)
frm_conti['DX_HIGH_SPEED']=frm_conti['DX_HIGH_SPEED'].map(lambda x: str(x).replace("G",""))
frm_conti['OVER_VOICE_FEE_AVG']=frm_conti['OVER_VOICE_FEE_AVG'].map(lambda x: str(x).replace("\"","").replace("," , ""))
frm_conti[['DX_FLUX','DX_EFF_DATE']]=data_cate[['DX_FLUX','DX_EFF_DATE']]
frm_conti.head()
# In[116]:
frm_conti['FEE_LAST1-2']=(frm_conti['FEE_LAST1']-frm_conti['FEE_LAST2']) / ( frm_conti['FEE_LAST2'] + 1)
frm_conti['FEE_LAST2-3']=(frm_conti['FEE_LAST2']-frm_conti['FEE_LAST3']) / (frm_conti['FEE_LAST3'] + 1 )
frm_conti['FEE_LAST3-4']=(frm_conti['FEE_LAST3']-frm_conti['FEE_LAST4']) / (frm_conti['FEE_LAST4'] + 1 )
frm_conti['FEE_LAST4-5']=(frm_conti['FEE_LAST4']-frm_conti['FEE_LAST5']) / (frm_conti['FEE_LAST5'] + 1 )
frm_conti['CALLING_DURA_LAST1-2']=(frm_conti['CALLING_DURA_LAST1']-frm_conti['CALLING_DURA_LAST2'])/(frm_conti['CALLING_DURA_LAST2']+1)
frm_conti['CALLING_DURA_LAST2-3']=(frm_conti['CALLING_DURA_LAST2']-frm_conti['CALLING_DURA_LAST3'])/(frm_conti['CALLING_DURA_LAST3']+1)
frm_conti['CALLING_DURA_LAST3-4']=(frm_conti['CALLING_DURA_LAST3']-frm_conti['CALLING_DURA_LAST4'])/(frm_conti['CALLING_DURA_LAST4']+1)
frm_conti['CALLING_DURA_LAST4-5']=(frm_conti['CALLING_DURA_LAST4']-frm_conti['CALLING_DURA_LAST5'])/(frm_conti['CALLING_DURA_LAST5']+1)
frm_conti['TOTAL_FLUX_LAST1-2']=(frm_conti['TOTAL_FLUX_LAST1']-frm_conti['TOTAL_FLUX_LAST2'])/(frm_conti['TOTAL_FLUX_LAST2']+1)
frm_conti['TOTAL_FLUX_LAST2-3']=(frm_conti['TOTAL_FLUX_LAST2']-frm_conti['TOTAL_FLUX_LAST3'])/(frm_conti['TOTAL_FLUX_LAST3']+1)
frm_conti['TOTAL_FLUX_LAST3-4']=(frm_conti['TOTAL_FLUX_LAST3']-frm_conti['TOTAL_FLUX_LAST4'])/(frm_conti['TOTAL_FLUX_LAST4']+1)
frm_conti['TOTAL_FLUX_LAST4-5']=(frm_conti['TOTAL_FLUX_LAST4']-frm_conti['TOTAL_FLUX_LAST5'])/(frm_conti['TOTAL_FLUX_LAST5']+1)
frm_conti['EX_FLUX_FEE_LAST1-2']=(frm_conti['EX_FLUX_FEE_LAST1']-frm_conti['EX_FLUX_FEE_LAST2'])/(frm_conti['EX_FLUX_FEE_LAST2']+1)
frm_conti['EX_FLUX_FEE_LAST2-3']=(frm_conti['EX_FLUX_FEE_LAST2']-frm_conti['EX_FLUX_FEE_LAST3'])/(frm_conti['EX_FLUX_FEE_LAST3']+1)
frm_conti['EX_FLUX_FEE_LAST3-4']=(frm_conti['EX_FLUX_FEE_LAST3']-frm_conti['EX_FLUX_FEE_LAST4'])/(frm_conti['EX_FLUX_FEE_LAST4']+1)
frm_conti['EX_FLUX_FEE_LAST4-5']=(frm_conti['EX_FLUX_FEE_LAST4']-frm_conti['EX_FLUX_FEE_LAST5'])/(frm_conti['EX_FLUX_FEE_LAST5']+1)
frm_conti['PKG_FLUX_FEE_LAST1-2']=(frm_conti['PKG_FLUX_FEE_LAST1']-frm_conti['PKG_FLUX_FEE_LAST2'])/(frm_conti['PKG_FLUX_FEE_LAST2']+1)
frm_conti['PKG_FLUX_FEE_LAST2-3']=(frm_conti['PKG_FLUX_FEE_LAST2']-frm_conti['PKG_FLUX_FEE_LAST3'])/(frm_conti['PKG_FLUX_FEE_LAST3']+1)
frm_conti['PKG_FLUX_FEE_LAST3-4']=(frm_conti['PKG_FLUX_FEE_LAST3']-frm_conti['PKG_FLUX_FEE_LAST4'])/(frm_conti['PKG_FLUX_FEE_LAST4']+1)
frm_conti['PKG_FLUX_FEE_LAST4-5']=(frm_conti['PKG_FLUX_FEE_LAST4']-frm_conti['PKG_FLUX_FEE_LAST5'])/(frm_conti['PKG_FLUX_FEE_LAST5']+1)
frm_conti['VOICE_FEE_LAST1-2']=(frm_conti['VOICE_FEE_LAST1']-frm_conti['VOICE_FEE_LAST2'])/(frm_conti['VOICE_FEE_LAST2']+1)
frm_conti['VOICE_FEE_LAST2-3']=(frm_conti['VOICE_FEE_LAST2']-frm_conti['VOICE_FEE_LAST3'])/(frm_conti['VOICE_FEE_LAST3']+1)
frm_conti['VOICE_FEE_LAST3-4']=(frm_conti['VOICE_FEE_LAST3']-frm_conti['VOICE_FEE_LAST4'])/(frm_conti['VOICE_FEE_LAST4']+1)
frm_conti['VOICE_FEE_LAST4-5']=(frm_conti['VOICE_FEE_LAST4']-frm_conti['VOICE_FEE_LAST5'])/(frm_conti['VOICE_FEE_LAST5']+1)
# In[117]:
from sklearn.preprocessing import MinMaxScaler
#scaler = StandardScaler()
min_max_scaler = MinMaxScaler()
frm_cont_out = pd.DataFrame(min_max_scaler.fit_transform(frm_conti),columns=frm_conti.columns.values)
frm_final=frm_cont_out#_out
frm_final[frm_scatter.columns.values]=frm_scatter
frm_final.describe()
# In[128]:
from sklearn.feature_selection import SelectKBest
from sklearn.feature_selection import chi2
selector=SelectKBest(chi2,k=120)
#
frm_final.index=data['USER_ID_7in8not']
selector.fit(frm_final,data['lost_tag'])
feature_matrix=selector.get_support()
#pvalues=selector.scores_
#print(pvalues)
#pvalues
#Frm_New=selector.fit_transform(frm_final,data['lost_tag'].values)
#feature_matrix.shape
col_names=np.array(frm_final.columns.values)
feature_survice=[]
#i=0
for bool_value,arr_inx in zip(feature_matrix,range(198)):
if bool_value==True:
#print("count:"+str(arr_inx)+" ,bool value is"+str(bool_value))
feature_survice.append(col_names[arr_inx])
#[i+1]=col_names[arr_inx]
#i=i+1
feature_survice
# # The mutual information method follows
#
# In[119]:
from sklearn.feature_selection import SelectKBest
from sklearn.feature_selection import mutual_info_classif
selector=SelectKBest(mutual_info_classif,k=50)
#
frm_final.index=data['USER_ID_7in8not']
selector.fit(frm_final,data['lost_tag'])
feature_matrix=selector.get_support()
pvalues=selector.scores_
#print(pvalues)
#pvalues
#Frm_New=selector.fit_transform(frm_final,data['lost_tag'].values)
#feature_matrix.shape
col_names=np.array(frm_final.columns.values)
feature_survice=[]
#i=0
for bool_value,arr_inx in zip(feature_matrix,range(198)):
if bool_value==True:
#print("count:"+str(arr_inx)+" ,bool value is"+str(bool_value))
feature_survice.append(col_names[arr_inx])
#[i+1]=col_names[arr_inx]
#i=i+1
feature_survice
# In[120]:
import numpy as np
col_indx=np.argsort(pvalues)
#sort_feature_frm=pd.DataFrame()
feature_dic={}
for i,arr_inx in zip(col_indx,range(174)):
feature_dic[int(arr_inx)+1]=col_names[i]
feature_dic
# # SelectKBest with f_classif follows
# In[121]:
from sklearn.feature_selection import SelectKBest
from sklearn.feature_selection import f_classif
selector=SelectKBest(f_classif,k=50)
#
frm_final.index=data['USER_ID_7in8not']
selector.fit(frm_final,data['lost_tag'])
feature_matrix=selector.get_support()
pvalues_1=selector.scores_
#print(pvalues)
#pvalues
#Frm_New=selector.fit_transform(frm_final,data['lost_tag'].values)
#feature_matrix.shape
col_names=np.array(frm_final.columns.values)
feature_survice=[]
#i=0
for bool_value,arr_inx in zip(feature_matrix,range(198)):
if bool_value==True:
#print("count:"+str(arr_inx)+" ,bool value is"+str(bool_value))
feature_survice.append(col_names[arr_inx])
#[i+1]=col_names[arr_inx]
#i=i+1
feature_survice
# In[123]:
import numpy as np
col_indx_1=np.argsort(pvalues_1)
#sort_feature_frm=pd.DataFrame()
feature_dic={}
for i,arr_inx in zip(col_indx_1,range(198)):
feature_dic[int(arr_inx)+1]=col_names[i]
feature_dic
# # Tree-model-based feature selection and SVM (this borders on actually fitting models)
# In[108]:
from sklearn.feature_selection import SelectFromModel
from sklearn.ensemble import GradientBoostingClassifier
from sklearn import svm
clf = svm.SVC(C=0.8, kernel='rbf', gamma=20, decision_function_shape='ovr')
#linear kernel
#data['lost_tag'].value_counts()
clf.fit(frm_final,data['lost_tag'])
clf.coef_
# In[124]:
# Feature selection with GBDT as the base model
clf_gbdt=GradientBoostingClassifier()
clf_gbdt.fit(frm_final,data['lost_tag'])
bool_inx=clf_gbdt.feature_importances_
bool_inx
# In[125]:
import numpy as np
col_indx=np.argsort(bool_inx)
#sort_feature_frm=pd.DataFrame()
feature_dic={}
for i,arr_inx in zip(col_indx,range(198)):
feature_dic[int(arr_inx)+1]=col_names[i]
feature_dic
# # The VarianceThreshold method for selecting discrete features follows.
# In[21]:
print(data_cate.columns.values)
data.columns.values
# In[30]:
train_data_cate=frm_dummy[['CITY_DESC_万州', 'CITY_DESC_两江新区', 'CITY_DESC_丰都', 'CITY_DESC_九龙坡',
'CITY_DESC_云阳', 'CITY_DESC_北碚', 'CITY_DESC_南岸', 'CITY_DESC_南川',
'CITY_DESC_合川', 'CITY_DESC_垫江', 'CITY_DESC_城口', 'CITY_DESC_大渡口',
'CITY_DESC_大足', 'CITY_DESC_奉节', 'CITY_DESC_巫山', 'CITY_DESC_巫溪',
'CITY_DESC_巴南', 'CITY_DESC_开州区', 'CITY_DESC_彭水', 'CITY_DESC_忠县',
'CITY_DESC_未知', 'CITY_DESC_梁平', 'CITY_DESC_武隆', 'CITY_DESC_永川',
'CITY_DESC_江北', 'CITY_DESC_江津', 'CITY_DESC_沙坪坝', 'CITY_DESC_涪陵城区',
'CITY_DESC_渝中', 'CITY_DESC_渝北', 'CITY_DESC_潼南', 'CITY_DESC_璧山',
'CITY_DESC_电子商务部', 'CITY_DESC_石柱', 'CITY_DESC_秀山', 'CITY_DESC_綦江',
'CITY_DESC_荣昌', 'CITY_DESC_酉阳', 'CITY_DESC_铜梁', 'CITY_DESC_长寿',
'CITY_DESC_集团客户部', 'CITY_DESC_黔江主城区', 'DESC_1ST_OCS充值',
'DESC_1ST_OCS有效', 'DESC_1ST_停机', 'DESC_1ST_拆机', 'DESC_1ST_挂失',
'DESC_1ST_正常在用', 'DESC_1ST_限制呼出', 'SYSTEM_FLAG_OCS-BSS',
'SYSTEM_FLAG_非OCS-BSS', 'PRODUCT_TYPE_全家享', 'PRODUCT_TYPE_冰激凌',
'PRODUCT_TYPE_智慧沃家', 'PRODUCT_TYPE_标准资费', 'PRODUCT_TYPE_非标准资费',
'RH_TYPE_BSS全家享主卡', 'RH_TYPE_BSS全家享副卡', 'RH_TYPE_BSS其他融合产品',
'RH_TYPE_当前证件下有宽带', 'RH_TYPE_智慧沃家共享版', 'RH_TYPE_非融合',
'FLUX_RELEASE_全家享主卡', 'FLUX_RELEASE_全家享副卡', 'FLUX_RELEASE_冰激凌单卡',
'FLUX_RELEASE_政企低消', 'FLUX_RELEASE_普通低消', 'FLUX_RELEASE_未释放',
'FLUX_RELEASE_畅越低消', 'FLUX_RELEASE_订购不限量包', 'FLUX_RELEASE_预约冰激凌',
'SUB_PURCH_MODE_TYPE_低消', 'SUB_PURCH_MODE_TYPE_低消承诺送流量',
'SUB_PURCH_MODE_TYPE_其他', 'SUB_PURCH_MODE_TYPE_承诺话费送语音',
'SUB_PURCH_MODE_TYPE_政企低消', 'SUB_PURCH_MODE_TYPE_畅爽',
'SUB_PURCH_MODE_TYPE_畅越低消', 'PAYMENT_MISPOS(界面控制)',
'PAYMENT_MISPOS刷卡缴费', 'PAYMENT_其他', 'PAYMENT_合约返费/赠费',
'PAYMENT_存款清退_普通预存款', 'PAYMENT_总部电子渠道支付宝缴费', 'PAYMENT_挂账缴费',
'PAYMENT_月结协议预存款销帐', 'PAYMENT_月结普通预存款销帐', 'PAYMENT_沃支付银行卡代扣',
'PAYMENT_现金交费', 'PAYMENT_缴费卡实时销帐', 'PAYMENT_缴费卡收入_普通预存款',
'PAYMENT_自助终端现金', 'PAYMENT_营业厅收入(帐务收费)_普通预存款', 'PAYMENT_资金归集现金缴费',
'PAYMENT_转帐(转出)', 'PAYMENT_退预存款', 'PAYMENT_银行代收_普通预存款',
'PAYMENT_银行联网交费','IS_LH','HAS_FK', 'HAS_ADSL', 'IS_FP_PRINT',
'IS_TS', 'IS_VIDEO', 'IS_CHANGE', 'IS_GWLS',
'RELEASE_FLAG', 'YY_FLAG', 'BUILDING_INFO', 'IS_ZK']]
#train_data_cate=data['lost_tag']
frm_dummy.columns.values
# In[68]:
# VarianceThreshold handles the discrete variables, i.e. the dummy columns
from sklearn.feature_selection import VarianceThreshold
sel=VarianceThreshold(threshold=(0.8*(1-0.8)))# remove features whose variance falls below the threshold (removing features with low variance)
sel.fit_transform(train_data_cate)# the result is the selected feature matrix
print(sel.fit_transform(train_data_cate))
bool_inx=sel.get_support()
survive_cols=[]
all_cols=train_data_cate.columns.values
for i in range(len(bool_inx)):
if(bool_inx[i]==True):
survive_cols.append(all_cols[i])
survive_cols
# # The matrix computation steps follow
# In[35]:
matrixa=np.array([[2,5,2],[3,8,2],[7,6,3],
[2,8,4],[8,8,3],[6,4,5]])
a=np.cov(matrixa,rowvar=0)
a
# In[41]:
eigVals,eigVects=np.linalg.eig(np.mat(a))
eigVals
# In[42]:
eigVects
# In[46]:
def eigValPct(eigVals,percentage):
    sortArray=np.sort(eigVals) # sort the eigenvalues ascending with numpy sort()
    sortArray=sortArray[-1::-1] # reverse so they run from largest to smallest
    arraySum=np.sum(sortArray) # total variance of the data
tempSum=0
num=0
for i in sortArray:
tempSum+=i
num+=1
if tempSum>=arraySum*percentage:
return num
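# A minimal usage sketch: keep enough principal components to explain 90% of
# the total variance, using the eigenvalues computed above.
eigValPct(eigVals, 0.9)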
eigValInd=np.argsort(eigVals)
eigValInd=eigValInd[:-(2+1):-1] # keep the two largest eigenvalues
eigValInd
# In[47]:
redEigVects=eigVects[:,eigValInd]
redEigVects
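# In[ ]:
# A minimal sketch of the final projection step (assumes matrixa and
# redEigVects from the cells above): centre the data and project it onto the
# two retained eigenvectors.
meanVals = matrixa.mean(axis=0)
lowDimData = np.mat(matrixa - meanVals) * redEigVects
lowDimData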
|
from getpass import getpass
from time import sleep
from subprocess import Popen
import urllib.error
from urllib.parse import urlencode, urlparse, parse_qs, unquote
from urllib.request import urlopen, build_opener, HTTPCookieProcessor, urlretrieve
from http.cookiejar import CookieJar
from bs4 import BeautifulSoup as BS
class Way2SMS:
def __init__(self,username,password):
self.cj = CookieJar()
self.opener = build_opener(HTTPCookieProcessor(self.cj))
self.serverNo = 2
self.serverUrl = "http://site{}.way2sms.com".format(self.serverNo)
self.logInUrl = self.serverUrl + "/Login1.action"
self.qckSMSUrl = self.serverUrl + "/jsp/QCSMS.jsp?mobileno=&name=&Token={}"
self.sendSMSUrl = self.serverUrl + "/jsp/smstonms.action"
self.contactUrl = self.serverUrl + "/QuickContacts?folder=Dashboard&Token={}"
self.captchaUrl = self.serverUrl + "/CaptchaServlet"
self.logOutUrl = self.serverUrl + "/LogOut"
self.captchaPath = "captcha.png"
self.cred = {"username":username,"password":password}
self.captchaNeeded = False
self.postDataStr = 't_15_k_5={t_15_k_5}&m_15_b={m_15_b}&a_m_p=qcsms&way2s=way2s&pjkdws={pjkdws}&{t_15_k_5}={token}&smsActTo=smstonms&custid={custid}&{m_15_b}={mobNo}&textArea={text}&textcode={textcode}'
# 1 - logging in
h1 = self.opener.open(self.logInUrl,urlencode(self.cred).encode())
        self.sesID = parse_qs(urlparse(h1.url).query)["id"][0] # storing session token
# print(self.sesID)
self.cookieInfo = {}
for i in self.cj:
self.cookieInfo[i.name] = i.value # storing cookie values
self.notLogged = False
# 2 - grabbing contacts
h2 = BS(self.opener.open(self.contactUrl.format(self.sesID),b''))
self.contacts = {}
names = h2.find("input",attrs={"id":"Quckvalue"})['value'].split("*")
numbers = h2.find("input",attrs={"id":"Qucktitle"})['value'].split(",")
for i in range(len(names)):
self.contacts[names[i]] = numbers[i]
# print(self.contacts)
# 3 - getting the post data pattern
bs = BS(self.opener.open(self.qckSMSUrl.format(self.sesID)))
mobTag = 'm_15_b'
tknTag = 't_15_k_5'
# with open("QCSMS.jsp",'w') as f: f.write(str(bs))
self.dataDict = {'custid':None, 'mobNo':None, 'text':None, 'token':self.sesID,'textcode':None,'pjkdws':None}
try:
self.dataDict[tknTag] = bs.find('input',attrs={'id':tknTag})['value']
except:
self.dataDict[tknTag] = bs.find('textarea',attrs={'id':tknTag}).text
try:
self.dataDict[mobTag] = bs.find('input',attrs={'id':mobTag})['value']
except:
self.dataDict[mobTag] = bs.find('textarea',attrs={'id':mobTag}).text
try:
self.dataDict['pjkdws'] = bs.find('input',attrs={'id':'pjkdws'})['value']
except:
self.dataDict['pjkdws'] = bs.find('textarea',attrs={'id':'pjkdws'}).text
self.dataDict['custid'] = bs.find('input',attrs={'id':'Send'})['onclick'].replace("return checkMobNo(","").replace(",'','','0')","")
self.opener.addheaders.append(('Cookie', unquote(urlencode(self.cookieInfo))))
def sendSMS(self,mobileno,text):
if(self.notLogged):
print("you are not logged in - call logIn()")
return
if(self.captchaNeeded):
with open(self.captchaPath,"wb") as f:
f.write(self.opener.open(self.captchaUrl).read())
p = Popen(["display",self.captchaPath])
self.dataDict['textcode'] = input("Captcha ? ")
p.kill()
if(len(text) <= 140):
self.dataDict['mobNo'] = mobileno
self.dataDict['text'] = text
# print(self.postDataStr.format(**self.dataDict))
try:
h = self.opener.open(self.sendSMSUrl,self.postDataStr.format(**self.dataDict).encode())
resp = h.read()
try:
msg = BS(resp).find("div",attrs={"id":"quicksms"}).find("div",attrs={"class":"quickname"}).text.strip()
if msg.endswith("submitted successfully"): pass
else: print("N : "+msg)
except:
print("N");self.captchaNeeded = True
with open("successResp.html","wb") as f: f.write(resp)
except urllib.error.HTTPError as error:
pass
# with open("HTTPError.html","wb") as f: f.write(error.read())
def curlC(self,mobileno,text):
if(self.notLogged):
print("you are not logged in - call logIn()")
return
        if(self.captchaNeeded):
with open(self.captchaPath,"wb") as f:
f.write(self.opener.open(self.captchaUrl).read())
p = Popen(["display",self.captchaPath])
self.dataDict['textcode'] = input("Captcha ? ")
p.kill()
if(len(text) <= 140):
self.dataDict['mobNo'] = mobileno
self.dataDict['text'] = text
lDict = {'url':self.sendSMSUrl,'cookieData':unquote(urlencode(self.cookieInfo)), 'data': self.postDataStr.format(**self.dataDict)}
return "curl '{url}' -H 'Cookie: {cookieData}' --data '{data}'".format(**lDict)
def chatSession(self,mobileno):
while True:
txt = input("> ")
if(txt==''):
break
self.sendSMS(mobileno,txt)
def getCaptcha(self):
if(self.notLogged):
print("you are not logged in - call logIn()")
return
with open(self.captchaPath,"wb") as f:
f.write(self.opener.open(self.captchaUrl).read())
p = Popen(["display",self.captchaPath])
sleep(2)
p.kill()
def logOut(self):
if(self.notLogged):
print("you are not logged in - call logIn()")
return
if self.opener.open(self.logOutUrl).read().decode() == "ok":
print("Logged out !!!")
self.notLogged = True
def searchContacts(self,name):
l = []
for key in self.contacts:
if(name.lower() in key.lower()):
l.append(key)
if(len(l)==0):
return None
elif(len(l)==1):
return (l[0],self.contacts[l[0]])
else:
for i in range(len(l)):
print(i+1,":",l[i])
c = int(input())
if(0<c<=len(l)):
return (l[c-1],self.contacts[l[c-1]])
else:
return None
if __name__=="__main__":
print("Type in your Way2SMS credentials..")
you = Way2SMS(input("Your mobile number : "),getpass("Password : "))
contact = you.searchContacts(input("Search contact : "))
print("Starting chat with "+contact[0]+"@"+contact[1])
you.chatSession(contact[1])
you.logOut()
|
#!/usr/bin/python
#File name: if.py
number = 23
guess = int(input('Enter an integer:'))
if guess == number:
    print ('Congratulations, you guessed it.')
print ('But you do not win any prizes!')
elif guess < number:
    print ('No, it is a little higher than that')
else:
    print ('No, it is a little lower than that')
print ('Done')
|
from ficha import Ficha
from casilla import Casilla
class Othelo:
N = 8
def __init__(self, turno = 1):
self.tablero_ = [[Casilla(X,Y) for Y in range(0, self.N)] for X in range(0, self.N)]
self.confInicial()
self.NroFichas = 4
        # initialize the 4 starting pieces
self.turno_ = 1
def makeJugada(self, jugada):
self.tablero_[jugada[0]][jugada[1]].setFicha(self.turno_)
self.NroFichas += 1
#self.turno_ = self.turno_ * -1
k = self.getAdjacentes(self.tablero_[jugada[0]][jugada[1]])
for pos in k:
if self.isOcupadaCasilla(pos):
cas = self.tablero_[pos[0]][pos[1]]
if cas.getFicha().getColor() != self.turno_ :
cas.rotarFicha()
self.turno_ = self.turno_ * -1
def MostrarPosiblesMovimientos(self):
l = []
for i in range(self.N):
for j in range(self.N):
if self.tablero_[i][j].esOcupada():
k = self.getAdjacentes(self.tablero_[i][j])
for pos in k:
if not self.isOcupadaCasilla(pos):
l.append(pos)
l = set(l)
        print('possible moves: ')
for e in l:
print(e)
def RetornarPosiblesMovimientos(self):
l = []
for i in range(self.N):
for j in range(self.N):
if self.tablero_[i][j].esOcupada():
k = self.getAdjacentes(self.tablero_[i][j])
for pos in k:
if not self.isOcupadaCasilla(pos):
l.append(pos)
l = set(l)
return l
def CalcularGanador(self):
b = 0
n = 0
for i in range(self.N):
for j in range(self.N):
if self.tablero_[i][j].ficha_.esNegro():
n += 1
elif self.tablero_[i][j].ficha_.esBlanco():
b += 1
        if b > n:
            print ('the white pieces have won !!!')
        elif n > b:
            print ('the black pieces have won !!!')
        else:
            print('it is a draw !!!')
def isOcupadaCasilla(self, pos):
return self.tablero_[pos[0]][pos[1]].esOcupada()
def getAdjacentes(self, casilla):
l = []
pos = casilla.getPos()
        l.append(( pos[0] , pos[1] - 1)) # left
        l.append(( pos[0] , pos[1] + 1)) # right
        l.append(( pos[0] - 1 , pos[1] )) # up
        l.append(( pos[0] + 1 , pos[1] )) # down
        l.append(( pos[0] -1 , pos[1] -1 )) # up-left
        l.append(( pos[0] + 1 , pos[1] - 1)) # down-left
        l.append(( pos[0] - 1, pos[1] + 1)) # up-right
        l.append(( pos[0] + 1, pos[1] + 1 )) # down-right
return [e for e in l if e[0] <= 7 and e[0] >= 0 and e[1] <= 7 and e[1] >= 0 ]
def mostrarTurno(self):
        if self.turno_ == 1:
            print('Black to play')
        if self.turno_ == -1:
            print('White to play')
def confInicial(self):
        '''Set up the initial board configuration'''
turno = -1
for i in range(3,5):
for j in range(3,5):
self.tablero_[i][j].setFicha(turno)
turno = turno *-1
turno = turno *-1
def gameOver(self):
return self.NroFichas == 64
def __str__(self):
strr = '# # # # # # # # ' + '\n'
for i in range(8):
for j in range(8):
strr += str(self.tablero_[i][j]) + ' '
strr += '\n'
strr += '# # # # # # # # ' + '\n'
return strr
"""
def prueba(self):
strr = ''
for i in range(8):
for j in range(8):
strr += str(self.tablero_[i][j])
strr += '\n'
print(strr)"""
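# A minimal usage sketch (assumes the Ficha and Casilla classes imported above
# behave as used in the methods): print the starting board, show whose turn it
# is and list the candidate moves.
if __name__ == '__main__':
    juego = Othelo()
    print(juego)
    juego.mostrarTurno()
    juego.MostrarPosiblesMovimientos()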
|
#!/usr/bin/env python
"""
okdist.py
=============
Used from Opticks okdist- bash functions.
"""
import os, logging, argparse
log = logging.getLogger(__name__)
from opticks.bin.dist import Dist
class OKDist(Dist):
"""
Creates a mostly binary tarball of the Opticks build products
along with some txt files that are needed at runtime. These
include the OpenGL shader sources.
With Geant4 libs and data excluded
305M /home/blyth/local/opticks/Opticks-0.0.0_alpha.tar
"""
exclude_dir_name = [ '.git',
'.hg',
'Geant4-10.2.1',
'Geant4-10.4.2']
exclude_dir_name_prev = [
'cmake', # cmake exported targets and config
'pkgconfig',
]
bases = ['include',
'lib', # order 400 executables
'lib64',
'externals/share/bcm', # cmake infrastructure enhancement
'externals/lib',
'externals/lib64',
'externals/OptiX/lib64',
'externals/glm/glm/glm',
'externals/plog/include',
'externals/include/OpenMesh',
'externals/include/assimp',
'externals/include/YoctoGL',
'externals/include/DualContouringSample',
'installcache/PTX',
'gl', # shaders
'tests', # tree of CTestTestfile.cmake
'integration',
'py', # installed python module tree
'bin',
'opticksaux', # a few example GDML files
'metadata',
             'cmake/Modules', # infrastructure for Finding, configuring etc.
]
bases_g4 = [
'externals/config',
'externals/share/Geant4-10.4.2/data', # adds about 1.6G to .tar when included
]
bases_xercesc = [
'externals/include/xercesc',
]
extras = []
def __init__(self, distprefix, distname, include_geant4, include_xercesc ):
extra_bases = []
if include_geant4:
extra_bases += self.bases_g4
pass
if include_xercesc:
extra_bases += self.bases_xercesc
pass
self.include_geant4 = include_geant4
self.include_xercesc = include_xercesc
self.include_optix_big = False
Dist.__init__(self, distprefix, distname, extra_bases)
def exclude_file(self, name):
exclude = False
if name.endswith(".log"):
exclude = True
pass
if name.startswith("libG4OK"): ## Opticks Geant4 interface lib named like g4 libs
exclude = False
elif name.startswith("libG4") and self.include_geant4 == False:
exclude = True
elif name.startswith("libxerces") and self.include_xercesc == False:
exclude = True
elif (name.startswith("liboptix_denoiser") or name.startswith("libcudnn")) and self.include_optix_big == False:
exclude = True
pass
return exclude
if __name__ == '__main__':
parser = argparse.ArgumentParser(__doc__)
parser.add_argument( "--distname", help="Distribution name including the extension, expect .tar or .tar.gz" )
parser.add_argument( "--distprefix", help="Distribution prefix, ie the top level directory structure within distribution file." )
    parser.add_argument( "--include_geant4", default=False, action="store_true", help="Include Geant4 libraries and datafiles in the distribution" )
parser.add_argument( "--include_xercesc", default=False, action="store_true", help="Include xercesc libraries and includes in the distribution" )
parser.add_argument( "--level", default="info", help="logging level" )
args = parser.parse_args()
fmt = '[%(asctime)s] p%(process)s {%(pathname)s:%(lineno)d} %(levelname)s - %(message)s'
logging.basicConfig(level=getattr(logging,args.level.upper()), format=fmt)
log.info("distprefix %s distname %s " % (args.distprefix, args.distname))
dist = OKDist(args.distprefix, args.distname, include_geant4=args.include_geant4, include_xercesc=args.include_xercesc)
print(dist.large())
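# Example invocation (paths and names are hypothetical; the flags match the
# argparse options defined above):
#
#   python okdist.py --distprefix Opticks-0.0.0_alpha \
#                    --distname Opticks-0.0.0_alpha.tar \
#                    --include_geant4 --level debug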
|
from pylab import *
from scipy import signal
from scipy.optimize import curve_fit
from . import base as wl
class Light(wl.Fringes):
"""
    Parameters
    ------------
    wl_c : (val) center wavelength [um]
    wl_bw : (val) wavelength bandwidth [um]
    wl_step : (val) wavelength step size [um]
    Attributes
    ------------
    scale_ : (array) scanning-mirror displacement [um]
    fringe_ : (array) interference fringe [um]
    envelope_ : (array) envelope [um]
"""
def __init__(self, wl_c=1560/1000, wl_bw=25/1000, wl_step=1/1000):
super().__init__()
self.wl_c = wl_c
self.wl_bw = wl_bw
self.wl_step = wl_step
        self.wl_list_ = np.arange(wl_c - wl_bw / 2 * 2, (wl_c + wl_step) + wl_bw / 2 * 2, wl_step) # list of wavelengths
@staticmethod
def ref_index_air(wl):
n = -8e-10 * wl + 1.0003
return n
@staticmethod
def ref_index_BK7(wl):
B1 = 1.03961212E+00
B2 = 2.31792344E-01
B3 = 1.01046945E+00
C1 = 6.00069867E-03
C2 = 2.00179144E-02
C3 = 1.03560653E+02
wl2 = wl**2
n = np.sqrt( 1 + B1*wl2 / (wl2 - C1) + B2*wl2 / (wl2 - C2) + B3*wl2 / (wl2 - C3) )
return n
@staticmethod
def phase_shift(wl, material):
params = {}
params['Ag'] = (1.2104, -1.3392, 6.8276, 0.1761)
params['Fe'] = (0.5294, -2.7947, 2.7647, 1.3724)
params['Al'] = (1.3394, -0.6279, 11.297, -1.5539)
params['Au'] = (0.6118, -0.3893, 6.4455, -0.1919)
param = params[material]
n = param[0] * wl + param[1]
k = param[2] * wl + param[3]
phi = np.arctan(-2 * k / (n * n + k * k - 1))
return phi
def I_gauss(self, wl):
        sigma2 = (self.wl_bw ** 2) / (8 * np.log(2))
        f = np.exp(-((wl - self.wl_c) ** 2) / (2 * sigma2)) / (np.power(2 * np.pi * sigma2, 0.5))
return f
def make_scale(self, scan_len, scan_step):
self.x = np.arange(-scan_len / 2, scan_len / 2 + scan_step, scan_step)
def make_scale_noised(self, jitter, grad):
self.x = jitter * randn(len(self.x)) + (1 + grad) * self.x
def make_fringe_noised(self, noise, drift):
a0 = noise * randn(len(self.x))
a1 = drift / max(self.x) * self.x
self.f = self.f + a0 + a1
    def make_fringe(self, l_ref=3000 * 1000, l_bs=0, offset=0, material='BK7'):
        """Create the scale and the interference fringe"""
fringe_list = []
        for wl in self.wl_list_:
            """Create the fringe for this wavelength wl"""
print("making fringe")
k_i = 2 * np.pi / wl
intensity = self.I_gauss(wl)
phi_x = k_i * self.x * 2
if material == 'BK7':
phi_r = np.pi
else:
                phi_r = self.phase_shift(wl, material) # phase shift on reflection (for non-glass materials)
phi_bs = k_i * (self.ref_index_BK7(wl) - self.ref_index_BK7(self.wl_c)) * l_bs * 2
phi_offset = k_i * offset * 2
phi = list(map(lambda x: x - phi_r - phi_bs - phi_offset + np.pi, phi_x))
fringe = intensity * np.cos(phi)
fringe_list.append(fringe)
print("done")
fringes = np.array(fringe_list)
        fringe_total = np.sum(fringes, axis=0) # superpose the fringes from all wavelengths
self.f = fringe_total / max(fringe_total)
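# A minimal usage sketch (hypothetical scan parameters; assumes the wl.Fringes
# base class needs no extra arguments): synthesize the white-light fringe over
# a 100 um scan with 10 nm steps and plot it with the pylab imports above.
if __name__ == '__main__':
    light = Light()
    light.make_scale(scan_len=100, scan_step=0.01)
    light.make_fringe(material='BK7')
    plot(light.x, light.f)
    show()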
|
import boto3
def create_launch_config(conig_name, image_id, inst_type):
"""
A function to create a launch configuration
"""
client = boto3.client('autoscaling', region_name='ap-south-1')
# creating the launch configuration
response = client.create_launch_configuration(
LaunchConfigurationName=conig_name,
ImageId=image_id,
InstanceType=inst_type,
InstanceMonitoring={
'Enabled': False
},
EbsOptimized=False,
AssociatePublicIpAddress=True,
)
# return the config name for creating auto scale group
return conig_name
def create_auto_scale_group(auto_scale_name):
"""
A function to create auto scale groups
"""
client = boto3.client('autoscaling', region_name='ap-south-1')
# call the launch config func
conf_name = create_launch_config("lc1", 'ami-0d2692b6acea72ee6', 't2.micro')
# creating the auto scale groups
response = client.create_auto_scaling_group(
AutoScalingGroupName=auto_scale_name,
LaunchConfigurationName=conf_name,
MinSize=1,
MaxSize=1,
VPCZoneIdentifier='subnet-9e56ecd2'
)
create_auto_scale_group('as1')
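# A minimal follow-up sketch (same hypothetical region and group name as
# above): confirm the group exists by describing it.
def describe_auto_scale_group(auto_scale_name):
    """
    A function to print the state of an auto scaling group
    """
    client = boto3.client('autoscaling', region_name='ap-south-1')
    response = client.describe_auto_scaling_groups(
        AutoScalingGroupNames=[auto_scale_name]
    )
    for group in response['AutoScalingGroups']:
        print(group['AutoScalingGroupName'], group['MinSize'], group['MaxSize'])
describe_auto_scale_group('as1')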
|
from kademlia.network import Server
from nkms.crypto import api as API
from nkms.crypto.constants import NOT_SIGNED, NO_DECRYPTION_PERFORMED
from nkms.crypto.powers import CryptoPower, SigningPower, EncryptingPower
from nkms.network.server import NuCypherDHTServer, NuCypherSeedOnlyDHTServer
class Character(object):
"""
A base-class for any character in our cryptography protocol narrative.
"""
_server = None
_server_class = Server
_actor_mapping = {}
_default_crypto_powerups = None
class NotFound(KeyError):
"""raised when we try to interact with an actor of whom we haven't learned yet."""
def __init__(self, attach_server=True, crypto_power: CryptoPower = None,
crypto_power_ups=[]):
"""
:param attach_server: Whether to attach a Server when this Character is born.
:param crypto_power: A CryptoPower object; if provided, this will be the character's CryptoPower.
:param crypto_power_ups: If crypto_power is not provided, a new CryptoPower will be made and
will consume all of the CryptoPowerUps in this list.
If neither crypto_power nor crypto_power_ups are provided, we give this Character all CryptoPowerUps
listed in their _default_crypto_powerups attribute.
"""
if crypto_power and crypto_power_ups:
raise ValueError("Pass crypto_power or crypto_power_ups (or neither), but not both.")
if attach_server:
self.attach_server()
if crypto_power:
self._crypto_power = crypto_power
elif crypto_power_ups:
self._crypto_power = CryptoPower(power_ups=crypto_power_ups)
else:
self._crypto_power = CryptoPower(self._default_crypto_powerups)
class Seal(object):
"""
Can be called to sign something or used to express the signing public key as bytes.
"""
__call__ = self._crypto_power.sign
def _as_tuple(seal):
return self._crypto_power.pubkey_sig_tuple()
def __iter__(seal):
yield from seal._as_tuple()
def __bytes__(seal):
return self._crypto_power.pubkey_sig_bytes()
def __eq__(seal, other):
return other == seal._as_tuple() or other == bytes(seal)
self.seal = Seal()
def attach_server(self, ksize=20, alpha=3, id=None, storage=None,
*args, **kwargs) -> None:
self._server = self._server_class(ksize, alpha, id, storage, *args, **kwargs)
@property
def server(self) -> Server:
if self._server:
return self._server
else:
raise RuntimeError("Server hasn't been attached.")
@property
def name(self):
return self.__class__.__name__
def learn_about_actor(self, actor):
self._actor_mapping[actor.id()] = actor
def encrypt_for(self, recipient: str, cleartext: bytes, sign: bool = True,
sign_cleartext=True) -> tuple:
"""
Looks up recipient actor, finds that actor's pubkey_enc on our keyring, and encrypts for them.
Optionally signs the message as well.
:param recipient: The character whose public key will be used to encrypt cleartext.
:param cleartext: The secret to be encrypted.
:param sign: Whether or not to sign the message.
:param sign_cleartext: When signing, the cleartext is signed if this is True, Otherwise, the resulting ciphertext is signed.
:return: A tuple, (ciphertext, signature). If sign==False, then signature will be NOT_SIGNED.
"""
actor = self._lookup_actor(recipient)
ciphertext = self._crypto_power.encrypt_for(actor.public_key(EncryptingPower),
cleartext)
if sign:
if sign_cleartext:
signature = self.seal(cleartext)
else:
signature = self.seal(ciphertext)
else:
signature = NOT_SIGNED
return ciphertext, signature
def verify_from(self, actor_whom_sender_claims_to_be: "Character", signature: bytes,
message: bytes, decrypt=False,
signature_is_on_cleartext=False) -> tuple:
"""
Inverse of encrypt_for.
        :param actor_whom_sender_claims_to_be: A Character instance representing the actor whom the sender claims to be. We check the public key owned by this Character instance to verify.
        :param message: The message to be verified.
        :param decrypt: Whether or not to decrypt the message.
:param signature_is_on_cleartext: True if we expect the signature to be on the cleartext. Otherwise, we presume that the ciphertext is what is signed.
:return: (Whether or not the signature is valid, the decrypted plaintext or NO_DECRYPTION_PERFORMED)
"""
cleartext = NO_DECRYPTION_PERFORMED
if signature_is_on_cleartext:
if decrypt:
cleartext = self._crypto_power.decrypt(message)
msg_digest = API.keccak_digest(cleartext)
else:
raise ValueError(
"Can't look for a signature on the cleartext if we're not decrypting.")
else:
msg_digest = API.keccak_digest(message)
actor = self._lookup_actor(actor_whom_sender_claims_to_be)
signature_pub_key = actor.seal
sig = API.ecdsa_load_sig(signature)
return API.ecdsa_verify(*sig, msg_digest, signature_pub_key), cleartext
def _lookup_actor(self, actor: "Character"):
try:
return self._actor_mapping[actor.id()]
except KeyError:
raise self.NotFound("We haven't learned of an actor with ID {}".format(actor.id()))
def id(self):
return "whatever actor id ends up being - {}".format(id(self))
def public_key(self, key_class):
try:
return self._crypto_power.public_keys[key_class]
except KeyError:
raise # TODO: Does it make sense to have a specialized exception here? Probably.
class Alice(Character):
_server_class = NuCypherSeedOnlyDHTServer
_default_crypto_powerups = [SigningPower, EncryptingPower]
def find_best_ursula(self):
# TODO: Right now this just finds the nearest node and returns its ip and port. Make it do something useful.
return self.server.bootstrappableNeighbors()[0]
def generate_rekey_frags(self, alice_privkey, bob_pubkey, m, n):
"""
Generates re-encryption key frags and returns the frags and encrypted
ephemeral key data.
:param alice_privkey: Alice's private key
:param bob_pubkey: Bob's public key
:param m: Minimum number of rekey shares needed to rebuild ciphertext
:param n: Total number of rekey shares to generate
:return: Tuple(kfrags, eph_key_data)
"""
kfrags, eph_key_data = API.ecies_ephemeral_split_rekey(
alice_privkey, bob_pubkey, m, n)
return (kfrags, eph_key_data)
class Bob(Character):
_default_crypto_powerups = [SigningPower, EncryptingPower]
class Ursula(Character):
_server_class = NuCypherDHTServer
_default_crypto_powerups = [SigningPower, EncryptingPower]
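# A rough usage sketch (not part of the module; attach_server=False keeps the
# DHT out of the picture, and whether decryption succeeds depends on how the
# EncryptingPower keys are set up):
#
#   alice = Alice(attach_server=False)
#   bob = Bob(attach_server=False)
#   alice.learn_about_actor(bob)
#   bob.learn_about_actor(alice)
#   ciphertext, signature = alice.encrypt_for(bob, b"illustrative secret")
#   valid, cleartext = bob.verify_from(alice, signature, ciphertext,
#                                      decrypt=True, signature_is_on_cleartext=True)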
|
from threading import Thread
from src.utils.templates.workerprocess import WorkerProcess
class MovementControl(WorkerProcess):
# ===================================== INIT =========================================
def __init__(self, inPs, outPs):
"""Controls the speed and steering of the vehicle
Parameters
------------
inPs : list(Pipe)
List of input pipes (not used)
outPs : list(Pipe)
List of output pipes (order does not matter)
"""
self.angle = 0.0
self.speed = 16.5
super(MovementControl,self).__init__(inPs, outPs)
    def _init_threads(self):
        """Initialize the read thread to transmit the received messages to other processes.
"""
startTh = Thread(name='InitialStart', target = self._singleUpdate, args=(self.outPs, ))
self.threads.append(startTh)
sendTh = Thread(name='SteeringListen',target = self._listen_for_steering, args = (self.inPs[0], self.outPs, ))
self.threads.append(sendTh)
# ===================================== RUN ==========================================
def run(self):
"""Apply the initializing methods and start the threads
"""
super(MovementControl,self).run()
def stop(self):
self.speed = 0.0
self.angle = 0.0
self._singleUpdate(self.outPs)
super(MovementControl, self).stop()
def _listen_for_steering(self, inP, outPs):
while True:
try:
value = inP.recv()
self.angle = float(value)
self._singleUpdate(outPs)
except Exception as e:
print("Listening error:")
print(e)
def _singleUpdate(self, outPs):
data = {}
if(self.speed != 0):
data['action'] = 'MCTL'
data['speed'] = float(self.speed/100.0)
else:
data['action'] = 'BRAK'
data['steerAngle'] = self.angle
try:
for outP in outPs:
outP.send(data)
except Exception as e:
print(e)
|
""" script to analyze the PSD cut for IBD events and NC events, that pass all cuts (except of PSD cut):
not all events are analyzed like in analyze_PSD_cut.py, but only the events that pass all cuts
(analyzed with analyze_spectrum_v2.py)
For each time window, the TTR values of events that pass all cuts (e.g. TTR_beforePSD_IBDevents_100ns_to_600ns.txt
and TTR_IBDlike_NCevents_100ns_to_600ns.txt) are read and then the IBD suppression is calculated for different NC
suppressions (very similar to analyze_PSD_cut_v2).
"""
import numpy as np
from NC_background_functions import tot_efficiency
# flag if PSD efficiency is independent of energy or not:
PSD_energy_independent = False
""" parameters for tail to total method: """
# INFO-me: parameters should agree with the bin-width of the time window!
# start of the tail in ns:
start_tail = [225.0, 225.0, 225.0, 250.0, 250.0, 250.0, 275.0, 275.0, 275.0, 300.0, 300.0, 300.0,
325.0, 325.0, 325.0, 350.0, 350.0]
# end of the tail in ns:
stop_tail = [600.0, 800.0, 1000.0, 600.0, 800.0, 1000.0, 600.0, 800.0, 1000.0, 600.0, 800.0, 1000.0,
600.0, 800.0, 1000.0, 600.0, 800.0]
# set input path, where TTR values are saved:
input_path = "/home/astro/blum/juno/atmoNC/data_NC/output_detsim_v2/" \
"DCR_results_16000mm_10MeVto100MeV_1000nsto1ms_mult1_1800keVto2550keV_dist500mm_R17700mm_PSD90/"
if PSD_energy_independent:
# if PSD_energy_independent = True: PSD efficiency calculation does not depend on energy of prompt signal!
# loop over the different time windows:
for index in range(len(start_tail)):
print("tail start = {0:.1f} ns".format(start_tail[index]))
print("tail end = {0:.1f} ns".format(stop_tail[index]))
# load TTR values of IBD events, that pass all cuts:
ttr_IBD = np.loadtxt(input_path + "TTR_beforePSD_IBDevents_{0:.0f}ns_to_{1:.0f}ns.txt".format(start_tail[index],
stop_tail[index]))
# load TTR values of NC events, that pass all cuts (IBD-like events):
ttr_NC = np.loadtxt(input_path + "TTR_IBDlike_NCevents_{0:.0f}ns_to_{1:.0f}ns.txt".format(start_tail[index],
stop_tail[index]))
# check the efficiency of PSD for different cut-efficiencies of NC events:
supp_NC = [93.0, 94.0, 95.0, 96.0, 97.0, 98.0, 99.0]
# calculate the IBD suppression and the TTR cut value for all NC suppressions:
for index1 in range(len(supp_NC)):
# calculate IBD suppression and corresponding TTR cut value:
supp_IBD, ttr_cut_value = tot_efficiency(ttr_IBD, ttr_NC, supp_NC[index1])
print(supp_NC[index1])
print(supp_IBD)
print(ttr_cut_value)
else:
# if PSD_energy_independent = False: PSD efficiency calculation does depend on energy of prompt signal!
# loop over the different time windows:
for index in range(len(start_tail)):
print("")
print("tail start = {0:.1f} ns".format(start_tail[index]))
print("tail end = {0:.1f} ns".format(stop_tail[index]))
# load TTR values of IBD events, that pass all cuts:
ttr_IBD = np.loadtxt(input_path + "TTR_beforePSD_IBDevents_{0:.0f}ns_to_{1:.0f}ns.txt".format(start_tail[index],
stop_tail[index]))
# load TTR values of NC events, that pass all cuts (IBD-like events):
ttr_NC = np.loadtxt(input_path + "TTR_IBDlike_NCevents_{0:.0f}ns_to_{1:.0f}ns.txt".format(start_tail[index],
stop_tail[index]))
# load filenumber, evtID and Evis of IBD events that pass all cuts:
IBD_array = np.loadtxt(input_path + "IBD_filenumber_evtID_Evis_pass_all_cuts_wo_PSD.txt")
# load filenumber, evtID and Evis of NC events that pass all cuts (IBD-like events):
NC_array = np.loadtxt(input_path + "atmoNC_filenumber_evtID_Evis_pass_all_cuts_wo_PSD.txt")
# get visible energy of IBD events that pass all cuts:
Evis_IBD = IBD_array[:, 2]
# get visible energy of NC events that pass all cuts:
Evis_NC = NC_array[:, 2]
        # preallocate arrays where the TTR values are stored depending on their energies:
ttr_IBD_10_20 = []
ttr_IBD_20_30 = []
ttr_IBD_30_40 = []
ttr_IBD_40_100 = []
# loop over ttr_IBD and fill arrays depending on their energy:
for index2 in range(len(ttr_IBD)):
if Evis_IBD[index2] <= 20.0:
ttr_IBD_10_20.append(ttr_IBD[index2])
elif 20.0 < Evis_IBD[index2] <= 30.0:
ttr_IBD_20_30.append(ttr_IBD[index2])
elif 30.0 < Evis_IBD[index2] <= 40.0:
ttr_IBD_30_40.append(ttr_IBD[index2])
else:
ttr_IBD_40_100.append(ttr_IBD[index2])
        # preallocate arrays where the TTR values are stored depending on their energies:
ttr_NC_10_20 = []
ttr_NC_20_30 = []
ttr_NC_30_40 = []
ttr_NC_40_100 = []
# loop over ttr_NC and fill arrays depending on their energy:
for index2 in range(len(ttr_NC)):
if Evis_NC[index2] <= 20.0:
ttr_NC_10_20.append(ttr_NC[index2])
elif 20.0 < Evis_NC[index2] <= 30.0:
ttr_NC_20_30.append(ttr_NC[index2])
elif 30.0 < Evis_NC[index2] <= 40.0:
ttr_NC_30_40.append(ttr_NC[index2])
else:
ttr_NC_40_100.append(ttr_NC[index2])
# check the efficiency of PSD for different cut-efficiencies of NC events:
# supp_NC = [95.0, 97.0, 98.0, 99.0, 99.9]
supp_NC = [99.99]
# calculate the IBD suppression and the TTR cut value for all NC suppressions:
for index1 in range(len(supp_NC)):
# calculate IBD suppression and corresponding TTR cut value:
# supp_IBD_10_20, ttr_cut_value_10_20 = tot_efficiency(ttr_IBD_10_20, ttr_NC_10_20, supp_NC[index1])
# print("10 MeV to 20 MeV:")
# print(supp_NC[index1])
# print(supp_IBD_10_20)
# print(ttr_cut_value_10_20)
# calculate IBD suppression and corresponding TTR cut value:
# supp_IBD_20_30, ttr_cut_value_20_30 = tot_efficiency(ttr_IBD_20_30, ttr_NC_20_30, supp_NC[index1])
# print("20 MeV to 30 MeV:")
# print(supp_NC[index1])
# print(supp_IBD_20_30)
# print(ttr_cut_value_20_30)
# calculate IBD suppression and corresponding TTR cut value:
# supp_IBD_30_40, ttr_cut_value_30_40 = tot_efficiency(ttr_IBD_30_40, ttr_NC_30_40, supp_NC[index1])
# print("30 MeV to 40 MeV:")
# print(supp_NC[index1])
# print(supp_IBD_30_40)
# print(ttr_cut_value_30_40)
# calculate IBD suppression and corresponding TTR cut value:
supp_IBD_40_100, ttr_cut_value_40_100 = tot_efficiency(ttr_IBD_40_100, ttr_NC_40_100, supp_NC[index1])
# print("40 MeV to 100 MeV:")
print(supp_NC[index1])
print(supp_IBD_40_100)
print(ttr_cut_value_40_100)
print("")
|
# Generated by Django 2.1.3 on 2019-03-02 07:51
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('api', '0011_auto_20190225_1931'),
]
operations = [
migrations.AddField(
model_name='companystuff',
name='role',
field=models.IntegerField(default=0),
),
]
|
from rest_framework import serializers
from api.models import APIInfo
class APIInfoSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = APIInfo
fields = "__all__"
class APISerializer(serializers.ModelSerializer):
class Meta:
model = APIInfo
fields = "__all__"
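# A minimal usage sketch (assumes Django is configured and at least one
# APIInfo row exists; run it e.g. from `manage.py shell`):
#
#   from api.serializers import APISerializer
#   from api.models import APIInfo
#   instance = APIInfo.objects.first()
#   APISerializer(instance).data   # plain dict of every APIInfo field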
|
from datetime import datetime, timedelta
from substrateinterface import SubstrateInterface
from substrateinterface.extensions import SubstrateNodeExtension
import logging
logging.basicConfig(level=logging.DEBUG)
substrate = SubstrateInterface(url="wss://rpc.polkadot.io")
substrate.register_extension(SubstrateNodeExtension(max_block_range=100))
# Search for block number corresponding a specific datetime
block_datetime = datetime(2022, 1, 1, 0, 0, 0)
block_number = substrate.extensions.search_block_number(block_datetime=block_datetime)
print(f'Block number for {block_datetime}: #{block_number}')
# account_info = substrate.runtime.
# exit()
# Returns all `Balances.Transfer` events from the last 30 blocks
events = substrate.extensions.filter_events(pallet_name="Balances", event_name="Transfer", block_start=-30)
print(events)
# All Timestamp extrinsics in block range #3 until #6
extrinsics = substrate.extensions.filter_extrinsics(pallet_name="Timestamp", block_start=3, block_end=6)
print(extrinsics)
|
import warnings
import logging
import sklearn
from asl_data import SinglesData
logging.basicConfig(level=logging.INFO)
def recognize(models: dict, test_set: SinglesData):
""" Recognize test word sequences from word models set
:param models: dict of trained models
{'SOMEWORD': GaussianHMM model object, 'SOMEOTHERWORD': GaussianHMM model object, ...}
:param test_set: SinglesData object
:return: (list, list) as probabilities, guesses
both lists are ordered by the test set word_id
    probabilities is a list of dictionaries where each key is a word and each value is its Log Likelihood
[{SOMEWORD': LogLvalue, 'SOMEOTHERWORD' LogLvalue, ... },
{SOMEWORD': LogLvalue, 'SOMEOTHERWORD' LogLvalue, ... },
]
guesses is a list of the best guess words ordered by the test set word_id
['WORDGUESS0', 'WORDGUESS1', 'WORDGUESS2',...]
"""
warnings.filterwarnings("ignore", category=DeprecationWarning)
probabilities = []
guesses = []
# go through each item in the test set one by one
test_sequences = list(test_set.get_all_Xlengths().values())
for test_X, test_Xlength in test_sequences:
logL = float("-inf")
logLDict = {}
for word, model in models.items():
#score the current word in test_set
try:
if model == None:
continue
logL = model.score(test_X, test_Xlength)
logLDict[word] = logL
except RuntimeWarning as rw:
logging.warning('Recognizer: RuntimeWarning : %s', rw)
except ValueError as ve:
logging.warning('Recognizer: ValueError : %s', ve)
#append the dict
probabilities.append(logLDict)
logging.info('length of logLDict=%d', len(logLDict))
max_logl_word = max(logLDict,key=logLDict.get)
# the one with maximum LogL gets added to guesses
guesses.append(max_logl_word)
return probabilities, guesses
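# A minimal usage sketch (assumes `models` comes from a trained model selector
# and `test_set` from asl_data; the variable names are illustrative only):
#
#   probabilities, guesses = recognize(models, test_set)
#   for word_id in range(3):
#       best = guesses[word_id]
#       print(word_id, best, probabilities[word_id][best])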
|
from abc import ABC, abstractmethod
class Instruction(ABC):
def __init__(self, line, column):
self.line = line
self.column = column
self.array = False
super().__init__()
@abstractmethod
def interpreter(self, tree, table):
pass
@abstractmethod
def getNode(self):
pass
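# A minimal illustrative subclass (hypothetical: a real instruction would walk
# `tree` and `table` and build a proper AST node instead of a plain dict).
class Noop(Instruction):
    def __init__(self, line, column):
        super().__init__(line, column)
    def interpreter(self, tree, table):
        # nothing to execute for a no-op
        return None
    def getNode(self):
        # hypothetical node representation
        return {'label': 'noop', 'line': self.line, 'column': self.column}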
|
'''
Utility methods allowing to reconfigure UNICORE components easily.
Low level interface, rather not used directly in configurators.
@organization: ICM UW
@author: K.Benedyczak @: golbi@icm.edu.pl
@author: R.Kluszczynski @: klusi@icm.edu.pl
'''
import tempfile, os
import sys
import stat
import shutil
import re
from datetime import datetime
from lxml import etree
def info(options, msg):
''' Prints information message to stdout '''
if (options.quiet):
return
print options.indent + msg
def error(msg):
''' Prints error message to stderr '''
print >> sys.stderr, "ERROR " + msg
def fatal(msg):
''' Prints fatal message to stderr and exits '''
print >> sys.stderr, "FATAL " + msg
sys.exit(1)
def getConfigRoot(options, component):
"""Returns a configuration root directory, depending on installation type"""
if options.manualConfigurationRoot != None:
if os.path.isdir(options.manualConfigurationRoot + '/' + component):
return options.manualConfigurationRoot + '/' + component + '/'
return options.manualConfigurationRoot
if options.systemInstall:
return options.configsRoot + component + '/'
else:
return options.configsRoot + component + '/conf/'
def getFile(options, component, cfgFile):
"""Returns a configuration file path, depending on installation type"""
if re.match('^/|^[a-zA-Z]:', cfgFile):
return cfgFile
else:
return getConfigRoot(options, component) + cfgFile
def copyPreserve(options, src, dest, moveSrc, preserveSrc):
'''
    Copy a file, preserving permissions.
    If '--dry-run' is used, return immediately.
'''
if options.dry == True:
return
if preserveSrc == True:
st = os.stat(src)
else:
if os.path.isfile(dest):
st = os.stat(dest)
else:
st = None
if moveSrc == True:
shutil.move(src, dest)
if st != None:
__copyFileMetadata(dest, st)
else:
shutil.copy(src, dest)
__copyFileMetadata(dest, st)
def __copyFileMetadata(destFilename, srcStatData):
''' Copy permissions and owner of a file '''
os.chown(destFilename, srcStatData[stat.ST_UID], srcStatData[stat.ST_GID])
os.chmod(destFilename, srcStatData[stat.ST_MODE])
def backupFile(options, filename):
'''
    Backs up a file if no backup exists yet.
    If '--always-backup' is used, an additional copy is stored with a timestamp suffix.
    If '--no-backup' is used, the function returns immediately.
'''
if options.backup == False:
return
if not os.path.isfile(filename):
return
originFile = filename + '_origin'
#if not exists, make a copy of the original config file
if not os.path.isfile(originFile):
info(options, "Making backup of %s to %s" % (filename, originFile))
copyPreserve(options, filename, originFile, False, True)
else:
if options.backupAlways:
timestampName = filename + options.backupsuffix
info(options, "File '%s' exists. Saving backup as %s" % (originFile, timestampName))
copyPreserve(options, filename, timestampName, False, True)
else:
info(options, "File '%s' exists. Skipping backup..." % originFile)
def loadXMLDocumentFromFile(options, filename, stripComments = False):
'''
Loads XML document from file and return it.
@return: XML document (etree.XMLElementTree object)
'''
info(options, 'File ' + filename)
doc = etree.parse(filename, __getXMLParser(options, stripComments))
return doc
def writeXMLDocument(options, xmlDoc, filename = None):
'''
Writes XML document to file descriptor (default is standard output).
'''
if filename == None:
fd = sys.stdout
else:
fh, tmpFilename = tempfile.mkstemp()
fd = os.fdopen(fh, 'w')
xmlDoc.write(fd, encoding = 'utf-8', method = 'xml', pretty_print = True, xml_declaration = True)
if filename != None:
fd.close()
copyPreserve(options, tmpFilename, filename, True, False)
if options.dry == True: os.unlink(tmpFilename)
def setXMLElementAttribute(options, xmlDoc, xPath, name, value, nsPrefixMap = {}):
'''
Sets one attribute value of a XML document's tag.
@param options: global options
@param doc: XML document (etree.XMLElementTree object)
@param xPath: XPath determining element
@param name: attribute's name
@param value: attribute's value
@param nsPrefixMap: prefix map of namespaces used in XPath
'''
__setXMLElementAttributes(options, xmlDoc, xPath, { name : value }, nsPrefixMap)
def setXMLElementAttributes(options, xmlDoc, xPath, newAttrsValues = {}, nsPrefixMap = {}):
__setXMLElementAttributes(options, xmlDoc, xPath, newAttrsValues, nsPrefixMap)
def __setXMLElementAttributes(options, xmlDoc, elementXPath, newAttrsValues, nsPrefixMap = {}):
'''
    Sets attribute values of an XML document's element, adding them if they do not exist.
@param options: global options
@param doc: XML document (etree.XMLElementTree object)
@param xPath: XPath determining element
@param newAttrsValues: dictionary containing attributes names and values
@param nsPrefixMap: prefix map of namespaces used in XPath
'''
result = xmlDoc.xpath(elementXPath, namespaces = nsPrefixMap)
if len(result) > 0:
if len(result) > 1:
            info(options, "More than one element under '" + elementXPath + "'. Changing only the first one.")
element = result[0]
for attribute in newAttrsValues:
oldVal = element.get(attribute)
newVal = newAttrsValues[attribute]
if oldVal != newVal:
element.set(attribute, newVal)
if oldVal == None:
oldVal = 'NONE'
info(options, " - setting " + elementXPath + " attribute '" + attribute + "' to '" + newVal + "' (old value: '" + oldVal + "')")
else:
error("No elements under '" + elementXPath + "'.")
def checkXPathExistence(options, xmlDoc, elementXPath, nsPrefixMap = {}):
'''
Check if XPath refers to at least one element in XML document.
@param options: global options
@param doc: XML document (etree.XMLElementTree object)
@param elementXPath: XPath string
@param nsPrefixMap: prefix map of namespaces used in XPath
@return: True if exists at least one element pointed by XPath expression, otherwise False
'''
return len(xmlDoc.xpath(elementXPath, namespaces = nsPrefixMap)) > 0
def addAdditionalXMLs(options, xmlDoc, stringElements2Add, addAfterXPath = None, nsPrefixMap = {}):
'''
Adds additional XML elements (put by strings) to the document.
@param options: global options
@param filename: name of file
@param stringElements2Add: list of strings containing parts of XML
@param addAfterXPath: XPath string refers to an element after which new elements should be added
@param nsPrefixMap: prefix map of namespaces used in XPath
'''
if addAfterXPath:
stringElements2Add = reversed(stringElements2Add)
for newXMLElement in stringElements2Add:
__addAdditionalXML(options, xmlDoc, newXMLElement, addAfterXPath, nsPrefixMap)
def addAdditionalXML(options, xmlDoc, stringElement2Add, addAfterXPath = None, nsPrefixMap = {}):
__addAdditionalXML(options, xmlDoc, stringElement2Add, addAfterXPath, nsPrefixMap)
def __addAdditionalXML(options, doc, stringElement2Add, addAfterXPath = None, nsPrefixMap = {}):
'''
Adds additional XML element to the document.
@param options: global options
@param doc: XML document (etree.XMLElementTree object)
@param stringElement2Add: string containing part of XML
@param addAfterXPath: XPath string refers to an element after which stringElement2Add should be added
@param nsPrefixMap: prefix map of namespaces used in XPath
'''
matcher = re.match("<!--(.*)-->", stringElement2Add)
if matcher == None:
el = etree.fromstring(stringElement2Add, __getXMLParser(options))
else:
strip = matcher.group(1)
el = etree.Comment(strip)
addAfterElement = None
if addAfterXPath != None and addAfterXPath != '':
result = doc.xpath(addAfterXPath, namespaces = nsPrefixMap)
if len(result) > 0:
addAfterElement = result[0]
if addAfterElement == None:
doc.getroot().append(el)
else:
addAfterElement.addnext(el)
def removeXPathElements(options, xmlDoc, xPathExpression, nsPrefixMap = {}):
'''
Removes elements pointed by XPath expression.
@param options: global options
@param xmlDoc: XML document (etree.XMLElementTree object)
@param xPathExpression: XPath expression
@param nsPrefixMap: prefix map of namespaces used in XPath
'''
elements = xmlDoc.xpath(xPathExpression, namespaces = nsPrefixMap)
if len(elements) > 0:
parent = elements[0].getparent()
if parent != None:
for e in elements:
parent.remove(e)
info(options, " - removed element at '" + xPathExpression + "'")
else:
info(options, " - can not remove XML root element")
else:
info(options, " - nothing to remove at '" + xPathExpression + "'")
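# Hedged usage sketch of the XML helpers above (illustration only; the file
# path, XPath expressions and namespace URI are hypothetical):
#   doc = loadXMLDocumentFromFile(options, '/opt/unicore/gateway/conf/gateway.xml')
#   ns = {'gw': 'http://example.org/gateway'}
#   if checkXPathExistence(options, doc, '/gw:gateway/gw:site', ns):
#       setXMLElementAttribute(options, doc, '/gw:gateway/gw:site', 'name', 'DEMO-SITE', ns)
#   removeXPathElements(options, doc, '/gw:gateway/gw:obsolete', ns)
#   writeXMLDocument(options, doc, '/opt/unicore/gateway/conf/gateway.xml')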
def __getXMLParser(options, noComments = False):
'''
Gets XMLParser object for parsing XML documents.
@param options: global options
@return: XML parser (etree.XMLParser object)
'''
if options.parser == None:
options.parser = etree.XMLParser(remove_blank_text = True, remove_comments = noComments)
return options.parser
def setJavaProperties(options, filename, propsDict):
processJavaProperties(options, filename, propsDict, "value")
def processJavaProperties(options, filename, propsDict, mode):
'''
    Sets a property; if a commented-out occurrence is found, the first one is replaced.
The rest, if detected, will be commented out. The mode of operation
can be changed to comment out properties or to change keys.
@param options: global options
@param filename: filename to change
@param propsDict: dictionary with java properties to set
@param mode: for 'value' -> updates property value
for 'key' -> updates property key
for 'comment' -> comments the property
@todo: handle keys with spaces
@todo: handle comments after key & value pair
'''
info(options, "Updating properties file '%s'" % filename)
fd, tmpFilename = tempfile.mkstemp()
tmpFile = os.fdopen(fd, 'w')
propertiesFile = file(filename, "r")
previousLines = ''
for line in propertiesFile:
strippedLine = previousLines + line.strip()
if len(strippedLine) > 0:
#check a multiline
if strippedLine[-1] == '\\':
previousLines = strippedLine[:-1]
continue
else:
previousLines = ''
            #strip comment characters at the beginning if present
comment = re.match('[#!]+', strippedLine)
if comment != None:
propDef = strippedLine[comment.end():].strip()
else:
propDef = strippedLine
name, value = __getNameAndValueOfPropertyLine(propDef)
if name in propsDict:
newValue = propsDict[name]
if mode == "value":
#check if it was already used
if newValue != None:
if value != newValue:
info(options, " - setting '" + name + "' to '" + newValue + "' (old value: '" + value + "')")
strippedLine = name + '=' + newValue
propsDict[name] = None
else:
if not re.match('[#!]', strippedLine):
strippedLine = '# ' + strippedLine
elif mode == "key":
info(options, " - converting '" + name + "' to '" + newValue + "'")
if not re.match('[#!]', strippedLine):
strippedLine = newValue + '=' + value
else:
strippedLine = "#" + newValue + '=' + value
elif mode == "comment":
if not re.match('[#!]', strippedLine):
info(options, " - commenting the " + name + "=" + value + " entry " + newValue)
strippedLine = "#"+strippedLine
if newValue != "":
strippedLine = "# !UNICORE CONFIGURATION UPDATE MESSAGE! "+newValue+"\n"+strippedLine
info(options, "WARNING " + newValue)
tmpFile.write(strippedLine + '\n')
#closing original file
if previousLines != '':
tmpFile.write("# " + previousLines + '\n')
propertiesFile.close()
if mode == "value":
#adding remaining variables
firstOne = True
for name in sorted(propsDict.keys()):
newValue = propsDict[name]
if newValue != None:
if firstOne:
tmpFile.write('\n## Added new properties by configurator (' + str(datetime.now()) + ')\n')
firstOne = False
tmpFile.write(name + '=' + newValue + '\n')
info(options, " - setting new property '" + name + "' to '" + newValue + "'")
#closing temporary file
tmpFile.close()
copyPreserve(options, tmpFilename, filename, True, False)
if options.dry == True: os.unlink(tmpFilename)
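# Hedged usage sketch (illustration only; the file path and property names are
# hypothetical):
#   setJavaProperties(options, '/opt/unicore/unicorex/conf/uas.config',
#                     {'container.host': 'localhost', 'container.port': '7777'})
#   # renaming a key: propsDict maps the old key to the new key
#   processJavaProperties(options, '/opt/unicore/unicorex/conf/uas.config',
#                         {'old.property.name': 'new.property.name'}, 'key')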
def getJavaProperty(filename, name):
if not os.path.isfile(filename):
        return None
propertiesFile = file(filename, "r")
previousLines = ''
for line in propertiesFile:
strippedLine = previousLines + line.strip()
if len(strippedLine) > 0:
#check a multiline
if strippedLine[-1] == '\\':
previousLines = strippedLine[:-1]
continue
else:
previousLines = ''
            #strip comment characters at the beginning if present
comment = re.match('[#!]+', strippedLine)
if comment != None:
continue
else:
propDef = strippedLine
key, value = __getNameAndValueOfPropertyLine(propDef)
if (key == name):
propertiesFile.close()
return value
propertiesFile.close()
return None
def getJavaPropertyKeys(filename):
R = []
if not os.path.isfile(filename):
        return R
propertiesFile = file(filename, "r")
previousLines = ''
for line in propertiesFile:
strippedLine = previousLines + line.strip()
if len(strippedLine) > 0:
            #strip comment characters at the beginning if present
comment = re.match('[#!]+', strippedLine)
if comment != None:
continue
else:
propDef = strippedLine
#check a multiline
if strippedLine[-1] == '\\':
previousLines = strippedLine[:-1]
continue
else:
previousLines = ''
key, value = __getNameAndValueOfPropertyLine(propDef)
R.append(key)
propertiesFile.close()
return R
def setShellVariables(options, filename, varsDict):
'''
    Sets a config variable; if a commented-out occurrence is found, the first one is replaced.
@param options: global options
@param filename: filename to change
@param propsDict: dictionary with shell variables to set
    @todo: handle multi-line values (when a line ends with a backslash)
'''
info(options, "Updating shell variables file '%s'" % filename)
fd, tmpFilename = tempfile.mkstemp()
tmpFile = os.fdopen(fd, 'w')
configFile = file(filename, 'r')
for line in configFile:
spacesPrefix = ''
matchPrefixSpaces = re.match('[ \t]*', line)
if matchPrefixSpaces != None:
spacesPrefix = line[0:matchPrefixSpaces.end()]
strippedLine = line.strip()
if len(strippedLine) > 0:
            ''' strip comment characters at the beginning if present '''
comment = re.match('#+', strippedLine)
if comment != None:
propDef = strippedLine[comment.end():].strip()
else:
propDef = strippedLine
name, value = __getNameAndValueOfPropertyLine(propDef)
if name in varsDict:
newValue = varsDict[name]
''' check if it was already used '''
if newValue != None:
if value != newValue:
info(options, " - setting '" + name + "' to '" + newValue + "' (old value: '" + value + "')")
strippedLine = name + '=' + newValue
varsDict[name] = None
else:
if strippedLine[0] != '#':
strippedLine = '# ' + strippedLine
tmpFile.write(spacesPrefix + strippedLine + '\n')
''' closing original file '''
configFile.close()
''' adding remaining variables '''
firstOne = True
for name in varsDict.iterkeys():
newValue = varsDict[name]
if newValue != None:
if firstOne:
tmpFile.write('\n## Added new variables by configurator (' + str(datetime.now()) + ')\n')
firstOne = False
tmpFile.write(name + '=' + newValue + '\n')
info(options, " - setting new property '" + name + "' to '" + newValue + "'")
''' closing temporary file '''
tmpFile.close()
copyPreserve(options, tmpFilename, filename, True, False)
if options.dry == True: os.unlink(tmpFilename)
def __getNameAndValueOfPropertyLine(strippedLine):
'''
Gets name and value from stripped line of properties/shell config file.
Based on:
http://www.linuxtopia.org/online_books/programming_books/python_programming/python_ch34s04.html
'''
keyDelimeterChars = ':='
# keyDelimeterChars = ':= '
# if strippedLine.find(':') >= 0 or strippedLine.find('=') >= 0:
# keyDelimeterChars = ':='
punctuation = [ strippedLine.find(c) for c in keyDelimeterChars ] + [ len(strippedLine) ]
found = min([ pos for pos in punctuation if pos != -1 ])
name = strippedLine[:found].rstrip()
value = strippedLine[found:].lstrip(keyDelimeterChars).rstrip()
return (name, value)
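# Hedged illustration of the split performed above (traced against the
# implementation; the property name is a made-up example):
#   __getNameAndValueOfPropertyLine('container.host=localhost')
#       -> ('container.host', 'localhost')
# Note that only ':' and '=' are stripped from the front of the value, so any
# whitespace that follows the delimiter is kept in the returned value.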
def appendLinesIfNotAlreadyExist(options, filename, linesList):
'''
    Appends lines to a file if they are not already present.
'''
info(options, "Appending lines to file '%s'" % filename)
linesSet = set(linesList)
configFile = file(filename, 'r')
for line in configFile:
strippedLine = line.strip()
if strippedLine in linesSet:
linesSet.remove(strippedLine)
info(options, " - line already exists: " + strippedLine)
configFile.close()
''' adding left lines '''
if len(linesSet) > 0:
try:
if options.dry:
fd = None
else:
fd = open(filename, 'a')
firstOne = True
for line in linesList:
if line in linesSet:
if fd != None:
if firstOne :
fd.write('\n## Added new lines by configurator (' + str(datetime.now()) + ')\n')
firstOne = False
fd.write(line + '\n')
info(options, " - line appended: " + line)
if fd != None: fd.close()
except IOError, (errno, strerror):
            error("Processing configuration " + filename + ": " + "I/O error(%s): %s" % (errno, strerror))
def removeLinesIfExist(options, filename, linesList):
'''
Removes existing lines.
'''
    info(options, "Removing lines from file '%s'" % filename)
linesSet = set(linesList)
fd, tmpFilename = tempfile.mkstemp()
tmpFile = os.fdopen(fd, 'w')
configFile = file(filename, 'r')
for line in configFile:
rightStrippedLine = line.rstrip()
if not rightStrippedLine in linesSet:
tmpFile.write(rightStrippedLine + '\n')
else:
info(options, ' - removing line:' + rightStrippedLine)
configFile.close()
tmpFile.close()
copyPreserve(options, tmpFilename, filename, True, False)
if options.dry == True: os.unlink(tmpFilename)
def replaceFileLines(options, filename, linesDict):
'''
    Replaces lines in a file.
'''
info(options, "Replacing lines in file '" + filename + "'")
fd, tmpFilename = tempfile.mkstemp()
tmpFile = os.fdopen(fd, 'w')
configFile = file(filename, 'r')
for line in configFile:
rightStrippedLine = line.rstrip()
if rightStrippedLine in linesDict:
info(options, " - replacing line '%s' with '%s'" % (rightStrippedLine, linesDict[rightStrippedLine]))
tmpFile.write(linesDict[rightStrippedLine] + '\n')
else:
tmpFile.write(rightStrippedLine + '\n')
''' closing original file '''
configFile.close()
''' closing temporary file '''
tmpFile.close()
copyPreserve(options, tmpFilename, filename, True, False)
if options.dry == True: os.unlink(tmpFilename)
|
# 0709 - group lab exercise
import csv
import os
import time
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as func
import torch.optim as optim
from matplotlib import pyplot as plt
from sklearn.model_selection import train_test_split
from torch.utils.data import Dataset, DataLoader
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.fc1 = nn.Linear(6, 128)
self.batch_norm1 = nn.BatchNorm1d(128)
self.fc2 = nn.Linear(128, 32)
self.fc3 = nn.Linear(32, 16)
self.fc_final = nn.Linear(16, 7)
self.sig = nn.Sigmoid()
def forward(self, x):
x = self.fc1(x)
x = self.batch_norm1(x)
x = self.fc2(x)
x = func.relu(x)
x = self.fc3(x)
# x = self.fc4(x)
# x = self.fc5(x)
x = self.fc_final(x)
x = self.sig(x)
return x
class CustomDataset(Dataset):
def __init__(self, data, label, transforms=None):
self.x = data
self.y = label
def __len__(self):
return len(self.x)
def __getitem__(self, idx):
x = self.x[idx]
y = self.y[idx]
x = np.array(x)
y = np.array(y)
return x, y
# [Load data]
fish_header = []
fish_data = []
with open('Fish.csv', mode='r', encoding='utf-8-sig') as fish_csv:
rdr = csv.reader(fish_csv)
header = True
for line in rdr:
if header:
for item in line:
fish_header.append(item)
header = False
else:
fish_data.append(line)
# print(fish_header)
# print('----')
# print(fish_data[:4])
# [Preprocess data]
fish_data = np.array(fish_data)
# # split features/labels
fish_X = fish_data[:, 1:]
fish_X = np.array(fish_X, dtype=float)
fish_y = fish_data[:, 0]
# encode the species label as an integer index
species_set = set()
for sp in fish_y:
    species_set.add(sp) # remove duplicates
species_list = list(species_set)
species_list.sort()
fish_y = np.array([species_list.index(sp) for sp in fish_y])
print(species_list)
plt.rc('font', family='SeoulNamsan')
# correlation: weight vs species
plt.xlabel('weight')
plt.scatter(fish_X[:, 0], fish_y, marker='o', alpha=0.4)
plt.yticks(list(range(7)), species_list)
plt.show()
# correlation: length vs length, by species
sc = plt.scatter(x=fish_X[:, 2] - fish_X[:, 1], y=fish_X[:, 3] - fish_X[:, 2], marker='o', s=10, alpha=0.4, c=fish_y,
cmap=plt.cm.get_cmap('rainbow', 7))
plt.xlabel('L2 - L1')
plt.ylabel('L3 - L2')
plt.legend(*sc.legend_elements())
plt.show()
# correlation: width vs height, by species
sc = plt.scatter(x=fish_X[:, 5], y=fish_X[:, 4], marker='o', s=10, alpha=0.4, c=fish_y,
cmap=plt.cm.get_cmap('rainbow', 7))
plt.xlabel('width')
plt.ylabel('height')
plt.legend(*sc.legend_elements())
plt.show()
x_train, x_test, y_train, y_test = \
train_test_split(fish_X, fish_y, test_size=1 / 5, random_state=20210708, shuffle=True, stratify=fish_y)
if torch.cuda.is_available():
print('Using CUDA')
device = torch.device('cuda')
else:
print('Using CPU')
device = torch.device('cpu')
# [Declarations]
model = Net().to(device)
criterion = nn.CrossEntropyLoss()
# optimizer = optim.SGD(model.parameters(), lr=0.01)
optimizer = optim.Adam(model.parameters(), lr=0.01)
lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=256, gamma=0.2)
# [Training]
max_epoch = 4096
save_step = 50
train_batch_size = 24
train_dataset = CustomDataset(data=x_train, label=y_train)
train_loader = DataLoader(train_dataset, batch_size=train_batch_size, shuffle=True, drop_last=True)
train_loss_dict = {}
os.makedirs('./models', exist_ok=True)
do_train = True # set to False to skip training
if do_train:
os.system('echo y | del models')
print('Waiting for files to be deleted...')
time.sleep(5)
    model.train() # training mode
for i in range(max_epoch):
total_loss = 0
for x, y in train_loader:
x = x.float().to(device)
y = y.long().to(device)
outputs = model(x)
loss = criterion(outputs, y)
optimizer.zero_grad()
loss.backward()
total_loss += loss.item()
outputs = outputs.detach()
outputs = outputs.numpy()
y = y.numpy()
avg_loss = total_loss / len(x_train)
if i % save_step == 0:
print(f"epoch -> {i:>4} loss -- > ", avg_loss)
            torch.save(model.state_dict(), f'./models/fish_model_{i:04d}.pth') # save the model
elif i == max_epoch - 1:
print(f"epoch -> {i:>4}(final) loss -- > ", avg_loss)
            torch.save(model.state_dict(), './models/fish_model_last.pth') # save the model
train_loss_dict[i] = avg_loss
optimizer.step()
# [Evaluation]
def test_model(model, criterion, saved_model, x_test, y_test):
    model.eval() # evaluation mode
    model.load_state_dict(torch.load(saved_model)) # load the saved model
test_dataset = CustomDataset(data=x_test, label=y_test)
test_loader = DataLoader(test_dataset, batch_size=1, shuffle=False)
score = 0
total_loss = 0
for x, y in test_loader:
x = x.float().to(device)
y = y.long().to(device)
outputs = model(x)
loss = criterion(outputs, y)
total_loss += loss.item()
y = y.numpy()
        top = torch.argmax(outputs, dim=1) # index of the highest value
# print(top, y)
# print(top_idx)
for _y, t in zip(y, top):
if _y == t:
score += 1
accuracy = score / len(x_test)
avg_loss = total_loss / len(x_test)
return accuracy, avg_loss
test_accu_dict = {}
test_loss_dict = {}
for saved_model in os.listdir('./models'):
result = test_model(model, criterion, f'./models/{saved_model}', x_test, y_test)
print(saved_model, f'accuracy {result[0] * 100:>.3f}%, avg loss {result[1]:>.6f}')
# fish_model_last.pth
i = saved_model[11:15]
if i == 'last':
i = max_epoch - 1
else:
i = int(i)
test_accu_dict[i] = result[0]
test_loss_dict[i] = result[1]
# exit()
# report results
train_loss_x, train_loss_y = list(train_loss_dict.keys()), list(train_loss_dict.values())
test_loss_x, test_loss_y = list(test_loss_dict.keys()), list(test_loss_dict.values())
test_accu_x, test_accu_y = list(test_accu_dict.keys()), list(test_accu_dict.values())
test_accu_y = [y * 100 for y in test_accu_y]
# max accu
accu_max = -1
epoch_at_accu_max = []
for ep, ac in zip(test_accu_x, test_accu_y):
if ac > accu_max:
accu_max = ac
epoch_at_accu_max = [ep]
elif ac == accu_max:
epoch_at_accu_max.append(ep)
print('----')
print(f'max accuracy: {accu_max}% @ Epoch{"" if len(epoch_at_accu_max) == 1 else "s"} {epoch_at_accu_max}')
# plot
fig, ax1 = plt.subplots()
fig.suptitle(f'{max_epoch} epochs, {train_batch_size} batch-size, {accu_max}% max accuracy')
ax2 = ax1.twinx()
ax1.set_xlabel('epoch')
ax1.set_ylabel('loss', color='k')
ax1.plot(train_loss_x, train_loss_y, 'g-', linewidth=2, label='Train loss')
ax1.plot(test_loss_x, test_loss_y, 'r-', linewidth=2, label='Test loss')
ax2.set_ylim(-5, accu_max + 5)
ax2.set_ylabel('accuracy (%)', color='b')
ax2.plot(test_accu_x, test_accu_y, 'b--', linewidth=1, label='Test accuracy(%)')
for ep in epoch_at_accu_max:
ax2.scatter(ep, accu_max, marker='*', color='gold')
# ax2.text(ep + 5, accu_max, ep, fontsize=10)
ax1.legend(loc='best')
plt.show()
|
from django.contrib import admin
import xadmin
from .models import CustomSelection,CustomBacktest
# Register your models here.
@xadmin.sites.register(CustomSelection)
class CustomSelectionAdmin(object):
pass
@xadmin.sites.register(CustomBacktest)
class CustomBacktestAdmin(object):
pass
|
# -*- coding: utf-8 -*-
REQUIRED_INPUT_INFO = {
"target": "Target temperature mode value.",
"logs": [
{
"file": "*** SEND A LOG FILE AS BASE64 STRING ***",
"sensors_count": "How many sensors to parse from the file.",
}
],
"sensors_total": "How many sensors to use in test logic.",
"slice_length": "The number of iterations to use in test logic.",
"round_to": "All values will be rounded to the given number of digits.",
"cp": "Control point values as a list.",
"md": "Measurement device values as a list.",
"max_deviation": "Maximum allowed deviation to pass the test."
}
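# Hedged illustration (assumption, not part of the original module): a
# hypothetical payload matching the structure documented above; every concrete
# value below is invented.
# EXAMPLE_INPUT = {
#     "target": 60,
#     "logs": [{"file": "<base64-encoded log file>", "sensors_count": 4}],
#     "sensors_total": 4,
#     "slice_length": 10,
#     "round_to": 2,
#     "cp": [59.8, 60.1, 60.0],
#     "md": [59.9, 60.2, 60.1],
#     "max_deviation": 0.5,
# }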
|
from spotibot.base.auth import OAuth
|
def vowel_shift(text, n):
if not text:
return text
non_vowels = []
only_vowels = []
vowels = set('aeiouAEIOU')
for a in text:
if a in vowels:
only_vowels.append(a)
non_vowels.append('{}')
else:
non_vowels.append(a)
if not only_vowels:
return text
mod = n % len(only_vowels)
return ''.join(non_vowels).format(*only_vowels[-mod:] + only_vowels[:-mod])
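if __name__ == "__main__":
    # Hedged usage sketch; the expected outputs were traced against the
    # implementation above.
    print(vowel_shift("This is a test!", 1))  # -> "Thes is i tast!"
    print(vowel_shift("This is a test!", 0))  # -> "This is a test!" (n=0 leaves the text unchanged)
    print(vowel_shift("", 3))                 # -> "" (empty input is returned as-is)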
|
# ==============================================================================
# Copyright (c) 2019, Deutsches HörZentrum Hannover, Medizinische Hochschule Hannover
# Author: , Waldo Nogueira (NogueiraVazquez.Waldo@mh-hannover.de), Hanna Dolhopiatenko (Dolhopiatenko.Hanna@mh-hannover.de)
# All rights reserved.
# ==============================================================================
'''This code implements the decision architecture. Only the lagged EEG is used as input, without speech envelopes.'''
import tensorflow as tf
import numpy as np
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout, Flatten,BatchNormalization
from tensorflow.keras.callbacks import EarlyStopping,ModelCheckpoint
import scipy.io as io
import random
from LoadData2 import loadData2
from tensorflow.keras import optimizers
from math import floor
from math import ceil
'''Create model'''
def createModel():
model = Sequential()
model.add(BatchNormalization(axis=-1, momentum=0.99, epsilon=0.001))
model.add(Dense(n_hidden**(5), input_shape=(Window10s, (numChans+2)*2), activation='relu', use_bias=True))
model.add(Dropout(dropout))
model.add(BatchNormalization(axis=-1, momentum=0.99, epsilon=0.001))
model.add(Dense(n_hidden**(4), activation='relu', use_bias=True))
model.add(Dropout(dropout))
model.add(Dense(2, activation='tanh'))
model.compile(loss='mean_squared_error', optimizer='adam')
return model
'''Define necessary parameters'''
fs=64
n_hidden=2
dropout=0.25
Window10s=640
numBlocks=288 #EEG Signal has 288 blocks, 10s each
'''############## MAIN CODE ################'''
for Subject in range(1,2):
workingDir='E:/HomeOffice/CodeforGitHub' #Provide your own working path here
'''Load Dataset'''
a, b, chans, transfer1=loadData2(Subject)
numChans=len(chans)
numSides=2
    transfer2 = io.loadmat(workingDir+'/wav.mat')
LAGS15 = [250]
eegData=np.zeros((numBlocks,Window10s,numChans+2),dtype=np.float32)
targetAudio=np.zeros((numBlocks,Window10s,numSides),dtype=np.float32)
eegDataTMP=np.zeros((Window10s,numChans),dtype=np.float32)
eegDataTMP2=np.zeros((Window10s,numChans+1),dtype=np.float32)
envTMPA = np.zeros((Window10s,1),dtype=np.float32)
envTMPU = np.zeros((Window10s,1),dtype=np.float32)
'''Split Dataset in 288 blocks'''
for block in range(numBlocks):
eegDataTMP=transfer1['eeg'][block*Window10s:(block+1)*Window10s,:]
blockES = int(np.floor(block/6))
if a[blockES]==1:
envTMPA = transfer2["EnvA"][block*Window10s:(block+1)*Window10s,0]
envTMPU = transfer2["EnvU"][block*Window10s:(block+1)*Window10s,0]
else:
envTMPA = transfer2["EnvU"][block*Window10s:(block+1)*Window10s,0]
envTMPU = transfer2["EnvA"][block*Window10s:(block+1)*Window10s,0]
eegDataTMP2 = np.concatenate((eegDataTMP,envTMPA[:,None]),axis=1)
eegData[block,:,:] = np.concatenate((eegDataTMP2,envTMPU[:,None]),axis=1)
targetAudio[block,:,0]=np.ones((Window10s),dtype=np.float32)
targetAudio[block,:,1]=np.zeros((Window10s ),dtype=np.float32)
''' Choose random blocks for Training/Validation/Testing'''
leaveBlocks= random.sample(range(287), 144)
leaveValidat=leaveBlocks[:72]
leaveTest=leaveBlocks[72:]
'''Training Dataset'''
trainingDataEEG=np.zeros(((numBlocks-2),Window10s,numChans+2))
trainingDataAudioA=np.zeros(((numBlocks-2),Window10s,1))
trainingDataAudioU=np.zeros(((numBlocks-2),Window10s,1))
trainingDataEEGlagged=np.zeros(((numBlocks-2),Window10s,numChans+2))
'''Validation Set'''
develDataEEG=np.zeros(((numBlocks-287),Window10s,numChans+2))
develDataAudioA=np.zeros(((numBlocks-287),Window10s,1))
develDataAudioU=np.zeros(((numBlocks-287),Window10s,1))
'''Testing Set'''
testDataEEG=np.zeros(((numBlocks-287),Window10s,numChans+2))
testDataAudioA=np.zeros(((numBlocks-287),Window10s,1))
testDataAudioU=np.zeros(((numBlocks-287),Window10s,1))
StartLagTrain=np.zeros(((numBlocks-2),Window10s,numChans+2))
EndLagTrain=np.zeros(((numBlocks-2),Window10s,numChans+2))
StartLagDevel=np.zeros(((numBlocks-287),Window10s,numChans+2))
EndLagDevel=np.zeros(((numBlocks-287),Window10s,numChans+2))
StartLagTest=np.zeros(((numBlocks-287),Window10s,numChans+2))
EndLagTest=np.zeros(((numBlocks-287),Window10s,numChans+2))
results1=np.zeros((5))*np.nan
predicted = np.zeros((len(LAGS15),numBlocks, 2))
tested = np.zeros((len(LAGS15),numBlocks, 2))
lags_length=len(LAGS15)
for end_lagi in range(len(LAGS15)):
print(end_lagi)
end_lag=LAGS15[end_lagi]
start_lag=end_lag-15
start=start_lag
fin=end_lag
start=floor(start/1e3*fs)
fin=ceil(fin/1e3*fs)
for blockCV in range(len(leaveValidat)):
leaveValidat11=leaveValidat[blockCV]
leaveTest11=leaveTest[blockCV]
i=0
for block in range(numBlocks):
if leaveValidat11==block or leaveTest11==block:
continue
trainingDataEEG[i,:,:]=eegData[block,:,:]
blockE = int(np.floor(block/6))
trainingDataAudioA[i,:,0]=targetAudio[block,:,b[blockE]]
trainingDataAudioU[i,:,0]=targetAudio[block,:,a[blockE]]
i+=1
k=0
develDataEEG[:,:,:]=eegData[leaveValidat11,:,:]
blockV = int(np.floor(leaveValidat11/6))
develDataAudioA[:,:,0]=targetAudio[leaveValidat11,:,b[blockV]]
develDataAudioU[:,:,0]=targetAudio[leaveValidat11,:,a[blockV]]
testDataEEG[:,:,:]=eegData[leaveTest11,:,:]
blockT = int(np.floor(leaveTest11/6))
testDataAudioA[:,:,0]=targetAudio[leaveTest11,:,b[blockT]]
testDataAudioU[:,:,0]=targetAudio[leaveTest11,:,a[blockT]]
'''Lag EEG Signal'''
StartLagDevel[k,:,:]= np.pad(develDataEEG[k,:,:], ((0, start), (0, 0)), mode='constant')[start:, :]
EndLagDevel[k,:,:]=np.pad(develDataEEG[k,:,:], ((0, fin), (0, 0)), mode='constant')[fin:, :]
DevelDataEEGLagged=np.concatenate([StartLagDevel, EndLagDevel], axis=2)
StartLagTest[k,:,:]= np.pad(testDataEEG[k,:,:], ((0, start), (0, 0)), mode='constant')[start:, :]
EndLagTest[k,:,:]=np.pad(testDataEEG[k,:,:], ((0, fin), (0, 0)), mode='constant')[fin:, :]
TestDataEEGLagged=np.concatenate([StartLagTest, EndLagTest], axis=2)
for block in range(numBlocks-2):
StartLagTrain[block,:,:] = np.pad(trainingDataEEG[block,:,:], ((0, start), (0, 0)), mode='constant')[start:, :]
EndLagTrain[block,:,:] = np.pad(trainingDataEEG[block,:,:], ((0, fin), (0, 0)), mode='constant')[fin:, :]
TrainingDataEEGLagged=np.concatenate([StartLagTrain, EndLagTrain], axis=2)
'''Create and fit model'''
Model=createModel()
tempModelName=workingDir+'/RevSingle.hdf5'
checkLow = ModelCheckpoint(filepath=tempModelName, verbose=0, save_best_only=True,mode='min',monitor='val_loss')
early = EarlyStopping(monitor='val_loss',patience=10, mode='min')
trainingDataAudio = np.concatenate((trainingDataAudioA[:,:,:],trainingDataAudioU[:,:,:]),axis=2)
develDataAudio = np.concatenate((develDataAudioA[:,:,:],develDataAudioU[:,:,:]),axis=2)
testDataAudio = np.concatenate((testDataAudioA[:,:,:],testDataAudioU[:,:,:]),axis=2)
Model.fit(TrainingDataEEGLagged[:,:,:],trainingDataAudio[:,:,:],batch_size=2,epochs=150,verbose=1,callbacks=[early,checkLow],validation_data=(DevelDataEEGLagged[:,:,:],develDataAudio[:,:,:]))
Model.load_weights(tempModelName)
'''Prediction'''
predictionA=Model.predict(TestDataEEGLagged[:,:,:])
predicted[end_lagi,blockCV,:] = np.mean(predictionA,axis=1)
tested[end_lagi,blockCV,:] = np.mean(testDataAudio,axis=1)
    io.savemat(workingDir+'/Results/RevMulti_'+str(Subject)+'.mat',{'predicted'+str(Subject):predicted, 'Growntrouth'+str(Subject):tested})
|
#!/usr/bin/python
# coding: utf-8
from Aparelho import *
class SOM(Aparelho):
pass
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('civ_5_tracker', '0005_auto_20150205_1128'),
]
operations = [
migrations.AlterModelOptions(
name='game',
options={'ordering': ['finished_date', 'begun_date'], 'get_latest_by': 'finished_date'},
),
migrations.AddField(
model_name='game',
name='speed',
field=models.CharField(max_length=32, null=True, blank=True),
preserve_default=True,
),
]
|
""" Example program using IBM_DB_DBI against Db2"""
# import os
import sys
import getpass
import platform
import ibm_db_dbi
from db2_helpers import db_load_settings
# --------------------------------------------------
# Database Connection Settings
# --------------------------------------------------
database = "sample"
hostname = "localhost"
environment = "dev"
# Load saved connection settings (set by db_credentials command)
settings = db_load_settings(database, hostname)
if not settings:
print("You need to create the database connection settings with db_credentials")
sys.exit(1)
connect_string = "ATTACH=FALSE;"
# connect_string += "PROTOCOL=TCPIP;PORT=" + str(port) + ";"
connect_options = {"SQL_ATTR_INFO_PROGRAMNAME": "JHMTESTHELPERS", # 20 char max
"SQL_ATTR_INFO_USERID": getpass.getuser(), # 255 char max
"SQL_ATTR_INFO_WRKSTNNAME": platform.node() # 255 char max
}
# --------------------------------------------------
hdbi = None # Connection Object
# --------------------------------------------------
try:
hdbi = ibm_db_dbi.connect(connect_string,
host=settings["hostname"],
database=settings["database"],
user=settings["uid"],
password=settings["pwd"],
conn_options=connect_options)
except ibm_db_dbi.Warning as warn:
print("Connection warning:", warn)
except ibm_db_dbi.Error as err:
print("connection error:", err)
sys.exit(1)
if hdbi:
print("connected")
# --------------------------------------------------
# Query 1
# --------------------------------------------------
print("\nQuery1 begin")
my_sql = """select distinct tabschema, tabname
from syscat.tables
where tabschema = 'DB2INST1';
"""
my_cursor = hdbi.cursor()
my_tables = None
try:
my_cursor.execute(my_sql)
except Exception as err:
print("Error on Execute", err)
try:
my_tables = my_cursor.fetchall()
except Exception as err:
print("Error on Fetch", err)
for (tabschema, tablename) in my_tables:
print(tabschema, tablename)
# --------------------------------------------------
# Query 2
# --------------------------------------------------
print("\nQuery2 begin")
my_sql = """select distinct tabschema, tabname
from syscat.tables
where tabschema = ?
and type = ?;
"""
my_params = ("DB2INST1", "T")
my_cursor.execute(my_sql, my_params)
print("Cursor column descriptions")
for column_variable in my_cursor.description:
print(column_variable)
column_name1 = my_cursor.description[0][0]
column_name2 = my_cursor.description[1][0]
my_tables = my_cursor.fetchall()
if my_tables:
print("\n")
print(column_name1, column_name2)
for (tabschema, tablename) in my_tables:
print(tabschema, tablename)
if my_cursor:
my_cursor.close()
# --------------------------------------------------
# Clean up
# --------------------------------------------------
if hdbi:
if hdbi.close():
print("disconnected")
print("done")
|
import paho.mqtt.client as mqtt
import paho.mqtt.publish as publish
import time
BROKER_IP = "192.168.178.61" # this is my local mqtt broker
BROKER_PORT = 1883 # standard mqtt broker port
BROKER_TOPIC = "Games/Pong"
CLIENT_ID = int(time.time()*1000) # use time as id
# The callback for when the client receives a CONNACK response from the server.
def on_connect(client, userdata, flags, rc):
if str(rc) == "0":
print("Connected with broker")
else:
print("Connected with result code "+str(rc))
# Subscribing in on_connect() means that if we lose the connection and
# reconnect then subscriptions will be renewed.
client.subscribe(BROKER_TOPIC)
# The callback for when a PUBLISH message is received from the server.
def on_message(client, userdata, msg):
print(str(msg.payload))
client = mqtt.Client()
client.on_connect = on_connect
client.on_message = on_message
client.connect(BROKER_IP, BROKER_PORT, 60)
# i need a state machine inside the pygame code
# first state is establishing connection which is done by sending
publish.single(BROKER_TOPIC, payload=f"client online {CLIENT_ID}", hostname=BROKER_IP, port=BROKER_PORT)
# second state is waiting for response, either rejected, when server is full, or accepted
# with player number 1 or 2
# third state is waiting for second player (if first)
# forth state is game
# fifth state is victory or defeat screen
# then the program exits
while True:
client.loop()
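# Hedged sketch (assumption, not part of the original client): one way the
# state machine described above could be laid out. The state names and the
# transition hooks are invented for illustration only.
# from enum import Enum
# class GameState(Enum):
#     CONNECTING = 1   # announce "client online <id>" and wait for the broker
#     WAIT_ACCEPT = 2  # wait for accepted/rejected plus a player number
#     WAIT_PLAYER = 3  # player 1 waits for player 2 to join
#     PLAYING = 4      # exchange paddle/ball updates over BROKER_TOPIC
#     FINISHED = 5     # show the victory/defeat screen, then exit
# state = GameState.CONNECTING
# while state != GameState.FINISHED:
#     client.loop()
#     # ...transition between states based on the messages received...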
|
import socket
# defaults are family=AF_INET, type=SOCK_STREAM
# type:
# SOCK_STREAM : TCP
# SOCK_DGRAM : UDP
# family
# family=AF_INET : communication between hosts over a network
# family=AF_UNIX : communication between different processes on the same unix host
# 1) create a socket with the default parameters
# ss = socket.socket()
#
# # 2) bind an ip address and port to the socket
#
# address=('127.0.0.1',8000)
# ss.bind(address)
#
# # 3) set how many queued client connections the listening socket accepts
# ss.listen(3) # size of the pending-connection backlog
#
# # 4) wait for and accept the client socket (cs)
#
# print('waiting for connection...')
# cs,addr = ss.accept() # the client's socket
# print(ss)
# print(cs) # (<socket.socket fd=728, family=AddressFamily.AF_INET, type=SocketKind.SOCK_STREAM, proto=0, laddr=('127.0.0.1', 8000), raddr=('127.0.0.1', 57831)>, ('127.0.0.1', 57831))
#
# # server send
# inp = input('>>>')
# cs.send(bytes(inp,'utf8'))
# server receive
# data = conn.recv(1024)
#
# print(data)
#
# ss = socket.socket()
# address = ('127.0.0.1',8000)
# ss.bind(address)
# ss.listen(2)
# print('waitting for connection...')
#
# while True:
# cs, addr = ss.accept()
#
# while True:
# try:
# data = cs.recv(1024)
# except Exception:
# break
# if not data: break
# print('......',str(data,'utf8'))
# inp = input('>>>')
# if inp == 'q': break
# cs.send(bytes(inp,'utf8'))
#
# ss.close()
# import subprocess
# while True:
# inp = input('>>>')
# cmd_res = subprocess.Popen(inp,shell=True,stdout=subprocess.PIPE,stderr=subprocess.PIPE)
#
#
# err = cmd_res.stderr.read()
#
# out = cmd_res.stdout.read()
#
# print('err:',err.decode('gbk'))
# print(len(err))
#
# print(out.decode('gbk'))
from socket import *
sk = socket()
addr = ('127.0.0.1',8090)
sk.connect(addr)
data1 = sk.recv(1024)
print(data1.decode('utf8'))
data2 = sk.recv(1024)
print(data2.decode('utf8'))
sk.close()
|
from ctypes import *
from numpy.ctypeslib import ndpointer
import numpy as np
############### lib definitions ###############
mysofa_lib = cdll.LoadLibrary("libmysofa.so")
mysofa_open = mysofa_lib.mysofa_open
mysofa_open.restype = c_void_p
mysofa_open.argtypes = [c_char_p, c_float, POINTER(c_int), POINTER(c_int)]
mysofa_close = mysofa_lib.mysofa_close
mysofa_close.argtypes = [c_void_p]
mysofa_getfilter_float = mysofa_lib.mysofa_getfilter_float
mysofa_getfilter_float.argtypes = [c_void_p, c_float, c_float, c_float,
ndpointer(c_float, flags="C_CONTIGUOUS"),
ndpointer(c_float, flags="C_CONTIGUOUS"),
POINTER(c_float), POINTER(c_float)]
############### lib definitions ###############
class MySofa:
def __init__(self, filename, rate):
filter_length = c_int()
err = c_int()
self._handle = mysofa_open(filename.encode(), rate, byref(filter_length), byref(err))
self.filter_length = filter_length.value
self.error = err.value
self.ir_left = np.zeros(self.filter_length, dtype=np.float32)
self.ir_right = np.zeros(self.filter_length, dtype=np.float32)
self.delay_left = 0.0
self.delay_right = 0.0
pass
def set_filter(self, x, y, z):
delay_left = c_float()
delay_right = c_float()
mysofa_getfilter_float(self._handle, x, y, z, self.ir_left, self.ir_right,
byref(delay_left), byref(delay_right))
self.delay_left = delay_left.value
self.delay_right = delay_right.value
def apply(self, inp):
left = np.convolve(inp, self.ir_left,'same')
right = np.convolve(inp, self.ir_right,'same')
return np.vstack((left, right))
def close(self):
mysofa_close(self._handle)
if __name__ == "__main__":
msof = MySofa("../share/MIT_KEMAR_normal_pinna.sofa", 16000.0)
print(msof.filter_length, msof.error)
msof.set_filter(-1.0, 0.5, 5.0)
print(msof.ir_left, msof.ir_right, msof.delay_left, msof.delay_right)
msof.close()
import matplotlib.pyplot as plt
plt.figure()
plt.plot(msof.ir_left)
plt.plot(msof.ir_right)
plt.show()
|
from .base import *
DEBUG = True
ALLOWED_HOSTS = ['*']
DEV = DEBUG
INSTALLED_APPS += ('debug_toolbar',)
{% if cookiecutter.postgres == "y" or cookiecutter.postgres == "Y" %}
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': '',
'USER': '',
'PASSWORD': '',
'HOST': '',
'PORT': '',
}
}
{% else %}
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': '{{ cookiecutter.project_name }}.db',
}
}
{% endif %}
MIDDLEWARE += ('debug_toolbar.middleware.DebugToolbarMiddleware',)
SECRET_KEY = 'devel'
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
SITE_ID = 2
AUTH_PASSWORD_VALIDATORS = []
INTERNAL_IPS = ("127.0.0.1",)
{% if cookiecutter.api == "y" or cookiecutter.api == "Y" %}
CORS_ORIGIN_ALLOW_ALL = True
{% endif %}
|
import os
import flask
import json
from flask import Flask, _app_ctx_stack, render_template, request, jsonify, Response
from flask_cors import CORS, cross_origin
from flask_mail import Mail
from whitenoise import WhiteNoise
import jwt
from functools import wraps, update_wrapper
from sqlalchemy.orm import scoped_session
from app.database import SessionLocal, engine, Base
from app.models import Users
from config import Config, DevConfig, ProductionConfig
# Create database structure
#Base.metadata.create_all(bind=engine)
FILE_FOLDER = 'static/'
APP_ROOT = os.path.dirname(os.path.abspath(__file__))
ALLOWED_EXTENSIONS = set(['png', 'jpg', 'jpeg'])
print(APP_ROOT)
app = Flask(__name__)
CORS(app, supports_credentials=True)
# add serving static files
app.wsgi_app = WhiteNoise(app.wsgi_app, root='static/')
app.session = scoped_session(SessionLocal, scopefunc=_app_ctx_stack.__ident_func__)
app.secret_key = os.environ.get("FN_FLASK_SECRET_KEY", default=False)
#if os.environ['FLASK_DEV'] == True:
print("Dev env")
app.config.from_object(DevConfig)
'''
else:
print("Prod env")
app.config.from_object(ProductionConfig)
'''
# print(app.config['UPLOAD_FOLDER'])
# print(app.config['MAIL_SERVER'])
print(app.config['REMOTE'])
mail = Mail(app)
# Global methods and classes
# Error handlers
class CustomError(Exception):
pass
# Error handlers
class ValidationError(Exception):
pass
# Error handlers
class ForbiddenError(Exception):
pass
def get_current_user(f):
@wraps(f)
def decorator(*args, **kwargs):
print('get_current_user')
if 'Authorization' in request.headers and len(request.headers['Authorization'].split(' ')) > 1:
token = request.headers['Authorization'].split(' ')[1]
print('token: ', token)
try:
data = jwt.decode(token, app.config['SECRET_KEY'], algorithms='HS256')
user = data['user']
print(data)
print(user)
current_user = app.session.query(Users) \
.filter(Users.id == user['id']) \
.filter(Users.email==user['email']).first()
print(current_user)
except Exception as e:
print('Error when decoding token: ', str(e))
raise CustomError({'message': 'Error when verifying token: token is invalid\n'})
else:
current_user = None
return f(current_user, *args, **kwargs)
return decorator
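# Hedged usage sketch (illustration only; the route and view name are
# hypothetical). The decorator passes the resolved user (or None) as the
# first argument of the wrapped view:
# @app.route('/api/profile')
# @get_current_user
# def profile(current_user):
#     if current_user is None:
#         return jsonify({'message': 'not authenticated'}), 401
#     return jsonify({'id': current_user.id, 'email': current_user.email})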
def no_cache(view):
@wraps(view)
def no_cache_impl(*args, **kwargs):
response = flask.make_response(view(*args, **kwargs))
response.headers['Cache-Control'] = 'no-store, no-cache, must-revalidate, max-age=0'
response.headers['Pragma'] = 'no-cache'
response.headers['Expires'] = '-1'
return response
return update_wrapper(no_cache_impl, view)
def token_included(f):
@wraps(f)
def decorator(*args, **kwargs):
print('token_included')
token = None
if 'Authorization' in request.headers:
token = request.headers['Authorization'].split(' ')[1]
print('token: ', token)
        if not token:
            current_user = None
        # NOTE (assumption): the original decorator ended here without
        # returning anything; this minimal completion simply invokes the
        # wrapped view. Token decoding/verification is handled by
        # get_current_user above when a user object is needed.
        return f(*args, **kwargs)
    return decorator
@app.after_request
def set_headers(response):
response.headers["Referrer-Policy"] = 'no-referrer'
return response
# BLUEPRINTS
from app.auth.views import auth_blueprint
from app.files.views import files_blueprint
from app.users.views import users_blueprint
from app.products.views import products_blueprint
from app.analytics.views import analytics_blueprint
app.register_blueprint(auth_blueprint, url_prefix='/api')
app.register_blueprint(files_blueprint, url_prefix='/api')
app.register_blueprint(users_blueprint, url_prefix='/api')
app.register_blueprint(products_blueprint, url_prefix='/api')
app.register_blueprint(analytics_blueprint, url_prefix='/api')
# from app import views
|
from __future__ import print_function, division
import pygame
import OpenGL.GL as gl
import numpy as np
import itertools
import neurodot_present as npr
class TrueBrightnessPatch:
def __init__(self,
width, # OpenGL units
height, # OpenGL units
pix_w, # OpenGL units
pix_h, # OpenGL units
bright_linewidth = 1, # pixels
dark_linewidth = 1, # pixels
color_channel = 'RGB'
):
self.width = width
self.height = height
self.pix_h = pix_h
self.pix_w = pix_w
# get linewidths in OpenGL's coordinates
self.bright_linewidth = bright_linewidth * pix_h
self.dark_linewidth = dark_linewidth * pix_h
# get bright colors for different color channels
if color_channel == 'RGB':
self.bright_color = (1.0, 1.0, 1.0)
if color_channel == 'R':
self.bright_color = (1.0, 0.0, 0.0)
if color_channel == 'G':
self.bright_color = (0.0, 1.0, 0.0)
if color_channel == 'B':
self.bright_color = (0.0, 0.0, 1.0)
self.dark_color = (0.0, 0.0, 0.0)
# get number of dark or bright lines that must be displayed
self.num_lines = int(height / ((dark_linewidth + bright_linewidth) * pix_h))
# initialize display_list_index as None so render() will create a display list on its first call
self.display_list_index = None
def render(self):
# make or render display list
if self.display_list_index is None:
self.display_list_index = gl.glGenLists(1)
gl.glNewList(self.display_list_index, gl.GL_COMPILE)
gl.glDisable(gl.GL_LIGHTING)
# render lines
for i in range(0, self.num_lines + 1):
ypos = i * (self.bright_linewidth + self.dark_linewidth)
# render bright line
gl.glColor3f(*self.bright_color)
gl.glRectf(0, ypos, self.width, ypos + self.bright_linewidth)
# render dark line
gl.glColor3f(*self.dark_color)
gl.glRectf(0, ypos + self.bright_linewidth, self.width, ypos + self.bright_linewidth + self.dark_linewidth)
gl.glEnable(gl.GL_LIGHTING)
gl.glEndList()
gl.glCallList(self.display_list_index)
else:
gl.glCallList(self.display_list_index)
class TestPatch:
def __init__(self,
width, # OpenGL units
height, # OpenGL units
):
self.width = width
self.height = height
self.color = (0.0, 0.0, 0.0) # will be set when GammaUtility.update() is called
def render(self):
gl.glDisable(gl.GL_LIGHTING)
gl.glColor3f(*self.color)
gl.glRectf(0, 0, self.width, self.height)
gl.glEnable(gl.GL_LIGHTING)
class GammaUtility(npr.Screen):
def setup(self,
bot_left,
top_right,
bright_linewidth = 1,
dark_linewidth = 1,
background_color = 'black',
color_bits = 8,
color_channel = 'RGB',
print_output = True
):
npr.Screen.setup(self, background_color = background_color)
self.bot_left = bot_left
self.top_right = top_right
self.color_channel = color_channel
self.print_output = print_output
# get pixel widths and heights in OpenGL coordinates
self.pix_h = (2.0 * self.screen_top) / self.screen_height
self.pix_w = (2.0 * self.screen_right) / self.screen_width
# get width and height of ref+test patch
self.width = top_right[0] - bot_left[0]
self.height = top_right[1] - bot_left[1]
# get increment quantities for changing test patch color
self.color_bits = color_bits
self.color_levels = [level for level in np.linspace(1.0, 0.0, 2**color_bits)]
self.color_index = 0
self.test_color_current = self.color_levels[self.color_index]
# increment quantity for use with mouse
self.mouse_increment_cycle = itertools.cycle((1,int(2**self.color_bits / 32)))
self.mouse_increment = self.mouse_increment_cycle.next()
self.standard_patch = TrueBrightnessPatch(width = self.width/2.0,
height = self.height,
pix_w = self.pix_w,
pix_h = self.pix_h,
bright_linewidth = bright_linewidth,
dark_linewidth = dark_linewidth,
color_channel = color_channel
)
self.test_patch = TestPatch(width = self.width/2.0,
height = self.height,
)
def update(self, t, dt):
self.ready_to_render = True # ensure things get rendered
# get next color
if self.color_index > 2**self.color_bits - 1:
# ensure index doesn't go above bounds
self.color_index = 2**self.color_bits - 1
elif self.color_index < 0:
# ensure index doesn't go below bounds
self.color_index = 0
else:
# update color if index is within bounds
self.test_color_current = self.color_levels[self.color_index]
# modify test patch's appropriate RGB values
if self.color_channel == 'RGB':
self.test_patch.color = (self.test_color_current, self.test_color_current, self.test_color_current)
if self.color_channel == 'R':
self.test_patch.color = (self.test_color_current, 0.0, 0.0)
if self.color_channel == 'G':
self.test_patch.color = (0.0, self.test_color_current, 0.0)
if self.color_channel == 'B':
self.test_patch.color = (0.0, 0.0, self.test_color_current)
def render(self):
# do some general OpenGL stuff
npr.Screen.render(self)
# translate to position of reference patch and render
gl.glLoadIdentity()
gl.glTranslatef(self.bot_left[0], self.bot_left[1], 0.0)
self.standard_patch.render()
# translate to position of test patch and render
gl.glLoadIdentity()
gl.glTranslatef(self.bot_left[0] + self.width/2.0, self.bot_left[1], 0.0)
self.test_patch.render()
def pygame_handle_events(self, **kwargs):
for event in pygame.event.get():
if event.type == pygame.QUIT:
return False
# mouse cases
if event.type == pygame.MOUSEBUTTONDOWN:
if event.button == 1:
# decrease brightness
self.color_index += self.mouse_increment
if event.button == 2:
                    # change coarseness of the increment
self.mouse_increment = self.mouse_increment_cycle.next()
if event.button == 3:
# increase brightness
self.color_index -= self.mouse_increment
elif event.type == pygame.KEYDOWN:
if event.key == pygame.K_ESCAPE:
#self.print_data() # could delete this so that info only printed on enter
raise npr.UserEscape
# print info if enter pressed
if event.key == pygame.K_RETURN:
self.print_data()
return False
# down arrow cases (decreasing brightness)
if event.key == pygame.K_DOWN:
mods = pygame.key.get_mods()
if mods & pygame.KMOD_SHIFT and mods & pygame.KMOD_CTRL:
                        # change very coarsely (by an eighth of the intensity range)
self.color_index += int(2**self.color_bits / 8)
elif mods & pygame.KMOD_CTRL:
                        # change coarsely (by a 32nd of the intensity range)
self.color_index += int(2**self.color_bits / 32)
else:
self.color_index += 1
# up arrow cases (increasing brightness)
if event.key == pygame.K_UP:
mods = pygame.key.get_mods()
if mods & pygame.KMOD_SHIFT and mods & pygame.KMOD_CTRL:
                        # change very coarsely (by an eighth of the intensity range)
self.color_index -= int(2**self.color_bits / 8)
elif mods & pygame.KMOD_CTRL:
                        # change coarsely (by a 32nd of the intensity range)
self.color_index -= int(2**self.color_bits / 32)
else:
self.color_index -= 1
# Possible joystick actions: JOYAXISMOTION JOYBALLMOTION JOYBUTTONDOWN JOYBUTTONUP JOYHATMOTION
elif event.type == pygame.JOYBUTTONDOWN:
print("Joystick button pressed: %r" % event.button)
if event.button == 6 or event.button == 0:
# lower brightness (fine)
self.color_index += 1
elif event.button == 4:
# lower brightness (coarse)
self.color_index += int(2**self.color_bits / 32)
elif event.button == 7:
# increase brightness (fine)
self.color_index -= 1
elif event.button == 5:
# increase brightness (coarse)
self.color_index -= int(2**self.color_bits / 32)
elif event.button == 1:
self.print_data()
return False
return True
def run(self, **kwargs):
        # loop rate is set very high so the screen effectively runs as fast as python can loop
npr.Screen.run(self, display_loop_rate = 10000, **kwargs)
def print_data(self):
if self.print_output:
print('Final [0,1] intensity:', self.test_color_current)
print('Final', self.color_bits, 'bit intensity:', 2**self.color_bits - self.color_index)
################################################################################
"""
Ed Eskew
2016-08-03
Directions:
With the screen far enough away that the lines on the left patch are indistinguishable, use the Up/Down arrow keys to
adjust the brightness of the right patch until it appears equal to the left. Use the CTRL modifier for a more coarse
adjustment, and Shift+CTRL for very coarse. Press "Return" to record the current intensity of the test patch and
advance to the next reference patch. Press "Escape" to exit the program without doing anything.
"""
if __name__ == '__main__':
import sys
import os
import shelve
import matplotlib.pyplot as plt
from scipy import interpolate
bot_left = (-0.50, -0.25)
top_right = (0.50, 0.25)
color_channel = 'RGB' # can be RGB or R, G, or B alone
brightness_ratios = [(1,1), (1,2), (2,1), (1,3), (3,1), (1,4), (4,1), (1,5), (5,1), (2,3), (3,2), (10,1), (1,10)] # (bright, dark)
#monitor_name = 'mbpro_retina'
#brightness_ratios = [(1,6),(1,5),(1,4),(1,3),(1,2),(1,1),(2,1),(3,1)] # (bright, dark)[(3,2),(3,1),(4,1),(5,1),(6,1)]#
monitor_name = 'benq-gamer1'
gammaUtility = GammaUtility.with_pygame_display( use_joysticks = True,
debug = False,
)
display_output = True
true_inputs = []
for brightness in brightness_ratios:
gammaUtility.setup(bot_left = bot_left,
top_right = top_right,
bright_linewidth = brightness[0],
dark_linewidth = brightness[1],
background_color = 'black',
color_bits = 8,
color_channel = color_channel,
print_output = False,
)
try:
gammaUtility.run(duration = None)
true_inputs.append(gammaUtility.test_color_current)
except npr.UserEscape:
display_output = False
break
pygame.quit()
# ref_values = [vals[0] / sum(vals) for vals in brightness_ratios]
# fig = plt.figure(1)
# ax1 = fig.add_subplot(111)
# ax1.scatter(ref_values, [1 for val in ref_values])
# plt.show()
if display_output:
# get intensity values for each reference ratio
ref_values = [vals[0] / sum(vals) for vals in brightness_ratios]
# hardcoded example values so I don't have to do the experiment every time
# true_inputs = [0.74117647058823533, 0.62352941176470589, 0.83529411764705885, 0.5490196078431373, 0.8784313725490196, 0.49803921568627452, 0.90588235294117647, 0.46274509803921571, 0.92156862745098045, 0.67450980392156867, 0.80000000000000004, 0.95686274509803926, 0.36078431372549025]
# append 0 and 1 values
ref_values.append(0)
ref_values.append(1)
true_inputs.append(0)
true_inputs.append(1)
# get cubic spline function for this info
x_range = np.linspace(0, 1, 100)
inv_gam_func = interpolate.interp1d(ref_values,
true_inputs,
kind = 'cubic',
)
# gam_func = interpolate.interp1d(inputs, gamma_values, kind = 'cubic')
interp_vals = [inv_gam_func(x) for x in x_range]
# check if calibrations folder exists, make it if not
home = os.path.expanduser('~')
npPath = os.path.sep.join((home, '.neurodot_present'))
calPath = os.path.sep.join((home,'.neurodot_present', 'calibrations'))
if not os.path.isdir(npPath):
os.mkdir(npPath)
if not os.path.isdir(calPath):
os.mkdir(calPath)
# shelve needed values
dbPath = os.path.sep.join((home, '.neurodot_present', 'calibrations', monitor_name))
db = shelve.open(dbPath)
db['input_intensities'] = true_inputs
db['desired_intensities'] = ref_values
db.close()
# pyplot stuff
fig = plt.figure(1)
ax1 = fig.add_subplot(111)
ax1.scatter(ref_values, true_inputs)
ax1.plot(x_range, interp_vals)
ax1.grid(True)
ax1.set_xlabel('Desired Brightness')
ax1.set_ylabel('Input Intensity')
ax1.set_title('Inverse Gamma Function')
ax1.set_xlim(0,1)
ax1.set_ylim(0,1)
plt.show()
print('Desired intensities:')
print(ref_values)
print('Necessary input intensities:')
print(true_inputs)
sys.exit()
|
import sys
import nltk
import sklearn
import pandas
import numpy
# for checking the versions
print('Python: {}'.format(sys.version))
print('NLTK: {}'.format(nltk.__version__))
print('Scikit-learn: {}'.format(sklearn.__version__))
print('pandas: {}'.format(pandas.__version__))
print('numpy: {}'.format(numpy.__version__))
#1 load the dataset
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
df=pd.read_table('SMSSpamCollection', header = None, encoding='utf-8')
print(df.info())
print(df.head())
classes = df[0]
print(classes.value_counts())
# 2 preprocess the data 0 ham and 1 spam (Binary Classification)
from sklearn.preprocessing import LabelEncoder
encoder = LabelEncoder()
Y = encoder.fit_transform(classes)
print(classes[:10])
print(Y[:10])
text_messages = df[1]
print(text_messages[:10])
# Use regular expressions to replace email addresses, web addresses, money symbols, phone numbers, and other numbers
# email
processed = text_messages.str.replace(r'^.+@[^\.].*\.[a-z]{2,}$', 'emailaddr')
# web address
processed = processed.str.replace(r'^http\://[a-zA-Z0-9\-\.]+\.[a-zA-Z]{2,3}(/\S*)?$', 'webaddress')
# moneysymb
processed = processed.str.replace(r'£|\$', 'moneysymb')
# phonenumbr
processed = processed.str.replace(r'^\(?[\d]{3}\)?[\s-]?[\d]{3}[\s-]?[\d]{4}$', 'phonenumbr')
# number
processed = processed.str.replace(r'\d+(\.\d+)?', 'numbr')
#remove punctuation
processed = processed.str.replace(r'[^\w\d\s]', ' ')
#remove white space
processed = processed.str.replace(r'\s+', ' ')
#leading and trailing white space
processed = processed.str.replace(r'^\s+|\s+?$', '')
#changing the words to lower case
processed = processed.str.lower()
print(processed)
#remove stop words from text
from nltk.corpus import stopwords
stop_words = set(stopwords.words('english'))
processed = processed.apply(lambda x: ' '.join(term for term in x.split() if term not in stop_words))
#remove stem from text
ps = nltk.PorterStemmer()
processed = processed.apply(lambda x: ' '.join(ps.stem(term) for term in x.split()))
print(processed)
#number of words and most common words and how many times they have appeared in the text
from nltk.tokenize import word_tokenize
all_words = []
for message in processed:
words = word_tokenize(message)
for w in words:
all_words.append(w)
all_words = nltk.FreqDist(all_words)
print('number of words: {}'.format(len(all_words)))
print('Most common words: {}'.format(all_words.most_common(15)))
#use the 1500 most common words as features
word_features = list(all_words.keys())[:1500]
def find_features(message):
words = word_tokenize(message)
features = {}
for word in word_features:
features[word] = (word in words)
return features
features = find_features(processed[0])
for key, value in features.items():
if value == True:
print(key)
#find features for all messages
messages = list(zip(processed, Y))
#define a seed for reproducibility
seed = 1
np.random.seed(seed)
np.random.shuffle(messages)
#call find_features for each message
featuresets = [(find_features(text), label) for (text, label) in messages]
from sklearn import model_selection
training, testing = model_selection.train_test_split(featuresets, test_size = 0.25, random_state = seed)
print('training: {}'.format(len(training)))
print('testing: {}'.format(len(testing)))
#scikit-learn classifier with nltk
from sklearn.neighbors import KNeighborsClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LogisticRegression, SGDClassifier
from sklearn.naive_bayes import MultinomialNB
from sklearn.svm import SVC
from sklearn.metrics import classification_report, accuracy_score, confusion_matrix
#Define models to train
names = ['K Nearest neighbors', 'Decision Tree', 'Random Forest', 'Logistic Regression', 'SGD classifier', 'Naive Bayes', 'SVM Linear']
classifiers = [
KNeighborsClassifier(),
DecisionTreeClassifier(),
RandomForestClassifier(),
LogisticRegression(),
SGDClassifier(max_iter = 100),
MultinomialNB(),
SVC(kernel = 'linear')
]
models = list(zip(names, classifiers))
from nltk.classify.scikitlearn import SklearnClassifier
for name, model in models:
nltk_model = SklearnClassifier(model)
nltk_model.train(training)
accuracy = nltk.classify.accuracy(nltk_model, testing) * 100
print('{}: Accuracy: {}'.format(name, accuracy))
from sklearn.ensemble import VotingClassifier
names = ['K Nearest neighbors', 'Decision Tree', 'Random Forest', 'Logistic Regression', 'SGD classifier', 'Naive Bayes', 'SVM Linear']
classifiers = [
KNeighborsClassifier(),
DecisionTreeClassifier(),
RandomForestClassifier(),
LogisticRegression(),
SGDClassifier(max_iter = 100),
MultinomialNB(),
SVC(kernel = 'linear')
]
models = list(zip(names, classifiers))
nltk_ensemble = SklearnClassifier(VotingClassifier(estimators = models, voting = 'hard', n_jobs = -1))
nltk_ensemble.train(training)
accuracy = nltk.classify.accuracy(nltk_ensemble, testing) * 100
print('Ensemble Method Accuracy: {}'.format(accuracy))
#make class label predictions for the testing set
txt_features, labels = zip(*testing)
prediction = nltk_ensemble.classify_many(txt_features)
# print a confusion matrix and a classification report
print(classification_report(labels, prediction))
print(pd.DataFrame(
confusion_matrix(labels, prediction),
index = [['actual', 'actual'], ['ham', 'spam']],
columns = [['predicted', 'predicted'], ['ham', 'spam']]))
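# Minimal sketch (added example, not part of the original pipeline): score one already
# preprocessed message with the trained ensemble; labels follow the LabelEncoder mapping,
# 0 = ham and 1 = spam.
sample_prediction = nltk_ensemble.classify(find_features(processed[0]))
print('Sample message prediction (0=ham, 1=spam): {}'.format(sample_prediction))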
names = ['KNN', 'DT','RF','LR','SGD','NB','SVM']
acc = [94.40057430007178,97.34386216798278,98.56424982053123,98.56424982053123,98.27709978463747,98.49246231155779,98.49246231155779]
plt.figure(figsize=(8,6))
plt.subplot()
plt.bar(names, acc, width=0.8)
plt.xlabel('Classifiers')
plt.ylabel('Accuracy')
plt.suptitle('Accuracy of Models')
plt.show()
|
#pip install paho-mqtt
import paho.mqtt.publish as publish
import Adafruit_DHT
import time
import datetime
import busio
import digitalio
import board
import adafruit_mcp3xxx.mcp3008 as MCP
from adafruit_mcp3xxx.analog_in import AnalogIn
# set the channel ID of our ThingSpeak channel
channelID="1326958"
# set the API key of our channel
apiKey="QAAPTUMOAJJS7YPT"
# set the hostname of the ThingSpeak MQTT broker
mqttHost = "mqtt.thingspeak.com"
# connection configuration
# import ssl, the transport layer security module
import ssl
# specify the connection transport type
tTransport = "websockets"
# TLS protocol version and trusted CA certificates
tTLS = {'ca_certs':"/etc/ssl/certs/ca-certificates.crt",'tls_version':ssl.PROTOCOL_TLSv1}
# select the port
tPort = 443
# build the topic
topic = "channels/" + channelID + "/publish/" + apiKey
# create the spi bus
spi = busio.SPI(clock=board.SCK, MISO=board.MISO, MOSI=board.MOSI)
# create the cs (chip select)
cs = digitalio.DigitalInOut(board.D5)
# create the mcp object
mcp = MCP.MCP3008(spi, cs)
# create an analog input channel on pin 0
chan = AnalogIn(mcp, MCP.P0)
sensor=Adafruit_DHT.DHT22
pin=23
#archivo=open("humedad.txt","w")
#archivo.write("humedad"+" "+"temperatura"+" "+"co2")
while True:
humedad, temperatura = Adafruit_DHT.read_retry(sensor, pin)
concentracion= (159.6-(((chan.voltage)/10)*133.33))
if humedad is not None and temperatura is not None:
print(f'temperatura={temperatura:.2f}*C Humedad={humedad:.2f}%')
print('concentration', str(concentracion)+"ppm")
# current date and time for the timestamp
fecha=datetime.datetime.now()
# reformat the timestamp
fecha=fecha.strftime('%Y-%m-%d-%H:%M:%S')
print('fecha=',fecha)
# payload string to send
tPayload= "field1=" + str(temperatura) + (' fecha ') + str(fecha) + (' sensor dht22') + "&field2=" + str(humedad) + (' fecha ') + str(fecha) + (' sensor dht22 ') + "&field3=" + str(concentracion) + (' fecha ') + str(fecha) + (' sensor MQ135')
# try to publish the payload
try:
publish.single(topic, payload=tPayload, hostname=mqttHost, port=tPort, tls=tTLS, transport=tTransport)
except (KeyboardInterrupt):
break
time.sleep(60)
else:
print('sensor read failed')
#archivo.write("\n"+str(humedad)+"%"+" "+str(temperatura)+"°C"+" "+str(concentracion)+"ppm")
#archivo.close
|
from cryptography.fernet import Fernet
import sys
import os
# Build the cipher from the key stored in the FERNET_KEY environment variable
cipher_suite = Fernet(os.environ['FERNET_KEY'].encode())
def encode(text: str) -> str:
return cipher_suite.encrypt(text.encode()).decode('utf-8')
def decode(text: str) -> str:
return cipher_suite.decrypt(text.encode()).decode('utf-8')
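# Minimal usage sketch, assuming FERNET_KEY holds a key produced by Fernet.generate_key()
# and exported in the environment before this module is imported.
if __name__ == '__main__':
    token = encode('secret message')
    print(token)
    print(decode(token))  # round-trips back to 'secret message'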
|
from flask import Flask
from flask import jsonify
from flask import request
from flask_pymongo import PyMongo
from flask import Response
from flask import json
import urllib
from math import sqrt
import numpy as np
from sklearn.metrics.pairwise import pairwise_distances
from bson.json_util import loads
from bson import json_util
from flask_cors import CORS
from bson.objectid import ObjectId
app = Flask(__name__)
CORS(app)
app.config['MONGO_DBNAME'] = 'local'
app.config['MONGO_URI'] = 'mongodb://localhost:27017/ultiprep-db'
mongo = PyMongo(app)
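# predict() below implements mean-centred user-based collaborative filtering: each user's
# mean rating is added back to the similarity-weighted average of the other users'
# deviations from their own means, normalised by the sum of absolute similarities.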
def predict(ratings, similarity):
mean_user_rating = ratings.mean(axis=1)
ratings_diff = (ratings - mean_user_rating[:, np.newaxis])
pred = mean_user_rating[:, np.newaxis] + similarity.dot(ratings_diff) / np.array([np.abs(similarity).sum(axis=1)]).T
return pred
@app.route('/recommendedNotes', methods=['GET'])
def recommendedNotes():
name = request.args.get('name')
print name
#, { 'data.isTrashed': 'false' }
result = list(mongo.db.events.aggregate(
[{ "$match": { "$and": [ {'type': {"$in" : ['note_update','note_open','note_pin','note_copy','note_create','note_share']}} ]}},
{"$group" : {
"_id" : {'user':"$user", 'note' : "$data._id"},
'rating': {"$sum": 1 }
}
},
{"$sort":{'_id.user':1}}
]
))
print result
notesResult = []
if len(result) == 0:
return jsonify({'result' : notesResult})
users = mongo.db.events.distinct('user')
users.append(name)
notes = mongo.db.events.distinct('data._id')
data = np.zeros((len(users), len(notes)))
for i in result:
print "fusgukfgsdlfgkfgdls"
user = i["_id"]["user"]
note = i["_id"]["note"]
data[users.index(user)][notes.index(note)] = i["rating"]
print "matrix"
print data
user_similarity = pairwise_distances(data,metric='cosine')
print "similarity"
print user_similarity
user_similarity = 1 - user_similarity
print user_similarity
try:
user_prediction = predict(data,user_similarity)
except ValueError as e:
return jsonify({'result' : notesResult})
print "prediction"
prediction = np.argsort(-user_prediction[users.index(name)])
print user_prediction
result = [notes[i] for i in prediction]
print result
for i in result:
# , {'author' : {"$eq" : name}}, { "$or": [ {'contributors' : {"$in" : [name]}}, {'public' : 'true'}] }
#print i
t = i.encode('ascii','ignore')
n = mongo.db.notes.find({ "$and": [ {'_id' : ObjectId(t)}, { "$or": [ {'contributors' : {"$in" : [name]}}, {'author' : {"$eq" : name}}, {'public' : 'true'}] }] })
#({ "$and": [ {'_id' : ObjectId(t)}, { "$or": [ {'contributors' : {"$in" : [name]}}, {'author' : {"$eq" : name}}, {'public' : 'true'}] }] })
if n.count() > 0:
print "dsd"
#oid = str(n[0]["_id"])
#n[0]["_id"] = oid
json = json_util.dumps(n[0])
#print json
notesResult.append(json)
#print notesResult
return jsonify({'result' : notesResult})
if __name__ == '__main__':
app.run(host='0.0.0.0', port='8082')
|
class Calculator():
# METHODS - functions that are part of the class
def __init__(self): # magic ("dunder") method: double underscores before and after the name, called by Python itself
print("init")
def __del__(self):
print("DEL")# służy żeby pozamykac pliki pootwierane
# def __str__(self):# zwraca reprezenstacje w formie str
#return "Hello"
def __len__(self):
return 5
def dodaj(self, a, b):
wynik = a +b
print(wynik)
def odejmij(self, a, b):
wynik = a - b
print(wynik)
calc = Calculator() # you created an object stored in the variable calc - like creating a separate module with these functions (without self), as in the foo lesson
calc_2 = Calculator()
calc.liczba = 10
calc.liczba += 5
print(calc.liczba)
calc.dodaj(10,15)
calc_2.dodaj(20,20)
calc.odejmij(16,3)
print(len(calc))
|
import sys
import os
f = open("C:/Users/user/Documents/python/import.txt","r")
sys.stdin = f
# -*- coding: utf-8 -*-
n,a,b,c = map(int,input().split())
l = []
for i in range(0, n):
    l.append(input())
l = list(map(int, l))
|
#!/usr/bin/env python2.6
#
# Copyright (c) Members of the EGEE Collaboration. 2006-2009.
# See http://www.eu-egee.org/partners/ for details on the copyright holders.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Authors:
# Andrea Ceccanti (INFN)
from optparse import OptionParser, OptionGroup
from voms_shared import voms_version, admin_conf_dir, VOMSDefaults,\
admin_db_properties_path, admin_service_endpoint_path, vomses_path, lsc_path,\
aup_path, admin_logging_conf_path, X509Helper, core_conf_dir, voms_conf_path, voms_pass_path,\
voms_log_path, voms_lib_dir, voms_deploy_database_cmd,\
voms_ro_auth_clients_cmd, voms_add_admin_cmd, mysql_util_cmd,\
admin_service_properties_path, voms_undeploy_database_cmd, voms_upgrade_database_cmd
from sys import exit, stdout, stderr
import socket
import logging
import re
import os
import shutil
import pwd
import glob
import time
import string
import random
MYSQL="mysql"
ORACLE="oracle"
usage="""%prog command [options]
Commands:
install: installs or reconfigures a VO
upgrade: upgrades a VO
remove: removes a VO
"""
logger = None
parser = OptionParser(usage, version="%prog v. " + voms_version())
commands = ["install", "upgrade", "remove"]
HOST_CERT = "/etc/grid-security/hostcert.pem"
HOST_KEY = "/etc/grid-security/hostkey.pem"
VOMS_CERT = "/etc/grid-security/vomscert.pem"
VOMS_KEY = "/etc/grid-security/vomskey.pem"
def execute_cmd(cmd, error_msg=None):
status = os.system(cmd)
if status != 0:
if not error_msg:
error_and_exit("Error executing %s" % cmd)
else:
error_and_exit(error_msg)
def backup_dir_contents(d):
logger.debug("Backing up contents for directory: %s", d)
backup_filez = glob.glob(os.path.join(d,"*_backup_*"))
## Remove backup filez
for f in backup_filez:
## Don't remove backup directories potentially created by the user
if not os.path.isdir(f):
os.remove(f)
filez = glob.glob(os.path.join(d,"*"))
backup_date = time.strftime("%d-%m-%Y_%H-%M-%S",time.gmtime())
for f in filez:
os.rename(f, f+"_backup_"+backup_date)
def check_args_and_options(options,args):
if len(args) != 1 or args[0] not in commands:
error_and_exit("Please specify a single command among the following:\n\t%s" % "\n\t".join(commands))
def setup_cl_options():
## Base options
parser.add_option("--vo", dest="vo", help="the VO being configured", metavar="VO")
parser.add_option("--config-owner", dest="config_owner", help="the USER that will own configuration files", metavar="USER", default="voms")
parser.add_option("--verbose", dest="verbose", action="store_true", help="Be verbose.", default=False)
parser.add_option("--dry-run", dest="dry_run", action="store_true", help="Dry run execution. No files are touched.", default=False)
parser.add_option("--hostname", dest="hostname", help="the VOMS services HOSTNAME", metavar="HOSTNAME", default=socket.gethostname())
## Certificate and trust anchors (used for both voms and voms-admin services)
parser.add_option("--cert", dest="cert", help="the certificate CERT used to run the VOMS services", metavar="CERT", default="/etc/grid-security/hostcert.pem")
parser.add_option("--key", dest="key", help="the private key used to run the VOMS services", metavar="KEY", default="/etc/grid-security/hostkey.pem")
parser.add_option("--trust-dir", dest="trust_dir", help="The directory where CA certificates are stored", metavar="DIR", default="/etc/grid-security/certificates")
parser.add_option("--trust-refresh-period", type="int", dest="trust_refresh_period", help="How ofter CAs are refreshed from the filesystem (in seconds).", metavar="SECS", default=3600)
parser.add_option("--skip-voms-core", dest="skip_voms_core", action="store_true", help="Skips VOMS core configuration", default=False)
parser.add_option("--skip-voms-admin", dest="skip_voms_admin", action="store_true", help="Skips VOMS admin configuration", default=False)
parser.add_option("--skip-database", dest="skip_database", action="store_true", help="Skips database operations", default=False)
parser.add_option("--deploy-database", dest="deploy_database", action="store_true", help="Deploys the database for the VO being configured, if not present", default=True)
parser.add_option("--undeploy-database", dest="undeploy_database", action="store_true", help="Undeploys the database for the VO being removed", default=False)
# Other base options
parser.add_option("--openssl", dest="openssl", help="the PATH to the openssl command", metavar="PATH", default="openssl")
## Admin service options
admin_opt_group = OptionGroup(parser, "VOMS admin options", "These options drive the basic configuration of the VOMS admin service.")
admin_opt_group.add_option("--admin-port", dest="admin_port", type="int", help="the PORT on which the admin service will bind", metavar="PORT", default=8443)
admin_opt_group.add_option("--admin-cert", dest="admin_cert", help="Grants CERT full administrator privileges in the VO", metavar="CERT")
admin_opt_group.add_option("--read-only", dest="read_only", action="store_true", help="Sets the VOMS admin service as read-only", default=False)
admin_opt_group.add_option("--disable-ro-access-for-authenticated-clients",
dest="read_only_auth_clients",
action="store_false",
help="Sets the configured VO as non-browsable by authenticated clients",
default="True")
parser.add_option_group(admin_opt_group)
## DB options
db_opt_group = OptionGroup(parser, "Database configuration options", "These options configure VOMS database access")
db_opt_group.add_option("--dbtype", dest="dbtype", help="The database TYPE (mysql or oracle)", metavar="TYPE", default=MYSQL)
db_opt_group.add_option("--dbname", dest="dbname", help="Sets the VOMS database name to DBNAME", metavar="DBNAME")
db_opt_group.add_option("--dbusername", dest="dbusername", help="Sets the VOMS MySQL username to be created as USER", metavar="USER")
db_opt_group.add_option("--dbpassword", dest="dbpassword", help="Sets the VOMS MySQL password for the user to be created as PWD", metavar="PWD")
parser.add_option_group(db_opt_group)
## Connection pool options
conn_pool_opt_group = OptionGroup(parser, "Database connection pool options", "These options configure the voms admin service database connection pool")
conn_pool_opt_group.add_option("--c3p0-acquire-increment",
type='int',
dest="c3p0_acquire_increment",
help="Sets the number of new connections that are acquired from the database connection pool is exausted.",
metavar="NUM",
default=1)
conn_pool_opt_group.add_option("--c3p0-idle-test-period",
type='int',
dest="c3p0_idle_test_period",
help="Check idle connections in the pool every SEC seconds.",
metavar="SEC",
default=0)
conn_pool_opt_group.add_option("--c3p0-min-size",
type='int',
dest="c3p0_min_size",
help="Pool minimum size.",
metavar="NUM",
default=1)
conn_pool_opt_group.add_option("--c3p0-max-size",
type='int',
dest="c3p0_max_size",
help="Pool maximum size.",
metavar="NUM",
default=100)
conn_pool_opt_group.add_option("--c3p0-max-statements",
type='int',
dest="c3p0_max_statements",
help="The size of the connection pool prepared statements cache.",
metavar="NUM",
default=50)
conn_pool_opt_group.add_option("--c3p0-timeout",
type='int',
dest="c3p0_timeout",
help="The time in seconds a connection in the pool can remain pooled but unused before being discarded.",
metavar="SECS",
default=60)
parser.add_option_group(conn_pool_opt_group)
## MySQL specifics
mysql_opt_group = OptionGroup(parser, "MySQL-specific options", "These options are specific for MySQL database backend configuration")
mysql_opt_group.add_option("--createdb", dest="createdb", action="store_true", help="Creates the MySQL database schema when installing a VO", default=False)
mysql_opt_group.add_option("--dropdb", dest="dropdb", action="store_true", help="Drops the MySQL database schema when removing a VO", default=False)
mysql_opt_group.add_option("--dbhost",dest="dbhost", help="Sets the HOST where the MySQL database is running", metavar="HOST", default="localhost")
mysql_opt_group.add_option("--dbport",dest="dbport", type='int', help="Sets the PORT where the MySQL database is listening", metavar="PORT", default="3306")
mysql_opt_group.add_option("--mysql-command", dest="mysql_command", help="Sets the MySQL command to CMD", metavar="CMD", default="mysql")
mysql_opt_group.add_option("--dbauser", dest="dbauser", help="Sets MySQL administrator user to USER", metavar="USER", default="root")
mysql_opt_group.add_option("--dbapwd", dest="dbapwd", help="Sets MySQL administrator password to PWD", metavar="PWD")
mysql_opt_group.add_option("--dbapwdfile", dest="dbapwdfile", help="Reads MySQL administrator password from FILE", metavar="FILE")
parser.add_option_group(mysql_opt_group)
## ORACLE specifics
oracle_opt_group = OptionGroup(parser, "Oracle-specific options", "These options are specific for Oracle database backend configuration")
oracle_opt_group.add_option("--use-thin-driver", dest="use_thin_driver", action="store_true", help="Configures the Oracle database using the pure-java native driver", default=False)
parser.add_option_group(oracle_opt_group)
## VOMS core specifics
voms_core_opt_group = OptionGroup(parser, "VOMS core options", "These options drive the configuration of the VOMS core service.")
voms_core_opt_group.add_option("--core-port", dest="core_port", type="int", help="the PORT on which the VOMS core service will bind", metavar="PORT")
voms_core_opt_group.add_option("--libdir", dest="libdir", help="the DIR where VOMS core will look for the database plugin modules.", metavar="PORT")
voms_core_opt_group.add_option("--logdir", dest="logdir", help="the VOMS core log directory DIR", metavar="DIR")
voms_core_opt_group.add_option("--sqlloc", dest="sqlloc", help="the PATH to the VOMS core database access library", metavar="PATH")
voms_core_opt_group.add_option("--uri", dest="uri", help="Defines a non-standard the URI of the VOMS server included in the issued attribute certificates", metavar="URI")
voms_core_opt_group.add_option("--timeout", dest="timeout", type="int", help="Defines the validity of the AC issued by the VOMS server in seconds. The default is 24 hours (86400)", metavar="SECS", default=86400)
voms_core_opt_group.add_option("--socktimeout", dest="socktimeout", type="int", help="Sets the amount of time in seconds after which the server will drop an inactive connection. The default is 60 seconds", metavar="SECS", default=60)
voms_core_opt_group.add_option("--shortfqans", dest="shortfqans", action="store_true", help="Configures VOMS to use the short fqans syntax", default=False)
voms_core_opt_group.add_option("--skip-ca-check", dest="skip_ca_check", action="store_true", help="Configures VOMS to only consider a certificate subject when checking VO user membership", default=False)
voms_core_opt_group.add_option("--max-reqs", type="int", dest="max_reqs", help="Sets the maximum number of concurrent request that the VOMS service can handle.", default=50)
parser.add_option_group(voms_core_opt_group)
## Registration service specifics
registration_opt_group = OptionGroup(parser, "Registration service options", "These options configure the VOMS Admin registration service")
registration_opt_group.add_option("--disable-registration", dest="enable_registration", action="store_false", help="Disables registration service for the VO", default=True)
registration_opt_group.add_option("--aup-url", dest="aup_url", help="Sets a custom URL for the VO AUP.", metavar="URL")
registration_opt_group.add_option("--aup-signature-grace-period",
type="int",
dest="aup_signature_grace_period",
help="The time (in days) given to users to sign the AUP, after being notified, before being suspended.",
metavar="DAYS",
default="15")
registration_opt_group.add_option("--enable-attribute-requests", dest="enable_attribute_requests", action="store_true",
help="Enable attribute request at registration time.", default=False)
registration_opt_group.add_option("--membership-request-lifetime", type="int", dest="membership_request_lifetime",
help="Time (in seconds) that unconfirmed membership request are maintained in the VOMS database.",
metavar="SECS", default=604800)
registration_opt_group.add_option("--disable-membership-expired-requests-warnings",
action="store_false",
dest="membership_request_warn_when_expired",
help="Disables email notifications when unconfirmed membership requests are removed from the voms database.",
default=True)
parser.add_option_group(registration_opt_group)
## Membership checks configuration
membership_opt_group = OptionGroup(parser, "Membership checks options", "These options configure the VOMS Admin membership checks")
membership_opt_group.add_option("--preserve-expired-members", action="store_true", dest="preserve_expired_members", help="Do not suspend users whose membership has expired.", default=False)
membership_opt_group.add_option("--disable-membership-end-time", action="store_true", dest="disable_membership_end_time", help="Disable membership end time checks completely.", default=False)
membership_opt_group.add_option("--membership-default-lifetime", type="int", dest="membership_default_lifetime", help="Default VO membership lifetime duration (in months).", metavar="MONTHS", default=12)
membership_opt_group.add_option("--membership-check-period", type="int", dest="membership_check_period", help="The membership check background thread period (in seconds)", metavar="SECS", default=600)
membership_opt_group.add_option("--membership-expiration-warning-period", type="int", dest="membership_expiration_warning_period",
help="Warning period duration (in days). VOMS Admin will notify of users about to expire in the next number of days expressed by this configuration option.",
metavar="DAYS", default=30)
membership_opt_group.add_option("--membership-expiration-grace-period", type="int", dest="membership_expiration_grace_period",
help="Membership expiration grace period (in days). In the grace period user will be maintained active even if membership has expired.",
metavar="DAYS", default=7)
membership_opt_group.add_option("--membership-notification-resend-period", type="int", dest="membership_notification_resend_period",
help="Time (in days) that should pass between consecutive warning expiration messages sent to VO administrators to inform about expired and expiring VO members.",
metavar="DAYS", default=1)
saml_opt_group = OptionGroup(parser, "SAML Attribute Authority options", "These options configure the VOMS SAML attribute authority service")
saml_opt_group.add_option("--enable-saml", dest="enable_saml", action="store_true", help="Turns on the VOMS SAML service.", default=False)
saml_opt_group.add_option("--saml-lifetime", dest="saml_lifetime", type="int", help="Defines the maximum validity of the SAML assertions issued by the VOMS SAML server in seconds. The default is 24 hours (86400)", metavar="SECS", default=86400)
saml_opt_group.add_option("--disable-compulsory-group-membership",
action="store_false",
dest="compulsory_group_membership",
help="Disables VOMS compulsory group membership for the SAML AA.", default=True)
parser.add_option_group(saml_opt_group)
notification_opt_group = OptionGroup(parser, "Notification service options", "These options configure the VOMS Admin notification service")
notification_opt_group.add_option("--mail-from", dest="mail_from",help="The EMAIL address used for VOMS Admin notification messages.", metavar="EMAIL")
notification_opt_group.add_option("--smtp-host", dest="smtp_host",help="The HOST where VOMS Admin will deliver notification messages.", metavar="HOST")
notification_opt_group.add_option("--disable-notification", dest="disable_notification", action="store_true", help=" Turns off the VOMS admin notification service.", default=False)
notification_opt_group.add_option("--notification-username", dest="notification_username",help="SMTP authentication USERNAME", metavar="USERNAME", default="")
notification_opt_group.add_option("--notification-password", dest="notification_password",help="SMTP authentication PASSWORD", metavar="PASSWORD", default="")
notification_opt_group.add_option("--notification-use-tls", action="store_true", dest="notification_use_tls",help="Use TLS to connect to SMTP server", default=False)
parser.add_option_group(notification_opt_group)
other_opt_group = OptionGroup(parser, "Other fancy options", "Configuration options that do not fall in the other categories")
other_opt_group.add_option("--disable-conf-backup",
dest="enable_conf_backup",
action="store_false",
help="Disables configuration backup creation.",
default=True)
other_opt_group.add_option("--mkgridmap-translate-email",
dest="mkgridmap_translate_email",
action="store_true",
help="Generate gridmapfiles containing the email part of user certificate subject as emailAddress besides the Email format used by default.",
default=False)
other_opt_group.add_option("--csrf-log-only",
action="store_true",
dest="csrf_log_only",
help="When this option is set, CSRF requests are not blocked but logged. Don't set this option for maximum security",
default=False)
parser.add_option_group(other_opt_group)
def configure_logging(options):
"""
Configures logging so that debug and info messages are routed to stdout and higher level messages are to stderr.
Debug messages are shown only if verbose option is set
"""
class InfoAndBelowLoggingFilter(logging.Filter):
def filter(self,record):
if record.levelno <= logging.INFO:
return 1
return 0
global logger
out = logging.StreamHandler(stdout)
err = logging.StreamHandler(stderr)
if options.verbose:
log_level = logging.DEBUG
else:
log_level = logging.INFO
logging.basicConfig(format="%(message)s", level=log_level)
out.setLevel(log_level)
out.addFilter(InfoAndBelowLoggingFilter())
err.setLevel(logging.WARNING)
logger = logging.getLogger("voms-admin")
logger.addHandler(out)
logger.addHandler(err)
logger.propagate=False
logger.debug("Logging configured")
def check_required_options(options, required_opts):
def option_name_from_var(var_name):
return "--"+re.sub(r'_', '-', var_name)
missing_opts = []
for o in required_opts:
if not options.__dict__[o]:
missing_opts.append(option_name_from_var(o))
if len(missing_opts) > 0:
error_and_exit("Please set the following required options:\n\t%s" % '\n\t'.join(missing_opts))
def check_install_options(options):
if not options.vo:
error_and_exit("Please set the VO option")
if options.skip_voms_core and options.skip_voms_admin:
error_and_exit("There's not much to do if --skip-voms-core and --skip-voms-admin are both set!")
required_opts = ["vo", "dbusername", "dbpassword"]
if not options.skip_voms_admin:
required_opts += ["mail_from",
"smtp_host"]
if not options.skip_voms_core:
required_opts += ["core_port"]
if options.dbtype == ORACLE:
required_opts += ["dbname"]
check_required_options(options, required_opts)
def check_remove_options(options):
if not options.vo:
error_and_exit("Please set the VO option")
def check_upgrade_options(options):
if not options.vo:
error_and_exit("Please set the VO option")
def service_cert_sanity_checks(options):
if not os.path.exists(options.cert):
error_and_exit("Service certificate %s not found." % options.cert)
if not os.path.exists(options.key):
error_and_exit("Service private key %s not found." % options.key)
if not os.path.exists(options.trust_dir):
error_and_exit("Service trust anchor directory %s not found." % options.trust_dir)
def config_owner_ids(options):
try:
pwd_info = pwd.getpwnam(options.config_owner)
return (pwd_info[2], pwd_info[3])
except KeyError:
logger.warn("User %s is not configured on this system." % options.config_owner)
if os.geteuid() == 0:
error_and_exit("User %s is not configured on this system." % options.config_owner)
def create_voms_service_certificate(options):
if os.geteuid() == 0 and not options.dry_run:
logger.info("Creating VOMS services certificate in %s, %s" % (VOMS_CERT, VOMS_KEY))
shutil.copy(HOST_CERT, VOMS_CERT)
shutil.copy(HOST_KEY, VOMS_KEY)
(owner_id, owner_group_id) = config_owner_ids(options)
os.chown(VOMS_CERT,owner_id, owner_group_id)
os.chown(VOMS_KEY,owner_id, owner_group_id)
os.chmod(VOMS_CERT,0644)
os.chmod(VOMS_KEY,0400)
options.cert = VOMS_CERT
options.key = VOMS_KEY
def setup_service_certificate(options):
service_cert_sanity_checks(options)
if options.cert == HOST_CERT and options.key == HOST_KEY and os.geteuid() == 0:
create_voms_service_certificate(options)
def driver_class(options):
if options.dbtype == MYSQL:
return VOMSDefaults.mysql_driver_class
if options.dbtype == ORACLE:
return VOMSDefaults.oracle_driver_class
def driver_dialect(options):
if options.dbtype == MYSQL:
return VOMSDefaults.mysql_dialect
else:
return VOMSDefaults.oracle_dialect
def change_owner_and_set_perms(path, owner_id, group_id, perms):
if os.geteuid() == 0:
os.chown(path, owner_id, group_id)
os.chmod(path, perms)
def write_and_set_permissions(options, path, contents, perms):
f = open(path, "w")
f.write(contents)
f.close()
os.chmod(path, perms)
if os.getuid() == 0:
(owner_id, group_id) = config_owner_ids(options)
os.chown(path,owner_id,group_id)
def append_and_set_permissions(path, contents, owner_id, group_id, perms):
f = open(path, "a")
f.write(contents)
f.close()
change_owner_and_set_perms(path, owner_id, group_id, perms)
def dburl_mysql(options):
return "jdbc:mysql://%s:%d/%s" % (options.dbhost,
options.dbport,
options.dbname)
def dburl_oracle(options):
if options.use_thin_driver:
return "jdbc:oracle:thin:@//%s:%s/%s" % (options.dbhost,
options.dbport,
options.dbname)
else:
return "jdbc:oracle:oci:@%s" % (options.dbname)
def dburl(options):
if options.dbtype == MYSQL:
return dburl_mysql(options)
else:
return dburl_oracle(options)
def create_admin_db_properties(options):
db_options = dict(dbdriver=driver_class(options),
dbdialect=driver_dialect(options),
dburl=dburl(options))
template = string.Template(open(VOMSDefaults.db_props_template,"r").read())
db_properties = template.substitute(**dict(db_options.items()+options.__dict__.items()))
logger.debug("Admin service database properties:\n%s" % db_properties)
if not options.dry_run:
write_and_set_permissions(options,
admin_db_properties_path(options.vo),
db_properties,
0640)
def create_admin_service_properties(options):
template = string.Template(open(VOMSDefaults.service_props_template,"r").read())
service_props = template.substitute(**options.__dict__)
logger.debug("Admin service properties:\n%s" % service_props)
if not options.dry_run:
write_and_set_permissions(options,
admin_service_properties_path(options.vo),
service_props,
0640)
def create_endpoint_info(options):
endpoint_path = admin_service_endpoint_path(options.vo)
url = "%s:%s" % (options.hostname, options.admin_port)
logger.debug("Admin service endpoint: %s" % url)
if not options.dry_run:
write_and_set_permissions(options,
endpoint_path,
url,
0644)
def create_vomses(options):
cert = X509Helper(options.cert, openssl_cmd=options.openssl)
vomses = '"%s" "%s" "%s" "%s" "%s"\n' % (options.vo,
options.hostname,
options.core_port,
cert.subject,
options.vo)
logger.debug("VOMSES configuration: %s", vomses)
if not options.dry_run:
write_and_set_permissions(options,
vomses_path(options.vo),
vomses,
0644)
def create_lsc(options):
cert = X509Helper(options.cert, openssl_cmd=options.openssl)
lsc = "%s\n%s" % (cert.subject, cert.issuer)
logger.debug("LSC configuration: %s", lsc)
if not options.dry_run:
write_and_set_permissions(options,
lsc_path(options.vo),
lsc,
0644)
def create_aup(options):
if not options.dry_run:
shutil.copyfile(VOMSDefaults.vo_aup_template,aup_path(options.vo))
if os.geteuid() == 0:
(owner_id, group_id) = config_owner_ids(options)
change_owner_and_set_perms(aup_path(options.vo),
owner_id,
group_id,
0644)
def create_logging_configuration(options):
if not options.dry_run:
shutil.copyfile(VOMSDefaults.logging_conf_template,admin_logging_conf_path(options.vo))
if os.geteuid() == 0:
(owner_id, group_id) = config_owner_ids(options)
change_owner_and_set_perms(admin_logging_conf_path(options.vo),
owner_id,
group_id,
0644)
def create_admin_configuration(options):
if os.path.exists(admin_conf_dir(options.vo)):
logger.info("VOMS Admin service configuration for VO %s exists.", options.vo)
if not options.dry_run and options.enable_conf_backup:
backup_dir_contents(admin_conf_dir(options.vo))
else:
## Set the deploy database option if the VO is
## installed for the first time on this host and this
## is not meant as a replica
if not options.skip_database:
options.deploy_database = True
# options.createdb = True
## FIXME: set permissions
if not options.dry_run:
os.makedirs(admin_conf_dir(options.vo))
create_admin_db_properties(options)
create_admin_service_properties(options)
create_endpoint_info(options)
create_vomses(options)
create_lsc(options)
create_aup(options)
create_logging_configuration(options)
def create_voms_conf(options):
core_opts = dict(core_logfile=os.path.join(options.logdir, "voms.%s" % options.vo),
core_passfile=voms_pass_path(options.vo),
core_sqlloc=os.path.join(options.libdir, options.sqlloc))
template = string.Template(open(VOMSDefaults.voms_template,"r").read())
all_core_opts = dict(core_opts.items() + options.__dict__.items())
voms_props = template.substitute(**all_core_opts)
if options.skip_ca_check:
voms_props+="\n--skipcacheck"
if options.shortfqans:
voms_props+="\n--shortfqans"
logger.debug("VOMS Core configuration:\n%s" % voms_props)
if not options.dry_run:
## Core configuration
write_and_set_permissions(options,
voms_conf_path(options.vo),
voms_props,
0644)
## Core password file
write_and_set_permissions(options,
voms_pass_path(options.vo),
options.dbpassword+"\n",
0640)
logger.info("VOMS core service configured succesfully.")
def create_core_configuration(options):
if os.path.exists(core_conf_dir(options.vo)):
logger.info("VOMS core service configuration for VO %s already exists.", options.vo)
if not options.dry_run and options.enable_conf_backup:
backup_dir_contents(core_conf_dir(options.vo))
else:
## FIXME: set permissions
os.makedirs(core_conf_dir(options.vo))
create_voms_conf(options)
def generate_password(length=8, chars=string.ascii_uppercase + string.digits):
return ''.join(random.choice(chars) for x in range(length))
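# Note: random.choice() uses the non-cryptographic Mersenne Twister generator; if these
# generated passwords must resist guessing, random.SystemRandom().choice(chars) draws
# from the OS entropy source instead.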
def setup_core_defaults(options):
if not options.logdir:
options.logdir = voms_log_path()
if not options.libdir:
options.libdir = voms_lib_dir()
if not options.sqlloc:
if options.dbtype == MYSQL:
options.sqlloc = "libvomsmysql.so"
if options.dbtype == ORACLE:
options.sqlloc = "libvomsoracle.so"
def setup_defaults(options):
if not options.dbname and options.dbtype == MYSQL:
options.dbname = "voms_%s" % (re.sub(r"[-.]","_",options.vo))
if not options.dbhost:
options.dbhost = "localhost"
if not options.dbport:
if options.dbtype == MYSQL:
options.dbport = 3306
if options.dbtype == ORACLE:
options.dbport = 1521
if options.createdb or options.dropdb:
if not options.dbapwd:
error_and_exit("Please set at least the --dbapwd option when attempting MySQL schema creation/removal.")
def setup_admin_defaults(options):
if not options.aup_url:
options.aup_url = "file:%s" % aup_path(options.vo)
if not options.uri:
options.uri = "%s:%d" % (options.hostname, options.core_port)
def create_mysql_db(options):
createdb_cmd = mysql_util_cmd("create_db", options)
if not options.dbapwd or len(options.dbapwd) == 0:
logger.warn("WARNING: No password has been specified for the mysql root account.")
execute_cmd(createdb_cmd, "Error creating MySQL database schema.")
def deploy_database(options):
logger.info("Deploying database for VO %s", options.vo)
if options.dbtype == MYSQL and options.createdb:
create_mysql_db(options)
execute_cmd(voms_deploy_database_cmd(options.vo), "Error deploying VOMS database!")
logger.info("Adding VO administrator reading information from %s", options.cert)
execute_cmd(voms_add_admin_cmd(options.vo, options.cert, ignore_email=True), "Error adding VO administrator!")
if options.read_only_auth_clients:
logger.info("Adding read-only access to authenticated clients on the VO.")
execute_cmd(voms_ro_auth_clients_cmd(options.vo), "Error setting read-only access on the VO!")
if options.admin_cert:
logger.info("Adding VO administrator reading information from %s", options.admin_cert)
execute_cmd(voms_add_admin_cmd(options.vo, options.admin_cert), "Error adding VO administrator!")
def do_admin_install(options):
logger.info("Configuring VOMS admin service for vo %s" , options.vo)
setup_service_certificate(options)
setup_admin_defaults(options)
create_admin_configuration(options)
if options.deploy_database:
deploy_database(options)
def do_core_install(options):
logger.info("Configuring VOMS core service for vo %s" , options.vo)
if options.skip_voms_admin:
setup_service_certificate(options)
setup_core_defaults(options)
create_core_configuration(options)
pass
def do_install(options):
check_install_options(options)
setup_defaults(options)
if not options.skip_voms_admin:
do_admin_install(options)
if not options.skip_voms_core:
do_core_install(options)
logger.info("VO %s configuration completed succesfully.", options.vo)
def upgrade_database(options):
execute_cmd(voms_upgrade_database_cmd(options.vo))
def undeploy_database(options):
logger.warning("Undeploying database for VO %s. The database contents will be lost.", options.vo)
if options.dbtype == MYSQL and options.dropdb:
execute_cmd(mysql_util_cmd("drop_db", options), "Error dropping MySQL database for VO %s!" % options.vo)
else:
execute_cmd(voms_undeploy_database_cmd(options.vo), "Error undeploying VOMS database for VO %s!" % (options.vo))
def remove_dir_and_contents(directory):
logger.info("Removing directory %s and its contents", directory)
if os.path.exists(directory):
for i in glob.glob(directory+"/*"):
logger.debug("Removing %s",i)
os.remove(i)
os.rmdir(directory)
def do_remove(options):
check_remove_options(options)
setup_defaults(options)
if not options.skip_voms_admin:
if not os.path.exists(admin_conf_dir(options.vo)):
logger.error("The VOMS Admin service for VO %s is not configured on this host.", options.vo)
else:
if options.undeploy_database:
if not options.skip_database:
undeploy_database(options)
else:
logger.warning("Database will not be dropped since --skip-database option is set.")
logger.info("Removing VOMS Admin service configuration")
remove_dir_and_contents(admin_conf_dir(options.vo))
if not options.skip_voms_core:
if not os.path.exists(core_conf_dir(options.vo)):
logger.error("The VOMS core service for VO %s is not configured on this host.", options.vo)
else:
logger.info("Removing VOMS core service configuration")
remove_dir_and_contents(core_conf_dir(options.vo))
def do_upgrade(options):
check_upgrade_options(options)
setup_defaults(options)
if not os.path.exists(admin_conf_dir(options.vo)):
logger.error("The VOMS Admin service for VO %s is not configured on this host.", options.vo)
else:
logger.info("Upgrading database for VO %s to the latest version.",
options.vo)
upgrade_database(options)
logger.info("Upgrade completed successfully.")
def error_and_exit(msg):
logger.critical(msg)
exit(1)
def main():
setup_cl_options()
(options, args) = parser.parse_args()
configure_logging(options)
check_args_and_options(options, args)
command = args[0]
try:
if command == "install":
do_install(options)
elif command == "remove":
do_remove(options)
elif command == "upgrade":
do_upgrade(options)
except SystemExit, e:
exit(e)
except:
logger.exception("Unexpected error caught!")
if __name__ == '__main__':
main()
|
#!/usr/bin/python3
import sys
print(sys.argv)
print(sys.argv[1])
sys.stderr.write("i am stderr!\n")
sys.stderr.flush()
sys.stdout.write("i am stdout\n")
|
from django.contrib import admin
from .models import Post
# Register your models here.
@admin.register(Post)
class PostAdmin(admin.ModelAdmin):
search_fields= ['message']
list_display=['pk','message','author']
|
#!/usr/bin/env python
from __future__ import print_function
from cv_bridge import CvBridge, CvBridgeError
from sensor_msgs.msg import Image
from matplotlib import pyplot as plt
from joblib import load
from sklearn.svm import SVC
from exercise6.srv import *
import sklearn
import cv2
import rospy
import numpy as np
import sys
bridge = CvBridge()
clf = load(sys.path[0] + '/../color_classifier/svc4.joblib')
colors = ['red', 'green', 'blue', 'yellow', 'white', 'black']
def plot_hist(h, s):
plt.plot(h)
plt.xlim([0,s])
plt.show()
def calc_hist(image, channel, hist_r, nbins, mask):
hist_range = (0, hist_r)
# OpenCV function is faster (around 40X) than np.histogram()
hist = cv2.calcHist([image], [channel], mask, [nbins], hist_range, accumulate=False)
# normalize histogram
hist_sum = np.sum(hist)
hist = np.divide(hist, hist_sum)
return np.concatenate(hist)
def handle_cylinder(req):
# input is 1D image!
I = req.image_1d
rgb_I = bridge.imgmsg_to_cv2(I, "bgr8")
hsv_I = cv2.cvtColor(rgb_I, cv2.COLOR_BGR2HSV)
r_hist = calc_hist(rgb_I, 0, 256, 64, None)
g_hist = calc_hist(rgb_I, 1, 256, 64, None)
b_hist = calc_hist(rgb_I, 2, 256, 64, None)
h_hist = calc_hist(hsv_I, 0, 180, 30, None)
s_hist = calc_hist(hsv_I, 1, 256, 64, None)
v_hist = calc_hist(hsv_I, 2, 256, 64, None)
# merge hists
hists = np.concatenate([h_hist, s_hist, v_hist, r_hist, g_hist, b_hist])
# run clf on hists
color = colors[clf.predict([hists])[0]]
print('prediction :', color, 'cylinder')
# TODO: publish color?
return cylinder_colorResponse(color)
def handle_ring(req):
I = req.ring
rgb_I = bridge.imgmsg_to_cv2(I)
M = req.mask
mask = bridge.imgmsg_to_cv2(M)
mask = cv2.bitwise_not(mask)
# for debugging:
'''
cv2.imshow('received image', rgb_I)
cv2.imshow('received mask', mask)
while True:
if cv2.waitKey(1) & 0xFF == ord('q'): break
'''
hsv_I = cv2.cvtColor(rgb_I, cv2.COLOR_BGR2HSV)
r_hist = calc_hist(rgb_I, 0, 256, 64, mask)
g_hist = calc_hist(rgb_I, 1, 256, 64, mask)
b_hist = calc_hist(rgb_I, 2, 256, 64, mask)
h_hist = calc_hist(hsv_I, 0, 180, 30, mask)
s_hist = calc_hist(hsv_I, 1, 256, 64, mask)
v_hist = calc_hist(hsv_I, 2, 256, 64, mask)
# merge hists
hists = np.concatenate([h_hist, s_hist, v_hist, r_hist, g_hist, b_hist])
# run clf on hists
color = colors[clf.predict([hists])[0]]
print('prediction :', color, 'ring')
return ring_colorResponse(color)
def main():
rospy.init_node('color_detection')
cylinder_service = rospy.Service('cylinder_color', cylinder_color, handle_cylinder)
ring_service = rospy.Service('ring_color', ring_color, handle_ring)
print(clf)
rospy.spin()
if __name__ == '__main__':
main()
|
#Method 1
def armstrong1(num):
temp_1=temp_2=num
string=list()
def noOfDigits(temp_1):
count=0
while temp_1!=0:
temp_1=temp_1//10
count+=1
return count
n=noOfDigits(temp_1)
armstrong=0
while temp_2!=0:
r=temp_2%10
temp_2=temp_2//10
armstrong+=r**n
string.append("*".join(str(r)*n))
string.reverse()
if armstrong==num:
print(f"Yes\n{num} is an Armstrong number.\n{' + '.join(string)} = {armstrong}")
else:
print(f"No\n{num} is not an Armstrong number.\n{' + '.join(string)} = {armstrong}")
#Method 2
def armstrong2(num):
number=list(num.strip())
armstrong=0
for i in number:
armstrong=armstrong+pow(int(i),len(number))
number[number.index(i)]="*".join(i*len(number))
# string.sort()
print(f"Yes\n{num} is an Armstrong number.\n{' + '.join(number)} = {armstrong}") if armstrong==num else print(f"No\n{num} is not an Armstrong number.\n{' + '.join(number)} = {armstrong}")
#armstrong1(int(input()))
armstrong2(input())
|
__all__ = ["Applicant", "BaseModel" "City", "Interviewer", "InterviewSlot", "Mentor", "School"]
|
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from sklearn import datasets
from sklearn import svm
from sklearn.semi_supervised import label_propagation
from sklearn import decomposition
mark = pd.read_csv("C:/Users/ASUS/Desktop/3f/4c.csv")
marks=mark.values[:,1:]
pca = decomposition.PCA(n_components=10)
pca.fit(marks)
marks = pca.transform(marks)
np.savetxt("C:/Users/ASUS/Desktop/pcaDone/4c.csv",marks,delimiter=",")
print(marks)
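# Optional sanity check (added example, not in the original script): a fitted PCA exposes
# explained_variance_ratio_, showing how much of the total variance the 10 kept components retain.
print(pca.explained_variance_ratio_.sum())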
|
# -*- coding: utf-8 -*-
if __name__ == "__main_":
pass
|
from django.http import HttpResponse
from django.shortcuts import render
def homepage(request): # when someone requests this page, Django calls the view with a request object describing which URL they asked for
return HttpResponse("<h1>Homepage</h1>")
def eggs(request):
return HttpResponse("<h1>Eggs are great</h1>")
|
# delete_nth=[1,1,1,1]
# def remove(a):
# delete_nth.pop(-1)
# print(delete_nth)
# remove(2)
# def delete(a):
# list1=[1,1,2,3]
# i=0
# while i<len(list1):
# list1.remove(list1[-i])
# i=i+1
# print(list1)
# delete(2)
# list1=[1,1,2,3]
# list2 = []
# num = int(input("number : "))
# length = int(len(list1))
# i = 0
# while i < length-num:
# list2.append(list1[i])
# i = i + 1
# print(list2)
|
#encoding=utf-8
import cv2 # import the OpenCV library
img = cv2.imread("./images/beach.jpg") # load the image; the path may use either kind of slash
cv2.imshow("HelloCV", img) # display the image
cv2.imwrite("D:/save1.jpg", img) # save the image
cv2.waitKey(0) # wait for a key press, then exit
|
from time import gmtime, strftime
class Log:
# flags available for the logs
LOG_INFO_ENABLE = 1 << 0
LOG_DEBUG_ENABLE = 1 << 1
LOG_WARNING_ENABLE = 1 << 2
LOG_ERROR_ENABLE = 1 << 3
LOG_ALL_ENABLE = 15
# variable to set with the values above to choose which log types are displayed
flags = 0
@staticmethod
def debug(tag, msg):
if Log.flags & Log.LOG_DEBUG_ENABLE:
print strftime("%Y-%m-%d %H:%M:%S", gmtime()) + "[ DEBUG ][ " + tag + " ] " + msg
@staticmethod
def info(tag, msg):
if Log.flags & Log.LOG_INFO_ENABLE:
print strftime("%Y-%m-%d %H:%M:%S", gmtime()) + "[ INFO ][ " + tag, " ] " + msg
@staticmethod
def error(tag, msg):
if Log.flags & Log.LOG_ERROR_ENABLE:
print strftime("%Y-%m-%d %H:%M:%S", gmtime()) + "[ ERROR ][ " + tag + " ] " + msg
@staticmethod
def warning(tag,msg):
if Log.flags & Log.LOG_WARNING_ENABLE:
print strftime("%Y-%m-%d %H:%M:%S", gmtime()) + "[ WARNING ][ " + tag + " ] " + msg
|