#!/usr/bin/python
# -*- coding: utf-8 -*-
import httplib2 as http
import httplib
import re
from urlparse import urlparse
import pprint
import urllib2
class streamscrobbler:
def parse_headers(self, response):
headers = {}
count = 0
while True:
line = response.readline()
if line == '\r\n':
break # end of headers
if ':' in line:
key, value = line.split(':', 1)
headers[key] = value
if count == 12:
break
count = count + 1
return headers
# this is the function you should call with the url; it returns all the data as a dict
def getServerInfo(self, url):
status = 0
if url.endswith('.pls') or url.endswith('listen.pls?sid=1'):
address = self.checkPLS(url)
else:
address = url
if isinstance(address, str):
meta_interval = self.checkWhatServer(address)
else:
meta_interval = bool(0)
if isinstance(meta_interval, bool):
if meta_interval is True:
status = 1
else:
status = 0
metadata = False
elif "SHOUTcast" in meta_interval:
status = 1
if "1.9" in meta_interval:
metadata = self.shoutcastOldGet(address, False)
else:
metadata = self.shoutcastCheck(address, False)
elif "Icecast" in meta_interval or "137" in meta_interval:
status = 1
metadata = self.shoutcastCheck(address, True)
elif "StreamMachine" in meta_interval:
status = 1
metadata = self.shoutcastCheck(address, True)
else:
metadata = False
return {"status":status, "metadata":metadata}
def checkWhatServer(self, address):
try:
status = urllib2.urlopen(address, timeout=2).getcode()
except Exception:
return bool(0)
if status == 200:
request = urllib2.Request(address)
user_agent = 'iTunes/9.1.1'
request.add_header('User-Agent', user_agent)
request.add_header('icy-metadata', 1)
try:
response = urllib2.urlopen(request, timeout=6)
if "server" in response.headers:
shoutcast = response.headers['server']
elif "X-Powered-By" in response.headers:
shoutcast = response.headers['X-Powered-By']
else:
headers = self.parse_headers(response)
if "icy-notice1" in headers:
shoutcast = headers['icy-notice1']
if "This stream requires" in shoutcast:
shoutcast = headers['icy-notice2']
else:
shoutcast = bool(1)
response.close()
except Exception:
return bool(1)
else:
shoutcast = bool(0)
return shoutcast
def checkPLS(self, address):
try:
response = urllib2.urlopen(address, timeout=2)
for line in response:
if line.startswith("File1="):
stream = line
response.close()
if 'stream' in locals():
return stream[6:]
else:
return bool(0)
except Exception:
return bool(0)
def shoutcastOldGet(self, address, itsOld):
station = self.shoutcast7htmlCheck(address)
if station is False:
station = self.shoutcastCheck(address, itsOld)
else:
station = self.justgetcontenttype(address, station)
return station
def shoutcast7htmlCheck(self, address):
o = urlparse(address)
stringurl = o.scheme + "://" + o.netloc + "/7.html"
user_agent = 'Mozilla/4.0 (compatible; MSIE 5.5; Windows NT)'
request = urllib2.Request(stringurl)
request.add_header('User-Agent', user_agent)
request.add_header('Accept-Charset', 'utf-8')
try:
response = urllib2.urlopen(request, timeout=2)
for line in response:
line = self.stripTags(line)
lines = line.split(',', 7)
if len(lines) > 1:
response.close()
return {'song':lines[6], 'bitrate':lines[5]}
else:
response.close()
return False
else:
response.close()
except urllib2.HTTPError, e:
print ' Error 7.html, HTTPError = ' + str(e.code)
return False
except urllib2.URLError, e:
print " Error 7.html, URLError: " + str(e.reason)
return False
except Exception, err:
print " Error 7.html"
return False
def justgetcontenttype(self, address, station):
request = urllib2.Request(address)
try:
user_agent = 'iTunes/9.1.1'
request.add_header('User-Agent', user_agent)
request.add_header('icy-metadata', 1)
request.add_header('Accept-Charset', 'utf-8')
response = urllib2.urlopen(request, timeout=5)
contenttype = "?"
headers = self.parse_headers(response)
if "Content-Type" in headers:
contenttype = headers['Content-Type']
elif 'content-type' in headers:
contenttype = headers['content-type']
response.close()
return {'song':station.get("song"), 'bitrate':station.get("bitrate"), 'contenttype':contenttype}
except urllib2.HTTPError, e:
print ' Error, HTTPError = ' + str(e.code)
return False
except urllib2.URLError, e:
print " Error, URLError: " + str(e.reason)
return False
except Exception, err:
print " Error: " +str(err)
return False
def shoutcastCheck(self, address, itsOld):
request = urllib2.Request(address)
try:
user_agent = 'iTunes/9.1.1'
request.add_header('User-Agent', user_agent)
request.add_header('icy-metadata', 1)
request.add_header('Accept-Charset', 'utf-8')
response = urllib2.urlopen(request, timeout=5)
if itsOld is not True:
headers = self.parse_headers(response)
bitrate = headers['icy-br']
icy_metaint_header = headers['icy-metaint']
if "Content-Type" in headers:
contenttype = headers['Content-Type']
elif 'content-type' in headers:
contenttype = headers['content-type']
else:
bitrate = response.headers.get('icy-br').split(",")[0]
icy_metaint_header = response.headers.get('icy-metaint')
if response.headers.get('Content-Type') is not None:
contenttype = response.headers.get('Content-Type')
elif response.headers.get('content-type') is not None:
contenttype = response.headers.get('content-type')
#print response.headers # this prints all the header information
if icy_metaint_header is not None:
metaint = int(icy_metaint_header)
read_buffer = metaint + 255
content = response.read(read_buffer)
start = "StreamTitle='"
end = "';"
title = re.search('%s(.*)%s' % (start, end), content[metaint:]).group(1)
title = re.sub("StreamUrl='.*?';", "", title).replace("';", "").replace("StreamUrl='", "")
title = re.sub("&artist=.*", "", title)
title = re.sub("http://.*", "", title)
response.close()
return {'song':title, 'bitrate':bitrate, 'contenttype':contenttype}
else:
response.close()
print "No metaint"
except urllib2.HTTPError, e:
print ' Error, HTTPError = ' + str(e.code)
return False
except urllib2.URLError, e:
print " Error, URLError: " + str(e.reason)
return False
except Exception, err:
print " Error: " +str(err)
return False
def stripTags(self, text):
finished = 0
while not finished:
finished = 1
start = text.find("<")
if start >= 0:
stop = text[start:].find(">")
if stop >= 0:
text = text[:start] + text[start + stop + 1:]
finished = 0
return text
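# Minimal usage sketch (not part of the original file; the stream URL below is a
# placeholder): getServerInfo() returns a dict with "status" and "metadata" keys.
if __name__ == "__main__":
    scrobbler = streamscrobbler()
    info = scrobbler.getServerInfo("http://example.com:8000/stream")
    print info["status"], info["metadata"]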
|
import click
from flask.cli import with_appcontext
from .models import User, db, SocialMedia
from grant.task.models import Task
from grant.settings import STAGING_PASSWORD
# @click.command()
# @click.argument('identity')
# @with_appcontext
# def delete_user(identity):
# print(identity)
# if str.isdigit(identity):
# user = User.get_by_id(identity)
# else:
# user = User.get_by_email(identity)
#
# if user:
# db.session.delete(user)
# db.session.commit()
# click.echo(f'Successfully deleted {user.display_name} (uid {user.id})')
# else:
# raise click.BadParameter('Invalid user identity. Must be a userid, ' \
# 'account address, or email address of an ' \
# 'existing user.')
@click.command()
@click.argument('identity')
@with_appcontext
def set_admin(identity):
print("Setting admin to user with identity: " + identity)
if str.isdigit(identity):
user = User.get_by_id(identity)
else:
user = User.get_by_email(identity)
if user:
user.set_admin(True)
user.email_verification.has_verified = True
db.session.add(user)
db.session.commit()
click.echo(f'Successfully set {user.display_name} (uid {user.id}) to admin')
else:
raise click.BadParameter('Invalid user identity. Must be a userid, '
'account address, or email address of an '
'existing user.')
@click.command()
@with_appcontext
def mangle_users():
if STAGING_PASSWORD:
print("Mangling all users")
for i, user in enumerate(User.query.all()):
user.email_address = "random" + str(i) + "@grant.io"
user.password = STAGING_PASSWORD
# DELETE TOTP SECRET
user.totp_secret = None
# DELETE BACKUP CODES
user.backup_codes = None
db.session.add(user)
# DELETE ALL TASKS
for task in Task.query.all():
db.session.delete(task)
# REMOVE ALL SOCIAL MEDIA
for social in SocialMedia.query.all():
db.session.delete(social)
db.session.commit()
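# Hypothetical wiring sketch (not from this file): these commands are typically
# attached to the Flask app's CLI elsewhere in the project, for example:
#
#   def register_commands(app):
#       app.cli.add_command(set_admin)
#       app.cli.add_command(mangle_users)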
|
from PyQt5.QtWidgets import *
from PyQt5.QtGui import *
import sqlite3
from Functions import GuiSignal
from Gui import GuiTab
# Main window GUI
class MainWindow(QMainWindow):
def __init__(self, *args, **kwargs):
super(MainWindow, self).__init__(*args, **kwargs)
self.conn = sqlite3.connect("database.db")
self.c = self.conn.cursor()
self.c.execute("CREATE TABLE IF NOT EXISTS Workers(roll INTEGER PRIMARY KEY AUTOINCREMENT ,name TEXT,branch TEXT,sem INTEGER,mobile INTEGER,address TEXT)")
self.c.execute("CREATE TABLE IF NOT EXISTS Late(roll INTEGER PRIMARY KEY AUTOINCREMENT ,name TEXT,count INTEGER)")
self.c.close()
self.setWindowIcon(QIcon("icon/document.png"))
self.setWindowTitle("Контроль посещений")
self.setMinimumSize(900, 700)
self.table_widget= GuiTab.Tabs(self)
self.setCentralWidget(self.table_widget)
self.table_widget.show()
toolbar = QToolBar()
toolbar.setMovable(False)
self.addToolBar(toolbar)
statusbar = QStatusBar()
self.setStatusBar(statusbar)
# Toolbar buttons
btn_ac_adduser = QAction(QIcon("icon/add.png"), "Добавить", self)
btn_ac_adduser.triggered.connect(lambda: GuiSignal.insert(self.table_widget.tabs.currentIndex()))
btn_ac_adduser.setStatusTip("Добавить")
toolbar.addAction(btn_ac_adduser)
btn_ac_refresh = QAction(QIcon("icon/refresh.png"),"Обновить",self)
btn_ac_refresh.triggered.connect(lambda: self.loaddata(self.table_widget.tabs.currentIndex()))
btn_ac_refresh.setStatusTip("Обновить")
toolbar.addAction(btn_ac_refresh)
btn_ac_search = QAction(QIcon("icon/search.png"), "Поиск", self)
btn_ac_search.triggered.connect(lambda: GuiSignal.search(self.table_widget.tabs.currentIndex()))
btn_ac_search.setStatusTip("Поиск")
toolbar.addAction(btn_ac_search)
btn_ac_delete = QAction(QIcon("icon/trash.png"), "Удалить", self)
btn_ac_delete.triggered.connect(lambda: GuiSignal.delete(self.table_widget.tabs.currentIndex()))
btn_ac_delete.setStatusTip("Удалить")
toolbar.addAction(btn_ac_delete)
btn_ac_update = QAction(QIcon("icon/update.png"), "Редактировать", self)
btn_ac_update.triggered.connect(lambda: GuiSignal.updateTable(self.table_widget.tabs.currentIndex()))
btn_ac_update.setStatusTip("Редактировать")
toolbar.addAction(btn_ac_update)
# Load data from the database into the table
def loaddata(self, index):
if index == 0:
self.connection = sqlite3.connect("database.db")
query = "SELECT * FROM Workers"
result = self.connection.execute(query)
self.table_widget.tableWidget.setRowCount(0)
for row_number, row_data in enumerate(result):
self.table_widget.tableWidget.insertRow(row_number)
for column_number, data in enumerate(row_data):
self.table_widget.tableWidget.setItem(row_number, column_number, QTableWidgetItem(str(data)))
self.connection.close()
else:
self.connection = sqlite3.connect("database.db")
query = "SELECT * FROM Late"
result = self.connection.execute(query)
self.table_widget.tableWidget1.setRowCount(0)
for row_number, row_data in enumerate(result):
self.table_widget.tableWidget1.insertRow(row_number)
for column_number, data in enumerate(row_data):
self.table_widget.tableWidget1.setItem(row_number, column_number, QTableWidgetItem(str(data)))
self.connection.close()
# Render the table (for printing)
def handlePaintRequest(self, printer):
document = QTextDocument()
cursor = QTextCursor(document)
model = self.table.model()
table = cursor.insertTable(
model.rowCount(), model.columnCount())
for row in range(table.rows()):
for column in range(table.columns()):
cursor.insertText(model.item(row, column).text())
cursor.movePosition(QTextCursor.NextCell)
document.print_(printer)
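# Minimal launch sketch (assumed; GuiTab, GuiSignal and the icon files come from
# the surrounding project and are not defined in this file):
#
#   import sys
#   app = QApplication(sys.argv)
#   window = MainWindow()
#   window.show()
#   sys.exit(app.exec_())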
|
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import csv
import matplotlib
font = {'family' : 'normal',
'weight' : 'normal',
'size' : 18}
matplotlib.rc('font', **font)
cmap="RdBu_r"
######### Functions Defined Below #########
########################################################################
########################################################################
######### For each trajectory, assign each point to a particular grid ############
def assign_paths(transitions, data, z_ind, inc_ind, az_ind, top_z,bott_z,bins_z,bins_inc_ang,bins_az_ang,paths_file,paths_density_file,z_step,inc_step, az_step,Grid_z,Grid_density):
pathway_grids = {}
pathway_density = {}
pathway_z = {}
for i in range(len(transitions)):
pathway_grids[i] = []
pathway_density[i] = []
for j in range(len(transitions[i])):
f = int(transitions[i][j])
it = 0
#### Assign point along pathway to a particular grid
for z in range(len(bins_z)):
if data[f][z_ind]<=bins_z[z] and data[f][z_ind]>=bins_z[z]-z_step:
for inc in range(len(bins_inc_ang)):
if data[f][inc_ind]>=bins_inc_ang[inc] and data[f][inc_ind]<=bins_inc_ang[inc]+inc_step:
for az in range(len(bins_az_ang)):
it = (z*len(bins_inc_ang)*len(bins_az_ang)) + inc*len(bins_az_ang) + az+1
if data[f][az_ind]>=bins_az_ang[az] and data[f][az_ind]<=bins_az_ang[az]+az_step:
pathway_grids[i] = np.append( pathway_grids[i], it)
print(it)
print(Grid_density[it-1])
pathway_density[i] = np.append( pathway_density[i], Grid_density[it-1] )
break
for i in range(len(pathway_grids)):
f=open(paths_file,'a+',newline='')
writer=csv.writer(f,delimiter=' ',quotechar='|', quoting=csv.QUOTE_MINIMAL)
writer.writerow(pathway_grids[i])
f.close()
f=open(paths_density_file,'a+',newline='')
writer=csv.writer(f,delimiter=' ',quotechar='|', quoting=csv.QUOTE_MINIMAL)
writer.writerow(pathway_density[i])
f.close()
return (pathway_grids,pathway_density)
####### Use if you have already assigned paths to grids
def open_paths (paths_file,paths_density_file):
p_file=open(paths_file)
pathway_grids=[]
with p_file as my_file:
for line in my_file:
myarray=np.fromstring(line, dtype=float, sep=' ')
pathway_grids.append(myarray)
p_file=open(paths_density_file)
pathway_density=[]
with p_file as my_file:
for line in my_file:
myarray=np.fromstring(line, dtype=float, sep=' ')
pathway_density.append(myarray)
return (pathway_grids,pathway_density)
#################################################################################################################
def cluster_trajectories(transitions, bins_z, pathway_grids, pathway_density, count_cutoff, density_cutoff, cluster_cutoff,paths_file,paths_file_c1,paths_file_c2 ,Grid_z,Grid_az,Grid_density):
allowed_trajectories = {}
allowed_path_indices = []
it = -1
it_c1 = -1
it_c2 = -1
clust1_traj = {}
clust2_traj = {}
clust1_grid_traj = {}
clust2_grid_traj = {}
for i in range(len(transitions)):
count = 0
clust1_cnt = 0 #### number of points along the trajectory that belong to cluster 1. set to 0 initially
clust2_cnt = 0 #### number of points along the trajectory that belong to cluster 2. set to 0 initially
#### In order to group trajectories, check that at every z, at least one point of the pathway lies within that group and not in any other defined groups
for z in bins_z:
countz = 0
clust1_cnt_z = 0 #### Initialize this to 0 but if a point along the trajectory lies within clust1, then increase this value
clust2_cnt_z = 0 #### Do the same as the above comment but for clust2
##### Run through the points of each trajectory (which have been assigned to grids already) and determine whether that point belongs to clust1 or clust2
for j in range(len(pathway_grids[i])):
f = int(pathway_grids[i][j])
if Grid_z[f-1]==z and pathway_density[i][j]>=density_cutoff: #### Only use this point if the density is > density_cutoff and the z lies within the correct location
countz += 1
###### Depending on if the point belongs to a particular cutoff, increase the counter
if Grid_az[f]<cluster_cutoff:
clust1_cnt_z+=1
else:
clust2_cnt_z+=1
if countz !=0:
count+=1
if clust1_cnt_z !=0 and clust2_cnt_z == 0: # If at this z, all classified points are cluster1 and not clust2, increase this count
clust1_cnt+=1
elif clust2_cnt_z !=0 and clust1_cnt_z == 0:
clust2_cnt+=1
##### If at every z, at least one trajectory point is within a particular cluster then cluster the trajectory
if count>=count_cutoff:
it+=1
allowed_trajectories[it] = transitions[i]
if clust1_cnt>=count_cutoff:
it_c1 +=1
clust1_traj[it_c1] = transitions[i]
clust1_grid_traj[it_c1] = pathway_grids[i]
elif clust2_cnt>=count_cutoff:
it_c2 +=1
clust2_traj[it_c2] = transitions[i]
clust2_grid_traj[it_c2] = pathway_grids[i]
###############################################################
##### Saving allowed trajectories
##############################################
f = open(paths_file, 'w')
f.write('')
f.close()
for i in range(len(allowed_trajectories)):
f=open(paths_file,'a+',newline='')
writer=csv.writer(f,delimiter=' ',quotechar='|', quoting=csv.QUOTE_MINIMAL)
writer.writerow(allowed_trajectories[i])
f.close()
f = open(paths_file_c1,'w')
f.write('')
f.close()
for i in range(len(clust1_traj)):
f=open(paths_file_c1,'a+',newline='')
writer=csv.writer(f,delimiter=' ',quotechar='|', quoting=csv.QUOTE_MINIMAL)
writer.writerow(clust1_traj[i])
f.close()
f = open(paths_file_c2,'w')
f.write('')
f.close()
for i in range(len(clust2_traj)):
f=open(paths_file_c2,'a+',newline='')
writer=csv.writer(f,delimiter=' ',quotechar='|', quoting=csv.QUOTE_MINIMAL)
writer.writerow(clust2_traj[i])
f.close()
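# Hypothetical call sketch (file names and cutoff values are placeholders, not
# taken from the original): reload previously assigned paths and cluster them.
#
#   pathway_grids, pathway_density = open_paths('paths.csv', 'paths_density.csv')
#   cluster_trajectories(transitions, bins_z, pathway_grids, pathway_density,
#                        count_cutoff=5, density_cutoff=0.01, cluster_cutoff=180.0,
#                        paths_file='allowed.csv', paths_file_c1='cluster1.csv',
#                        paths_file_c2='cluster2.csv', Grid_z=Grid_z,
#                        Grid_az=Grid_az, Grid_density=Grid_density)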
|
from rest_framework import viewsets
from rest_framework.permissions import AllowAny
from resume.apps.resumes.models import Resume
from .serializers import ResumeSerializer
class ResumeViewSet(viewsets.ReadOnlyModelViewSet):
queryset = Resume.objects.all()
serializer_class = ResumeSerializer
permission_classes = (AllowAny,)
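# Hypothetical urls.py sketch (not part of this module): the read-only viewset
# would normally be exposed through a DRF router, for example:
#
#   from rest_framework.routers import DefaultRouter
#   router = DefaultRouter()
#   router.register(r'resumes', ResumeViewSet)
#   urlpatterns = router.urls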
|
#!/usr/bin/env python
#coding:utf8
import sys,os,re
import subprocess
import optparse
import pexpect
reload(sys)
sys.setdefaultencoding('utf-8')
class check_mysql_mongo_process:
def __init__(self,node_name):
self.node_name = node_name
self.server_dir = "/data/%s/server"% self.node_name
self.server_process_count = os.popen("ps aux|grep %s/server|grep -v grep|wc -l"%(self.server_dir)).read().strip()
self.mongo_process_count = os.popen("ps aux|grep %s|grep mongo|grep -v grep|wc -l"%(self.node_name)).read().strip()
self.mysql_process_count = os.popen("ps aux|grep mysql|grep -v grep|wc -l").read().strip()
self.mongo_dir = "/data/master_mongodb/%s"% self.node_name
self.mysql_dir = "/data/mysql/3306/"
self.pwd = os.popen("cat /root/.pw").read().strip()
if not os.path.exists("%s"% self.server_dir):
print '\033[1;31;40m[%s]不存在此游戏服\033[0m'% self.node_name
sys.exit(1)
if int(self.server_process_count) > 3:
print '\033[1;31;40m[%s]游戏进程已经开启\033[0m'% self.node_name
sys.exit(2)
else:
if os.path.exists("%s/daemon.pid"% self.server_dir):
os.remove("%s/daemon.pid")% self.server_dir
def start_mongo_process(self):
if int(self.mongo_process_count) - 1 == 0:
if os.path.exists("%s/daemon.pid"% self.mongo_dir):
os.remove("%s/pid_mongod.pid")% self.mongo_dir
res_mongo = subprocess.call("sh %s/mongodb_start_master.sh"% self.mongo_dir,shell=True)
if int(res_mongo) == 0:
print '\033[1;32;40m[%s]mongo开启成功\033[0m'% self.node_name
else:
print '\033[1;31;40m[%s]mongo开启失败\033[0m'% self.node_name
sys.exit(2)
else:
print '\033[1;32;40m[%s]mongo进程已经开启\033[0m'% self.node_name
def start_mysql_process(self):
if int(self.mysql_process_count) - 1 == 0:
if os.path.exists("%s/daemon.pid"% self.mysql_dir):
os.remove("%s/mysql-3306.pid")% self.mysql_dir
res_mysql = subprocess.call("sh %s/start_mysql.sh"% self.mysql_dir,shell=True)
if int(res_mysql) == 0:
print '\033[1;32;40m[%s]mysql开启成功\033[0m'% self.node_name
else:
print '\033[1;31;40m[%s]mysql开启失败\033[0m'% self.node_name
sys.exit(2)
else:
print '\033[1;32;40m[%s]mysql进程已经开启\033[0m'% self.node_name
def stop_mongo_process(self):
if int(self.mongo_process_count) - 1 == 0:
print '\033[1;32;40m[%s]mongo进程已经关闭\033[0m'% self.node_name
else:
if os.path.exists("%s/daemon.pid"% self.mongo_dir):
os.remove("%s/pid_mongod.pid")% self.mongo_dir
res_mongo = subprocess.call("sh %s/close_mongodb_master_by_pidfile.sh"% self.mongo_dir,shell=True)
if int(res_mongo) == 0:
print '\033[1;32;40m[%s]mongo关闭成功\033[0m'% self.node_name
else:
print '\033[1;31;40m[%s]mongo关闭失败\033[0m'% self.node_name
sys.exit(3)
def stop_mysql_process(self,mypassword):
if int(self.mysql_process_count) - 1 == 0:
print '\033[1;32;40m[%s]mysql进程已经关闭\033[0m'% self.node_name
else:
if os.path.exists("%s/daemon.pid"% self.mysql_dir):
os.remove("%s/mysql-3306.pid")% self.mysql_dir
try:
res_mysql = pexpect.spawn("sh %s/close_mysql.sh"% self.mysql_dir)
res_mysql.expect('Enter password:')
res_mysql.sendline(mypassword)
#time.sleep(10)
res_mysql.interact()
print '\033[1;32;40m[%s]mysql关闭成功\033[0m'% self.node_name
except pexpect.EOF:
res_mysql.close()
print '\033[1;31;40m[%s]mysql关闭失败\033[0m'% self.node_name
if __name__ == "__main__":
active_list = ['start','stop']
db_name_list = ['mysql','mongo']
usage = '''./%prog -n node_name -a start|stop -d mysql|mongo
'''
parser = optparse.OptionParser(
usage = usage,
)
parser.add_option(
"-n", "--node_name",
dest="node_name",
help="服名NODE_NAME")
parser.add_option(
"-a", "--action",
dest="active",
choices=active_list,
type="choice",
help="停止or启动对应服mongo或mysql进程")
parser.add_option(
"-d", "--db_name",
dest="db_name",
choices=db_name_list,
type="choice",
help="mongo or mysql")
(options, args) = parser.parse_args()
err_msg = '参数不对,请输--help查看详细说明!'
if options.node_name and options.active and options.db_name:
check = check_mysql_mongo_process(options.node_name)
if options.active == "start":
if options.db_name == "mysql":
check.start_mysql_process()
elif options.db_name == "mongo":
check.start_mongo_process()
elif options.active == "stop":
if options.db_name == "mysql":
password = check.pwd
check.stop_mysql_process(password)
elif options.db_name == "mongo":
check.stop_mongo_process()
else:
parser.error(err_msg)
else:
parser.error(err_msg)
|
#!/usr/bin/env python
import cv2
import serial # import the serial-port module (pyserial)
import threading
STRGLO="" #读取的数据
BOOL=True #读取标志位
#读数代码本体实现
def ReadData(ser):
global STRGLO,BOOL
# receive data in a loop; this is an infinite loop, so it can be run in a thread
if ser.in_waiting:
STRGLO = ser.read(ser.in_waiting).decode("ascii")
print(STRGLO)
# open the serial port
# port: e.g. /dev/ttyUSB0 on GNU/Linux, or COM3 on Windows
# baud rate, one of the standard values: 50, 75, 110, 134, 150, 200, 300, 600, 1200, 1800, 2400, 4800, 9600, 19200, 38400, 57600, 115200
# timeout: None waits forever, 0 returns immediately, any other value is the wait time in seconds
def DOpenPort(portx,bps,timeout):
ret=False
try:
# open the serial port and get the serial object
ser = serial.Serial(portx, bps, timeout=timeout)
# check whether it opened successfully
if(ser.is_open):
ret=True
threading.Thread(target=ReadData, args=(ser,)).start()
except Exception as e:
print("---异常---:", e)
return ser,ret
# close the serial port
def DColsePort(ser):
global BOOL
BOOL=False
ser.close()
# write data
def DWritePort(ser,text):
result = ser.write(text.encode("utf8")) # write the data to the port
return result
# read data
def DReadPort():
global STRGLO
data = STRGLO
STRGLO = "" # clear the stored data after this read
return data
if __name__=="__main__":
ser,ret=DOpenPort("/dev/ttyACM0",115200,None)
if ret: # check whether the serial port opened successfully
count=DWritePort(ser,"i'm ll")
print("写入字节数:",count)
DReadPort() # read the serial data
#DColsePort(ser) # close the serial port
# create a new cam object
cap = cv2.VideoCapture(0)
# initialize the face recognizer (default face haar cascade)
face_cascade = cv2.CascadeClassifier("/usr/share/OpenCV/haarcascades/haarcascade_frontalface_alt2.xml")
while True:
# read the image from the cam
_, image = cap.read()
# converting to grayscale
image_gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
# detect all the faces in the image
faces = face_cascade.detectMultiScale(image_gray, 1.3, 5)
# for every face, draw a blue rectangle
img_width = image_gray.shape[1]  # frame width in pixels (not used below)
for x, y, width, height in faces:
cv2.rectangle(image, (x, y), (x + width, y + height), color=(255, 0, 0), thickness=2)
cv2.imshow("image", image)
if cv2.waitKey(1) == ord("q"):
break
cap.release()
cv2.destroyAllWindows()
|
# Posts Module
from django.db import models
from django.contrib.auth.models import User
from django.conf import settings
# Create your models here.
class Post(models.Model):
title = models.CharField(max_length=255)
slug = models.SlugField(max_length=50)
excerpt = models.TextField()
content = models.TextField()
published = models.DateTimeField('Date published')
updated = models.DateTimeField('Date updated', auto_now=True)
author = models.ForeignKey(User)
def __unicode__(self):
return self.title
class Meta:
db_table = 'juice_posts_post'
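# Hypothetical usage sketch (not from the original; `some_user` is a placeholder):
# creating a post for an existing user, supplying `published` explicitly since it
# has no default.
#
#   from django.utils import timezone
#   Post.objects.create(title="Hello", slug="hello", excerpt="Short intro",
#                       content="Full text", published=timezone.now(), author=some_user)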
|
POWER_SLOT_MED = 13
POWER_SLOT_HIGH = 12
POWER_SLOT_LOW = 11
POWER_SLOT_RIG = 2663
POWER_SLOT_SUBSYSTEM = 3772
|
#-*-coding:utf-8-*-
class Solution:
# @return a tuple, (index1, index2)
def twoSum(self, num, target):
d = {}
index = 1
for i in num:
d.update({i:index})
index+= 1
index = 0
for i in num:
index += 1
if (target - i) in d and d[target-i] != index:
return (index,d[target-i])
if __name__=='__main__':
s = Solution()
print s.twoSum([3,2,4],6)
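# A one-pass variant (sketch, not part of the original): store each value's
# 1-based index as it is seen and look up the complement before inserting.
#
#   def two_sum_one_pass(num, target):
#       seen = {}
#       for i, n in enumerate(num, 1):
#           if target - n in seen:
#               return (seen[target - n], i)
#           seen[n] = i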
|
# -*- coding: utf-8 -*-
from numpy import *
from matplotlib import pyplot
from ospa import ospa_distance
import tkFileDialog
import tables
from RangeBearingMeasurementModel import *
import cProfile
import pstats
import os
import cPickle
import fnmatch
from plot_results import plot_errors
def compute_error_k(logfilename,true_pose,true_map):
f = open(logfilename,'r')
est_pose = fromstring(f.readline(),sep=' ')[0:3]
est_map = fromstring(f.readline(),sep=' ')
log_particle_weights = fromstring( f.readline(), sep=' ' )
map_weights = est_map[0::7]
map_x = est_map[1::7]
map_y = est_map[2::7]
map_means = hstack((map_x[:,newaxis],map_y[:,newaxis]))
if map_means.size > 0:
w_sum = round(sum(map_weights))
sort_idx = argsort(map_weights)[::-1]
map_means = map_means[sort_idx[:w_sum],:]
pose_err = sqrt(sum( (true_pose[:2] - est_pose[:2])**2))
ospa_tuple = ospa_distance( true_map, map_means, p=1, c=5 )
ospa_err = ospa_tuple[0]
ospa_loc = ospa_tuple[1]
ospa_cn = ospa_tuple[2]
n_eff = 1/sum(exp(log_particle_weights)**2)
f.close()
return (pose_err,ospa_err,ospa_loc,ospa_cn,n_eff)
def compute_pose_error(true_pose,est_pose):
return sqrt(sum( (true_pose[:2] - est_pose[:2])**2))
def compute_error(basedir,ground_truth):
n_steps = ground_truth['n_steps']
true_maps = ground_truth['true_maps']
true_traj = ground_truth['true_traj']
data_dirs = []
for (dirpath,dirnames,filenames) in os.walk(basedir):
n_files = len(filenames)
pattern = ['state_estimate*.log']*n_files
if any(map(fnmatch.fnmatch,filenames,pattern)):
data_dirs.append(dirpath)
n_runs = len(data_dirs)
pose_err = zeros([n_steps,n_runs])
map_err = zeros([n_steps,n_runs])
loc_err = zeros([n_steps,n_runs])
cn_err = zeros([n_steps,n_runs])
n_eff = zeros([n_steps,n_runs])
print 'n_steps = %d\tn_runs = %d' % (n_steps,n_runs)
for k in xrange(n_steps):
file_list = []
filename_k = 'state_estimate{:05d}.log'.format(k)
file_list = map(os.path.join,data_dirs,[filename_k]*n_runs)
print(k)
# poses_k = empty((n_runs,3))
# maps_k = []
true_pose_list = [true_traj[:,k]]*n_runs
true_map_list = [true_maps[k].transpose()]*n_runs
results = map(compute_error_k,file_list,true_pose_list,true_map_list)
(pose_err_tmp,map_err_tmp,loc_err_tmp,cn_err_tmp,n_eff_tmp) = zip(*results)
pose_err[k,:] = asarray(pose_err_tmp)
map_err[k,:] = asarray(map_err_tmp)
loc_err[k,:] = asarray(loc_err_tmp)
cn_err[k,:] = asarray(cn_err_tmp)
n_eff[k,:] = asarray(n_eff_tmp)
# p_list = [1]*n_runs
# c_list = [20]*n_runs
# for i in xrange(n_runs):
# f = open(file_list[i],'r')
# pose_estimate = fromstring( f.readline(), sep=' ' )[0:3]
# map_estimate = fromstring( f.readline(), sep=' ' )
# map_weights = map_estimate[0::7]
# map_x = map_estimate[1::7]
# map_y = map_estimate[2::7]
# map_means = hstack((map_x[:,newaxis],map_y[:,newaxis]))
# if map_means.size > 0:
# w_sum = round(sum(map_weights))
# sort_idx = argsort(map_weights)[::-1]
# map_means = map_means[sort_idx[:w_sum],:]
# poses_k[i,:] = pose_estimate
# maps_k.append(map_means)
## pose_err[k] += sqrt(sum( (true_traj[:2,i] - pose_estimate[:2])**2))/n_runs
# map_err[k] += ospa_distance( true_maps[k].transpose(), map_means, p=1, c=20 )/n_runs
# pose_err[k,:] = asarray(map( compute_pose_error, true_pose_list, poses_k ) )
# map_err[k,:] = asarray(map( ospa_distance, true_map_list, maps_k, p_list, c_list ) )
return (pose_err,map_err,loc_err,cn_err,n_eff)
if __name__ == "__main__":
print('loading mat-file...')
file = tables.openFile('groundtruth.mat')
landmarks = file.root.staticMap[:].transpose()
trajectory = file.root.traj[:].transpose()
file.close()
print('computing ground truth for each time step')
sensor_params = {
'max_range':10,
'max_bearing':pi,
'std_range':1.0,
'std_bearing':0.0349,
'pd':0.95,
'clutter_rate':20}
# n_steps = trajectory.shape[1] ;
n_steps = 1135
n_landmarks = landmarks.shape[1]
true_maps = []
observed = zeros(n_landmarks,dtype=bool)
measurement_model = RangeBearingMeasurementModel(sensor_params)
for k in xrange(n_steps):
# check if we have already seen everything
if all( observed ):
features = landmarks
else:
pose = trajectory[:,k]
in_range = measurement_model.check_in_range(pose,landmarks)
observed = logical_or(observed,in_range)
features = landmarks[:,observed]
true_maps.append(features)
ground_truth = {'n_steps':n_steps,'true_maps':true_maps,'true_traj':trajectory}
basedir = tkFileDialog.askdirectory()
if len(basedir) == 0:
print 'user cancelled selection'
exit()
print basedir
# basedir = '/home/cheesinglee/workspace/cuda-PHDSLAM/batch_results/ackerman3_1cluster/'
cProfile.run('results=compute_error(basedir,ground_truth)','compute_error_prof')
stats = pstats.Stats('compute_error_prof')
stats.sort_stats('cumulative').print_stats()
results_file = open('results.pk2','wb')
cPickle.dump(results,results_file,cPickle.HIGHEST_PROTOCOL)
results_file.close()
plot_errors(['results.pk2'])
|
import os
import nltk
import math
import pandas as pd
import numpy as np
import string
import re
from nltk.corpus import stopwords
from nltk.stem import PorterStemmer
from collections import Counter
from sklearn.metrics import accuracy_score
data = 'D:/vsm+knn/data/20news-18828'
data_train = 'D:/vsm+knn/data/data_train'
data_test = 'D:/vsm+knn/data/data_test'
_dictionary = 'D:/vsm+knn/data/dictionary.csv'
def pre(input):
# read the data
raw_data = []
sort = []
num = 0
for file1 in os.listdir(input):
path1 = os.path.join(input, file1)
num += 1
for file2 in os.listdir(path1):
path2 = os.path.join(path1, file2)
sort.append(num)
with open(path2, encoding='latin-1') as file:
document = file.read()
raw_data.append(document)
# preprocess the data
new_data = []
for doc in raw_data:
delpunctuation = re.compile('[%s]' % re.escape(string.punctuation))
doc = delpunctuation.sub("", doc)
lowers = str(doc).lower()
tokens = nltk.word_tokenize(lowers)
stemmer = PorterStemmer()
stoplist = stopwords.words('english')
words = []
for word in tokens:
if word not in stoplist:
words.append(stemmer.stem(word))
new_data.append(words)
return new_data, sort
def NBC(train_data, train_label, test_data, test_label):
dictionary = np.array(pd.read_csv(_dictionary, sep=" ", header=None)).reshape(1, -1)[0]
kindcount = []
allcount = []
kindnum = []
kindcounte = []
Accuracy = []
for i in range(len(train_data)):
doc = list(filter(lambda word: word in dictionary, train_data[i]))
if int(train_label[i]) - 1 < len(kindcount):  # class labels are 1-based; shift to 0-based list indices
kindcount[int(train_label[i]) - 1] += doc
kindnum[int(train_label[i]) - 1] += 1
else:
kindcount.append(doc)
kindnum.append(1)
allcount += doc
allcounter = Counter(allcount)
for kind in range(20):
kindcounte.append(Counter(kindcount[kind]))
for i in range(len(test_data)):
Acc = []
for kind in range(20):
Pxy = 0
Px = 0
kindcounter = kindcounte[kind]
for word in test_data[i]:
if word in dictionary:
P0 = math.log((kindcounter[word] + 1) / (len(kindcount[kind]) + len(dictionary)))
Pxy += P0
P1 = math.log((allcounter[word] + 1) / (len(allcount) + len(dictionary)))
Px += P1
Py = math.log(kindnum[kind] / 18828.0)
Pyx = Pxy + Py - Px
Acc.append([kind, Pyx])
Acc = sorted(Acc, key=lambda item: -item[1])
Accuracy.append(Acc[0][0] + 1)  # shift back to the 1-based class labels produced by pre()
print(" Accuracy:\t", accuracy_score(test_label, Accuracy))
if __name__ == '__main__':
train_data, train_label = pre(data_train)
test_data, test_label = pre(data_test)
NBC(train_data, train_label, test_data, test_label)
|
import os
from TimeLogger import time_logger
SCORES_FILE_NAME = "Scores.txt"
BAD_RETURN_CODE = -1
DEFAULT_WEB_PAGE = "http://127.0.0.1:5000/"
DEFAULT_CHROME_PATH = "c:/Selenium/chromedriver.exe"
@time_logger
def screen_cleaner():
"""
Try to clear the screen. The relevant command depends on the OS; if it fails, it is not a big deal.
"""
try:
if os.name == 'nt':
_ = os.system("cls")
else:
_ = os.system("clear")
finally:
pass
|
import sqlite3
connection = sqlite3.connect("Estoque.db")
cursor = connection.cursor()
create_table = "CREATE TABLE IF NOT EXISTS Estoque (codigo INTEGER PRIMARY KEY AUTOINCREMENT, produto varchar(30), valor decimal (5,2) not null, qtd int not null)"
cursor.execute(create_table)
tables = cursor.fetchall()
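# Hypothetical follow-up sketch (not in the original): inserting a row,
# committing the change, and reading the table back.
#
#   cursor.execute("INSERT INTO Estoque (produto, valor, qtd) VALUES (?, ?, ?)",
#                  ("Caneta", 2.50, 100))
#   connection.commit()
#   print(cursor.execute("SELECT * FROM Estoque").fetchall())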
|
# Generated by Django 1.9.5 on 2016-11-04 18:00
from django.db import migrations, models
import django.utils.timezone
def move_dates(apps, schema_editor):
"""Move dates to models."""
Domain = apps.get_model("admin", "Domain")
DomainAlias = apps.get_model("admin", "DomainAlias")
Mailbox = apps.get_model("admin", "Mailbox")
Alias = apps.get_model("admin", "Alias")
for model in [Domain, DomainAlias, Mailbox, Alias]:
for instance in model.objects.all():
instance.creation = instance.dates.creation
instance.last_modification = instance.dates.last_modification
instance.save()
class Migration(migrations.Migration):
dependencies = [
('admin', '0005_auto_20161026_1003'),
]
operations = [
migrations.AddField(
model_name='alias',
name='creation',
field=models.DateTimeField(default=django.utils.timezone.now),
),
migrations.AddField(
model_name='alias',
name='last_modification',
field=models.DateTimeField(null=True),
),
migrations.AddField(
model_name='domain',
name='creation',
field=models.DateTimeField(default=django.utils.timezone.now),
),
migrations.AddField(
model_name='domain',
name='last_modification',
field=models.DateTimeField(null=True),
),
migrations.AddField(
model_name='domainalias',
name='creation',
field=models.DateTimeField(default=django.utils.timezone.now),
),
migrations.AddField(
model_name='domainalias',
name='last_modification',
field=models.DateTimeField(null=True),
),
migrations.AddField(
model_name='mailbox',
name='creation',
field=models.DateTimeField(default=django.utils.timezone.now),
),
migrations.AddField(
model_name='mailbox',
name='last_modification',
field=models.DateTimeField(null=True),
),
migrations.RunPython(move_dates)
]
|
SIMULATE_ON_BOAT = False #use serial port from the controller
SERIAL_PORT = "COM6"
SERIAL_BAUD = 115200
SERIAL_TIMEOUT = 5
SIM_MOVE_INTERVAL = 1
SIM_PRINT_INTERVAL = 10
SIM_MAX_SPEED = 2000000.0
#SIM_RUDDER_RESPONSE = 0.00005
SIM_RUDDER_RESPONSE = 0.0001
TACKMODE_DIRECTLY = 0
TACKMODE_ADJ_POS = 1
TACKMODE_ADJ_NEG = 2
TACKMODE_MAXDEV_POS = 3
TACKMODE_MAXDEV_NEG = 4
MENU = 40
W = 1500.0
H = 750.0
MAXW = 10*W
MAXH = 10*H
BOAT_LENGTH = 50
BOAT_BOW = 20
BOAT_WIDTH = 10
RUDDER_LENGTH = 20
SAIL_LENGTH = 40
FLAP_LENGTH = 20
DOT_RADIUS = 2
WIND_COMPASS = 100
HEADING_ARROW = 100
HEADING_OK_LIMIT = 10.0
RUDDER_RESPONSE = 0.00005
RUDDER_COEFF = 40.0
RUDDER_MIN_ANGLE = -30
RUDDER_MAX_ANGLE = 35
tackmode_str = ['TACKMODE_DIRECTLY', 'TACKMODE_ADJ_POS', 'TACKMODE_ADJ_NEG', 'TACKMODE_MAXDEV_POS', 'TACKMODE_MAXDEV_NEG']
FLAP_NORMAL = 10.0
FLAP_MAX = 15.0
TACK_SAIL_CRITICAL_ANGLE = FLAP_MAX/2
FLAP_ITERATION = 0.1
R_MEAN = 6371000.0
MAXDEV = 500000
MAXDEV_OK_FACTOR = 0.75
MAXDEV_OK = MAXDEV * MAXDEV_OK_FACTOR
WAYPOINT0 = 46.913520, -52.998886
WAYPOINT1 = 48.0, -13.0
tackmode = TACKMODE_DIRECTLY
gps_lat, gps_lng = WAYPOINT0
gps_lat_prev, gps_lng_prev = gps_lat, gps_lng
trueHeading = 45.0
MAX_SPEED = 2000000.0
GHOST_DISTANCE = 500000.0
trueWind = 0.0
dt = 0.001
dt_refresh = 4*dt
ghostPoint = 0.0, 0.0
closestPoint = 0.0, 0.0
ghostHeading = 0.0
ghostHeading_instant = 0.0
goHeading = 0.0
error = 0.0
sail_angle = 0.0
crossTrack = 0.0
pathAngle = 0.0
rudder = 0.0
flap = FLAP_NORMAL
flap_final = flap
x_prev = 0.0
y_prev = 0.0
speed = 0.0
paused = False
ghostHeading_initialized = False
sim_active = False
adjAngle1 = 0.0 #global only for debugging
adjAngle2 = 0.0 #global only for debugging
c_boat, c_sail, c_rudder, c_flap = None, None, None, None
c_compass1, c_compass2, c_compass3, c_compass4, \
c_closestPoint, c_ghostPoint, c_ghostHeading, c_windDir, c_goHeading, c_adjHeading1, c_adjHeading2 \
= None, None, None, None, None, None, None, None, None, None, None
ser = None
startchar = ''
sim_nextWaypoint = -1
mux = -1
isBehindPath = 0
magDec = 0.0
|
# Generated by Django 3.2.4 on 2021-07-02 07:30
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('database', '0007_company_exchange'),
]
operations = [
migrations.AlterField(
model_name='company',
name='exchange',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='companies', to='database.stockexchange'),
),
migrations.AddConstraint(
model_name='company',
constraint=models.UniqueConstraint(fields=('code_vs', 'name'), name='unique_company'),
),
]
|
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.naive_bayes import MultinomialNB
from nltk.corpus import stopwords
from nltk.stem import RSLPStemmer
import speech_recognition as sr
from gtts import gTTS
import random
import string
import numpy
import nltk
import sys
import os
class File:
def __init__(self):
self.keywords = ['meteorologia', 'filha', 'casa', 'lojas de roupa', 'centro comercial', 'filho', 'compras', 'comida', 'jantar', 'andar', 'irmão', 'netos', 'supermercado']
self.examples = []
file = open('newtextfile.txt', 'r')
groups = []
group = False
try:
line = file.readline()
while line != '': # The EOF char is an empty string
if line[0].isdigit():
group = Group()
groups.append(group)
else:
if group:
if line[0] == 'H':
group.human.append(line[2:-1])
if line[0] == 'R':
group.robot.append(line[2:-1])
line = file.readline()
for g in groups:
self.randomize(g)
self.randomize(g)
finally:
file.close()
def randomize(self, group):
for i in range(0, len(group.human)):
self.examples.append(group.human[i])
self.examples.append(random.choice(group.robot))
class Group:
def __init__(self):
self.human = []
self.robot = []
class Examples:
def __init__(self, phrases):
self.phrases = phrases
self.examples = self.populate()
def populate(self):
examples = []
for i in range(0, len(self.phrases)):
id_phrase = self.check_phrase(self.phrases[i], i)
examples.append(Example(self.phrases[i], id_phrase + 1))
return examples
def check_phrase(self, phrase, id_phrase):
for j in range(0, len(self.phrases)):
if id_phrase != j:
if self.phrases[j] == phrase:
if id_phrase > j:
return j
else:
return id_phrase
return id_phrase
def get_ids(self):
aux = []
for e in self.examples:
aux.append(e.id)
return aux
def search_for_phrase(self, lsa, classifier, phrase, keywords):
lsa_result = lsa.process_phrase(phrase, lsa.manage_keywords(keywords))
phrase_id = classifier.predict(LSA.normalizer(lsa_result))
for e in self.examples:
if e.id == phrase_id:
return e.phrase
class Example:
def __init__(self, phrase, number):
self.id = number
self.phrase = phrase
class LSA:
def __init__(self, ngram_max, min_freq, p_eig, phrases):
self.ngram_max = ngram_max
self.min_freq = min_freq
self.p_eig = p_eig
self.ngram_min = 1
self.stopwords = stopwords.words("portuguese")
self.stopwords.append('é')
self.phrases = phrases
self.u = []
self.features_utterance = self.get_features_utterance()
self.tfidf = []
@staticmethod
def normalizer(x_abnormal):
minimum = x_abnormal.min()
maximum = x_abnormal.max()
if minimum == maximum:
return x_abnormal
else:
x_new = (x_abnormal - minimum) / (maximum - minimum)
return x_new
def tokenize(self, t):
if t in self.stopwords:
return []
sentence = t.lower()
sentence = nltk.tokenize.word_tokenize(sentence)
aux = []
for word in sentence:
if word not in self.stopwords and word not in string.punctuation:
aux.append(RSLPStemmer().stem(word.lower()))
phrase = []
for word in aux:
phrase.append(word)
return phrase
def manage_keywords(self, keywords):
tokens, vocabulary = [], []
for i in keywords:
t = self.tokenize(i)
if len(t) > 1:
key_str = ''
for j in t:
key_str = key_str + ' ' + j
tokens.append(key_str[1:])
else:
tokens.extend(t)
for i in tokens:
repeat = False
for v in vocabulary:
if i == v:
repeat = True
break
if not repeat:
vocabulary.append(i)
return vocabulary
def get_features_utterance(self):
vec = TfidfVectorizer(min_df=self.min_freq,
stop_words=self.stopwords,
tokenizer=self.tokenize,
ngram_range=(self.ngram_min, self.ngram_max))
vec.fit_transform(self.phrases)
return vec.get_feature_names()
def tf_idf(self, phrase, keywords):
examples = []
examples.extend(self.phrases)
if phrase:
examples.append(phrase)
vec = TfidfVectorizer(stop_words=self.stopwords,
vocabulary=keywords,
tokenizer=self.tokenize,
ngram_range=(self.ngram_min, self.ngram_max))
x = vec.fit_transform(examples)
return x.todense()
def eliminate_dimensions(self):
res = 0
u, eigen, v = numpy.linalg.svd(self.tfidf, compute_uv=True)
normalized_eigenvalues = eigen / numpy.sum(eigen)
eigenvalues = numpy.diag(eigen)
for i in range(0, len(eigen)):
res += normalized_eigenvalues[i]
if res >= self.p_eig:
k = i+1
x = numpy.matrix.dot(numpy.matrix.dot(u[-1, 0:k], eigenvalues[0:k, 0:k]), v[0:k, :])
return x
def process_phrase(self, phrase, keywords):
tfidf_utterance = numpy.array(self.tf_idf(phrase, self.features_utterance))
tfidf_keywords = numpy.array(self.tf_idf(phrase, keywords))
self.tfidf = numpy.empty([len(tfidf_utterance), len(tfidf_utterance[0]) + len(tfidf_keywords[0])])
self.tfidf = numpy.concatenate([tfidf_utterance, tfidf_keywords], axis=1)
x = numpy.round(self.eliminate_dimensions(), 10)
return x
def process_examples(self, keywords):
lsa = []
for phrase in self.phrases:
lsa.append(self.process_phrase(phrase, keywords).tolist())
return lsa
class NaivesClassifier:
def __init__(self, alpha):
self.classifier = MultinomialNB(alpha=alpha)
def train(self, x_train, y_train):
x_naive = numpy.empty(x_train.shape)
for i in range(0, len(x_train)):
x_naive[i] = LSA.normalizer(x_train[i])
self.classifier.fit(x_naive, y_train)
def predict(self, value):
aux = self.classifier.predict(numpy.reshape(value, (1, len(value))))
return aux
def test_score(self, x_test, y_test, type_):
aux = self.classifier.score(x_test, y_test)*100
print("Performance score of the " + type_ + " set is", numpy.round(aux, 2))
return aux
class SpeakWithTheRobot:
def __init__(self, human_lsa, naives, human_keywords, robot_vectors):
self.slow = False
self.device_id = 0
self.lang = 'pt-pt'
self.naives = naives
self.chunk_size = 2048
self.r = sr.Recognizer()
self.sample_rate = 48000
self.human_lsa = human_lsa
self.robot_vectors = robot_vectors
self.human_keywords = human_keywords
def hear(self):
with sr.Microphone(device_index=self.device_id, sample_rate=self.sample_rate, chunk_size=self.chunk_size) as source:
self.r.adjust_for_ambient_noise(source)
print("Say Something")
audio = self.r.listen(source)
try:
text = self.r.recognize_google(audio, language="pt-PT")
print("you said: " + text)
return text
except sr.UnknownValueError:
print("Google Speech Recognition could not understand audio")
except sr.RequestError as e:
print("Could not request results from Google Speech Recognition service;{0}".format(e))
def speak(self, phrase):
tts = gTTS(text=phrase, lang=self.lang, slow=self.slow)
tts.save("soundfile.mp3")
os.system("mpg123 soundfile.mp3")
return
def speaking_to_the_robot(self):
while True:
print("Press a character")
c = sys.stdin.read(2)
if c[0] == 's':
self.speak(self.robot_vectors.search_for_phrase(self.human_lsa, self.naives, self.hear(),
self.human_keywords))
elif c[0] == 't':
print(self.robot_vectors.search_for_phrase(self.human_lsa, self.naives, "Olá", self.human_keywords))
print(self.robot_vectors.search_for_phrase(self.human_lsa, self.naives, "Como está tudo a andar?",
self.human_keywords))
print(self.robot_vectors.search_for_phrase(self.human_lsa, self.naives, "Comigo está tudo fantástico.",
self.human_keywords))
print(self.robot_vectors.search_for_phrase(self.human_lsa, self.naives, "Adoro o café",
self.human_keywords))
print(self.robot_vectors.search_for_phrase(self.human_lsa, self.naives, "Qual é o tempo para hoje?",
self.human_keywords))
print(self.robot_vectors.search_for_phrase(self.human_lsa, self.naives, "Vou à casa da minha filha.",
self.human_keywords))
print(self.robot_vectors.search_for_phrase(self.human_lsa, self.naives, "Vou ao supermercado à tarde.",
self.human_keywords))
print(self.robot_vectors.search_for_phrase(self.human_lsa, self.naives, "Quero dar uma volta depois do almoço.",
self.human_keywords))
print(self.robot_vectors.search_for_phrase(self.human_lsa, self.naives, "Gosto de chá",
self.human_keywords))
elif c[0] == 'q':
break
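# Hypothetical wiring sketch (not part of the original; the hyperparameters are
# illustrative only): one plausible way the classes above fit together.
#
#   file_data = File()
#   examples = Examples(file_data.examples)
#   lsa = LSA(ngram_max=2, min_freq=1, p_eig=0.5, phrases=file_data.examples)
#   keywords = lsa.manage_keywords(file_data.keywords)
#   x_train = numpy.array(lsa.process_examples(keywords))
#   classifier = NaivesClassifier(alpha=0.1)
#   classifier.train(x_train, numpy.array(examples.get_ids()))
#   robot = SpeakWithTheRobot(lsa, classifier, keywords, examples)
#   robot.speaking_to_the_robot()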
|
"""Construct word map graphs for each sheet in page_data_dictionary."""
from operator import itemgetter
import xmlColumnOperator
import xmlWordOperators
import numpy as np
class xmlPageOperator(object):
def __init__(self, i, year, page_data, file_path, xml_column_chart_center, xml_column_chart_thirds):
self.page_data = page_data
self.page_index = i
self.year = year
self.page = page_data.page
self.file_path = file_path
self.word_data = page_data.word_data
self.section_list_center = xml_column_chart_center.section_list
self.section_list_thirds = xml_column_chart_thirds.section_list
self.page_top = page_data.page_top
self.page_right = page_data.page_right
self.page_bottom = page_data.page_bottom
self.page_left = page_data.page_left
self.center = page_data.center
self.third_first = page_data.third_first
self.third_second = page_data.third_second
self.search_key_data = self.left_search_bound()
self.search_key_center = self.search_key_data[0]
self.search_key_thirds = self.search_key_data[1]
self.lines_data = self.define_lines()
self.lines_dict = self.lines_data[0]
self.words_excluded = self.lines_data[1]
self.line_data_dict = self.lines_data[2]
self.page_break_dictionary_insgesamt = self.instantiate_columns()
def left_search_bound(self):
"""Based on manual sections bounds, define key variable for company name search bounds."""
difference_list_center = sorted([[item, self.page_index - item[0]] for item in
self.section_list_center if self.page_index - item[0] > 0],
key=itemgetter(1))
difference_list_thirds = sorted([[item, self.page_index - item[0]] for item in
self.section_list_thirds if self.page_index - item[0] > 0],
key=itemgetter(1))
key_center = difference_list_center[0][0][2]
key_thirds = difference_list_thirds[0][0][2]
return (key_center, key_thirds)
def define_lines(self):
"""Define text lines (rows) using bottom bounds of words."""
word_list = sorted([[i, word[0], word[1], word[2], word[3], word[4]] for i, word
in enumerate(self.word_data) if (word[2] > self.page_left and
word[4] < self.page_right and word[1] > self.page_bottom and
word[3] < self.page_top)], key=itemgetter(4))
word_dict = {word[0]: [word[1], word[2], word[3], word[4], word[5]] for word in word_list}
if self.search_key_center == True:
operate_list = [{word[0]: [word[1], word[4]] for word in word_list if word[3] < self.center},
{word[0]: [word[1], word[4]] for word in word_list if word[3] > self.center}]
elif self.search_key_thirds == True:
operate_list = [{word[0]: [word[1], word[4]] for word in word_list if word[3] < self.third_first},
{word[0]: [word[1], word[4]] for word in word_list if (word[3] > self.third_first and word[3] < self.third_second)},
{word[0]: [word[1], word[4]] for word in word_list if word[3] > self.third_second}]
else:
operate_list = [{word[0]: [word[1], word[4]] for word in word_list}]
lines_dict = {}
line_data_dict = {self.page: []}
registered_indeces = []
index = 0
for column in operate_list:
lines_dict_component = {}
sub_list = []
column_list = list(column.items())
for i, word in enumerate(column_list[:-1]):
word_index = column_list[i][0]
bottom_coordinate = column_list[i][1][1]
next_bottom = column_list[i+1][1][1]
prev_bottom = column_list[i-1][1][1]
bottom_offset = .00478
if next_bottom - bottom_offset < bottom_coordinate < next_bottom + bottom_offset:
sub_list.append([word_index, bottom_coordinate])
registered_indeces.append(word_index)
else:
if prev_bottom - bottom_offset < bottom_coordinate < prev_bottom + bottom_offset:
sub_list.append([word_index, bottom_coordinate])
registered_indeces.append(word_index)
if len(sub_list) >= 2:
lines_dict_component.update({np.mean([item[1] for item in sub_list]):
sorted([word_dict[item[0]] for item
in sub_list], key=itemgetter(4))})
sub_list = []
for item in sub_list:
registered_indeces.remove(item[0])
lines_dict.update({index:lines_dict_component})
index += 1
words_excluded = [word for index, word in word_dict.items() if index not in registered_indeces]
line_data_dict[self.page].extend((lines_dict, words_excluded))
return (lines_dict, words_excluded, line_data_dict)
def instantiate_columns(self):
"""Loop over divided columns and operate."""
page_break_dictionary_insgesamt = {}
for index, dictionary in self.lines_dict.items():
column_data = xmlColumnOperator.xmlColumnOperator(index, dictionary, self.page, self.year,
self.file_path, self.page_data,
self.search_key_data)
if self.page in page_break_dictionary_insgesamt.keys():
page_break_dictionary_insgesamt[self.page].update(column_data.page_break_dictionary_teilweise)
if len(column_data.page_break_dictionary_teilweise) > 0 and self.page not in page_break_dictionary_insgesamt.keys():
page_break_dictionary_insgesamt.update({self.page: column_data.page_break_dictionary_teilweise})
return page_break_dictionary_insgesamt
|
from flask import Flask
from flask_sslify import SSLify
from flask_sqlalchemy import SQLAlchemy
from flask_login import LoginManager
# Initiate Flask
app = Flask(__name__)
sslify = SSLify(app)
app.config.from_object('config.HerokuConfig')
# initialize the database connection
db = SQLAlchemy(app)
login = LoginManager(app)
login.login_view = 'login'
import field_description.views
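# Hypothetical run sketch (not in this file): the app is normally started with
# `flask run` in development, or by a WSGI server in production, e.g.
# `gunicorn field_description:app`, with config.HerokuConfig supplying the
# database settings.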
|
import numpy
import scipy
import matplotlib.pyplot as plt
import numpy as np
Fs = 1e4
sample = 1e4
f1 = 1000
f2 = 2000
f3 = 3000
x = np.arange(sample)
a = np.cos(2 * np.pi * f1 * x / Fs) + 10
b = np.cos(2 * np.pi * f1 * x / Fs) + np.cos(2 * np.pi * f2 * x / Fs) + np.sin(2 * np.pi * f3 * x / Fs)
c = x**2 + np.sin(2 * np.pi * f1 * x / Fs)
# d = np.cos(np.pi * ((x*x+1)/139)) + j*np.sin(np.pi * ((x*x+1)/139))
# plt.plot(x,Cw)
# plt.show()
# plt.plot(x,Mw)
# plt.show()
# AMw = (1+Mw)*np.sin(2 * np.pi * fc * x / Fs)
plt.plot(x,a)
plt.show()
plt.plot(x,b)
plt.show()
plt.plot(x,c)
plt.show()
# plt.plot(x,d)
# plt.show()
|
# magazine/models.py
# -*- coding: UTF-8 -*-
from __future__ import unicode_literals
from django.db import models
from django.utils.translation import ugettext_lazy as _
from .app_settings import STATUS_CHOICES
class NewsArticle(models.Model):
status = models.CharField(_("Status"), max_length=20, choices=STATUS_CHOICES)
|
import re
def get_game_date(game):
_table = game.parent
game_date_raw = _table.find('td', class_='gameTime').text
p = re.compile(r'\d{4}-\d{2}-\d{2}')
date = p.findall(game_date_raw)
if not len(date):
return None
date = date[0]
return date.replace('-', '')
def get_endorsement(game):
endorse = game.find('span', class_='zhuanSpan')
if not endorse:
return None
endorse = endorse.text
p = re.compile(r'\d+')
endorse = p.findall(endorse)
return 0 if not len(endorse) else endorse[0]
def get_concede(game):
panels = game.find_all('div', class_='betPanel')
if len(panels) != 2:
return None
panel = panels[1]
concede = panel.find('div', class_='rqMod')
if not concede:
return None
return int(concede.text)
def get_odds(game, with_concede=False):
panels = game.find_all('div', class_='betPanel')
if len(panels) != 2:
return None
panel = panels[1 if with_concede else 0]
odds = panel.find_all('div', class_='betChan')
if len(odds) != 3:
return None
return [float(i.text) for i in odds]
def parse_game(game):
date = get_game_date(game)
if not date:
return None
game_id = game.find('a', class_='jq_gdhhspf_selectmatch')
if not game_id:
return None
game_id = game_id.text
serial_number = date + game_id
cup = game.find('td', class_='saiTd')
if not cup:
cup = None
else:
cup = cup.text
deadline = game.find('td', class_='stopTd jq_gdhhspf_match_changetime')
if not deadline:
return None
deadline = deadline.text
host = game.find('td', class_='zhuTeamTd').text
guest = game.find('td', class_='keTeamTd').text
num_endorse = get_endorsement(game)
concede = get_concede(game)
odds = get_odds(game, with_concede=False)
codds = get_odds(game, with_concede=True)
return {
'serial': serial_number,
'league': cup,
'deadline': deadline,
'host': host,
'guest': guest,
'endorse': num_endorse,
'concede': concede,
'odds': odds,
'codds': codds
}
def parse_results(result_dict):
game_date = result_dict['weekDate'].replace('-', '')
games = result_dict['raceList']
results = []
for game_id in games:
serial_number = game_date + game_id
score_raw = games[game_id]['wholeScore']
if len(score_raw) > 0:
if ':' not in score_raw:
continue
host, guest = score_raw.split(':')
results.append({
'serial': serial_number,
'host_score': host,
'guest_score': guest
})
return results
|
def print_soduko(puzzle, print_lists = False):
l = ['-------------------\n']
for row in range(len(puzzle)):
l.append('|')
for col in range(len(puzzle)):
entry = puzzle[row][col]
if isinstance(entry, list):
if print_lists:
l.append(str(entry))
else:
l.append(' ')
else:
l.append(str(entry))
if (col+1)%3 == 0:
l.append('|')
else:
l.append(' ')
l.append('\n')
if (row+1)%3 == 0:
l.append('-------------------\n')
print ''.join(l)
def possible_values(puzzle, row, col):
"""Finds the values that an entry may take"""
# Find values that are certain (not in a list)
r_vals = [val for val in puzzle[row] if not isinstance(val, list)]
c_vals = [r[col] for r in puzzle if not isinstance(r[col], list)]
s_vals = []
for r in range(3):
for c in range(3):
entry = puzzle[(row//3)*3 + r][(col//3)*3 + c]
if not isinstance(entry, list):
s_vals.append(entry)
# Make sets of those values
r_set = set(r_vals)
c_set = set(c_vals)
s_set = set(s_vals)
# Get the inverse of those (i.e. possible values)
taken_values_set = (r_set.union(c_set)).union(s_set)
posible_set = set(range(1,10)).difference(taken_values_set)
if len(posible_set) == 0:
return None
elif len(posible_set) == 1:
return posible_set.pop()
else:
return list(posible_set)
def copy_puzzle(puzzle):
new_puzzle = []
for r in puzzle:
new_puzzle.append(list(r))
return new_puzzle
def finished(puzzle):
return all([all([isinstance(e, int) for e in r]) for r in puzzle])
def solve(puzzle):
# Check puzzle size
assert len(puzzle) == 9
assert all(len(column) == 9 for column in puzzle)
# Check if the puzzle is finished
if finished(puzzle):
return puzzle
# Do a deterministic step
stuck = False
while not stuck:
stuck = True
for row in range(len(puzzle)):
for col in range(len(puzzle)):
if isinstance(puzzle[row][col], list):
p_vals = possible_values(puzzle, row, col)
# If there are no possible values, the given sudoku is infeasible
if p_vals is None:
return None
puzzle[row][col] = p_vals
if not isinstance(p_vals, list):
stuck = False
if finished(puzzle):
return puzzle
# If that didn't solve it, do a guess
## Find a branch point
branch = None
for row in range(len(puzzle)):
for col in range(len(puzzle)):
if isinstance(puzzle[row][col], list):
branch = {'row':row, 'col':col}
branch['values'] = possible_values(puzzle, row, col)
break
if branch is not None:
break
## Call solve recursively at the branch point
for v in branch['values']:
puzzle_copy = copy_puzzle(puzzle)
puzzle_copy[branch['row']][branch['col']] = v
solved_puzzle = solve(puzzle_copy)
if solved_puzzle is not None:
return solved_puzzle
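# Minimal usage sketch (not part of the original): blanks are written as 0 in
# `grid` below and converted to empty lists, which is the "unknown" marker the
# solver expects. The grid itself is just a standard example puzzle.
if __name__ == '__main__':
    grid = [[5,3,0, 0,7,0, 0,0,0],
            [6,0,0, 1,9,5, 0,0,0],
            [0,9,8, 0,0,0, 0,6,0],
            [8,0,0, 0,6,0, 0,0,3],
            [4,0,0, 8,0,3, 0,0,1],
            [7,0,0, 0,2,0, 0,0,6],
            [0,6,0, 0,0,0, 2,8,0],
            [0,0,0, 4,1,9, 0,0,5],
            [0,0,0, 0,8,0, 0,7,9]]
    puzzle = [[v if v != 0 else [] for v in row] for row in grid]
    solution = solve(puzzle)
    if solution is not None:
        print_soduko(solution)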
|
#----------------URL and imports-----------------------------
import requests
import json
URL = 'http://localhost:8088/services/'
#---------------REQUESTS--------------------------------------
def post_user(email,phone,password):
user = {}
user['email'] = email
user['phone'] = phone
user['address'] = password
#json_objct = json.dumps(user)
response = requests.post(URL + 'users/addUser' , json=(user))
print('POST USER request----',response)
return response
def post_for_forgot_password(email):
response = requests.post(URL + 'forgot/reset?email=' + str(email))
print('FORGOT password response -----',response)
#print(response.links)
return response
'''def test_post():
response = requests.post('http://localhost:8088/services/forgot/
newPassword?token= ***ENTER YOUR TOKEN HERE*** &newPassword= ***ENTER YOUR NEW PASSWORD HERE ***')
print(response)
print(response.content)
return response
if __name__ == '__main__':
test_post()
#-----------------------------------------------------------------'''
|
"""
Errors like the error cases from Rackspace Monitoring.
"""
from __future__ import division, unicode_literals
import attr
from six import text_type
@attr.s
class ParentDoesNotExist(Exception):
"""
Error that occurs when a parent object does not exist.
For instance, trying to access or modify a Check under a
non-existing Entity will cause this error.
"""
object_type = attr.ib(validator=attr.validators.instance_of(text_type))
key = attr.ib(validator=attr.validators.instance_of(text_type))
code = attr.ib(validator=attr.validators.instance_of(int), default=404)
def to_json(self):
"""
Serializes this error to a JSON-encodable dict.
"""
return {'type': 'notFoundError',
'code': self.code,
'txnId': '.fake.mimic.transaction.id.c-1111111.ts-123444444.v-12344frf',
'message': 'Parent does not exist',
'details': 'Object "{0}" with key "{1}" does not exist'.format(
self.object_type, self.key)}
@attr.s
class ObjectDoesNotExist(Exception):
"""
Error that occurs when a required object does not exist.
"""
object_type = attr.ib(validator=attr.validators.instance_of(text_type))
key = attr.ib(validator=attr.validators.instance_of(text_type))
code = attr.ib(validator=attr.validators.instance_of(int), default=404)
def to_json(self):
"""
Serializes this error to a JSON-encodable dict.
"""
return {'type': 'notFoundError',
'code': self.code,
'txnId': '.fake.mimic.transaction.id.c-1111111.ts-123444444.v-12344frf',
'message': 'Object does not exist',
'details': 'Object "{0}" with key "{1}" does not exist'.format(
self.object_type, self.key)}
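# --- Usage sketch (illustrative only; the object type and key below are made up) ---
if __name__ == '__main__':
    error = ObjectDoesNotExist(object_type=u'Entity', key=u'enExample')
    print(error.to_json())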
|
# Tuple
# A tuple is similar to a list, except that its elements cannot be modified.
# Tuples are written in parentheses (()), with elements separated by commas.
# The elements of a tuple may also be of different types.
tuple = ('abcd', 786, 2.23, 'runoob', 70.2)  # note: this shadows the built-in name `tuple`
tinytuple = (123, 'runoob')
print(tuple)              # print the complete tuple
print(tuple[0])           # print the first element of the tuple
print(tuple[1:3])         # print the second and third elements
print(tuple[2:])          # print all elements from the third onwards
print(tinytuple * 2)      # print the tuple twice
print(tuple + tinytuple)  # concatenate tuples
tup = (1, 2, 3, 4, 5, 6)
print(tup[0])
print(tup[1:5])
#tup[0] = 11 # modifying a tuple element is illegal
#raises: TypeError: 'tuple' object does not support item assignment
tup1 = ()     # an empty tuple
tup2 = (20,)  # ****a single-element tuple needs a comma after the element****
|
#=========================================================================
# This is a simple function to define land and ocean masks for LMR output.
# author: Michael P. Erb
# date : October 19, 2016
#=========================================================================
import numpy as np
import xarray as xr
from mpl_toolkits.basemap import Basemap
from mpl_toolkits.basemap import maskoceans
# Define land and ocean masks for LMR output:
def masks(file_name):
#
print("This masking function doesn't work correctly near the poles and international date line. Double-check the mask.")
#
handle = np.load(file_name)
lon = handle['lon']
lat = handle['lat']
handle.close()
#
# Make a version of lon where western hemisphere longitudes are negative
lon_we = lon
lon_we[lon_we>180] = lon_we[lon_we>180]-360
#
# Make an ocean mask
allpoints = np.ones((lat.shape[0],lon_we.shape[1]))
oceanmask = maskoceans(lon_we,lat,allpoints,inlands=False).filled(np.nan)
oceanmask[0,:] = 1
#
# Make a land mask
landmask = np.zeros((lat.shape[0],lon_we.shape[1]))
landmask[:] = np.nan
landmask[np.isnan(oceanmask)] = 1
#
return landmask, oceanmask
# Define land and ocean masks for netcdf files:
def masks_netcdf(file_name):
handle = xr.open_dataset(file_name,decode_times=False)
lon_1d = handle['lon'].values
lat_1d = handle['lat'].values
#
# Make 2d versions of lat and lon
lon,lat = np.meshgrid(lon_1d,lat_1d)
#
    # Make a version of lon where western hemisphere longitudes are negative
lon_we = lon
lon_we[lon_we>180] = lon_we[lon_we>180]-360
#
# Make an ocean mask
allpoints = np.ones((lat.shape[0],lon_we.shape[1]))
oceanmask = maskoceans(lon_we,lat,allpoints,inlands=False).filled(np.nan)
oceanmask[0,:] = 1
#
# Make a land mask
landmask = np.zeros((lat.shape[0],lon_we.shape[1]))
landmask[:] = np.nan
landmask[np.isnan(oceanmask)] = 1
#
return landmask, oceanmask
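# --- Usage sketch (illustrative only) ---
# How the returned masks might be applied to a field on the same lat/lon grid.
# The file name below is a placeholder, not a real LMR output file.
if __name__ == '__main__':
    landmask, oceanmask = masks_netcdf('example_lmr_output.nc')  # placeholder path
    field = np.random.rand(*landmask.shape)  # stand-in for a real 2D field
    land_only = field * landmask      # ocean points become NaN
    ocean_only = field * oceanmask    # land points become NaN
    print(np.nanmean(land_only), np.nanmean(ocean_only))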
|
x, y = map(int, input().split())
# def F (x, y, cache):
# print (f"Call for ({x}, {y})")
# if ( (x, y) not in cache):
# if (x == 0):
# cache[(x, y)] = y+1;
# elif (x > 0 and y == 0):
# cache[(x, y)] = F(x-1, 1, cache);
# else:
# cache [(x, y)] = F(x-1, F(x, y-1, cache), cache)
# # print (cache)
# return cache[(x, y)]
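# F(x, y) below is the two-argument Ackermann function. Without the memoised
# variant commented out above it recomputes subproblems and grows explosively,
# so only very small x and y are practical.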
def F (x, y):
print (f"Call for ({x}, {y})")
if (x == 0):
return y+1;
elif (x > 0 and y == 0):
return F(x-1, 1);
else:
return F(x-1, F(x, y-1))
cache = dict()
print (F(x, y))
|
"""
Testing utilities. Do not modify this file!
"""
num_pass = 0
num_fail = 0
def assert_equals(msg, expected, actual):
"""
Check whether code being tested produces
the correct result for a specific test
case. Prints a message indicating whether
it does.
:param: msg is a message to print at the beginning.
:param: expected is the correct result
:param: actual is the result of the
code under test.
"""
print(msg)
global num_pass, num_fail
if expected == actual:
print("PASS")
num_pass += 1
else:
print("**** FAIL")
print("expected: " + str(expected))
print("actual: " + str(actual))
num_fail += 1
print("")
def start_tests(header):
"""
Initializes summary statistics so we are ready to run tests using
assert_equals.
:param header: A header to print at the beginning
of the tests.
"""
global num_pass, num_fail
print(header)
for i in range(0,len(header)):
print("=",end="")
print("")
num_pass = 0
num_fail = 0
def finish_tests():
"""
Prints summary statistics after the tests are complete.
"""
print("Passed %d/%d" % (num_pass, num_pass+num_fail))
print("Failed %d/%d" % (num_fail, num_pass+num_fail))
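# --- Usage sketch (illustrative only; double() is a made-up function under test) ---
if __name__ == '__main__':
    def double(x):
        return 2 * x
    start_tests("Tests for double()")
    assert_equals("double(2) should be 4", 4, double(2))
    assert_equals("double(0) should be 0", 0, double(0))
    finish_tests()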
|
from typing import Any, Dict
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output
from dash.exceptions import PreventUpdate
from phi.field import SampledField
from phiml.math._shape import parse_dim_order
from phi.vis._dash.dash_app import DashApp
from phi.vis._dash.player_controls import STEP_BUTTON, PAUSE_BUTTON
from phi.vis._dash.player_controls import REFRESH_INTERVAL
from phi.vis._vis_base import display_name
FRONT = 'front'
RIGHT = 'right'
TOP = 'top'
REFRESH_RATE = Input(f'refresh-rate', 'value')
def all_view_settings(app: DashApp, viewer_group: str):
dims = [Input(f'{viewer_group}_select_{dim}', 'value') for dim in parse_dim_order(app.config.get('select', []))]
return (Input(f'{viewer_group}_projection-select', 'value'),
Input(f'{viewer_group}_component-slider', 'value'),
Input(f'{viewer_group}_refresh-button', 'n_clicks'),
*dims)
def parse_view_settings(app: DashApp, *args) -> Dict[str, Any]:
projection, component, _, *selections = args
return {
'select': {dim: sel for dim, sel in zip(parse_dim_order(app.config.get('select', [])), selections)},
'projection': projection,
'component': [None, 'x', 'y', 'z', 'abs'][component],
}
def refresh_rate_ms(refresh_value):
if refresh_value is None:
return 1000 * 60 * 60 * 24
else:
return (2000, 900, 400, 200)[refresh_value]
def build_view_selection(app: DashApp, field_selections: tuple, viewer_group: str):
dim_sliders = []
for sel_dim in parse_dim_order(app.config.get('select', [])):
sel_pane = html.Div(style={'height': '50px', 'width': '100%', 'display': 'flex', 'align-items': 'center', 'justify-content': 'center'}, children=[
html.Label(display_name(sel_dim), style={'display': 'inline-block'}),
html.Div(style={'width': '80%', 'display': 'inline-block'}, children=[
dcc.Slider(min=0, max=0, step=1, value=0,
# marks={} if resolution3d is None else _marks(resolution3d[1]),
id=f'{viewer_group}_select_{sel_dim}', updatemode='drag', disabled=False),
]),
])
dim_sliders.append(sel_pane)
@app.dash.callback(Output(f'{viewer_group}_select_{sel_dim}', 'max'), [STEP_BUTTON, REFRESH_INTERVAL, PAUSE_BUTTON, *field_selections])
def update_dim_max(_s, _r, _p, *field_names, dim=sel_dim):
shapes = [app.model.get_field_shape(name) for name in field_names if name != 'None']
sizes = [s.get_size(dim) for s in shapes if dim in s]
if sizes:
return max(sizes) - 1
else:
raise PreventUpdate()
@app.dash.callback(Output(f'{viewer_group}_select_{sel_dim}', 'disabled'), [STEP_BUTTON, REFRESH_INTERVAL, PAUSE_BUTTON, *field_selections])
def update_dim_disabled(_s, _r, _p, *field_names, dim=sel_dim):
shapes = [app.model.get_field_shape(name) for name in field_names if name != 'None']
sizes = [s.get_size(dim) for s in shapes if dim in s]
return max(sizes) <= 1 if sizes else True
@app.dash.callback(Output(f'{viewer_group}_select_{sel_dim}', 'marks'), [STEP_BUTTON, REFRESH_INTERVAL, PAUSE_BUTTON, *field_selections])
        def update_dim_marks(_s, _r, _p, *field_names, dim=sel_dim):
shapes = [app.model.get_field_shape(name) for name in field_names if name != 'None']
sizes = [s.get_size(dim) for s in shapes if dim in s]
if sizes:
return _marks(max(sizes))
else:
return {}
layout = html.Div(style={'width': '100%', 'display': 'inline-block', 'backgroundColor': '#E0E0FF', 'vertical-align': 'middle'}, children=[
# --- Settings ---
html.Div(style={'width': '30%', 'display': 'inline-block', 'vertical-align': 'top'}, children=[
html.Div(style={'width': '50%', 'display': 'inline-block'}, children=[
dcc.Dropdown(options=[{'value': FRONT, 'label': 'Front'}, {'value': RIGHT, 'label': 'Side'}, {'value': TOP, 'label': 'Top'}],
value='front', id=f'{viewer_group}_projection-select', disabled=False),
html.Div(style={'margin-top': 6}, children=[
html.Div(style={'width': '90%', 'margin-left': 'auto', 'margin-right': 'auto'}, children=[
dcc.Slider(min=0, max=4, step=1, value=0, marks={0: '🡡', 4: '⬤', 1: 'x', 2: 'y', 3: 'z'}, id=f'{viewer_group}_component-slider', updatemode='drag'),
])
]),
]),
html.Div(style={'width': '50%', 'display': 'inline-block', 'vertical-align': 'top'}, children=[ # , 'margin-left': 40
html.Div('Refresh Rate', style={'text-align': 'center'}),
html.Div(style={'width': '60%', 'margin-left': 'auto', 'margin-right': 'auto', 'height': 50}, children=[
dcc.Slider(min=0, max=3, step=1, value=1, marks={0: 'low', 3: 'high'}, id=REFRESH_RATE.component_id, updatemode='drag'),
]),
html.Div(style={'text-align': 'center'}, children=[
html.Button('Refresh now', id=f'{viewer_group}_refresh-button'),
])
])
]),
# --- Batch & Depth ---
html.Div(style={'width': '70%', 'display': 'inline-block'}, children=dim_sliders),
])
return layout
def _marks(stop, limit=35, step=1) -> dict:
if stop <= limit * step:
return {i: str(i) for i in range(0, stop, step)}
if stop <= 2 * limit * step:
return _marks(stop, limit, step * 2)
if stop <= 5 * limit * step:
return _marks(stop, limit, step * 5)
else:
return _marks(stop, limit, step * 10)
|
# This file is part of beets.
# Copyright 2016, Adrian Sampson.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Tests for the virtual filesystem builder."""
import unittest
from test import _common
from beets import library
from beets import vfs
class VFSTest(_common.TestCase):
def setUp(self):
super().setUp()
self.lib = library.Library(':memory:', path_formats=[
('default', 'albums/$album/$title'),
('singleton:true', 'tracks/$artist/$title'),
])
self.lib.add(_common.item())
self.lib.add_album([_common.item()])
self.tree = vfs.libtree(self.lib)
def test_singleton_item(self):
self.assertEqual(self.tree.dirs['tracks'].dirs['the artist'].
files['the title'], 1)
def test_album_item(self):
self.assertEqual(self.tree.dirs['albums'].dirs['the album'].
files['the title'], 2)
def suite():
return unittest.TestLoader().loadTestsFromName(__name__)
if __name__ == '__main__':
unittest.main(defaultTest='suite')
|
from script.conftest import get_host, ji_yun_ying_login
|
from quick_sort import partition
def quick_select(givenList, left, right, k):
split = partition(givenList, left, right)
if split == k:
return givenList[split]
elif split < k:
return quick_select(givenList, split + 1, right, k)
else:
return quick_select(givenList, left, split - 1, k)
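# --- Usage sketch (illustrative only) ---
# quick_select returns the element that would sit at index k in sorted order,
# assuming quick_sort.partition(lst, left, right) partitions in place and
# returns the pivot's final index.
if __name__ == '__main__':
    values = [7, 2, 9, 4, 1, 5]
    print(quick_select(values, 0, len(values) - 1, 2))  # expected: 4 (third smallest)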
|
'''
Creating a Linked List in Python. Linked Lists are rarely needed in Python, since the built-in list is so versatile. This is mostly just an exercise in coding.
However, it is good to know that Python Lists suffer from 2 small setbacks:
1) Lists in Python are equivalent to arrays. They are contiguous, so as to have O(1) access. Linked Lists provide a non-contiguous data structure at the cost of
performance. Hence, sometimes Linked Lists could come in handy.
2) Insertion at the beginning requires Python to shift every existing element of the list over before placing the new one at the front.
Python also has a collections.deque which is essentially a better version of this Linked List implementation.
'''
# private Node class for internal usage.
class _Node:
def __init__(self, data = None, prev = None, next = None):
self.data = data;
self.prev = prev;
self.next = next;
def __str__(self):
return str(self.data)
# Create a Linked List class using a sentinel Node. Sacrifice a little bit of memory space for clarity
# in code. No edge cases to worry about anymore.
class Linked_List:
def __init__(self):
self.__head = _Node()
self.__head.prev = self.__head
self.__head.next = self.__head
self.__len = 0
# add to the tail of a linked list
def push_back(self, value):
new_node = _Node(value, self.__head.prev, self.__head)
self.__head.prev.next = new_node
self.__head.prev = new_node
self.__len += 1
# add to the head of a linked list
def push_front(self, value):
new_node = _Node(value, self.__head, self.__head.next)
self.__head.next.prev = new_node
self.__head.next = new_node
self.__len += 1
# insert element at given position. If the index is 0, then it is equivalent to a push_front. If the
    # index is len(Linked_List), then it is equivalent to push_back. Does not overwrite the given index. Use the overloaded [] operators to overwrite
# something at a given index.
def insert(self, value, index):
if(index > self.__len):
raise IndexError()
if index == self.__len:
self.push_back(value)
elif index == 0:
self.push_front(value)
# depending on if the index is in the second half or the first half of the list, we iterate in a given direction. It reduces the time
# complexity from O(n) to O(n/2)
elif index <= self.__len / 2:
current = self.__head.next
for i in range(index):
current = current.next
new_node = _Node(value, current.prev, current)
current.prev.next = new_node
current.prev = new_node
self.__len += 1
else:
current = self.__head.prev
for i in range(index, self.__len - 1):
current = current.prev
new_node = _Node(value, current.prev, current)
current.prev.next = new_node
current.prev = new_node
self.__len += 1
def __len__(self):
return self.__len
def __str__(self):
current = self.__head.next
ret_val = "["
while(current is not self.__head):
ret_val += str(current) + ", "
current = current.next
ret_val = ret_val[:-2] + "]"
return ret_val
# Essentially a tester function, making sure that the linked list works as described.
if __name__ == "__main__":
x = Linked_List()
x.push_back(1)
x.push_back(2)
x.push_back(3)
x.push_back(4)
x.push_back(5)
x.push_front(6)
x.push_front(7)
x.push_front(8)
x.push_front(9)
x.insert(10, 9)
x.insert(11, 3)
x.insert(12, 10)
x.insert(13, 10)
print(x)
|
from django.apps import AppConfig
class ProgressAnalyzerConfig(AppConfig):
name = 'progress_analyzer'
|
import _time
def sleep(s: float):
for i in range(int(s)):
_time.sleep_s(1)
_time.sleep_ms(int((s - int(s)) * 1000))
def sleep_s(s: int):
return _time.sleep_s(s)
def sleep_ms(ms: int):
return _time.sleep_ms(ms)
def time() -> float:
return _time.time()
def time_ns() -> int:
return _time.time_ns()
def gmtime(unix_time: float):
return _time.gmtime(unix_time)
def localtime(unix_time: float):
return _time.localtime(unix_time)
def mktime() -> int:
return _time.mktime()
def asctime() -> str:
return _time.asctime()
def ctime(unix_time: float) -> str:
return _time.ctime(unix_time)
|
from . import product_product
from . import product_pricelist
from . import sale
|
import csv
import os
ifile = open('input.csv', "rb")
ofile = open('output.csv', "wb")
complete = 0
incomplete = 0
first = 1
username = ''
prior_user = ''
writer = csv.writer(ofile)
reader = csv.reader(ifile)
writer.writerow(["Username", "First Name", "Last Name", "Completed Mapped Items", "Incomplete Mapped Items", "Email", "Manager", "VP Name", "Sub-department"])
for row in reader:
if(first == 1):
prior_user = row[3]
first = 0
else:
prior_user = username
username = row[3]
if(username != prior_user):
writer.writerow([prior_user, fname, lname, complete, incomplete, email, manager, vp, sub])
complete = 0
incomplete = 0
fname = row[4]
lname = row[5]
email = row[6]
manager = row[7]
vp = row[8]
sub = row[9]
status = row[2]
if(status == "Completed"):
complete += 1
elif(status == "Incomplete"):
incomplete += 1
writer.writerow([username, fname, lname, complete, incomplete, email, manager, vp, sub])
|
# Solved this before by writing out the states explicitly.
# Two pointers.
# This solution records, for each position, the distance to the nearest 'R' on its
# left and the nearest 'L' on its right; the domino falls toward whichever is
# closer, and stays upright on a tie.
from math import inf
class Solution:
def pushDominoes(self, dominoes: str) -> str:
n = len(dominoes)
# [L_dist, R_dist]
records = [[inf, inf] for _ in range(n)]
cur = -inf
for i, c in enumerate(dominoes):
if c == 'R':
cur = i
elif c == 'L':
cur = -inf
records[i][1] = i - cur
cur = inf
for i in range(n - 1, -1, -1):
if dominoes[i] == 'L':
cur = i
elif dominoes[i] == 'R':
cur = inf
records[i][0] = cur - i
return "".join('.' if l == r else ('R' if l > r else 'L') for l, r in records)
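# --- Usage sketch (illustrative only) ---
if __name__ == '__main__':
    print(Solution().pushDominoes(".L.R...LR..L.."))  # expected: "LL.RR.LLRRLL.."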
|
# -*- coding: utf-8 -*-
"""helpers.py: Various Helper Functions
Handles all necessary parts of the IRC protocol for client connections.
"""
import os
import sys
import time
import re
import urllib
import traceback
try:
import urllib2
except:
urllib2 = None # Legacy support
# Try to convert to string with a crude 'brute force' fall back
def safe_escape(text):
try:
return str(text)
except:
pass
new = ""
for c in text:
try:
new += unicode(str(c))
except:
new += "?"
return new
# Convert value to string if necessary
def arg_to_str(arg):
if type(arg) in [type(""), type(u"")]:
return arg
else:
return str(arg)
# Get the substring of a string between two delimiters
def get_string_between(start, stop, s):
i1 = s.find(start)
if i1 == -1:
return False
s = s[i1 + len(start):]
i2 = s.find(stop)
if i2 == -1:
return False
s = s[:i2]
return s
# Get path to folder containing the current script
def get_current_script_path():
return os.path.dirname(os.path.realpath(__file__))
# Convert a duration string with a d/h/m/s suffix into seconds
def str_to_seconds(txt):
units = {
"d": 24*60*60,
"h": 60*60,
"m": 60,
"s": 1,
}
txt = txt.strip().lower()
if len(txt) < 2:
return False
if txt[-1] in units.keys():
unit = units[txt[-1]]
else:
return False
try:
part = txt[:-1].replace(",", ".")
n = float(part)
except:
return False
return n*unit
# return human readable timestamps
def time_stamp(t=None, formatting=None):
t = t or time.time()
if not formatting:
return time.asctime(time.localtime(float(t)))
return time.strftime(formatting, time.localtime(float(t)))
def time_stamp_short(t=None):
t = t or time.time()
return time.strftime("%H:%M:%S", time.localtime(t))
def time_stamp_numeric(t=None):
t = t or time.time()
return time.strftime("%d.%m.%y %H:%M", time.localtime(t))
# run a shell command
def run_shell_cmd(cmd):
try:
import commands
except:
return False
output = commands.getoutput(cmd)
return output
# check if string contains a word
def has_word(s, w):
words = re.split('\W+',s)
if w in words:
return True
return False
# split string into words
def split_words(s):
return re.split('\W+',s)
# return info about last error
def get_error_info():
msg = str(traceback.format_exc()) + "\n" + str(sys.exc_info())
return msg
# find any urls in a string
def find_urls(s):
try:
re
except:
return []
urls = re.findall('http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+', s)
return urls
|
# Hash Tables: Ransom Note
# Cracking the Coding Interview Challenge
# https://www.hackerrank.com/challenges/ctci-ransom-note
class Word:
def __init__(self, word, used):
self.word = word
self.used = used
def ransom_note(magazine, ransom):
# Initialize hashMap
hashMap = []
for i in range(len(magazine)):
hashMap.append([])
# Add magazine words to hashMap
for word in magazine:
hashIdx = hash_func(word, len(hashMap))
hashMap[hashIdx].append(Word(word, False))
# Compare note words to magazine hashMap
for word in ransom:
hashIdx = hash_func(word, len(hashMap))
foundMatch = False
for entries in hashMap[hashIdx]:
if entries.word == word and not entries.used:
foundMatch = True
entries.used = True
break
if not foundMatch:
return False
return True
def hash_func(word, length):
hashVal, count = 0, 0
for letter in list(word):
count += 1
hashVal += (ord(letter) * 31 + count * 7)
return hashVal % length
#------------------------------- Provided -------------------------------
m, n = map(int, input().strip().split(' '))
magazine = input().strip().split(' ')
ransom = input().strip().split(' ')
answer = ransom_note(magazine, ransom)
if(answer):
print("Yes")
else:
print("No")
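# Worked example (illustrative): with magazine = ['give', 'me', 'one', 'grand',
# 'today', 'night'] and ransom = ['give', 'one', 'grand', 'today'], ransom_note()
# returns True and the program prints "Yes"; if the note needed a word that is
# missing (or used more times than it appears in the magazine), it would print "No".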
|
#!/usr/bin/python
# python stuff
import time
import sys
import numpy as np
import os.path
# index coding stuff
from Symbol import Symbol
from alignment import alignment, nullvec
from bs_index_coding import compute_interferers, transmit_messages, bs_decode_messages
from multirandperm import pairingperm
# tos stuff, all needed?
from TxSerialMsg import *
from SymMsg import *
from T2HMsg import *
from AckMsg import *
from tinyos.message import MoteIF
class Transmitter:
def __init__(self, N, logfile, itMax=1, B=1, verbose=True, dest=False):
#CHANGE SO THAT ONLY THINGS THAT ARE CALLED ONCE!!
self.verbose = verbose
self.prevtime = time.time()
self.N = N
self.logfile = logfile
self.B = B
self.itMax = itMax
self.timeformat = '%Y/%d/%m %H:%M:%S'
#self.A = make_A_matrix(self.N)
#self.counter = 0;
#self.num_transmissions = 0;
self.current_row = 0; #needed now??
Transmitter.setup_logfile(self)
## Set up link to tos mote
self.mif = MoteIF.MoteIF()
self.source = self.mif.addSource("sf@localhost:9002")
        #TxSerialMsg.py is generated by MIG
self.mif.addListener(self, TxSerialMsg)
# RESET THESE
## generate random messages
#self.W = np.random.randint(0,2**16, (N,B)) # each message is an array of B uint16s
##print 'W is :'
##print self.W
## store final received messages. goal is to "de-NaN" by the end
##self.final_messages = np.nan*np.zeros((N, B))
##not actually messages but indicator if all the pieces for the node have been received
#self.final_messages = np.nan*np.zeros((N, 1))
## keep track of all transmitted and received messages/symbols
#self.tx_symbols = np.array([]) # [1 -by- # of transmissions]
#
## keep track of number of transmissions
#self.TOTAL_TRANSMISSIONS = 0
## for TDMA round also?
#self.TDMA_TRANSMISSIONS = 0
#antidote matrix A
#self.A = np.diag(self.W.reshape(-1)) # receiver (row) i has access to the message it plans to send
#rewrite so not dependent on W
#self.A = np.eye(N)
# Receiver i wants message dest[i]
# e.g. dest = [1, 0] means R0 wants W[1] and R1 wants W[0]
if dest == False:
self.dest = pairingperm(N)
if self.verbose:
print 'Message destinations chosen by pairing'
print 'dest:', self.dest
else:
self.dest = dest
#rewrite A so not dependent on W
self.it = 0
Transmitter.reset(self)
##RESET THESE
#mat_dest = (np.arange(self.N), np.array(self.dest))
#self.signal_space = np.zeros((self.N,self.N))>0
#self.signal_space[mat_dest] = True;
#self.A = np.eye(N)
#self.I = compute_interferers(self.A, signal_space)
#self.J = self.I.astype(float)
#self.J[mat_dest] = -1
#self.map = np.arange(self.N)
#print 'size of I is ', np.size(self.I)
#
#if self.verbose:
#print 'Interferer matrix is:'
#print self.J
#
##RESET THESE
#self.TDMA_MODE = 1;
##self.tm = 0; #needed?
#self.TDleft = np.arange(self.N, dtype=np.uint8)
#self.ackList = np.nan*self.TDleft
#self.ileft = 0;
##transmit the first message
#Transmitter.tdma_stage(self)
#self.ileft = 1
def cleanup(self):
#save to text file
#epsString = [str(a) for a in self.epsVec]
#epsString = '\t'.join([str(a) for a in list(self.epsVec[0])])
epsString = '\t'.join(['%.3f' % a for a in list(self.epsVec[0])])
#logstring = '%d\t%d\t%d\t%d\t%.5f\t%s' % (self.N, self.B, self.TDMA_TRANSMISSIONS, self.TOTAL_TRANSMISSIONS, self.epsIndex,self.epsVec) #print an array??
#logstring = '%%d\t%d\t' % (self.TDMA_TRANSMISSIONS, self.TOTAL_TRANSMISSIONS)
logstring = '%d\t%d\t%d\t%d\t%.5f\t%s' % (self.N, self.B, self.TDMA_TRANSMISSIONS, self.TOTAL_TRANSMISSIONS, self.epsIndex,epsString) #print an array??
lf = open(self.logfile, 'a')
lf.write(logstring)
lf.write('\n')
lf.close()
def reset(self):
self.it += 1
#reinitialize for next experiment
# generate random messages
self.W = np.random.randint(0,2**16, (self.N,self.B)) # each message is an array of B uint16s
#print 'W is :'
#print self.W
# store final received messages. goal is to "de-NaN" by the end
#self.final_messages = np.nan*np.zeros((N, B))
#not actually messages but indicator if all the pieces for the node have been received
self.final_messages = np.nan*np.zeros((self.N, 1))
# keep track of all transmitted and received messages/symbols
self.tx_symbols = np.array([]) # [1 -by- # of transmissions]
# keep track of number of transmissions
self.TOTAL_TRANSMISSIONS = 0
self.TDMA_TRANSMISSIONS = 0
#make float so dont have to cast later
self.TDMA_ERASURES = float(0)
self.INDEX_ERASURES = float(0)
self.TDMA_ERASURES_VEC = np.zeros((1,self.N),float)
self.INDEX_ERASURES_VEC = np.zeros((1,self.N),float)
self.epsIndex = float(0)
self.epsVec = np.zeros((1,self.N),float)
mat_dest = (np.arange(self.N), np.array(self.dest))
signal_space = np.zeros((self.N,self.N))>0
signal_space[mat_dest] = True;
self.A = np.eye(self.N)
self.I = compute_interferers(self.A, signal_space)
self.J = self.I.astype(float)
self.J[mat_dest] = -1
self.map = np.arange(self.N)
#print 'size of I is ', np.size(self.I)
#only print once??
if self.verbose:
print 'Interferer matrix is:'
print self.J
#return to tdma mode
self.TDMA_MODE = 1;
#self.tm = 0; #needed?
self.TDleft = np.arange(self.N, dtype=np.uint8)
self.ackList = np.nan*self.TDleft
self.ileft = 0;
#transmit the first message
Transmitter.tdma_stage(self)
self.ileft = 1
def tdma_stage(self):
smsg = TxSerialMsg()
#self.TDMA_MODE = 1;
#self.TDleft = np.arange(self.N, dtype=np.uint8)
#self.ackList = np.nan*self.TDleft
#test to send something
#smsg.set_messageid(1)
#self.mif.sendMsg(self.source, 0xFFFF, smsg.get_amType(), 0, smsg)
#tm = 0; #move up
#set packet number
#while np.any(np.isnan(self.ackList)): #change while to if and call at the end of receive loop?
#tm = tm + 1
#for i in self.TDleft:
#mote doesnt see any of these??
self.TDMA_TRANSMISSIONS += 1
#smsg.set_crow(255) #something to signal tdma mode, still necessary?
#smsg.set_messageid(int(self.ileft))
smsg.set_messageid(int(self.TDleft[self.ileft]))
#for(j=len(self.dest[i])) eventually loop through J matrix columns that are -1
#smsg.set_data(self.W[self.dest[self.ileft],:])
smsg.set_data(self.W[self.dest[self.TDleft[self.ileft]],:])
#also send own message w[i] for comparison????, set to V_row?
#print 'TDMA V Row ', self.W
#smsg.set_V_row(self.W[self.ileft])
smsg.set_current_transmission(self.TDMA_TRANSMISSIONS)
self.mif.sendMsg(self.source, 0xFFFF, smsg.get_amType(), 0, smsg)
if self.verbose:
print'TDMA TRANSMISSION ', self.TDMA_TRANSMISSIONS, ': Motes remaining: ', self.TDleft
#print'Sending message to Mote ', int(self.TDleft[self.ileft])
##call tinyos receive thread instead?
#rmsg = AckMsg(msg.dataGet())
##check for acks, remove nodes with ack type 1 from TDMAleft, record transmissions?
#newAck = rmsg.get_ACKs()
#acklist[newAck==1] = 1
def index_stage(self):
smsgx = TxSerialMsg()
smsgx.set_messageid(255) #now something to represent index coding, 255?
#print 'V is ', self.V
#print 'm_i is ', self.m_i
#PIECES HERE? matrix multiply handles it
#print 'mi: ', self.m_i
#print 'map: ', self.map[self.m_i]
smsgx.set_data(np.dot(self.V[self.m_i,:],self.W,))
#smsgx.set_data(np.dot(self.V[self.map[self.m_i],:],self.W,))
##also send own message w[i] for comparison????
#smsgx.set_V_row(np.asarray(self.V[self.m_i], dtype=np.float32))
#smsgx.set_V_row(np.array([1, 2, 3.5, 4.2, 5, 6, 7, 8], dtype=np.float64))
#print 'sending row m of V: ', self.V[self.m_i]
#smsgx.set_crow()
smsgx.set_V_row(self.V[self.m_i,:])
###random coding instead?
#randomVrow = np.random.randn(1,self.N)
###blah[0,abs(np.array(self.V[self.m_i]))<.0001] = 0
###blah2 = np.asarray(blah,dtype="float64")
###blah2 = np.asarray(blah)
###print self.V[self.m_i,:], type(self.V[self.m_i,2])
###print blah[0,:], type(blah[0,:])
#smsgx.set_V_row(randomVrow[0,:])
##does not decode for random combinations, larger matrix needed to find a null vector????
##smsgx.set_V_row(np.random.randn(1,self.N))
smsgx.set_current_transmission(self.TOTAL_TRANSMISSIONS)
self.mif.sendMsg(self.source, 0xFFFF, smsgx.get_amType(), 0, smsgx)
if self.verbose:
print 'Transmission ', self.m_i+1, '/', self.m #m_i+1 or now just m_i ???
#print self.rx_symbols.astype(int)
self.m_i += 1
self.i += 1
def receive(self,src,msg): #make this main_loop and have it call tdma_stage and index_stage?, or set different modes in the other functions
#tdma tx, index tx, or completed depending on mode
if self.TDMA_MODE==1:
#print 'ileft is ', self.ileft, ', size if tdleft is ', np.size(self.TDleft)
##try moving this down??
#if self.ileft == np.size(self.TDleft):
##if self.ileft > np.size(self.TDleft):
#self.ileft = 0; #only update acklist here?
##not completely correct?? what if size changes inbetween?
##only update tdleft here?
#self.TDleft = np.nonzero(np.isnan(self.ackList.reshape(-1)))[0]
print 'RECEIVE FUNCTION (TDMA)'
rmsg = T2HMsg(msg.dataGet())
#check for acks, remove nodes with ack type 1 from TDMAleft, record transmissions?
newAck = rmsg.get_ACKs()
print 'Acks for transmission number ', rmsg.get_transmission(), ': ', newAck #first one is 8? 2? garbage value
#print 'Element equal to 1: ', np.array(newAck)==1 #want this to be an array of logicals
self.ackList[np.array(newAck)==1] = 1
self.TDMA_ERASURES += np.sum(np.array(newAck)==2)
self.TDMA_ERASURES_VEC += np.array(newAck)==2
#print 'number of erasures ', self.TDMA_ERASURES
#print 'Erasure vector:', self.TDMA_ERASURES_VEC
#self.TDleft = np.nonzero(np.isnan(self.ackList.reshape(-1)))[0]
#call tdma_stage(self) at end if np.any(np.isnan(self.ackList))
#try moving this here??
if self.ileft == np.size(self.TDleft):
#if self.ileft > np.size(self.TDleft):
self.ileft = 0; #only update acklist here?
#not completely correct?? what if size changes inbetween?
#only update tdleft here?
self.TDleft = np.nonzero(np.isnan(self.ackList.reshape(-1)))[0]
#if not done, transmit next
if np.any(np.isnan(self.ackList)):
#self.tm = self.tm + 1
#for i in self.TDleft:
Transmitter.tdma_stage(self)
self.ileft += 1
else:
print 'Finished TDMA after', self.TDMA_TRANSMISSIONS, 'transmissions and', self.TDMA_ERASURES, 'erasures. Current erasure probability is', self.TDMA_ERASURES/(self.N*self.TDMA_TRANSMISSIONS) #use get_transmission from receive function? will it make a difference?
print 'EpsVec (TDMA only):', self.TDMA_ERASURES_VEC/self.TDMA_TRANSMISSIONS
#COMPUTE ATTEMPTS PER NODE?? dont need if broadcasting every transmission
self.TDMA_MODE = 0; #initialize and proceed to index coding mode
self.m_i = 0;
self.i = 1;
self.m = 0;
self.unsolved = self.map > -50; #array of true the size of self.map
self.rx_symbols = 0;
self.eps_vec = 0*np.ones(self.N)
R = transmit_messages(1, self.eps_vec[self.map])
self.rx_symbols = R
#self.INDEX_ERASURES = 0;
#send the first index message here? call compute_matrices for the first time here?
#handle first case here?
elif self.TDMA_MODE == 0:
print 'RECEIVE FUNCTION (INDEX CODING)'
#get next set of messages
#if self.m_i == m or self.i == 0:
#update data structures and map (next unsolved vector) once the round has been completed
#if self.m_i == self.m or np.any(~self.unsolved):
#self.m_i = 0;
if self.TOTAL_TRANSMISSIONS == 0:
self.map = np.nonzero(np.isnan(self.final_messages.reshape(-1)))[0]
self.rx_symbols = self.rx_symbols[self.unsolved, :]
self.J = self.J[self.unsolved, :]
self.I = self.I[self.unsolved, :]
self.A = self.A[self.unsolved, :]
Transmitter.compute_matrices(self)
#time.sleep(3)
#case for first transmission, must compute matrices first?
self.TOTAL_TRANSMISSIONS = 1
Transmitter.index_stage(self)
#self.m_i += 1
#self.i += 1
return
#if self.m_i == self.m or np.any(~self.unsolved):
##Transmitter.compute_matrices(self)
#self.map = np.nonzero(np.isnan(self.final_messages.reshape(-1)))[0]
#self.rx_symbols = self.rx_symbols[self.unsolved, :]
#self.J = self.J[self.unsolved, :]
#self.I = self.I[self.unsolved, :]
#self.A = self.A[self.unsolved, :]
#Transmitter.compute_matrices(self)
##case for first transmission, must compute matrices first?
#if self.TOTAL_TRANSMISSIONS == 0:
#self.TOTAL_TRANSMISSIONS = 1
#Transmitter.index_stage(self)
#self.m_i += 1
#self.i += 1
#return
#
rmsgx = T2HMsg(msg.dataGet())
#check for acks, remove nodes with ack type 1 from TDMAleft, record transmissions?
newAck = np.array(rmsgx.get_ACKs())
#print 'Acks for transmission number ', rmsgx.get_transmission(), ': ', newAck
print 'RX ACKS: ' , newAck
print 'Remaining nodes: ', self.map
self.unsolved = newAck[self.map] != 1
#self.final_messages2 = self.final_messages
self.final_messages[newAck==1] = 1
            #CALCULATE ERASURES FROM THE NUMBER OF 2S, all 2s are useful, even if exited system because ergodic etc
#calculate for each node?
#print 'index erasures (old):', self.INDEX_ERASURES_VEC
self.INDEX_ERASURES += np.sum(newAck==2)
self.INDEX_ERASURES_VEC += newAck==2
print 'Erasure Vector (index):', self.INDEX_ERASURES_VEC
#self.unsolved = self.final_messages != 1
#print 'Rx DEST ', self.dest
#print 'Rx NEW UNSOLVED: ', self.unsolved
#print 'Rx NEW finalmessages: '
#print self.final_messages
if self.m_i == self.m or np.any(~self.unsolved):
#Transmitter.compute_matrices(self)
self.m_i = 0;
self.map = np.nonzero(np.isnan(self.final_messages.reshape(-1)))[0]
self.rx_symbols = self.rx_symbols[self.unsolved, :]
self.J = self.J[self.unsolved, :]
self.I = self.I[self.unsolved, :]
self.A = self.A[self.unsolved, :]
Transmitter.compute_matrices(self)
#time.sleep(3)
#send to all motes at end if np.any(np.isnan(self.final_messages)) ??
if np.any(np.isnan(self.final_messages)):
#print 'm_i' ,self.m_i
self.tx_symbols = np.append(self.tx_symbols, Symbol(self.V[self.m_i,:], self.W, self.map))
R = transmit_messages(1, self.eps_vec[self.map])
if self.i == 1:
self.rx_symbols = R
else:
self.rx_symbols = np.bmat([self.rx_symbols, R])
#if self.verbose:
#print 'Transmission ', self.m_i+1, '/', self.m #m_i+1 or now just m_i ???
#print self.rx_symbols.astype(int)
self.TOTAL_TRANSMISSIONS += 1
Transmitter.index_stage(self)
#self.m_i += 1
#self.i += 1
#Transmitter.compute_matrices(self)
#print 'late unsolved is ', self.unsolved
else:
#compute overall erasure probabilities for the iteration
#self.epsIndex = float(self.INDEX_ERASURES)/float(self.N*self.TOTAL_TRANSMISSIONS)
#now both tdma and index
self.epsIndex = (self.INDEX_ERASURES+self.TDMA_ERASURES)/(self.N*(self.TOTAL_TRANSMISSIONS+self.TDMA_TRANSMISSIONS))
self.epsVec = (self.TDMA_ERASURES_VEC+self.INDEX_ERASURES_VEC)/(self.TDMA_TRANSMISSIONS+self.TOTAL_TRANSMISSIONS)
if self.verbose:
print 'ITERATION', self.it, ', INDEX CODING TRANSMISSIONS:', self.TOTAL_TRANSMISSIONS
print 'ERASURES:', self.INDEX_ERASURES, ', EPS:', self.epsIndex, 'EPS (index only):', self.INDEX_ERASURES/(self.N*self.TOTAL_TRANSMISSIONS)
print 'EpsVec:', self.epsVec
#write epsvec instead
#COMPUTE ATTEMPTS PER NODE?
#self.TDMA_MODE = -1
#call cleanup and reset functions!!!!!
Transmitter.cleanup(self)
if (self.it < self.itMax):
Transmitter.reset(self)
else:
self.TDMA_MODE = -1
return self.TOTAL_TRANSMISSIONS #also exit program?
else:
#completed, print this indefinitely
#print 'INDEX CODING COMPLETE: Total number of transmissions: ', self.TOTAL_TRANSMISSIONS
print self.it, 'ITERATIONS COMPLETE'
return self.TOTAL_TRANSMISSIONS
#iterate??
def compute_matrices(self): #change to compute_matrixes
#self.TDMA_MODE = 0;
#eps_vec = .5*np.ones(self.N)
self.eps_vec = 0*np.ones(self.N)
#self.i = 1
#while np.any(np.isnan(self.final_messages)):
Kprime = len(self.map);
if self.verbose:
print 'Remaining ', Kprime, ' nodes are: '
print self.map
## special case for one remaining node
#doesnt make sense anymore, must change
if Kprime == 1:
#smsgx.set_V_row(self.V[self.m_i,:])
#self.TOTAL_TRANSMISSIONS += 1
#while not transmit_messages(1, self.eps_vec[self.map]):
#self.TOTAL_TRANSMISSIONS += 1
#print 'map is ', self.map
#print 'W is ', self.W
#print 'final messages are', self.final_messages
#self.final_messages[self.map] = self.W[self.map]
#make a new V to transmit instead, just what W needs
self.V = np.zeros((1,self.N))
self.V[0,self.map] = 1
self.m = 1
else:
## Generate next m transmissions
#(self.V, U) = alignment('mixed', self.J, 1e-4, 100, True)
(self.V, U) = alignment('greedy', self.J, 1e-4, 100, False)
#try changing it here??
self.V = np.asarray(self.V)
#RANDOM INSTEAD??? even easier, change right before transmission!
#self.V = np.random.
print 'new V is '
print self.V
#print 'type of new V is ', type(self.V)
#print 'type of an element of new V is ', type(self.V[0,0])
self.m = np.shape(self.V)[0]
if self.verbose:
print 'Minimum rank is ', self.m
# generate next symbol based on current V
L = len(self.tx_symbols);
if self.i == 1:
L = 0
self.unsolved = np.ones(Kprime) > 0
##print S
#Vnull = V[ :, [1,3,5,7] ]
##U,S,V = np.linalg.svd(Vnull.T)
def setup_logfile(self):
if not os.path.isfile(self.logfile):
lf = open(self.logfile,'w')
lf.write('Index Coding (Basic Antidotes) Log File\n')
#ADD TIME
#lf.write('%s\nN = %d\nPIECES = %d\n\n' % (time.strftime(self.timeformat), self.N, self.B))
lf.write('%s\n\n' % time.strftime(self.timeformat))
lf.write('N\tPIECES\tnumTDMA\tnumIndex\tepsAvg\tepsVec\n') #what else, time that started, erasures, slot time somehow?
#add individual erasures?
#lf.write('TDMA Transmissions\t Index Transmissions\n') #what else, time that started, erasures, slot time somehow?
lf.close()
if __name__ == "__main__":
print "Running"
np.set_printoptions(precision=3)
np.set_printoptions(suppress=True)
if len(sys.argv) > 1:
N = int(sys.argv[1])
else:
N = 8
#if len(sys.argv) > 1
#expMax = int(sys.argv
#iterate here?? for loop?
m = Transmitter(N, logfile='log01_28_2012.txt', itMax=100, B=4, verbose=True)
#iterate here???
#while (m.TDMA_MODE != -1):
#time.sleep(.5)
#tot = m.TOTAL_TRANSMISSIONS
#print 'TDMA MODE is ', m.TDMA_MODE, '. Total for this experiment is ', tot
##better way to iterate? will a loop work if each m is destroyed after it is done, something like constructor/destructor???
#del m #would this work? STILL PRINTS BOTH!!!
#m = Transmitter(N, B=2, verbose=True)
##iterate here???
#while (m.TDMA_MODE != -1):
#time.sleep(.5)
#tot = m.TOTAL_TRANSMISSIONS
#print 'TDMA MODE is ', m.TDMA_MODE, '. Total for this experiment is ', tot
#m.compute_matrices()
#m.tdma_stage()
#m.main_loop()
#time.sleep(1)
#m.send()
|
#! /usr/bin/python
# Find data file in switchboard files using basename.
import sys
import subprocess
sys.path.append('/home/nxs113020/cch_plda/tools/')
import make_cochannel
if __name__=='__main__':
"""
Reads list of switchboard sphfiles formatted as:
filename, spkrid, channel
Generates co-channel wav data for a given SIR for the
input data list.
Returns corresponding wav.scp and utt2spk files.
input:
1. input audio list in the format described above.
2. signal-to-interference ratio (dB), sir
outputs:
1. data wav.scp
2. data utt2spk
"""
swb_dir = '/home/nxs113020/Downloads/SwitchBoard2/Phase2'
input_audio_list = sys.argv[1]
sir = float(sys.argv[2])
out_wavscp = sys.argv[3]
out_utt2spk = sys.argv[4]
file_dict = {}
wavscp_list = []
utt2spk_list = []
for i in open(input_audio_list):
basename_spkr_channel = i.split(',')
basename = basename_spkr_channel[0].strip()
spkr_id = basename_spkr_channel[1].strip()
channel = basename_spkr_channel[2].strip()
# search for basename in Switchboard directory
if not(basename in file_dict):
output = subprocess.check_output('find %s -name %s.sph'%(swb_dir,basename),shell=True)
filepath = output.strip()
else:
filepath = file_dict[basename]
print basename
wavscp_format = "%s sox --ignore-length %s -t wav -b 16 - | "
uttid = spkr_id+'_'+basename+':'+channel
wavpath = make_cochannel.switchboard(filepath,channel,sir)
wavscp_list.append(wavscp_format%(uttid,wavpath)+'\n')
utt2spk_list.append(uttid+' '+spkr_id+'\n')
wavscp_list = sorted(set(wavscp_list))
utt2spk_list = sorted(set(utt2spk_list))
wavscp = open(out_wavscp,'w')
utt2spk = open(out_utt2spk,'w')
for i in range(len(wavscp_list)):
wavscp.write(wavscp_list[i])
utt2spk.write(utt2spk_list[i])
wavscp.close()
utt2spk.close()
|
import requests
from bs4 import BeautifulSoup
from election_snooper.helpers import post_to_slack
class BaseSnooper:
def get_page(self, url):
return requests.get(url)
def get_soup(self, url):
req = self.get_page(url)
return BeautifulSoup(req.content, "html.parser")
def post_to_slack(self, item):
message = """
Possible new election found: {}\n
<https://elections.democracyclub.org.uk{}>\n
Please go and investigate!
""".format(
item.title, item.get_absolute_url()
)
post_to_slack(message)
|
import math
import random
from copy import deepcopy
from itertools import chain
from typing import List, Tuple
import numpy as np
import torch
from torch.utils.data import Dataset
from parseridge.corpus.sentence import Sentence
from parseridge.corpus.vocabulary import Vocabulary
from parseridge.utils.logger import LoggerMixin
class Corpus(Dataset, LoggerMixin):
def __init__(
self, sentences: List[Sentence], vocabulary: Vocabulary, device: str = "cpu"
) -> None:
"""
A Corpus stores all the sentences and their numeric representations.
Use `get_iterator()` to generate an iterator over the data.
Note: This class implements PyTorch's Dataset class, however
it is advised to use the custom iterator instead, because PyTorch's
Dataloader only supports tensors, we need, however, to return
Sentence objects as well.
Parameters
----------
sentences : list of Sentence objects
The sentences for this corpus.
        vocabulary : Vocabulary object
            Converts sentence tokens into integers. If this is not a training
            corpus, it is advised to set the Vocabulary object into a read-only
            mode to avoid dimension mismatches in the embeddings.
device : str
The PyTorch device to copy the sentence representations to.
"""
self.vocabulary = vocabulary
self.sentences = sentences
self.device = device
self.sentence_tensors = []
self.sentence_token_freq_tensors = []
self.sentence_lengths = [len(sentence) for sentence in sentences]
self.sorted_indices = np.argsort(self.sentence_lengths)
self.num_oov_tokens = 0
assert self.vocabulary.get_id("<<<PADDING>>>") == 1
        # Count words; we need the counts later to blank out infrequent words to train the OOV
# word embedding.
[self.vocabulary.add(token.form) for sentence in sentences for token in sentence]
# Add ID to sentences so that we can reconstruct the order later
self.logger.info(f"Loading {len(self.sentences)} sentences...")
for i, sentence in enumerate(self.sentences):
sentence.id = i
tokens, frequencies = self._prepare_sentence(sentence)
self.sentence_tensors.append([tokens])
self.sentence_token_freq_tensors.append([frequencies])
# Copy sentence representations to device memory
self.logger.info(f"The corpus contains {self.num_oov_tokens} OOV tokens.")
self.logger.info("Copying sentence representation to device memory...")
self.sentence_tensors = torch.tensor(
self.sentence_tensors, dtype=torch.long, device=self.device
)
self.sentence_token_freq_tensors = torch.tensor(
self.sentence_token_freq_tensors, dtype=torch.float, device=self.device
)
@staticmethod
def _pad_list(list_: List[int], max_sentence_length: int) -> np.ndarray:
"""
        Pad the rest of the list with the padding token id (1).
"""
pad_size = max_sentence_length - len(list_)
padding = np.ones(pad_size)
return np.concatenate((list_, padding))
def _prepare_sentence(self, sentence: Sentence) -> Tuple[np.ndarray, np.ndarray]:
"""
Replaces the tokens in the sentence by integers and pads the output.
This is the place to add more features in the future like characters
or part-of-speech tags.
"""
tokens = [self.vocabulary.get_id(token.form) for token in sentence]
self.num_oov_tokens += tokens.count(self.vocabulary.get_id("<<<OOV>>>"))
sentence_padded = self._pad_list(tokens, max(self.sentence_lengths))
token_frequencies = [self.vocabulary.get_count(token.form) for token in sentence]
frequencies_padded = self._pad_list(token_frequencies, max(self.sentence_lengths))
return sentence_padded, frequencies_padded
def __len__(self) -> int:
return len(self.sentence_lengths)
def __getitem__(self, index: int) -> torch.Tensor:
return self.sentence_tensors[index]
class CorpusIterator(LoggerMixin):
def __init__(
self,
corpus: Corpus,
batch_size: int = 8,
shuffle: bool = False,
drop_last: bool = False,
train: bool = False,
oov_probability: float = 0.25,
group_by_length: bool = True,
token_dropout: float = 0.1,
):
"""
Helper class to iterate over the batches produced by the Corpus class.
Most importantly, it has the ability to shuffle the order of the batches.
This helper is needed because the corpus returns not only a Tensor with
the numeric representation of a sentence, but also the sentence object
itself, which is not supported by PyTorch's DataLoader class.
"""
self.corpus = corpus
self.batch_size = batch_size
self.shuffle = shuffle
self.drop_last = drop_last
self.oov_probability = oov_probability
self._iter = 0
assert self.corpus.vocabulary.get_id("<<<OOV>>>") == 0
assert self.corpus.vocabulary.get_id("<<<PADDING>>>") == 1
self.sentence_tensors = self.corpus.sentence_tensors
self._num_batches = len(self.corpus) / self.batch_size
# Replace the ids of some infrequent words randomly with the OOV id to train
# the OOV embedding vector.
if train and oov_probability > 0:
self.sentence_tensors = self.replace_infrequent_words_with_oov(
self.sentence_tensors,
self.corpus.sentence_token_freq_tensors,
self.oov_probability,
)
# As a regularization technique, we randomly replace tokens with the OOV id.
# In contrast to the OOV handling, this can affect all words.
# Note: The percentage of dropped out tokens is smaller than the dropout
# probability, as it is applied to the whole data, including the padding.
if train and token_dropout > 0:
self.sentence_tensors = self.apply_token_dropout(
self.sentence_tensors, p=token_dropout
)
# If len(self.corpus) % self.batch_size != 0, one batch will be slightly
# larger / smaller than the other ones. Use drop_last to ignore this one batch.
if self.drop_last:
self._num_batches = math.floor(self._num_batches)
else:
self._num_batches = math.ceil(self._num_batches)
# When using a batch_size > 1, performance and possibly accuracy can be improved
# by grouping sentences with similar length together to make better use of the
# batch processing. To do so, the content of all batches will be static,
# but their order will be randomized if shuffle is activated.
if group_by_length and batch_size > 1:
self._order = self.group_batches_by_length(
self.corpus.sentences, self.batch_size, self.shuffle
)
else:
# The naive way: Take the ids of all sentences and randomize them if wanted.
self._order = list(range(len(self.corpus)))
if self.shuffle:
random.shuffle(self._order)
@staticmethod
def replace_infrequent_words_with_oov(
sentence_tensors, frequency_tensors, oov_probability
):
# Compute the relative frequency
oov_probability_tensor = torch.zeros_like(frequency_tensors).fill_(oov_probability)
frequency_tensors = frequency_tensors / (frequency_tensors + oov_probability_tensor)
rand = torch.rand_like(sentence_tensors, dtype=torch.float)
mask = torch.lt(rand, frequency_tensors).type(torch.long)
return torch.mul(sentence_tensors, mask)
@staticmethod
def apply_token_dropout(sentence_tensors, p):
dropout = torch.rand_like(sentence_tensors, dtype=torch.float).fill_(p)
rand = torch.rand_like(sentence_tensors, dtype=torch.float)
mask = torch.lt(dropout, rand).type(torch.long)
return torch.mul(sentence_tensors, mask)
@staticmethod
def group_batches_by_length(sentences, batch_size, shuffle):
sentences_sorted = [
sentence.id for sentence in sorted(sentences, key=lambda s: len(s))
]
        # Make the list divisible by batch_size (handle the evenly-divisible case)
        rest_size = len(sentences_sorted) % batch_size
        rest = sentences_sorted[-rest_size:] if rest_size else []
        order = sentences_sorted[:-rest_size] if rest_size else sentences_sorted
chunks = np.array_split(order, len(order) / batch_size)
if shuffle:
random.shuffle(chunks)
return list(chain(*chunks)) + rest
def __len__(self):
return self._num_batches
def __iter__(self):
return self
def __next__(self):
if self._iter >= len(self):
raise StopIteration
else:
start = self._iter * self.batch_size
indices = self._order[start : start + self.batch_size]
# Ignore an incomplete batch at the end if wished
if len(indices) < self.batch_size and self.drop_last:
raise StopIteration
batch_sentences = [self.corpus.sentences[i] for i in indices]
# Sort the indices in descending order - this is required for
# batch processing in PyTorch.
batch_sentences = sorted(batch_sentences, key=lambda s: len(s), reverse=True)
indices_sorted = [sentence.id for sentence in batch_sentences]
batch_tensors = self.sentence_tensors[indices_sorted]
            # Cut off unnecessary padding
longest_sentence = max([len(s) for s in batch_sentences])
batch_tensors = batch_tensors[:, :, :longest_sentence]
self._iter += 1
return batch_tensors, deepcopy(batch_sentences)
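# --- Usage sketch (illustrative only) ---
# Small demo of the static helpers on a toy tensor; real usage constructs Corpus
# and CorpusIterator from proper Sentence and Vocabulary objects.
if __name__ == "__main__":
    print(Corpus._pad_list([5, 6, 7], 5))  # -> [5. 6. 7. 1. 1.] (1 is the padding id)
    toy = torch.tensor([[[5, 6, 7, 1, 1]]], dtype=torch.long)  # one padded sentence
    print(CorpusIterator.apply_token_dropout(toy, p=0.5))  # some ids zeroed to the OOV id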
|
#!/usr/bin/env python3
import argparse
import biotools
parser = argparse.ArgumentParser(
description='Prokaryotic gene finder.')
parser.add_argument('--file', required=True, type=str,
metavar='<str>', help='FASTA file')
parser.add_argument('--minorf', required=False, type=int, default=300,
metavar='<int>', help='minimum open reading frame length [%(default)i]')
arg = parser.parse_args()
gcode = {
'AAA' : 'K', 'AAC' : 'N', 'AAG' : 'K', 'AAT' : 'N',
'ACA' : 'T', 'ACC' : 'T', 'ACG' : 'T', 'ACT' : 'T',
'AGA' : 'R', 'AGC' : 'S', 'AGG' : 'R', 'AGT' : 'S',
'ATA' : 'I', 'ATC' : 'I', 'ATG' : 'M', 'ATT' : 'I',
'CAA' : 'Q', 'CAC' : 'H', 'CAG' : 'Q', 'CAT' : 'H',
'CCA' : 'P', 'CCC' : 'P', 'CCG' : 'P', 'CCT' : 'P',
'CGA' : 'R', 'CGC' : 'R', 'CGG' : 'R', 'CGT' : 'R',
'CTA' : 'L', 'CTC' : 'L', 'CTG' : 'L', 'CTT' : 'L',
'GAA' : 'E', 'GAC' : 'D', 'GAG' : 'E', 'GAT' : 'D',
'GCA' : 'A', 'GCC' : 'A', 'GCG' : 'A', 'GCT' : 'A',
'GGA' : 'G', 'GGC' : 'G', 'GGG' : 'G', 'GGT' : 'G',
'GTA' : 'V', 'GTC' : 'V', 'GTG' : 'V', 'GTT' : 'V',
'TAA' : '*', 'TAC' : 'Y', 'TAG' : '*', 'TAT' : 'Y',
'TCA' : 'S', 'TCC' : 'S', 'TCG' : 'S', 'TCT' : 'S',
'TGA' : '*', 'TGC' : 'C', 'TGG' : 'W', 'TGT' : 'C',
'TTA' : 'L', 'TTC' : 'F', 'TTG' : 'L', 'TTT' : 'F',
}
def anti(seq):
forward = 'ACGTRYMKWSBDHV'
reverse = 'TGCAYRKMWSVHBD'
table = seq.maketrans(forward, reverse)
return seq.translate(table)[::-1]
seq = 'AAAACCCGGT'
print(seq, anti(seq))
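# --- Sketch (illustrative only) ---
# The gcode table above is presumably meant for translating ORFs; a minimal
# translation helper might look like this (stops at the first in-frame stop codon).
def translate(dna):
    protein = ''
    for i in range(0, len(dna) - 2, 3):
        aa = gcode.get(dna[i:i+3], 'X')  # 'X' for unknown/ambiguous codons
        if aa == '*':
            break
        protein += aa
    return protein
print(translate('ATGGCATTTTAA'))  # -> MAF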
|
from django.urls import path
from . import views
from django.conf.urls import handler404
urlpatterns = [
path('', views.home, name='home'),
path('register', views.register, name='register_pizza_app'),
path('login', views.login, name='login_pizza_app'),
path('logout', views.logout, name='logout_pizza_app'),
path('create_pizza', views.create_pizza, name='create_pizza'),
path('get_price', views.get_price, name='get_price'),
path('add_pizza/<int:id_pizza>', views.add_pizza, name='add_pizza'),
path('add_extra/<int:id_extra>', views.add_extra, name='add_extra'),
path('ver_carrito', views.ver_carrito, name='ver_carrito'),
path('del_item_carrito/<str:carrito_id>', views.del_item_carrito, name='del_item_carrito'),
path('delete_pizza/<int:pizza_id>', views.del_pizza, name='delete_pizza'),
path('create_address', views.create_address, name='create_address'),
path('make_purchases', views.make_purchases, name='make_purchases'),
path('make_pizzas_data', views.make_pizzas_data, name='make_pizzas_data'),
path('make_extras_data', views.make_extras_data, name='make_extras_data'),
path('del_data', views.del_data, name='del_data'),
]
handler404 = 'pizza_app.views.error_404_view'
|
# -*- coding: utf-8 -*-
from extensions import db
# Alias common SQLAlchemy names
Column = db.Column
Model = db.Model
relationship = db.relationship
backref = db.backref
metadata = db.metadata
|
# print("Applying Lists")
# print("Example 1 (Adding members to an empty list)")
# List1 = [] # Create an empty list
# List1.append('Ahmad')
# List1.append(2212)
# print("Output : ", List1)
# print("=================")
# print("Example 2 (Removing a member from a list)")
# List2 = [1, 4, 'U']
# List2.remove('U') # First way to remove a list element
# print("Output 1 : ", List2)
# List2_1 = [1, 4, 'U']
# del (List2_1[2]) # Second way to remove a list element
# print("Output 2 : ", List2_1)
# print("=================")
# print("Example 3 (Searching for a member in a list)")
# List3 = [1, 2, 3, 5, 5]
# x = 5 in List3
# print("Output : ", x)
print("Applying Dictionaries")
print("Example 1 (Adding a dictionary member)")
Dict1 = {"A":1, "B":2, "C":3}
Dict1["D"] = 4
print("Output : ", Dict1)
print("=================")
print("Example 2 (Accessing a dictionary member)")
Dict2 = {"A":1, "B":2, "C":3}
print("Output : ", Dict2["A"])
print("=================")
print("Example 3 (Searching for a dictionary member)")
Dict3 = {"A":1, "B":2, "C":3}
print("Output : ", "B" in Dict3)
|
#!/bin/python
from flask import Flask
import threading
import time
from datetime import datetime, timedelta
import sys
import RPi.GPIO as GPIO
PIN = 25
app = Flask(__name__)
GPIO.setmode(GPIO.BCM)
GPIO.setwarnings(False)
GPIO.setup(PIN, GPIO.OUT)
led_status = False
time_thread = None
@app.route('/time/off')
def time_trigger_off():
print "off timer"
global time_thread
if time_thread != None:
time_thread.working = False
return "ok"
@app.route('/time/<h>/<m>')
def time_trigger(h,m):
print "start timer", h,m
global time_thread
time_thread = timeThread("%s:%s" % (h,m))
time_thread.start()
return "ok"
@app.route('/location/<x>/<y>')
def location_trigger(x,y):
print "location", x, y
return "ok"
@app.route('/status/<status>')
def led(status=None):
global led_status
global PIN
if not status in ["on", "off"]:
if led_status:
return "on"
else:
return "off"
if led_status:
led_status = False
GPIO.output(PIN, GPIO.LOW)
return "off"
else:
led_status = True
GPIO.output(PIN, GPIO.HIGH)
return "on"
@app.route('/info')
def info():
return """<div style="font-size:20px">
System: HOME<br/>
Status: ON<br/>
Uptime: %(uptime)s<br/>
</div>
""" % {
'uptime': get_uptime()
}
def get_uptime():
with open('/proc/uptime', 'r') as f:
uptime_seconds = float(f.readline().split()[0])
uptime_string = str(timedelta(seconds = uptime_seconds))
return uptime_string
def date_diff(time1, time2):
diff = datetime.strptime(time1, '%H:%M') - datetime.strptime(time2, '%H:%M')
return diff.seconds
class timeThread(threading.Thread):
def __init__(self, time1):
threading.Thread.__init__(self)
self.time1 = time1
self.working = True
def run(self):
print "Starting " + self.name
self.print_time(self.time1)
print "Exiting " + self.name
def print_time(self, time1):
now = datetime.now()
while self.working and date_diff(time1, now.strftime("%H:%M")) > 0:
now = datetime.now()
time.sleep(1)
print "date diff: %d" % ( date_diff(time1, now.strftime("%H:%M")) )
# event
doEvent()
def doEvent():
print "event"
if __name__ == '__main__':
app.run(host='0.0.0.0', debug = True)
GPIO.cleanup()
|
"""
Heber Cooke 10/3/2019
Chapter 2 Exercise 8
This program calculates and displays the value of a light year
the program takes the light speed of 3 * 10^8 meters per second
the program takes that value and multiply by seconds in a year
program displays the meters traveled per year
"""
SPEED = 3 * (10 ** 8)  # meters per second
secondsPerYr = 60 * 60 * 24 * 365
print(SPEED * secondsPerYr, "Meters traveled in a year")
|
def check(moves):  # returns (True, 1/2) - win, (True, 404) - draw, (False, 0) - game not finished
    for i in range(0, 3):
        if moves[i][0] != 0 and moves[i][0] == moves[i][1] and moves[i][1] == moves[i][2]:  # horizontal
            return True, moves[i][0]
        elif moves[0][i] != 0 and moves[0][i] == moves[1][i] and moves[1][i] == moves[2][i]:  # vertical
            return True, moves[0][i]
    if moves[0][0] != 0 and moves[0][0] == moves[1][1] and moves[1][1] == moves[2][2]:  # 1st diagonal
        return True, moves[0][0]
    if moves[0][2] != 0 and moves[0][2] == moves[1][1] and moves[1][1] == moves[2][0]:  # 2nd diagonal
        return True, moves[0][2]
    if all(0 not in row for row in moves):  # board full with no winner
        return True, 404
    return False, 0
game = [[0, 0, 0],
[0, 0, 0],
[0, 0, 0]]
game = [[1, 1, 2],
[2, 2, 1],
[1, 1, 2]]
print(check(game))
|
import pandas as pd
import numpy as np
import ipaddress
from sklearn.preprocessing import LabelEncoder,OneHotEncoder
import sklearn.metrics as sm
import sys
import matplotlib as mp
import seaborn as sb
from sklearn.preprocessing import Normalizer
from sklearn.naive_bayes import GaussianNB
from sklearn import linear_model
from sklearn.linear_model import SGDClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn import svm
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
import sklearn.ensemble as ske
from sklearn.experimental import enable_hist_gradient_boosting
from sklearn.ensemble import HistGradientBoostingClassifier
from sklearn.model_selection import train_test_split
from sklearn.metrics import r2_score
from sklearn.metrics import (precision_score, recall_score,f1_score, accuracy_score,mean_squared_error,mean_absolute_error)
from sklearn.metrics import confusion_matrix
from sklearn.metrics import classification_report
import scikitplot as skplt
import matplotlib.pyplot as plt
from sklearn.metrics import plot_confusion_matrix
import warnings
warnings.filterwarnings('ignore')
np.random.seed(3268)
data = pd.read_csv(r"C:\Users\Ankita Gupta\Desktop\Major_Project\KDDDataset.csv")
data.info()
print(data.head())
#print(data.isnull().sum())
print(data.shape)
print(data.describe())
print("Variety in Label:")
print(data["Label"].value_counts())
for columns in data.columns:
if(data[columns].dtypes=='object'):
count=data[columns].nunique()
print("column Name:",columns)
print(count)
data_column = ['Protocol_type']
data_values = data[data_column]
data_enc=data_values.apply(LabelEncoder().fit_transform)
print(data_enc.head())
data['Protocol_type'] = np.array(data_enc)
data_column = ['Service']
data_values = data[data_column]
data_enc=data_values.apply(LabelEncoder().fit_transform)
print(data_enc.head())
data['Service'] = np.array(data_enc)
data_column = ['Flag']
data_values = data[data_column]
data_enc=data_values.apply(LabelEncoder().fit_transform)
print(data_enc.head())
data['Flag'] = np.array(data_enc)
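# Note (suggested refactoring, not used below): the three encoding blocks above could be
# written as one loop, e.g.
# for col in ['Protocol_type', 'Service', 'Flag']:
#     data[col] = LabelEncoder().fit_transform(data[col])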
newLabel=data['Label']
BnewLabel=data['Label']
# change the label column
newLabel=newLabel.replace({ 'normal' : 0, 'neptune' : 1 ,'back': 1, 'land': 1, 'pod': 1, 'smurf': 1, 'teardrop': 1,'mailbomb': 1, 'apache2': 1, 'processtable': 1, 'udpstorm': 1, 'worm': 1,
'ipsweep' : 2,'nmap' : 2,'portsweep' : 2,'satan' : 2,'mscan' : 2,'saint' : 2
,'ftp_write': 3,'guess_passwd': 3,'imap': 3,'multihop': 3,'phf': 3,'spy': 3,'warezclient': 3,'warezmaster': 3,'sendmail': 3,'named': 3,'snmpgetattack': 3,'snmpguess': 3,'xlock': 3,'xsnoop': 3,'httptunnel': 3,
'buffer_overflow': 4,'loadmodule': 4,'perl': 4,'rootkit': 4,'ps': 4,'sqlattack': 4,'xterm': 4})
BnewLabel=BnewLabel.replace({ 'normal' : 0, 'neptune' : 1 ,'back': 1, 'land': 1, 'pod': 1, 'smurf': 1, 'teardrop': 1,'mailbomb': 1, 'apache2': 1, 'processtable': 1, 'udpstorm': 1, 'worm': 1,
'ipsweep' : 1,'nmap' : 1,'portsweep' : 1,'satan' : 1,'mscan' : 1,'saint' : 1
,'ftp_write': 1,'guess_passwd': 1,'imap': 1,'multihop': 1,'phf': 1,'spy': 1,'warezclient': 1,'warezmaster': 1,'sendmail': 1,'named': 1,'snmpgetattack': 1,'snmpguess': 1,'xlock': 1,'xsnoop': 1,'httptunnel': 1,
'buffer_overflow': 1,'loadmodule': 1,'perl': 1,'rootkit': 1,'ps': 1,'sqlattack': 1,'xterm': 1})
# put the new label column back
#Normal : 0
#DoS Attack : 1
#System scanners scan your local host for security vulnerabilities/ Probing : 2
#R2L Remote to Local :3
#U2R User to Remote : 4
data1 = data.copy()  # work on a copy so the binary labels do not overwrite the multi-class frame
data['Label'] = newLabel
x = data.drop('Label', axis=1)
y = data.Label
data1['Label'] = BnewLabel
x1 = data1.drop('Label', axis=1)
y1 = data1.Label
X_train, X_test, y_train, y_test = train_test_split(x, y, test_size=.3,random_state=105)
X1_train, X1_test, y1_train, y1_test = train_test_split(x1, y1, test_size=.3,random_state=105)
scaler = Normalizer().fit(X_train)
x_train = scaler.transform(X_train)
scaler = Normalizer().fit(X_test)
x_test = scaler.transform(X_test)
scaler = Normalizer().fit(X1_train)
x1_train = scaler.transform(X1_train)
scaler = Normalizer().fit(X1_test)
x1_test = scaler.transform(X1_test)
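# Note: sklearn's Normalizer rescales each sample to unit norm independently, so fit() learns
# nothing from the data; fitting it separately on the train and test splits (as above) does
# not leak information between them.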
#GAUSSIAN_NB
print("GAUSSIAN_NB:")
from sklearn.naive_bayes import GaussianNB
Gb=GaussianNB()
Gb.fit(x_train,y_train)
y_predict = Gb.predict(x_test)
acc = r2_score(y_test, y_predict)
accuracy = accuracy_score(y_test, y_predict)
recall = recall_score(y_test, y_predict , average="macro")
precision = precision_score(y_test,y_predict , pos_label=1, average='macro', sample_weight=None, zero_division=0 )
f1 = f1_score(y_test,y_predict,average="macro")
#print("NaiveBayes (r2 score):-")
#print(acc)
print("Accuracy")
print(accuracy)
print("Precision")
print(precision)
print("Recall")
print(recall)
print("F1score")
print(f1)
#print("Confusion Matrix:")
#print(confusion_matrix(y_test, y_predict))
print("Classification Report:")
print(classification_report(y_test, y_predict))
print("Confusion Matrix(Multilabel):")
print(sm.multilabel_confusion_matrix(y_test, y_predict))
titles_options = [("Confusion matrix, without normalization", None),
("Normalized confusion matrix", 'true')]
for title, normalize in titles_options:
disp = plot_confusion_matrix(Gb, x_test, y_test,
cmap=plt.cm.Blues,
normalize=normalize)
disp.ax_.set_title(title)
print(title)
print(disp.confusion_matrix)
plt.show()
#for binary classification
Gb=GaussianNB()
Gb.fit(x1_train,y1_train)
y1_predict = Gb.predict(x1_test)
TN, FP, FN, TP = confusion_matrix(y1_test,y1_predict).ravel()
TPR = TP/(TP+FN)
TNR = TN/(TN+FP)
FPR = FP/(FP+TN)
FNR = FN/(TP+FN)
print("True Positive Rate:")
print(TPR)
print("True Negative Rate:")
print(TNR)
print("False Positive Rate:")
print(FPR)
print("False Negative Rate:")
print(FNR)
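# Hedged refactoring sketch (defined here but not used below): the TPR/TNR/FPR/FNR block
# above is repeated verbatim for every classifier; a helper like this could compute all
# four rates in one place.
def binary_rates(y_true, y_pred):
    """Return (TPR, TNR, FPR, FNR) for binary predictions."""
    tn, fp, fn, tp = confusion_matrix(y_true, y_pred).ravel()
    return tp / (tp + fn), tn / (tn + fp), fp / (fp + tn), fn / (tp + fn)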
#LINEARMODEL
print("LINEAR_MODEL:")
lm = linear_model.BayesianRidge()
lm.fit(x_train,y_train)
lm_predict=lm.predict(x_test)
lm_predict=lm_predict.round()
acc=r2_score(y_test,lm_predict)
lmaccuracy = accuracy_score(y_test, lm_predict)
lmrecall = recall_score(y_test, lm_predict , average="macro")
lmprecision = precision_score(y_test,lm_predict, pos_label=1, average='macro', sample_weight=None, zero_division=0 )
lmf1 = f1_score(y_test,lm_predict,average="macro")
print("Linear Model (r2 score):-")
print(acc)
print("Accuracy")
print(lmaccuracy)
print("Precision")
print(lmprecision)
print("Recall")
print(lmrecall)
print("F1score")
print(lmf1)
#print("Confusion Matrix:")
#print(confusion_matrix(y_test, y_predict))
print("Classification Report:")
print(classification_report(y_test, lm_predict))
print("Confusion Matrix(Multilabel):")
print(sm.multilabel_confusion_matrix(y_test, lm_predict))
""""
TN, FP, FN, TP = confusion_matrix(y_test,lm_predict).ravel()
TPR = TP/(TP+FN)
TNR = TN/(TN+FP)
FPR = FP/(FP+TN)
FNR = FN/(TP+FN)
print("True Positive Rate:")
print(TPR)
print("True Negative Rate:")
print(TNR)
print("False Positive Rate:")
print(FPR)
print("False Negative Rate:")
print(FNR)
"""
#LOGISTICREGRESSION
print("LOGISTIC_REGRESSION:")
lg = LogisticRegression()
lg.fit(x_train,y_train)
lg_predict=lg.predict(x_test)
lg_predict=lg_predict.round()
acc=r2_score(y_test,lg_predict)
accuracy = accuracy_score(y_test, lg_predict)
recall = recall_score(y_test, lg_predict , average="macro")
precision = precision_score(y_test,lg_predict , pos_label=1, average='macro', sample_weight=None, zero_division=0)
f1 = f1_score(y_test,lg_predict,average="macro")
print("Logistic Regression (r2 score):-")
print(acc)
print("Accuracy")
print(accuracy)
print("Precision")
print(precision)
print("Recall")
print(recall)
print("F1score")
print(f1)
#print("Confusion Matrix:")
#print(confusion_matrix(y_test, lg_predict))
print("Classification Report:")
print(classification_report(y_test, lg_predict))
print("Confusion Matrix(Multilabel):")
print(sm.multilabel_confusion_matrix(y_test, lg_predict))
titles_options = [("Confusion matrix, without normalization", None),
("Normalized confusion matrix", 'true')]
for title, normalize in titles_options:
disp = plot_confusion_matrix(lg, x_test, y_test,
cmap=plt.cm.Blues,
normalize=normalize)
disp.ax_.set_title(title)
print(title)
print(disp.confusion_matrix)
plt.show()
#for binary classification
lg.fit(x1_train,y1_train)
y1_predict = lg.predict(x1_test)
TN, FP, FN, TP = confusion_matrix(y1_test,y1_predict).ravel()
TPR = TP/(TP+FN)
TNR = TN/(TN+FP)
FPR = FP/(FP+TN)
FNR = FN/(TP+FN)
print("True Positive Rate:")
print(TPR)
print("True Negative Rate:")
print(TNR)
print("False Positive Rate:")
print(FPR)
print("False Negative Rate:")
print(FNR)
#KNEIGHBOURCLASSIFIER
print("KNEIGHBOUR_CLASSIFIER:")
knc = KNeighborsClassifier()
knc.fit(x_train,y_train)
knc_predict=knc.predict(x_test)
knc_predict=knc_predict.round()
acc=r2_score(y_test,knc_predict)
accuracy = accuracy_score(y_test, knc_predict)
recall = recall_score(y_test, knc_predict , average="macro")
precision = precision_score(y_test,knc_predict, pos_label=1, average='macro', sample_weight=None, zero_division=0)
f1 = f1_score(y_test,knc_predict,average="macro")
print("KNeighbour Classifier (r2 score):-")
print(acc)
print("Accuracy:")
print(accuracy)
print("Precision:")
print(precision)
print("Recall:")
print(recall)
print("F1score:")
print(f1)
#print("Confusion Matrix:")
#print(confusion_matrix(y_test, knc_predict))
print("Classification Report:")
print(classification_report(y_test, knc_predict))
print("Confusion Matrix(Multilabel):")
print(sm.multilabel_confusion_matrix(y_test, knc_predict))
titles_options = [("Confusion matrix, without normalization", None),
("Normalized confusion matrix", 'true')]
for title, normalize in titles_options:
disp = plot_confusion_matrix(knc, x_test, y_test,
cmap=plt.cm.Blues,
normalize=normalize)
disp.ax_.set_title(title)
print(title)
print(disp.confusion_matrix)
plt.show()
#for binary classification
knc.fit(x1_train,y1_train)
y1_predict = knc.predict(x1_test)
TN, FP, FN, TP = confusion_matrix(y1_test,y1_predict).ravel()
TPR = TP/(TP+FN)
TNR = TN/(TN+FP)
FPR = FP/(FP+TN)
FNR = FN/(TP+FN)
print("True Positive Rate:")
print(TPR)
print("True Negative Rate:")
print(TNR)
print("False Positive Rate:")
print(FPR)
print("False Negative Rate:")
print(FNR)
#DECISIONTREECLASSIFIER
print("DECISION_TREE_CLASSIFIER:")
DTree = DecisionTreeClassifier()
DTree.fit(x_train, y_train)
D_predict = DTree.predict(x_test)
#print(y_test.head())
#print(D_predict)
from sklearn.metrics import r2_score
acc=r2_score(y_test, D_predict)
accuracy = accuracy_score(y_test, D_predict)
recall = recall_score(y_test, D_predict , average='macro')
precision = precision_score(y_test,D_predict ,pos_label=1, average='macro', sample_weight=None, zero_division=0)
f1 = f1_score(y_test,D_predict,average="macro")
print("Decision Tree Classifier (r2 score):-")
print(acc)
print("Accuracy:")
print(accuracy)
print("Precision:")
print(precision)
print("Recall:")
print(recall)
print("F1score:")
print(f1)
#print("Confusion Matrix:")
#print(confusion_matrix(y_test, D_predict))
print("Classification Report:")
print(classification_report(y_test, D_predict))
print("Confusion Matrix(Multilabel):")
print(sm.multilabel_confusion_matrix(y_test, D_predict))
titles_options = [("Confusion matrix, without normalization", None),
("Normalized confusion matrix", 'true')]
for title, normalize in titles_options:
disp = plot_confusion_matrix(DTree, x_test, y_test,
cmap=plt.cm.Blues,
normalize=normalize)
disp.ax_.set_title(title)
print(title)
print(disp.confusion_matrix)
plt.show()
#for binary classification
DTree.fit(x1_train,y1_train)
y1_predict = DTree.predict(x1_test)
TN, FP, FN, TP = confusion_matrix(y1_test,y1_predict).ravel()
TPR = TP/(TP+FN)
TNR = TN/(TN+FP)
FPR = FP/(FP+TN)
FNR = FN/(TP+FN)
print("True Positive Rate:")
print(TPR)
print("True Negative Rate:")
print(TNR)
print("False Positive Rate:")
print(FPR)
print("False Negative Rate:")
print(FNR)
#RANDOMFORESTCLASSIFIER
print("RANDOMFOREST_CLASSIFIER:")
rf=RandomForestClassifier()
rf.fit(x_train,y_train)
#RandomForestRegressor(bootstrap=True, ccp_alpha=0.0, criterion='mse',
# max_depth=None, max_features='auto', max_leaf_nodes=None,
#max_samples=None, min_impurity_decrease=0.0,
#min_impurity_split=None, min_samples_leaf=1,
#min_samples_split=2, min_weight_fraction_leaf=0.0,
#n_estimators=100, n_jobs=None, oob_score=False,
#random_state=None, verbose=0, warm_start=False)
rfy_predict=rf.predict(x_test)
acc=r2_score(y_test,rfy_predict)
#print(y_test.head())
#print(rfy_predict)
accuracy = accuracy_score(y_test, rfy_predict)
recall = recall_score(y_test, rfy_predict,average='macro')
precision = precision_score(y_test,rfy_predict,pos_label=1, average='macro', sample_weight=None, zero_division=0)
f1 = f1_score(y_test,rfy_predict,average='macro')
print("Random Forest Classifier (r2 score):-")
print(acc)
print("Accuracy:")
print(accuracy)
print("Precision:")
print(precision)
print("Recall:")
print(recall)
print("F1score:")
print(f1)
#print("Confusion Matrix:")
#print(confusion_matrix(y_test, rfy_predict))
print("Classification Report:")
print(classification_report(y_test, rfy_predict))
print("Confusion Matrix(Multilabel):")
print(sm.multilabel_confusion_matrix(y_test, rfy_predict))
titles_options = [("Confusion matrix, without normalization", None),
("Normalized confusion matrix", 'true')]
for title, normalize in titles_options:
disp = plot_confusion_matrix(rf, x_test, y_test,
cmap=plt.cm.Blues,
normalize=normalize)
disp.ax_.set_title(title)
print(title)
print(disp.confusion_matrix)
plt.show()
#for binary classification
rf.fit(x1_train,y1_train)
y1_predict = rf.predict(x1_test)
TN, FP, FN, TP = confusion_matrix(y1_test,y1_predict).ravel()
TPR = TP/(TP+FN)
TNR = TN/(TN+FP)
FPR = FP/(FP+TN)
FNR = FN/(TP+FN)
print("True Positive Rate:")
print(TPR)
print("True Negative Rate:")
print(TNR)
print("False Positive Rate:")
print(FPR)
print("False Negative Rate:")
print(FNR)
#ADABOOSTCLASSIFIER
print("ADABOOST_CLASSIFIER:")
ab=ske.AdaBoostClassifier(n_estimators=100)
ab.fit(x_train, y_train)  # fit, i.e. train the model on the training split
ab_predict = ab.predict(x_test)
#print(y_test.head())
#print(ab_predict)
acc = r2_score(y_test,ab_predict)
accuracy = accuracy_score(y_test, ab_predict)
recall = recall_score(y_test, ab_predict , average="macro")
precision = precision_score(y_test,ab_predict , pos_label=1, average='macro', sample_weight=None, zero_division=0)
f1 = f1_score(y_test,ab_predict,average="macro")
#print("Adaptive boost Classifier (r2 score):-")
#print(acc)
print("Accuracy:")
print(accuracy)
print("Precision:")
print(precision)
print("Recall:")
print(recall)
print("F1score:")
print(f1)
#print("Confusion Matrix:")
#print(confusion_matrix(y_test, ab_predict))
print("Classification Report:")
print(classification_report(y_test, ab_predict))
print("Confusion Matrix(Multilabel):")
print(sm.multilabel_confusion_matrix(y_test, ab_predict))
titles_options = [("Confusion matrix, without normalization", None),
("Normalized confusion matrix", 'true')]
for title, normalize in titles_options:
disp = plot_confusion_matrix(ab, x_test, y_test,
cmap=plt.cm.Blues,
normalize=normalize)
disp.ax_.set_title(title)
print(title)
print(disp.confusion_matrix)
plt.show()
#for binary classification
ab.fit(x1_train,y1_train)
y1_predict = ab.predict(x1_test)
TN, FP, FN, TP = confusion_matrix(y1_test,y1_predict).ravel()
TPR = TP/(TP+FN)
TNR = TN/(TN+FP)
FPR = FP/(FP+TN)
FNR = FN/(TP+FN)
print("True Positive Rate:")
print(TPR)
print("True Negative Rate:")
print(TNR)
print("False Positive Rate:")
print(FPR)
print("False Negative Rate:")
print(FNR)
"""
#GRADIENTBOOSTINGCLASSIFIER
print("GRADIENTBOOSTING_CLASSIFIER:")
GB=ske.GradientBoostingClassifier(n_estimators=50)
GB.fit(x_train, y_train)#fit may be called as 'trained'
GB_predict=GB.predict(x_test)
print(y_test.head())
print(GB_predict)
acc=r2_score(y_test,GB_predict)
accuracy = accuracy_score(y_test, GB_predict)
recall = recall_score(y_test, GB_predict , average='macro')
precision = precision_score(y_test,GB_predict , pos_label=1, average='macro', sample_weight=None, zero_division=0)
f1 = f1_score(y_test,GB_predict,average='macro')
print("Gradient Boosting Classifier (r2 score):-")
print(acc)
print("accuracy")
print(accuracy)
print("precision")
print(precision)
print("recall")
print(recall)
print("f1score")
print(f1)
print("Confusion Matrix(Multilabel):")
print(sm.multilabel_confusion_matrix(y_test, y_predict))
print("Confusion Matrix:")
print(confusion_matrix(y_test, y_predict))
print("Classification Report:")
print(classification_report(y_test, y_predict))
"""
#HISTOGRAMBOOSTINGCLASSIFIER
print("HISTOGRAMBOOSTING_CLASSIFIER:")
Hgb= HistGradientBoostingClassifier()
Hgb.fit(x_train,y_train)
hgb_predict=Hgb.predict(x_test)
#print(y_test.head())
#print(hgb_predict)
acc = r2_score(y_test,hgb_predict)
accuracy = accuracy_score(y_test, hgb_predict)
recall = recall_score(y_test, hgb_predict , average='macro')
precision = precision_score(y_test,hgb_predict , pos_label=1, average='macro', sample_weight=None, zero_division=0)
f1 = f1_score(y_test,hgb_predict,average='macro')
print("Histogram Gradient Boosting Classifier(r2_score):-")
print(acc)
print("Accuracy:")
print(accuracy)
print("Precision:")
print(precision)
print("Recall:")
print(recall)
print("F1score:")
print(f1)
#print("Confusion Matrix:")
#print(confusion_matrix(y_test, hgb_predict))
print("Classification Report:")
print(classification_report(y_test, hgb_predict))
print("Confusion Matrix(Multilabel):")
print(sm.multilabel_confusion_matrix(y_test, hgb_predict))
titles_options = [("Confusion matrix, without normalization", None),
("Normalized confusion matrix", 'true')]
for title, normalize in titles_options:
disp = plot_confusion_matrix(Hgb, x_test, y_test,
cmap=plt.cm.Blues,
normalize=normalize)
disp.ax_.set_title(title)
print(title)
print(disp.confusion_matrix)
plt.show()
#for binary classification
Hgb.fit(x1_train,y1_train)
y1_predict = Hgb.predict(x1_test)
TN, FP, FN, TP = confusion_matrix(y1_test,y1_predict).ravel()
TPR = TP/(TP+FN)
TNR = TN/(TN+FP)
FPR = FP/(FP+TN)
FNR = FN/(TP+FN)
print("True Positive Rate:")
print(TPR)
print("True Negative Rate:")
print(TNR)
print("False Positive Rate:")
print(FPR)
print("False Negative Rate:")
print(FNR)
""""
#XGBoost is an implementation of gradient boosted decision trees designed for speed and performance.
from xgboost import XGBClassifier
xgb = XGBClassifier(objective='reg:squarederror')
xgb.fit(x_train,y_train)
xgb_predict=xgb.predict(x_test)
acc=r2_score(y_test,xgb_predict)
print("Extreme Gradient Boosting Classifier(r2_score):-")
print(acc)
"""
#GRADIENTBOOSTINGCLASSIFIER
print("GRADIENTBOOSTING_CLASSIFIER:")
GB=ske.GradientBoostingClassifier(n_estimators=50)
GB.fit(x_train, y_train)  # fit, i.e. train the model on the training split
GB_predict=GB.predict(x_test)
#print(y_test.head())
#print(GB_predict)
acc=r2_score(y_test,GB_predict)
accuracy = accuracy_score(y_test, GB_predict)
recall = recall_score(y_test, GB_predict , average='macro')
precision = precision_score(y_test,GB_predict , pos_label=1, average='macro', sample_weight=None, zero_division=0)
f1 = f1_score(y_test,GB_predict,average='macro')
print("Gradient Boosting Classifier (r2 score):-")
print(acc)
print("Accuracy:")
print(accuracy)
print("Precision:")
print(precision)
print("Recall:")
print(recall)
print("F1score:")
print(f1)
#print("Confusion Matrix:")
#print(confusion_matrix(y_test, GB_predict))
print("Confusion Matrix(Multilabel):")
print(sm.multilabel_confusion_matrix(y_test, GB_predict))
print("Classification Report:")
print(classification_report(y_test, GB_predict))
titles_options = [("Confusion matrix, without normalization", None),
("Normalized confusion matrix", 'true')]
for title, normalize in titles_options:
disp = plot_confusion_matrix(GB, x_test, y_test,
cmap=plt.cm.Blues,
normalize=normalize)
disp.ax_.set_title(title)
print(title)
print(disp.confusion_matrix)
plt.show()
#for binary classification
GB.fit(x1_train,y1_train)
y1_predict = GB.predict(x1_test)
TN, FP, FN, TP = confusion_matrix(y1_test,y1_predict).ravel()
TPR = TP/(TP+FN)
TNR = TN/(TN+FP)
FPR = FP/(FP+TN)
FNR = FN/(TP+FN)
print("True Positive Rate:")
print(TPR)
print("True Negative Rate:")
print(TNR)
print("False Positive Rate:")
print(FPR)
print("False Negative Rate:")
print(FNR)
|
import utils
import configuration , callbacks
import combos
import db
import gtk
try :
import hildon
except :
hildon = None
def cell_data_func (column, renderer, model, iter, user_data) :
value = model.get( iter, user_data[0] )[0]
if user_data[0] == configuration.column_dict["CONSUM"] or user_data[0] == configuration.column_dict["CO2EMISSION"] :
if (value*value) < 1e-10 :
renderer.set_property( "text", "-")
return
renderer.set_property( "text" , user_data[1] % value )
# Reworked, but completely ported (except for gettext)
def get_column_header ( info , config ) :
format = info[3] or "%s"
label = config.unit_label( info[2] )
return format % label
class FuelpadModel ( gtk.TreeModelSort ) :
def __init__( self , config ) :
if not config.db.is_open() :
raise Exception( "There is no database available" )
store = gtk.ListStore(str, float, float, float, float, float , float , float , float , float , float , float , str, int, bool)
query = "SELECT day,km,trip,fill,consum,price,priceperlitre,service,oil,tires,notes,id FROM record WHERE carid=%d ORDER BY km" % config.db.currentcar
for row in config.db.get_rows( query ) :
date = utils.convdate( config.dateformat , None , row[0] )
convnotes = row[10]
trip , fill = utils.doubleornothing( row[2] ) , utils.doubleornothing( row[3] )
price , consum = utils.doubleornothing( row[5] ) , utils.doubleornothing( row[4] )
length , priceperlitre = utils.doubleornothing( row[1] ) , utils.doubleornothing( row[6] )
co2 = utils.calc_co2_emission( consum , config.db.fueltype() )
if price and trip :
pricepertrip = price / config.SIlength2user(trip)
else :
pricepertrip = 0
iter = store.append()
store.set( iter ,
configuration.column_dict['DAY'], date,
configuration.column_dict['KM'], config.SIlength2user(length),
configuration.column_dict['TRIP'], config.SIlength2user(trip),
configuration.column_dict['FILL'], config.SIvolume2user(fill),
configuration.column_dict['CONSUM'], config.SIconsumption2user(consum),
configuration.column_dict['PRICE'], price,
configuration.column_dict['PRICEPERTRIP'], pricepertrip,
configuration.column_dict['PRICEPERLITRE'], config.SIppl2user(priceperlitre),
configuration.column_dict['SERVICE'], utils.doubleornothing( row[7] ),
configuration.column_dict['OIL'], utils.doubleornothing( row[8] ),
configuration.column_dict['TIRES'], utils.doubleornothing( row[9] ),
configuration.column_dict['CO2EMISSION'], config.SIemission2user( co2 ),
configuration.column_dict['NOTES'], convnotes,
configuration.column_dict['ID'], row[11],
configuration.column_dict['VISIBLE'], True
)
gtk.TreeModelSort.__init__( self , store )
self.connect_object( "sort-column-changed", self.sort_change , config )
def sort_change( self , config ) :
colid , order = self.get_sort_column_id()
if colid is None :
config.maintablesorted = False
else :
config.maintablesorted = True
config.maintablesortcol = colid
config.maintablesortorder = int(order)
class FuelpadAbstractView :
def __init__ ( self , config ) :
attrs = configuration.font_attrs ( config.fontsize )
for info in configuration.column_info :
col = gtk.TreeViewColumn()
if info[6] : # column is showable
label = gtk.Label()
col.set_widget( label )
label.set_attributes( attrs )
label.show()
# pack cell renderer into tree view column
renderer = gtk.CellRendererText()
col.pack_start(renderer, True)
col.add_attribute(renderer, "text", info[0])
if info[4] :
col.set_cell_data_func( renderer , cell_data_func, ( info[0] , info[4] ) )
col.set_resizable( True )
renderer.set_property( "scale" , configuration.fontscalefactors[config.fontsize] )
col.set_sort_column_id( info[0] )
else :
col.set_visible( False )
# pack tree view column into tree view
self.append_column(col)
self.update_column_headers( config )
self.set_headers_visible( True )
select = self.get_selection()
select.set_mode( gtk.SELECTION_SINGLE )
def update_column_headers ( self , config ) :
for col in self.get_columns() :
colid = col.get_sort_column_id()
if colid != -1 :
colinfo = configuration.column_info[colid]
header = get_column_header( colinfo , config )
col.get_widget().set_text( header )
self.get_column(colinfo[0]).set_visible( config.dispcol & (1<<colinfo[0]) )
def update ( self , pui ) :
# Update the UI
while gtk.events_pending() :
gtk.main_iteration()
if pui.config.changed :
pui.stb_car.select_combo_item( pui.view.get_model() , pui.config.db )
pui.config.changed = False
self.set_model( FuelpadModel( pui.config ) )
self.update_column_headers( pui.config )
pui.update_totalkm()
class FuelpadAbstractWindow :
def __init__ ( self , config ) :
self.connect( "delete_event" , callbacks.delete_event , self )
self.mainfullscreen = False
self.config = config
self.warn = False
self.create_mainwin_widgets()
# Begin the main application
self.show_all()
self.toolbar_show_hide()
# Show or hide toolbars (with reversed logic)
def toolbar_show_hide ( self ) :
if self.config.main_toolbar_visible :
self.main_toolbar.show()
else :
self.main_toolbar.hide()
if self.config.secondary_toolbar_visible :
self.secondary_toolbar.show()
else :
self.secondary_toolbar.hide()
def create_mainwin_widgets ( self ) :
vbox = gtk.VBox(False, 0)
self.add( vbox )
self.create_mainwin_menu( vbox )
vbox.pack_start( self.datascrollwin, True , True , 0 )
self.view = FuelpadView( self.config )
self.datascrollwin.add( self.view )
self.create_mainwin_toolbar( )
self.create_secondary_toolbar( )
# Add toolbars
self.pack_toolbars( vbox )
self.enable_mainmenu_items()
vbox.show_all()
def create_mainwin_menu ( self , vbox ) :
self.main_menu.show_all()
def create_mainwin_toolbar ( self ) :
# Create toolbar
self.main_toolbar = gtk.Toolbar();
# Create toolbar button items
self.mtb_add = gtk.ToolButton( gtk.STOCK_ADD )
self.mtb_edit = gtk.ToolButton( gtk.STOCK_EDIT )
self.mtb_delete = gtk.ToolButton( gtk.STOCK_DELETE )
self.mtb_close = gtk.ToolButton( gtk.STOCK_QUIT )
self.mtb_add.set_expand( True )
self.mtb_edit.set_expand( True )
self.mtb_delete.set_expand( True )
self.mtb_close.set_expand( True )
# Add all items to toolbar
self.main_toolbar.insert( self.mtb_add, -1)
self.main_toolbar.insert( gtk.SeparatorToolItem(), -1)
self.main_toolbar.insert( self.mtb_edit, -1)
self.main_toolbar.insert( self.mtb_delete, -1)
self.main_toolbar.insert( gtk.SeparatorToolItem(), -1)
self.main_toolbar.insert( self.mtb_close, -1)
self.mtb_add.connect( "clicked" , callbacks.newrecord , self )
self.mtb_edit.connect( "clicked", callbacks.editrecord, self)
self.mtb_delete.connect( "clicked", callbacks.deleterecord, self)
self.mtb_close.connect( "clicked" , callbacks.delete_event , self )
def create_secondary_toolbar( self ) :
# Create toolbar
self.secondary_toolbar = gtk.Toolbar()
# Create toolbar items
attrs = configuration.font_attrs ( 1 )
# Car combo
self.stb_car = combos.FuelpadCarItem( self.config )
# Total distance
self.stb_totalkm = gtk.ToolItem()
self.stb_totalkmlabel = gtk.Label()
self.stb_totalkmlabel.set_selectable( True )
self.stb_totalkmlabel.set_attributes( attrs )
self.stb_totalkm.add( self.stb_totalkmlabel )
# Average consumption
self.stb_avgconsum = gtk.ToolItem()
self.stb_avgconsumlabel = gtk.Label()
self.stb_avgconsumlabel.set_selectable( True )
self.stb_avgconsumlabel.set_attributes( attrs )
self.stb_avgconsum.add( self.stb_avgconsumlabel )
# Total cost
self.stb_totcost = gtk.ToolItem()
self.stb_totcostlabel = gtk.Label()
self.stb_totcostlabel.set_selectable( True )
self.stb_totcostlabel.set_attributes( attrs )
self.stb_totcost.add( self.stb_totcostlabel )
self.update_totalkm()
self.stb_car.set_expand( False )
self.stb_totalkm.set_expand( True )
self.stb_avgconsum.set_expand( True )
self.stb_totcost.set_expand( True )
self.stb_add.set_expand( True )
# Add all items to toolbar
self.stb_car.add_to( self.secondary_toolbar , -1 )
self.secondary_toolbar.insert(self.stb_totalkm, -1);
self.secondary_toolbar.insert(self.stb_avgconsum, -1);
self.secondary_toolbar.insert(self.stb_totcost, -1);
self.secondary_toolbar.insert(self.stb_add, -1);
self.stb_car.set_action_callback( callbacks.car_apply_cb , self )
def enable_mainmenu_items( self ) :
dbopen = self.config.db.is_open()
self.mtb_add.set_sensitive( dbopen )
self.mtb_edit.set_sensitive( dbopen )
def update_totalkm ( self ) :
totalkm = self.config.db.totalkm(self.config.stbstattime)
str = "%.0f %s" % ( self.config.SIlength2user(totalkm) , self.config.unit_label( "length" ) )
self.stb_totalkmlabel.set_text( str )
totalfill = self.config.db.totalfill(self.config.stbstattime)
totalfillkm = self.config.db.totalfillkm(self.config.stbstattime)
if totalfillkm != 0.0 :
str = "%.1f %s" % ( self.config.SIconsumption2user(totalfill/totalfillkm*100) , self.config.unit_label( "consume" ) )
else :
str = "%.1f %s" % ( 0.0 , self.config.unit_label( "consume" ) )
self.stb_avgconsumlabel.set_text( str )
str = "%.0f %s" % ( self.config.db.totalcost() , self.config.currency )
self.stb_totcostlabel.set_text( str )
if hildon :
class FuelpadView ( hildon.GtkTreeView , FuelpadAbstractView ) :
def __init__ ( self , config ) :
config.main_toolbar_visible = False
config.secondary_toolbar_visible = True
model = FuelpadModel( config )
hildon.GtkTreeView.__init__( self , gtk.HILDON_UI_MODE_EDIT , model )
FuelpadAbstractView.__init__( self , config )
self.connect( "hildon-row-tapped" , callbacks.recordactivated )
self.taptime , self.taprow = -1 , -1
if config.maintablesorted :
self.get_model().set_sort_column_id( config.maintablesortcol , config.maintablesortorder )
class FuelpadWindow( hildon.StackableWindow , FuelpadAbstractWindow ) :
def __init__ ( self , config ) :
# Create the hildon program and setup the title
program = hildon.Program.get_instance()
gtk.set_application_name( "Fuelpad" )
# Create HildonWindow and set it to HildonProgram
hildon.StackableWindow.__init__( self )
program.add_window( self )
FuelpadAbstractWindow.__init__( self , config )
def create_mainwin_widgets ( self ) :
self.datascrollwin = hildon.PannableArea()
self.datascrollwin.set_property( "mov-mode" , hildon.MOVEMENT_MODE_BOTH )
FuelpadAbstractWindow.create_mainwin_widgets( self )
def pack_toolbars( self , widget=None ) :
self.add_toolbar( self.main_toolbar )
self.add_toolbar( self.secondary_toolbar )
def create_secondary_toolbar( self ) :
self.stb_add = gtk.ToolItem()
self.stb_addbutton = hildon.Button(gtk.HILDON_SIZE_AUTO,
hildon.BUTTON_ARRANGEMENT_HORIZONTAL,
None,
"Add record")
image = gtk.image_new_from_stock(gtk.STOCK_ADD, gtk.ICON_SIZE_BUTTON)
self.stb_addbutton.set_image(image)
self.stb_add.add( self.stb_addbutton )
self.stb_addbutton.connect( "clicked" , callbacks.newrecord , self , True )
FuelpadAbstractWindow.create_secondary_toolbar( self )
def create_mainwin_menu ( self , vbox ) :
self.main_menu = hildon.AppMenu()
self.mm_item_new = hildon.Button(gtk.HILDON_SIZE_AUTO,
hildon.BUTTON_ARRANGEMENT_VERTICAL,
"Add record",
None)
self.main_menu.append( self.mm_item_new )
self.mm_item_edit = hildon.Button(gtk.HILDON_SIZE_AUTO,
hildon.BUTTON_ARRANGEMENT_VERTICAL,
"Edit record" ,
None)
self.main_menu.append( self.mm_item_edit )
self.mm_item_delete = hildon.Button(gtk.HILDON_SIZE_AUTO,
hildon.BUTTON_ARRANGEMENT_VERTICAL,
"Delete record" ,
None)
self.main_menu.append( self.mm_item_delete )
self.mm_item_settings = hildon.Button(gtk.HILDON_SIZE_AUTO,
hildon.BUTTON_ARRANGEMENT_VERTICAL,
"Settings",
None)
self.main_menu.append( self.mm_item_settings )
self.mm_item_about = hildon.Button(gtk.HILDON_SIZE_AUTO,
hildon.BUTTON_ARRANGEMENT_VERTICAL,
"About ..." ,
None)
self.main_menu.append( self.mm_item_about )
self.mm_item_new.connect( "clicked", callbacks.newrecord , self )
self.mm_item_edit.connect( "clicked", callbacks.editrecord , self )
self.mm_item_settings.connect( "clicked", callbacks.settings , self )
self.mm_item_delete.connect( "clicked", callbacks.deleterecord , self )
self.mm_item_about.connect( "clicked", callbacks.about , self )
self.set_app_menu( self.main_menu )
FuelpadAbstractWindow.create_mainwin_menu( self , vbox )
else :
class FuelpadView ( gtk.TreeView , FuelpadAbstractView ) :
def __init__ ( self , config ) :
model = FuelpadModel( config )
gtk.TreeView.__init__( self , model )
FuelpadAbstractView.__init__( self , config )
self.connect( "row-activated" , callbacks.recordactivated )
self.taptime , self.taprow = -1 , -1
class FuelpadWindow( gtk.Window , FuelpadAbstractWindow ) :
def __init__ ( self , config ) :
# Create the main window
gtk.Window.__init__( self , gtk.WINDOW_TOPLEVEL )
self.set_title( "fuelpad" )
# NOTE : temporary to get a decent window
self.set_size_request(640,480)
FuelpadAbstractWindow.__init__( self , config )
def create_mainwin_widgets ( self ) :
self.datascrollwin = gtk.ScrolledWindow( None , None )
FuelpadAbstractWindow.create_mainwin_widgets( self )
def pack_toolbars( self , widget ) :
widget.pack_start( self.main_toolbar , False , False , 5 )
widget.pack_start( self.secondary_toolbar , False , False , 5 )
def create_secondary_toolbar( self ) :
self.stb_add = gtk.ToolButton( gtk.STOCK_ADD )
self.stb_add.connect( "clicked" , callbacks.newrecord , self , True )
FuelpadAbstractWindow.create_secondary_toolbar( self )
def create_mainwin_menu ( self , vbox ) :
self.main_menu = gtk.Menu()
self.mm_menu_db = gtk.Menu()
self.mm_menu_record = gtk.Menu()
self.mm_menu_stat = gtk.Menu()
self.mm_menu_view = gtk.Menu()
self.mm_menu_toolbar = gtk.Menu()
self.mm_menu_fontsize = gtk.Menu()
# Create main menu items
self.mm_item_db = gtk.MenuItem( label="Database" )
self.mm_item_record = gtk.MenuItem( label="Record" )
self.mm_item_stat = gtk.MenuItem( label="Statistics" )
self.mm_item_alarm = gtk.MenuItem( label="Reminders..." )
self.mm_item_view = gtk.MenuItem( label="View" )
self.mm_item_settings = gtk.MenuItem( label="Settings..." )
self.mm_item_about = gtk.MenuItem( label="About" )
self.mm_item_exit = gtk.MenuItem( label="Exit" )
# Create database menu items
self.mm_item_open = gtk.MenuItem( label="Open..." )
self.mm_item_close = gtk.MenuItem( label="Close" )
self.mm_item_import = gtk.MenuItem( label="Import..." )
self.mm_item_export = gtk.MenuItem( label="Export..." )
# Create record menu items
self.mm_item_new = gtk.MenuItem( label="New" )
self.mm_item_edit = gtk.MenuItem( label="Edit" )
self.mm_item_delete = gtk.MenuItem( label="Delete" )
# Create statistics menu items
self.mm_item_quick = gtk.MenuItem( label="Quick" )
self.mm_item_monthly = gtk.MenuItem( label="Graphical" )
self.mm_item_report = gtk.MenuItem( label="Yearly report" )
# Create view menu items
self.main_menu_item_fullscreen = gtk.CheckMenuItem("Full screen" )
self.main_menu_item_fullscreen.set_active( self.mainfullscreen )
self.mm_item_toolbar = gtk.MenuItem( label="Toolbars" )
self.mm_item_fontsize = gtk.MenuItem( label="Table font size" )
self.mm_item_columnselect = gtk.MenuItem( label="Select columns..." )
self.mm_item_filter = gtk.MenuItem( label="Filter records..." )
# Create toolbar menu items
self.mm_item_toolbar_main = gtk.CheckMenuItem( "Buttons" )
self.mm_item_toolbar_main.set_active( self.config.main_toolbar_visible )
self.mm_item_toolbar_secondary = gtk.CheckMenuItem( "Information" )
self.mm_item_toolbar_secondary.set_active( self.config.secondary_toolbar_visible )
# Create fontsize menu items
radio_menu_group = self.mm_item_fontsize_x_small = gtk.RadioMenuItem( None , "Extra small" )
if self.config.fontsize == configuration.XSMALL :
self.mm_item_fontsize_x_small.set_active(True)
radio_menu_group = self.mm_item_fontsize_small = gtk.RadioMenuItem( radio_menu_group , "Small" )
if self.config.fontsize == configuration.SMALL :
self.mm_item_fontsize_small.set_active( True )
radio_menu_group = self.mm_item_fontsize_medium = gtk.RadioMenuItem( radio_menu_group , "Medium" )
if self.config.fontsize == configuration.MEDIUM :
self.mm_item_fontsize_medium.set_active( True )
radio_menu_group = self.mm_item_fontsize_large = gtk.RadioMenuItem( radio_menu_group , "Large" )
if self.config.fontsize == configuration.LARGE :
self.mm_item_fontsize_large.set_active( True )
# Add menu items to right menus
# Main menu
self.main_menu.append( self.mm_item_record )
self.main_menu.append( self.mm_item_stat )
self.main_menu.append( gtk.SeparatorMenuItem() )
self.main_menu.append( self.mm_item_alarm )
self.main_menu.append( gtk.SeparatorMenuItem() )
self.main_menu.append( self.mm_item_view )
self.main_menu.append( gtk.SeparatorMenuItem() )
self.main_menu.append( self.mm_item_settings )
self.main_menu.append( gtk.SeparatorMenuItem() )
self.main_menu.append( self.mm_item_about )
self.main_menu.append( self.mm_item_exit )
# Database menu
self.mm_menu_db.append( self.mm_item_open )
self.mm_menu_db.append( self.mm_item_close )
self.mm_menu_db.append( self.mm_item_import )
self.mm_menu_db.append( self.mm_item_export )
# Record menu
self.mm_menu_record.append( self.mm_item_new )
self.mm_menu_record.append( self.mm_item_edit )
self.mm_menu_record.append( self.mm_item_delete )
# Statistics menu
self.mm_menu_stat.append( self.mm_item_quick )
self.mm_menu_stat.append( self.mm_item_monthly )
self.mm_menu_stat.append( self.mm_item_report )
# View menu
self.mm_menu_view.append( self.main_menu_item_fullscreen )
self.mm_menu_view.append( self.mm_item_toolbar )
self.mm_menu_view.append( self.mm_item_fontsize )
self.mm_menu_view.append( self.mm_item_columnselect )
self.mm_menu_view.append( self.mm_item_filter )
# Toolbars menu
self.mm_menu_toolbar.append( self.mm_item_toolbar_main )
self.mm_menu_toolbar.append( self.mm_item_toolbar_secondary )
# Font size menu
self.mm_menu_fontsize.append( self.mm_item_fontsize_x_small )
self.mm_menu_fontsize.append( self.mm_item_fontsize_small )
self.mm_menu_fontsize.append( self.mm_item_fontsize_medium )
self.mm_menu_fontsize.append( self.mm_item_fontsize_large )
self.mm_menubar = gtk.MenuBar()
vbox.pack_start( self.mm_menubar, False, False, 2)
self.mm_menubar.show()
self.mm_item_db.set_submenu( self.mm_menu_db )
self.mm_menubar.append( self.mm_item_db )
self.mm_item_fuelpad = gtk.MenuItem( label="fuelpad" )
self.mm_item_fuelpad.show()
self.mm_item_fuelpad.set_submenu( self.main_menu )
self.mm_menubar.append( self.mm_item_fuelpad )
self.mm_item_record.set_submenu( self.mm_menu_record )
self.mm_item_stat.set_submenu( self.mm_menu_stat )
self.mm_item_view.set_submenu( self.mm_menu_view )
self.mm_item_toolbar.set_submenu( self.mm_menu_toolbar )
self.mm_item_fontsize.set_submenu( self.mm_menu_fontsize )
# Attach the callback functions to the activate signal
self.mm_item_settings.connect( "activate", callbacks.settings, self)
self.mm_item_about.connect( "activate", callbacks.about, self )
self.mm_item_exit.connect( "activate", callbacks.delete_event, self )
self.mm_item_new.connect( "activate" , callbacks.newrecord, self )
self.mm_item_edit.connect( "activate" , callbacks.editrecord, self )
self.mm_item_delete.connect( "activate" , callbacks.deleterecord, self )
self.main_menu_item_fullscreen.connect( "toggled" , callbacks.main_fullscreen , self )
self.mm_item_toolbar_main.connect( "toggled" , callbacks.main_toolbar , self )
self.mm_item_toolbar_secondary.connect( "toggled" , callbacks.secondary_toolbar , self )
self.mm_item_fontsize_x_small.connect( "toggled", callbacks.fontsize_x_small , self )
self.mm_item_fontsize_small.connect( "toggled" , callbacks.fontsize_small , self )
self.mm_item_fontsize_medium.connect( "toggled", callbacks.fontsize_medium , self )
self.mm_item_fontsize_large.connect( "toggled" , callbacks.fontsize_large , self )
FuelpadAbstractWindow.create_mainwin_menu( self , vbox )
def enable_mainmenu_items( self ) :
dbopen = self.config.db.is_open()
self.mm_item_new.set_sensitive( dbopen )
self.mm_item_edit.set_sensitive( dbopen )
self.mm_item_delete.set_sensitive( dbopen )
self.mm_item_monthly.set_sensitive( dbopen )
self.mm_item_report.set_sensitive( dbopen )
self.mm_item_close.set_sensitive( dbopen )
self.mm_item_import.set_sensitive( dbopen )
self.mm_item_export.set_sensitive( dbopen )
FuelpadAbstractWindow.enable_mainmenu_items( self )
def main_loop ( ) :
gtk.main()
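# Hedged entry-point sketch (assumption: the real startup code lives in another module, and
# the Configuration constructor name below is hypothetical):
# config = configuration.Configuration()
# win = FuelpadWindow(config)
# main_loop()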
|
from .basic import (IdDict, intervals, LazyProperty, logger, keydefaultdict,
natsorted, set_log_level)
from . import kit
from .system import caffeine
|
from django.urls import reverse
from rest_framework import status
from rest_framework.test import APITestCase
from shoutout.models import organization, user, shoutout
from rest_framework.test import APIRequestFactory
class OrganizationTests(APITestCase):
def test_create_account(self):
"""
        Ensure we can create a new organization object.
"""
# url = reverse('OrganizationList')
response = self.client.post('/v1/org/',
{
"name": "org3",
"slack_org_id": "wwddwdd",
"channel_name": "wddwwddw",
"channel_id": "dwwddww",
"access_token": "wdwdwdw",
"installation_date": "2019-09-23T11:49:49.858572Z",
"bot_access_token": "wdwdwwddw"
}, format='json')
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
self.assertEqual(organization.objects.count(), 1)
self.assertEqual(organization.objects.get().name, 'org3')
class UserTests(APITestCase):
def test_create_account(self):
"""
        Ensure we can create a new user object (after creating its organization).
"""
self.client.post('/v1/org/',
{
"name": "org3",
"slack_org_id": "wwddwdd",
"channel_name": "wddwwddw",
"channel_id": "dwwddww",
"access_token": "wdwdwdw",
"installation_date": "2019-09-23T11:49:49.858572Z",
"bot_access_token": "wdwdwwddw"
}, format='json')
url = '/v1/user/'
data = {
"org_id": 1,
"slack_mem_id": "123432",
"email": "samar@samar.com",
"name": "samar",
"avatar": "google.com"
}
response = self.client.post('/v1/user/', data, format='json')
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
self.assertEqual(user.objects.count(), 1)
self.assertEqual(user.objects.get().name, 'samar')
class ShoutoutTests(APITestCase):
def test_create_account(self):
"""
        Ensure we can create a new shoutout object between two users.
"""
self.client.post('/v1/org/',
{
"name": "org3",
"slack_org_id": "wwddwdd",
"channel_name": "wddwwddw",
"channel_id": "dwwddww",
"access_token": "wdwdwdw",
"installation_date": "2019-09-23T11:49:49.858572Z",
"bot_access_token": "wdwdwwddw"
}, format='json')
url = '/v1/user/'
data = {
"org_id": 1,
"slack_mem_id": "123432",
"email": "samar@samar.com",
"name": "samar",
"avatar": "google.com"
}
response = self.client.post('/v1/user/', data, format='json')
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
self.assertEqual(user.objects.count(), 1)
self.assertEqual(user.objects.get().name, 'samar')
data = {
"org_id": 1,
"slack_mem_id": "123322432",
"email": "bobr@samar.com",
"name": "bob",
"avatar": "google.com"
}
response = self.client.post('/v1/user/', data, format='json')
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
self.assertEqual(user.objects.count(), 2)
data = {
"giver_id": 1,
"receiver_id": 2,
"message": "good boi",
"timestamps": "2019-09-23T10:10:51.768501Z",
"message_ts": "134554323413"
}
response = self.client.post('/v1/shoutout/', data, format='json')
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
self.assertEqual(shoutout.objects.count(), 1)
self.assertEqual(shoutout.objects.get().message, 'good boi')
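# Hedged sketch (suggestion, not part of the original tests): the organization payload is
# posted verbatim in every test; a shared base class could remove that duplication, e.g.
# class BaseShoutoutTestCase(APITestCase):
#     def setUp(self):
#         self.client.post('/v1/org/', {...same organization payload as above...}, format='json')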
|
# streamlit_app.py
import streamlit as st
import s3fs
import os
# Create connection object.
# `anon=False` means not anonymous, i.e. it uses access keys to pull data.
fs = s3fs.S3FileSystem(anon=False)
# Retrieve file contents.
# Uses st.cache to only rerun when the query changes or after 10 min.
@st.cache(ttl=600)
def read_file(filename):
with fs.open(filename) as f:
return f.read().decode("utf-8")
content = read_file("streamlitbucket-khhangau/myfile.csv")
# Print results.
for line in content.strip().split("\n"):
name, pet = line.split(",")
st.write(f"{name} has a :{pet}:")
|
# Copyright 2022 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
"""The `BuildFileDefaultsParserState.set_defaults` is used by the pants.engine.internals.Parser,
exposed as the `__defaults__` BUILD file symbol.
When parsing a BUILD file (from the rule `pants.engine.internals.build_files.parse_address_family`) the
defaults from the closest parent BUILD file are passed as input to the parser, and the new defaults
resulting after the BUILD file has been parsed are returned in the `AddressFamily`.
These defaults are then applied when creating the `TargetAdaptor` targets by the `Registrar` in the
parser.
"""
from __future__ import annotations
from dataclasses import dataclass
from typing import Any, Callable, Iterable, Mapping, Tuple, Union
from pants.engine.addresses import Address
from pants.engine.internals.parametrize import Parametrize
from pants.engine.target import (
Field,
ImmutableValue,
InvalidFieldException,
RegisteredTargetTypes,
Target,
TargetGenerator,
)
from pants.engine.unions import UnionMembership
from pants.util.frozendict import FrozenDict
SetDefaultsValueT = Mapping[str, Any]
SetDefaultsKeyT = Union[str, Tuple[str, ...]]
SetDefaultsT = Mapping[SetDefaultsKeyT, SetDefaultsValueT]
class BuildFileDefaults(FrozenDict[str, FrozenDict[str, ImmutableValue]]):
"""Map target types to default field values."""
class ParametrizeDefault(Parametrize):
"""Parametrize for default field values.
This is to have eager validation on the field values rather than erroring first when applied on
an actual target.
"""
@classmethod
def create(
cls, freeze: Callable[[Any], ImmutableValue], parametrize: Parametrize
) -> ParametrizeDefault:
return cls(
*map(freeze, parametrize.args),
**{kw: freeze(arg) for kw, arg in parametrize.kwargs.items()},
)
@dataclass
class BuildFileDefaultsParserState:
address: Address
defaults: dict[str, Mapping[str, Any]]
registered_target_types: RegisteredTargetTypes
union_membership: UnionMembership
@classmethod
def create(
cls,
path: str,
defaults: BuildFileDefaults,
registered_target_types: RegisteredTargetTypes,
union_membership: UnionMembership,
) -> BuildFileDefaultsParserState:
return cls(
address=Address(path, generated_name="__defaults__"),
defaults=dict(defaults),
registered_target_types=registered_target_types,
union_membership=union_membership,
)
def _freeze_field_value(self, field_type: type[Field], value: Any) -> ImmutableValue:
if isinstance(value, ParametrizeDefault):
return value
elif isinstance(value, Parametrize):
def freeze(v: Any) -> ImmutableValue:
return self._freeze_field_value(field_type, v)
return ParametrizeDefault.create(freeze, value)
else:
return field_type.compute_value(raw_value=value, address=self.address)
def get_frozen_defaults(self) -> BuildFileDefaults:
types = self.registered_target_types.aliases_to_types
return BuildFileDefaults(
{
target_alias: FrozenDict(
{
field_type.alias: self._freeze_field_value(field_type, default)
for field_alias, default in fields.items()
for field_type in self._target_type_field_types(types[target_alias])
if field_alias in (field_type.alias, field_type.deprecated_alias)
}
)
for target_alias, fields in self.defaults.items()
}
)
def get(self, target_alias: str) -> Mapping[str, Any]:
# Used by `pants.engine.internals.parser.Parser._generate_symbols.Registrar.__call__`
return self.defaults.get(target_alias, {})
def set_defaults(
self,
*args: SetDefaultsT,
all: SetDefaultsValueT | None = None,
extend: bool = False,
ignore_unknown_fields: bool = False,
ignore_unknown_targets: bool = False,
) -> None:
defaults: dict[str, dict[str, Any]] = (
{} if not extend else {k: dict(v) for k, v in self.defaults.items()}
)
if all is not None:
self._process_defaults(
defaults,
{tuple(self.registered_target_types.aliases): all},
ignore_unknown_fields=True,
ignore_unknown_targets=ignore_unknown_targets,
)
for arg in args:
self._process_defaults(
defaults,
arg,
ignore_unknown_fields=ignore_unknown_fields,
ignore_unknown_targets=ignore_unknown_targets,
)
# Update with new defaults, dropping targets without any default values.
for tgt, default in defaults.items():
if not default:
self.defaults.pop(tgt, None)
else:
self.defaults[tgt] = default
def _target_type_field_types(self, target_type: type[Target]) -> tuple[type[Field], ...]:
return (
*target_type.class_field_types(self.union_membership),
*(target_type.moved_fields if issubclass(target_type, TargetGenerator) else ()),
)
def _process_defaults(
self,
defaults: dict[str, dict[str, Any]],
targets_defaults: SetDefaultsT,
ignore_unknown_fields: bool = False,
ignore_unknown_targets: bool = False,
):
if not isinstance(targets_defaults, dict):
raise ValueError(
f"Expected dictionary mapping targets to default field values for {self.address} "
f"but got: {type(targets_defaults).__name__}."
)
types = self.registered_target_types.aliases_to_types
for target, default in targets_defaults.items():
if not isinstance(default, dict):
raise ValueError(
f"Invalid default field values in {self.address} for target type {target}, "
f"must be an `dict` but was {default!r} with type `{type(default).__name__}`."
)
targets: Iterable[str]
targets = target if isinstance(target, tuple) else (target,)
for target_alias in map(str, targets):
if target_alias in types:
target_type = types[target_alias]
elif ignore_unknown_targets:
continue
else:
raise ValueError(f"Unrecognized target type {target_alias} in {self.address}.")
# Copy default dict if we may mutate it.
raw_values = dict(default) if ignore_unknown_fields else default
# Validate that field exists on target
valid_field_aliases = set(
target_type._get_field_aliases_to_field_types(
self._target_type_field_types(target_type)
).keys()
)
for field_alias in default.keys():
if field_alias not in valid_field_aliases:
if ignore_unknown_fields:
del raw_values[field_alias]
else:
raise InvalidFieldException(
f"Unrecognized field `{field_alias}` for target {target_type.alias}. "
f"Valid fields are: {', '.join(sorted(valid_field_aliases))}.",
)
# Merge all provided defaults for this call.
defaults.setdefault(target_type.alias, {}).update(raw_values)
|
class bioconductor:
def __init__(self):
# initialize logger
import logging
self.logger = logging.getLogger('metapath')
# check if python module 'rpy2' is available
try:
import rpy2.robjects as robjects
except:
self.logger.critical("could not import python module 'rpy2'")
quit()
self.robjects = robjects
self.r = robjects.r
def install(self, package_list = []):
# check if python module 'rpy2' is available
try:
import rpy2.robjects as robjects
from rpy2.robjects.packages import importr
except:
self.logger.critical("could not import python module 'rpy2'")
quit()
# evaluate bioconductor R script
base = importr('base')
base.source("http://www.bioconductor.org/biocLite.R")
bioclite = self.robjects.globalenv['biocLite']
# install bioconductor packages
if package_list == []:
bioclite()
else:
for package in package_list:
bioclite(package)
def csv_to_dict(self, file = None, header = False):
# check if python module 'csv' is available
try:
import csv
except:
self.logger.critical("could not import python module 'csv'")
quit()
# check if file is readable
try:
csvfile = open(file, "rb")
except:
self.logger.critical("could not open file '%s'" % (file))
quit()
# try to detect csv dialect
try:
dialect = csv.Sniffer().sniff(csvfile.read(1024))
csvfile.seek(0)
            reader = csv.DictReader(csvfile, dialect=dialect)
except:
csvfile.seek(0)
reader = csv.DictReader(csvfile, delimiter='\t', quotechar='\'')
return reader
def convert_geneids(self, input_list = [],
input_format = 'alias', output_format = 'entrezid',
input_file = None, output_file = None,
filter_results = False):
import sys
# make local copy of input list
list = input_list[:]
        # prepare format strings
input_format = input_format.lower().strip()
output_format = output_format.lower().strip()
if not list:
if not input_file:
self.logger.critical("you have to specify at least one of the parameters: 'list', 'input_file'")
quit()
self.csv_to_dict(input_file)
#
# convert using annotation packages in bioconductor
#
annotation_packages = [
"hgu95a",
"hgu95av2",
"hgu95b",
"hgu95c",
"hgu95d",
"hgu95e",
"hgu133a",
"hgu133a2",
"hgu133b",
"hgu133plus2",
"hgug4100a",
"hgug4101a",
"hgug4110b",
"hgug4111a",
"hgug4112a",
"hguqiagenv3"
]
original_stdout = sys.stdout
if input_format in annotation_packages:
# load bioconductor annotation package
self.logger.info("sending command to R: library('%s.db')" % (input_format))
try:
sys.stdout = NullDevice()
self.r.library("%s.db" % (input_format))
sys.stdout = original_stdout
except:
sys.stdout = original_stdout
self.logger.critical("you have to install the R/bioconductor package: '%s.db'" % (input_format))
quit()
# get listvector
self.logger.info("sending command to R: x <- %s%s" % (input_format, output_format.upper()))
try:
sys.stdout = NullDevice()
self.r('x <- %s%s' % (input_format, output_format.upper()))
sys.stdout = original_stdout
except:
sys.stdout = original_stdout
self.logger.critical("output format '%s' is not supported by '%s.db'" % (output_format, input_format))
quit()
self.logger.info("sending command to R: mapped_genes <- mappedkeys(x)")
self.r('mapped_genes <- mappedkeys(x)')
self.logger.info("sending command to R: listmap <- as.list(x[mapped_genes])")
self.r('listmap <- as.list(x[mapped_genes])')
# prepare search list
search_list = []
for a in list:
if a[0] == 'X':
a = a[1:]
search_list.append(a)
elif input_format in ['entrezgeneid', 'entrezgene', 'entrezid', 'entrez']:
# load bioconductor annotation package
self.logger.info("sending command to R: library('org.Hs.eg.db')")
try:
sys.stdout = NullDevice()
self.r.library("org.Hs.eg.db")
sys.stdout = original_stdout
except:
sys.stdout = original_stdout
self.logger.critical("you have to install the R/bioconductor package: 'org.Hs.eg.db'")
quit()
# get listvector
self.logger.info("sending command to R: x <- org.Hs.eg%s" % (output_format.upper()))
try:
self.r('x <- org.Hs.eg%s' % (output_format.upper()))
except:
self.logger.critical("output format '%s' is not supported by 'org.Hs.eg.db'" % (output_format))
quit()
self.logger.info("sending command to R: mapped_genes <- mappedkeys(x)")
self.r('mapped_genes <- mappedkeys(x)')
self.logger.info("sending command to R: listmap <- as.list(x[mapped_genes])")
self.r('listmap <- as.list(x[mapped_genes])')
# prepare search list
search_list = list
elif output_format in ['entrezgeneid', 'entrezgene', 'entrezid', 'entrez']:
# load bioconductor annotation package
self.logger.info("sending command to R: library('org.Hs.eg.db')")
try:
sys.stdout = NullDevice()
self.r.library("org.Hs.eg.db")
sys.stdout = original_stdout
except:
sys.stdout = original_stdout
self.logger.critical("you have to install the R/bioconductor package: 'org.Hs.eg.db'")
quit()
# get listvector
self.logger.info("sending command to R: x <- org.Hs.eg%s2EG" % (input_format.upper()))
try:
self.r('x <- org.Hs.eg%s2EG' % (input_format.upper()))
except:
self.logger.critical("input format '%s' is not supported by 'org.Hs.eg.db'" % (input_format))
quit()
self.logger.info("sending command to R: mapped_genes <- mappedkeys(x)")
self.r('mapped_genes <- mappedkeys(x)')
self.logger.info("sending command to R: listmap <- as.list(x[mapped_genes])")
self.r('listmap <- as.list(x[mapped_genes])')
# prepare search list
search_list = list
else:
self.logger.critical("conversion from '%s' to '%s' is not supported" % \
(input_format, output_format))
quit()
# search listvector
black_list = []
for i, a in enumerate(search_list):
try:
self.r("sym <- listmap['%s']" % (a))
self.r("sym <- unlist(sym)")
list[i] = self.robjects.globalenv["sym"][0]
found = True
except:
black_list.append(list[i])
# filter results
if filter_results:
list = [item for item in list if item not in black_list]
return list, black_list
class NullDevice():
def write(self, s):
pass
    def flush(self):
pass
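# Hedged usage sketch (assumes rpy2 and the R/bioconductor package 'org.Hs.eg.db' are installed):
# bc = bioconductor()
# ids, unmapped = bc.convert_geneids(["TP53", "BRCA1"], input_format="symbol",
#                                    output_format="entrezid", filter_results=True)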
|
import torch
from torch import nn
from .metric import Metric
class Accuracy(Metric):
def __init__(self, name="accuracy", dtype=None,
reduction="sum", **kwargs):
super().__init__(name, dtype, **kwargs)
assert reduction in {"sum", "mean", "max", "min"}
# TODO: more reduction
self.reduction = reduction
def forward(self, y_true, y_pred,
out_weight=None):
return self.update_state(y_true, y_pred,
out_weight=out_weight)
@torch.no_grad()
def update_state(self, y_true, y_pred,
out_weight=None):
if out_weight is not None:
# TODO
raise NotImplementedError("out_weight")
if y_pred.ndim == 2:
y_pred = y_pred.argmax(1)
        if y_true.ndim == 2:
y_true = y_true.argmax(1)
self.correct += torch.sum(y_pred == y_true)
self.total += y_true.numel()
def reset_states(self):
# K.batch_set_value([(v, 0) for v in self.variables])
self.total = torch.tensor(0)
self.correct = torch.tensor(0)
def result(self):
return (self.correct.float() / self.total).detach()
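# Hedged usage sketch (assumes the Metric base class calls reset_states() on construction):
# acc = Accuracy()
# acc.update_state(torch.tensor([0, 1, 1]),
#                  torch.tensor([[0.9, 0.1], [0.2, 0.8], [0.7, 0.3]]))
# print(acc.result())  # tensor(0.6667)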
|
# coding=utf-8
import sys
from ualfred import Workflow3, notify
log = None
def main(wf):
import pickle
from data import Data
from ualfred import web
city = wf.stored_data('cy-city')
api_key = wf.get_password('apiKey')
if city is None:
wf.add_item('请通过cy-opt 设置所在城市')
wf.send_feedback()
return
if api_key is None:
api_key = 'TAkhjf8d1nlSlspN'
city_name = city[1]
url = 'https://api.caiyunapp.com/v2/' + api_key + '/' + city[2] + ',' + city[3] + '/forecast.json'
log.debug(url)
r = web.get(url)
data = r.json()
log.debug(data)
if 'ok' == data['status']:
result = data['result']
wf.add_item(subtitle=city_name + '未来24小时天气预报', title=result['hourly']['description'])
wf.add_item(title=result['minutely']['description'])
daily = result['daily']
for i in range(1, 5):
skycon = daily['skycon'][i]
temp = daily['temperature'][i]
wind = daily['wind'][i]
ultraviolet = daily['ultraviolet'][i]
aqi = daily['aqi'][i]
subtitle = city_name + skycon['date'] + '天气预报'
item = Data.weather_dict.get(skycon['value'])
# add first
title = item.get('name')
# add temperature
title += '\t温度:' + str(temp['max']) + '°~' + str(temp['min']) + '° \t'
# add wind
title += Data.get_wind_direction(wind['avg']['direction']) + Data.get_wind_speed(wind['avg']['speed'])
# add ultraviolet
title += '\t紫外线:' + ultraviolet['desc']
# add aqi
title += '\tAQI:' + str(aqi['min']) + '~' + str(aqi['max'])
wf.add_item(subtitle=subtitle, title=title, icon=item.get('icon'), copytext=subtitle + ':' + title)
wf.send_feedback()
if __name__ == '__main__':
# Create a global `Workflow` object
wf = Workflow3()
# Call your entry function via `Workflow.run()` to enable its helper
# functions, like exception catching, ARGV normalization, magic
# arguments etc.
log = wf.logger
sys.exit(wf.run(main))
|
# -*- coding: utf-8 -*-
# @File : paint_tree.py
# @Author : jianhuChen
# @Date : 2018-12-29 14:19:07
# @License : Copyright(C), USTC
# @Last Modified by : jianhuChen
# @Last Modified time: 2018-12-29 14:44:07
import numpy as np
from sklearn import neighbors
from sklearn import datasets
from sklearn.model_selection import train_test_split  # used to split the data into train and test sets
# load the dataset
iris = datasets.load_iris()
X, y = iris.data, iris.target
# split the dataset
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.33, shuffle=True)
# define the classifier and fit it on the training set
knn = neighbors.KNeighborsClassifier()
knn.fit(X_train, y_train)
# predict
y_pred = knn.predict(X_test)
# compute the accuracy
accuracy = np.sum(y_test == y_pred)/len(y_test)
print("accuracy = ", accuracy)
|
names = ['Lucy', 'Frank', 'Ann', 'Garry', 'Frank', 'Alex', 'Penny', 'Garry', 'Tom', 'Frank']
Emails = ['josh', 'alex', 'lord', 'marry', 'penny', 'dog']
Magazine = ['alex', 'frank', 'harry', 'yourCrash', 'lord']
setNames = set(names)
print(setNames)
withoutDuplicates = []
for name in names:
    if name not in withoutDuplicates:
        withoutDuplicates.append(name)
print(withoutDuplicates)
emails = set(Emails)
magazine = set(Magazine)
print(emails.difference(magazine))
print(magazine.difference(emails))
print(emails.intersection(magazine))
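# Expected contents (set print order may vary):
#   emails - magazine -> {'josh', 'marry', 'penny', 'dog'}
#   magazine - emails -> {'frank', 'harry', 'yourCrash'}
#   emails & magazine -> {'alex', 'lord'}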
|
# -*- coding: utf-8 -*-
import wx
from wx.html2 import WebView
class MyTestFrame(wx.Frame):
def __init__(self, parent, title):
super().__init__(parent, wx.ID_ANY, title, size=(1200, 700))
bSizer9 = wx.BoxSizer(wx.VERTICAL)
self._browser = WebView.New(self)
bSizer9.Add(self._browser, 1, wx.ALL | wx.EXPAND, 5) # widget, proportion, flags, border
bSizer8 = wx.BoxSizer(wx.HORIZONTAL)
for i in range(5):
btn = wx.Button(self, wx.ID_OK, f"Btn{i}", wx.DefaultPosition, wx.DefaultSize, 0)
bSizer8.Add(btn, 0, wx.ALL | wx.EXPAND, 5)
self.Bind(wx.EVT_BUTTON, self.OnButtonClick, btn)
bSizer9.Add(bSizer8, 0)
self.SetSizer(bSizer9)
self.Layout()
# self._browser.LoadURL('http://pynsource.com')
self._browser.LoadURL('http://google.com')
self.Show()
def OnButtonClick(self, event):
button = event.GetEventObject() # get the button that was clicked
wx.MessageBox(f"Hi from a button click {button.GetLabel()}")
if __name__ == '__main__':
app = wx.App()
frame = MyTestFrame(None, 'Test')
app.MainLoop()
|
from flask import Blueprint, request, redirect, url_for
from flask.templating import render_template
from database.models import Alimenti, Log
from database.db import db
from datetime import datetime
main = Blueprint('main', __name__)
@main.route("/")
def home():
return render_template("layout.html")
@main.route('/create_log', methods=['POST'])
def create_log():
date = request.form.get('date')
log = Log(date=datetime.strptime(date, '%Y-%m-%d'))
db.session.add(log)
db.session.commit()
return redirect(url_for('main.view', log_id=log.id))
@main.route('/add', methods=['GET'])
def add():
foods = Alimenti.query.all()
return render_template('add.html', foods=foods, food=None)
@main.route('/add', methods=['POST'])
def add_post():
food_name = request.form.get('food-name')
proteins = request.form.get('protein')
carbs = request.form.get('carbohydrates')
fats = request.form.get('fat')
food_id = request.form.get('food-id')
if food_id:
food = Alimenti.query.get_or_404(food_id)
food.name= food_name
food.proteins= proteins
food.carbs= carbs
food.fats= fats
else:
new_Food = Alimenti(
name=food_name,
proteins=proteins,
carbs=carbs, fats=fats
)
db.session.add(new_Food)
db.session.commit()
return redirect(url_for('main.add'))
@main.route('/delete_food/<int:food_id>')
def delete_food(food_id):
food = Alimenti.query.get_or_404(food_id)
db.session.delete(food)
db.session.commit()
return redirect(url_for('main.add'))
@main.route('/edit_food/<int:food_id>')
def edit_food(food_id):
food = Alimenti.query.get_or_404(food_id)
foods = Alimenti.query.all()
return render_template('add.html', food=food, foods=foods)
@main.route("/view/<int:log_id>")
def view(log_id):
log = Log.query.get_or_404(log_id)
foods = Alimenti.query.all()
totali = {
'proteins' : 0,
'carbs' : 0,
'fats' : 0,
'calories' : 0
}
for food in log.foods:
totali['proteins'] += food.proteins
totali['carbs'] += food.carbs
totali['fats'] += food.fats
totali['calories'] += food.calories
return render_template("view.html", foods=foods, log=log, totali=totali)
@main.route("/add_food_to_log/<int:log_id>", methods=['POST'])
def add_food_to_log(log_id):
log = Log.query.get_or_404(log_id)
selected_food = request.form.get('food-select')
food = Alimenti.query.get(int(selected_food))
log.foods.append(food)
db.session.commit()
return redirect(url_for('main.view', log_id=log_id))
@main.route("/remove_food_from_log/<int:log_id>/<int:food_id>", methods=['GET'])
def remove_food_from_log(log_id, food_id):
log = Log.query.get(log_id)
food = Alimenti.query.get(food_id)
log.foods.remove(food)
db.session.commit()
return redirect(url_for('main.view', log_id=log_id))
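# Minimal wiring sketch (illustrative only, not part of the original project):
# how this blueprint might be registered on an application. The factory name
# and the SQLite URI below are assumptions.
def create_app():
    from flask import Flask
    app = Flask(__name__)
    app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///food.db'
    db.init_app(app)  # assumes `db` is a flask_sqlalchemy.SQLAlchemy instance
    app.register_blueprint(main)
    return app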
|
"""Entry point for treadmill manage ecosystem.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import logging
import os
import click
import pkg_resources
from treadmill import cli, plugin_manager, utils
_LOGGER = logging.getLogger(__name__)
def make_manage_multi_command(module_name, **click_args):
"""Make a Click multicommand from all submodules of the module."""
commands = cli.make_commands(module_name, **click_args)
class MCommand(click.MultiCommand):
"""Treadmill CLI driver."""
def __init__(self, *args, **kwargs):
self.commands = commands(*args, **kwargs)
if kwargs and click_args:
kwargs.update(click_args)
click.MultiCommand.__init__(self, *args, **kwargs)
def list_commands(self, ctx):
return sorted(set(self.commands.list_commands(ctx)))
def invoke(self, ctx):
"""
invoke the command in a subprocess if it is executable
otherwise use it in process
"""
name = ctx.protected_args[0]
try:
module = plugin_manager.load(module_name, name)
except KeyError:
return super(MCommand, self).invoke(ctx)
module_path = module.__file__
if module_path.endswith('pyc'):
module_path = module_path[:-1]
# shebang doesn't work on windows
# we use .cmd or a hardcoded default interpreter
if os.name == 'nt':
nt_path = module_path[:-2] + 'cmd'
if os.path.exists(nt_path):
os.execvp(nt_path, [nt_path] + ctx.args)
else:
_LOGGER.critical(
"%s cli is not supported on windows", name)
else:
is_exec = os.access(module_path, os.X_OK)
if not is_exec:
return super(MCommand, self).invoke(ctx)
utils.sane_execvp(module_path,
[os.path.basename(module_path)] + ctx.args)
def get_command(self, ctx, cmd_name):
return self.commands.get_command(ctx, cmd_name)
def format_commands(self, ctx, formatter):
rows = []
for subcommand in self.list_commands(ctx):
entry_points = list(pkg_resources.iter_entry_points(
module_name, subcommand))
                # Fall back to an empty help text when the entry point or its
                # 'cli_help' metadata is unavailable.
                if entry_points and entry_points[0].dist.has_metadata('cli_help'):
                    help_text = entry_points[0].dist.get_metadata('cli_help')
                else:
                    help_text = ''
rows.append((subcommand, help_text))
if rows:
with formatter.section('Commands'):
formatter.write_dl(rows)
return MCommand
def init():
"""Return top level command handler."""
@click.group(cls=make_manage_multi_command('treadmill.cli.manage'))
def manage():
"""Manage applications."""
pass
return manage
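# Usage sketch (illustrative): `init()` returns a Click group whose
# sub-commands are discovered from the 'treadmill.cli.manage' plugin
# namespace, so the module can be exposed as a console entry point, e.g.:
#
#     if __name__ == '__main__':
#         init()()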
|
n = int(input("enter a n value :")) #n means number of blood banks
blood_bank_name = {}
for i in range(n):
keys1 = int(input()) #keys1 means index of blood bank name
values1 = input() #values1 means name of blood bank
blood_bank_name[keys1] = values1
print(blood_bank_name)
areas1 = {}
for i in range(n):
keys2 = int(input()) #keys2 means index of blood bank name
values2 = input() # values2 means area of blood bank
areas1[keys2] = values2
print(areas1)
m = int(input("enter a m value :")) #m means number of blood donation camps
blood_donation_camps = {}
areas2 = {}
for i in range(m):
keys3= int(input()) #keys3 means index of blood donation camps
values3 = input() #values3 means name of blood donation camps
blood_donation_camps[keys3] = values3
values4 = input() #values4 means area of blood donation camps
areas2[keys3] = values4
print(blood_donation_camps)
print(areas2)
blood_donor_details = {}
areas3 = {}
y = int(input("enter a y value :")) #y means number of donors
for i in range(y):
keys4 = input() #keys4 means name of donor
values5 = input() #values5 means blood group of donor
blood_donor_details[keys4] = values5
values6 = input() #values6 means area of donor
areas3[keys4] = values6
print(blood_donor_details)
print(areas3)
b = int(input("enter a b value :")) #b means number of users
user = {}
user_blood_group = {}
for i in range(b):
keys5 = int(input()) #keys5 means index of username
values7 = input() #values7 means name of user
values8 = input() #values8 means blood group of user
user[keys5] = values7
user_blood_group[values7] = values8
print(user)
print(user_blood_group)
c = int(input("enter a c value :")) #c means number of blood camps
check_blood_camps = {}
check_blood_camps_area = {}
for i in range(c):
keys6 = keys3 #keys6 means index of blood donation camps
values9 = values3 #values9 means name of blood donation camps
values10 = values4 #values10 means area of blood donation camps
check_blood_camps[keys6] = values9
check_blood_camps_area[keys6] = values10
print(check_blood_camps)
print(check_blood_camps_area)
check_blood_bank = {}
check_blood_bank_area = {}
for i in range(c):
keys7 = keys1 #keys7 means index of blood bank name
values11 = values1 #values11 means name of blood bank
values12 = values2 #values12 means area of blood bank
check_blood_bank[keys7] = values11
    check_blood_bank_area[keys7] = values12
print(check_blood_bank)
print(check_blood_bank_area)
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Sep 9 08:43:41 2019
@author: meiying
"""
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.preprocessing import StandardScaler
from sklearn.cluster import KMeans
from sklearn.metrics import silhouette_score
import seaborn as sns
import numpy as np
data = pd.read_excel('FoodDatabase v1.8.xlsx', sheet_name='AllFood-FoodType')
data = data[(data['Cuisine'].isin(['chinese', 'malay', 'indian', 'western']) & (data['Opta Type'] == 'Main') & (data['Analysis'] == 1))]
data = data.drop(['Analysis', 'MealPlanner', 'HasBeef', 'IsCaffeinated', 'Multiplier', 'Carbohydrates %', 'Protein %', 'Fats %'], axis=1)
data['weight'] = data['Per Serving Household Measure'].str.extract(r'.*?(\(([0-9]*?) g\))')[1].astype('float')
data.describe()
data['pct_protein'] = data['Protein (g)'] * 4 / data['Energy'] * 100
data['pct_fat'] = data['Fats (g)'] * 9 / data['Energy'] * 100
data['pct_carb'] = data['Carbohydrates (g)'] * 4 / data['Energy'] * 100
sum_corr = data.corr().sum().sort_values(ascending=True).index.values
corr = data[sum_corr].corr()
plt.figure(figsize=(13, 8))
sns.heatmap(corr, annot=True, cmap='Greens');
data_new = data[['pct_protein', 'pct_fat', 'pct_carb', 'Sugar (g)', 'Dietary Fibre (g)', 'Sodium (mg)', 'Energy']]
data_new = data_new.dropna(axis=0)
data_new_std = StandardScaler().fit_transform(data_new)
sse = {}
silhouette_avg = {}
for k in range(2,9):
kmeans = KMeans(n_clusters = k, random_state = 0)
kmeans.fit(data_new_std)
sse[k] = kmeans.inertia_
cluster_labels = kmeans.labels_
silhouette_avg[k] = silhouette_score(data_new_std, cluster_labels)
print("For n_clusters =", k,
"The average silhouette_score is :", silhouette_avg[k])
plt.figure()
#plt.title('The Elbow Method')
plt.xlabel('No. of clusters')
plt.ylabel('Sum of square error')
sns.pointplot(list(sse.keys()), list(sse.values()))
plt.show()
best_k = [i for i,j in silhouette_avg.items() if j == max(list(silhouette_avg.values()))].pop()
print("The average silhouette score is highest when there are " + str(best_k) + " clusters.")
kmeans = KMeans(n_clusters = 5, random_state = 0)
kmeans.fit(data_new_std)
cluster_labels = kmeans.labels_
data_new_k = data_new.assign(cluster=cluster_labels, cuisine=data['Cuisine'])
grouped = data_new_k.groupby(['cluster']).agg({
'pct_protein': 'mean',
'pct_fat': 'mean',
'pct_carb': 'mean',
'Sugar (g)': 'mean',
'Dietary Fibre (g)': 'mean',
'Sodium (mg)': 'mean',
'Energy': 'mean'}).round(2)
count = data_new_k.groupby(['cluster']).count()
from scipy.stats import sem, t
confidence = 0.95
grouped_sem = data_new_k.groupby(['cluster']).sem() * t.ppf((1 + confidence) / 2, count - 1)
#------------------------------------------------------------------------------
pct_protein = list(grouped['pct_protein'])
pct_fat = list(grouped['pct_fat'])
pct_carb = list(grouped['pct_carb'])
protein_sem = list(grouped_sem['pct_protein'])
fat_sem = list(grouped_sem['pct_fat'])
carb_sem = list(grouped_sem['pct_carb'])
components_dict = {
'energy':[grouped['Energy'], grouped_sem['Energy'], 'Energy (kcal)'],
'sugar':[grouped['Sugar (g)'], grouped_sem['Sugar (g)'], 'Amount of sugar (g)'],
    'dietary_fibre':[grouped['Dietary Fibre (g)'], grouped_sem['Dietary Fibre (g)'], 'Amount of dietary fibre (g)'],
'sodium':[grouped['Sodium (mg)'], grouped_sem['Sodium (mg)'], 'Sodium (mg)']
}
# set width of the bars and size of the error-bar caps
barwidth = 0.25
cap = 5
# Set position of bar on X axis
r1 = np.arange(len(pct_protein))
r2 = [x + barwidth for x in r1]
r3 = [x + barwidth for x in r2]
# Make the plot
plt.bar(r1, pct_protein, yerr=protein_sem, color='yellow', capsize=cap, width=barwidth, label='protein')
plt.bar([x + barwidth for x in r1], pct_fat, yerr=fat_sem, capsize=cap, color='orange', width=barwidth, label='fat')
plt.bar([x + barwidth for x in r2], pct_carb, yerr=carb_sem, capsize=cap, color='red', width=barwidth, label='carbohydrates')
# Add xticks on the middle of the group bars
plt.xlabel('Cluster')
plt.ylabel('Percentage')
plt.xticks([r + barwidth for r in range(len(pct_protein))], r1 + 1)
# Create legend & Show graphic
plt.legend(loc='upper right', bbox_to_anchor=(1.38, 1))
plt.show()
#------------------------------------------------------------------------------
components = list(components_dict.keys())
new_r1 = r1 * 0.5
for i in range(len(components_dict)):
plt.subplot(2, 2, i+1)
plt.bar(new_r1, list(components_dict[components[i]][0]), yerr=list(components_dict[components[i]][1]), capsize=cap, color='orange', width=barwidth)
plt.xlabel('Cluster')
plt.ylabel(components_dict[components[i]][2])
plt.xticks(new_r1, r1+1)
plt.tight_layout()
import matplotlib.patches as mpatches
# data
cuisine_clusters_no = pd.pivot_table(data_new_k, index='cuisine', columns='cluster', aggfunc=len).iloc[:,0:5]
cuisine_clusters_no.fillna(0, inplace=True)
cuisine_clusters = cuisine_clusters_no.div(cuisine_clusters_no.sum(axis=1), axis=0) * 100
new_index = ['chinese', 'malay', 'indian', 'western']
cuisine_clusters = cuisine_clusters.reindex(index=new_index)
cluster1 = list(cuisine_clusters.iloc[:,0].astype(int))
cluster2 = list(cuisine_clusters.iloc[:,1].astype(int))
cluster3 = list(cuisine_clusters.iloc[:,2].astype(int))
cluster4 = list(cuisine_clusters.iloc[:,3].astype(int))
cluster5 = list(cuisine_clusters.iloc[:,4].astype(int))
cluster_high_fat_sodium_sugar = list(cuisine_clusters.iloc[:,2].astype(int))
x = np.arange(len(cluster1))
# plot
new_x = x * 0.5
plt.figure(figsize=(5, 3))
plt.bar(new_x, cluster1, width=barwidth, color='yellow')
plt.bar(new_x, cluster2, width=barwidth, color='red', bottom=cluster1)
plt.bar(new_x, cluster3, width=barwidth, color='orange', bottom=list(map(lambda w,x: w+x, cluster1, cluster2)))
plt.bar(new_x, cluster4, width=barwidth, color='green', bottom=list(map(lambda w,x,y: w+x+y, cluster1, cluster2, cluster3)))
plt.bar(new_x, cluster5, width=barwidth, color='blue', bottom=list(map(lambda w,x,y,z: w+x+y+z, cluster1, cluster2, cluster3, cluster4)))
# labels
plt.xticks(new_x, [i.capitalize() for i in new_index])
# plt.yticks(numpy.arange(10))
# plt.grid(axis='y')
plt.xlabel('Cuisine')
plt.ylabel('Percentage (%)')
# legend
green_patch = mpatches.Patch(color='green', label='high protein (cluster 4)')
blue_patch = mpatches.Patch(color='blue', label='high carbohydrates (cluster 5)')
yellow_patch = mpatches.Patch(color='yellow', label='high fat (cluster 1)')
orange_patch = mpatches.Patch(color='orange', label='high sugar and sodium (cluster 3)')
red_patch = mpatches.Patch(color='red', label='high fat, sugar, sodium and calories (cluster 2)')
plt.legend(handles=[yellow_patch, red_patch, orange_patch, green_patch, blue_patch], loc='best', bbox_to_anchor=(2, 1))
plt.show()
|
from elk.libs.db.mysql import db
class Users(object):
_table = 'users'
def __init__(
self, id_, name, passwd, email, role,
alert, deleted, created, updated):
self.id_ = str(id_)
self.name = name
self.passwd = passwd
self.email = email
self.role = role
self.alert = alert
self.deleted = deleted
self.created = created
self.updated = updated
def dump(self):
req = dict(
id = self.id_,
name = self.name,
passwd = self.passwd,
email = self.email,
role = self.role,
alert = self.alert,
deleted = self.deleted,
created = self.created,
updated = self.updated)
return req
@classmethod
def list(cls):
sql = ("select id as id_, name, passwd, email, role, "
"alert, deleted, created, updated "
"from {table}").format(table=cls._table)
rs = db.execute(sql).fetchall()
db.commit()
return [cls(*line) for line in rs] if rs else []
@classmethod
def get_passwd_by_name(cls, name):
sql = ("select id, name, passwd, role from "
"{table} where name=:name and deleted=0").format(table=cls._table)
params = dict(name = name)
rs = db.execute(sql, params=params).fetchone()
db.commit()
return rs if rs else ''
@classmethod
def get_id_by_name(cls, name):
sql = ("select id from "
"{table} where name=:name").format(table=cls._table)
params = dict(name = name)
rs = db.execute(sql, params=params).fetchone()
db.commit()
return rs if rs else ''
@classmethod
def get_name_by_id(cls, id):
sql = ("select id, name, passwd, role from "
"{table} where id=:id and deleted=0").format(table=cls._table)
params = dict(id = id)
rs = db.execute(sql, params=params).fetchone()
db.commit()
return rs if rs else ''
@classmethod
def add(cls, insert_dict):
mysql_table = cls._table
sql = ('insert into {table} '
'(name, passwd, email, role, alert, deleted) '
'values (:name, :passwd, :email, :role, :alert, :deleted) '
).format(table=mysql_table)
params = insert_dict
id_ = db.execute(sql, params=params).lastrowid
if not id_:
db.rollback()
return
db.commit()
return str(id_)
@classmethod
def update(cls, update_dict):
mysql_table = cls._table
sql = ('update {table} set name=:name, passwd=:passwd, '
'email=:email, alert=:alert, deleted=:deleted '
'where id=:id').format(table=mysql_table)
params = update_dict
db.execute(sql, params=params).lastrowid
db.commit()
return
@classmethod
def delete(cls, delete_id):
sql = 'delete from {table} where id=:id_'.format(
table=cls._table)
params = dict(id_=delete_id)
db.execute(sql, params)
db.commit()
return delete_id
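# Usage sketch (illustrative only; assumes the `users` table exists and the
# shared `db` connection is configured):
#
#     new_id = Users.add(dict(name='alice', passwd='secret', email='a@example.com',
#                             role='user', alert=0, deleted=0))
#     for user in Users.list():
#         print(user.dump())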
|
import translator
from .. import utils
class ASCIITranslator(translator.Translator):
"""Simple ASCII translation using unichr"""
def parseInput(self, cipher):
return map(int, utils.split(str(cipher)))
def translate(self, cipher):
return "".join([unichr(i) for i in self.parseInput(cipher)])
def encode(self, cipher):
return " ".join([str(ord(i)) for i in cipher])
|
from django.shortcuts import render, get_object_or_404
from django.contrib.auth.mixins import LoginRequiredMixin, PermissionRequiredMixin
from django.urls import reverse
from django.views import generic
from communities.models import Community, CommunityMember
from django.contrib import messages
from . import models
# Create your views here.
class CreateCommunity(LoginRequiredMixin,generic.CreateView):
fields = ('name','description')
model = Community
class SingleCommunity(generic.DetailView):
model = Community
class ListCommunities(generic.ListView):
model = Community
class JoinCommunity(LoginRequiredMixin,generic.RedirectView):
def get_redirect_url(self,*args,**kwargs):
return reverse('communities:single', kwargs={'slug':self.kwargs.get('slug')})
def get(self,request,*args,**kwargs):
community = get_object_or_404(Community, slug=self.kwargs.get('slug'))
        try:
            CommunityMember.objects.create(user=self.request.user, community=community)
        except Exception:
            # creating a duplicate membership raises (e.g. an IntegrityError)
            messages.warning(self.request, 'You are already a member of this Community!')
else:
messages.success(self.request,'You are now a member of this Community')
return super().get(request,*args,**kwargs)
class LeaveCommunity(LoginRequiredMixin,generic.RedirectView):
def get_redirect_url(self,*args,**kwargs):
return reverse('communities:single', kwargs={'slug':self.kwargs.get('slug')})
def get(self,request,*args,**kwargs):
try:
membership = models.CommunityMember.objects.filter(
user = self.request.user,
community__slug = self.kwargs.get('slug')
).get()
except models.CommunityMember.DoesNotExist:
messages.warning(self.request,'You are not a member of this Community!')
else:
membership.delete()
messages.success(self.request,'You have left the Community!')
return super().get(request,*args,**kwargs)
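# Illustrative URL wiring (an assumption, not part of the original app): the
# views above reverse names in a 'communities' namespace with slug-based URLs.
#
#     from django.urls import path
#     app_name = 'communities'
#     urlpatterns = [
#         path('', ListCommunities.as_view(), name='all'),
#         path('new/', CreateCommunity.as_view(), name='create'),
#         path('<slug:slug>/', SingleCommunity.as_view(), name='single'),
#         path('<slug:slug>/join/', JoinCommunity.as_view(), name='join'),
#         path('<slug:slug>/leave/', LeaveCommunity.as_view(), name='leave'),
#     ]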
|
import os
import json
import sys
from troposphere import Ref, Template, Parameter, GetAtt, serverless, awslambda, stepfunctions, Sub, events, Output
MONGODB_CONNECTION_STRING_PARAMETER = 'MongoDbConnectionString'
REDIS_HOST = 'RedisHost'
REDIS_PORT = 'RedisPort'
REDIS_PASSWORD = 'RedisPassword'
def add_parameters(template):
template.add_parameter(Parameter(MONGODB_CONNECTION_STRING_PARAMETER, Type='String'))
template.add_parameter(Parameter(REDIS_HOST, Type='String'))
template.add_parameter(Parameter(REDIS_PORT, Type='String'))
template.add_parameter(Parameter(REDIS_PASSWORD, Type='String'))
return template
def add_outputs(template):
template.add_output(Output('StateMachineArn', Description='StateMachineArn', Value=Ref('StateMachine')))
return template
def create_lambda_function_resource(function_name, handler_path, lambda_execution_role_arn):
return serverless.Function(
function_name,
Role=lambda_execution_role_arn,
CodeUri='./',
Handler=handler_path,
Runtime='python3.6',
Timeout=900,
Environment=awslambda.Environment(Variables={
'MONGODB_CONNECTION_STRING': Ref(MONGODB_CONNECTION_STRING_PARAMETER),
'REDIS_HOST': Ref(REDIS_HOST),
'REDIS_PORT': Ref(REDIS_PORT),
'REDIS_PASSWORD': Ref(REDIS_PASSWORD)
})
)
def get_lambda_functions_from_file():
with open(os.path.join(os.path.dirname(__file__), 'lambda_functions.json'), 'r') as lambda_functions_file:
return json.load(lambda_functions_file)
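# Expected shape of lambda_functions.json (inferred from the keys read in this
# module; the names below are illustrative):
#
#     [
#         {"function_name": "ExampleFunction",
#          "handler_path": "handlers.example.handler"},
#         ...
#     ]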
def add_lambda_function_resources(template, lambda_execution_role_arn):
lambda_functions = get_lambda_functions_from_file()
for lambda_function in lambda_functions:
template.add_resource(create_lambda_function_resource(
lambda_function.get('function_name'),
lambda_function.get('handler_path'),
lambda_execution_role_arn
))
return template
def add_state_machine_resource(template, step_functions_role_arn):
lambda_functions = get_lambda_functions_from_file()
with open(os.path.join(os.path.dirname(__file__), 'state_machine.json'), 'r') as state_machine_file:
state_machine = stepfunctions.StateMachine(
'StateMachine',
RoleArn=step_functions_role_arn,
DefinitionString=Sub([
json.dumps(json.load(state_machine_file)),
{
lambda_function['function_name']: GetAtt(lambda_function['function_name'], 'Arn')
for lambda_function in lambda_functions
}
])
)
template.add_resource(state_machine)
template.add_resource(
events.Rule(
'StateMachineSchedule',
ScheduleExpression='rate(1 hour)',
State='ENABLED',
Targets=[
events.Target(
'StateMachine',
Arn=Ref(state_machine),
RoleArn=step_functions_role_arn,
Id='state_machine'
)
]
)
)
return template
def add_resources(template, lambda_execution_role_arn, step_functions_role_arn):
template = add_lambda_function_resources(template, lambda_execution_role_arn)
template = add_state_machine_resource(template, step_functions_role_arn)
return template
def main():
lambda_execution_role_arn = sys.argv[1]
step_functions_role_arn = sys.argv[2]
template = Template()
template = add_parameters(template)
template = add_resources(template, lambda_execution_role_arn, step_functions_role_arn)
template = add_outputs(template)
template_yaml = template.to_yaml()
template_yaml = template_yaml.replace('|-\n', '')
print(template_yaml)
if __name__ == '__main__':
main()
|
import unittest
import homport
homport.start()
class NodeWrapTestCase(unittest.TestCase):
def setUp(self):
self.assertTrue('hou' in globals())
def testWrapped(self):
self.assertTrue(isinstance(hou.node('/obj'), homport.NodeWrap))
def testInvalidNode(self):
self.assertRaises(homport.NodeWrapError, hou.node, 'test')
def testGetNode(self):
hou.node('/obj').createNode('geo')
self.assertEquals(hou.node('/obj/geo1').node, hou.node('/obj').geo1.node)
def testGetParm(self):
null = hou.node('/obj').createNode('null')
self.assertEquals(null.tx.parm, null.parm('tx'))
def testRshift(self):
geo = hou.node('/obj').createNode('geo')
null = hou.node('/obj').createNode('null')
geo >> null
def testLshift(self):
geo = hou.node('/obj').createNode('geo')
null = hou.node('/obj').createNode('null')
geo << null
def testFloorDiv(self):
geo = hou.node('/obj').createNode('geo')
null = hou.node('/obj').createNode('null')
geo >> null
self.assertTrue(len(null.inputConnections()) == 1)
geo // null
self.assertTrue(len(null.inputConnections()) == 0)
def testDefinedInputConn(self):
geo = hou.node('/obj').createNode('geo')
subnet = hou.node('/obj').createNode('subnet')
geo >> subnet.input_two
self.assert_(subnet.inputConnectors()[1])
def testInputConnectReset(self):
geo = hou.node('/obj').createNode('geo')
subnet = hou.node('/obj').createNode('subnet')
geo >> subnet.input_two
self.assert_(subnet.inputConnectors()[1])
# this should properly connect geo to input 0 of the subnet
geo >> subnet
self.assert_(subnet.inputConnectors()[0])
class ParmWrapTestCase(unittest.TestCase):
def setUp(self):
self.geo1 = hou.node('/obj').createNode('geo')
self.geo2 = hou.node('/obj').createNode('geo')
def testParmsWrapped(self):
self.assertEquals(self.geo1.tx.parm, self.geo1.node.parm('tx'))
def testSetParm(self):
self.geo1.tx = 500
self.assertEquals(self.geo1.tx.eval(), 500)
def testEvalParm(self):
self.geo1.tx.set(500.0)
self.assertEquals(self.geo1.tx.eval(), 500.0)
def testStrParm(self):
self.geo1.tx.set(500.0)
self.assertEquals(str(self.geo1.tx), str(500.0))
def testLinkParms(self):
self.geo1.tx >> self.geo2.tx
self.geo1.tx = 450.0
self.assertEquals(str(self.geo1.tx), str(self.geo2.tx))
if __name__ == "__main__":
unittest.main()
|
# Author: Christian Brodbeck <christianbrodbeck@nyu.edu>
"""
Data Representation
===================
Data is stored in three main vessels:
:class:`Factor`:
stores categorical data
:class:`Var`:
stores numeric data
:class:`NDVar`:
    stores numerical data where each cell contains an array of data (e.g., EEG
or MEG data)
All three are managed by a :class:`Dataset`.
"""
from __future__ import division
from collections import OrderedDict, defaultdict
from copy import deepcopy
from fnmatch import fnmatchcase
import itertools
from itertools import chain, izip
from keyword import iskeyword
from math import ceil, log10
import cPickle as pickle
import operator
import os
import re
import string
from warnings import warn
import mne
from mne import Evoked as _mne_Evoked
from nibabel.freesurfer import read_annot
import numpy as np
from numpy import dot
import scipy.stats
from scipy.linalg import inv
from scipy.optimize import leastsq
from scipy.spatial import ConvexHull
from scipy.spatial.distance import cdist, pdist, squareform
from . import fmtxt
from . import _colorspaces as cs
from ._utils import ui, LazyProperty, natsorted #, logger
from ._utils.numpy_utils import slice_to_arange, full_slice
preferences = dict(fullrepr=False, # whether to display full arrays/dicts in __repr__ methods
repr_len=5, # length of repr
dataset_str_n_cases=500,
var_repr_n_cases=100,
factor_repr_n_cases=100,
bool_fmt='%s',
float_fmt='%.6g',
int_fmt='%s',
factor_repr_use_labels=True,
short_repr=True, # "A % B" vs "Interaction(A, B)"
)
UNNAMED = '<?>'
SEQUENCE_TYPES = (tuple, list)
_pickled_ds_wildcard = ("Pickled Dataset (*.pickled)", '*.pickled')
_tex_wildcard = ("TeX (*.tex)", '*.tex')
_tsv_wildcard = ("Plain Text Tab Separated Values (*.txt)", '*.txt')
_txt_wildcard = ("Plain Text (*.txt)", '*.txt')
class DimensionMismatchError(Exception):
pass
def _effect_eye(n):
"""Effect coding for n categories. E.g.::
Examples
--------
>>> _effect_eye(4)
array([[ 1, 0, 0],
[ 0, 1, 0],
[ 0, 0, 1],
[-1, -1, -1]])
"""
x = np.empty((n, n - 1), dtype=np.int8)
x[:n - 1] = np.eye(n - 1, dtype=np.int8)
x[n - 1] = -1
return x
def _effect_interaction(a, b):
k = a.shape[1]
out = [a[:, i, None] * b for i in range(k)]
return np.hstack(out)
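# Note: for effect matrices a of shape (n, k) and b of shape (n, m), the
# interaction coding above has shape (n, k * m): every column of a is
# multiplied element-wise with every column of b.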
def cellname(cell, delim=' '):
"""
Returns a consistent ``str`` representation for cells.
* for Factor cells: the cell (str)
* for Interaction cell: delim.join(cell).
"""
if isinstance(cell, str):
return cell
elif isinstance(cell, (list, tuple)):
return delim.join(cell)
elif cell is None:
return ''
else:
return unicode(cell)
def longname(x):
if isnumeric(x) and 'longname' in x.info:
return x.info['longname']
elif getattr(x, 'name', None) is not None:
return x.name
elif np.isscalar(x):
return repr(x)
return '<unnamed>'
def rank(x, tol=1e-8):
"""
Rank of a matrix, from
http://mail.scipy.org/pipermail/numpy-discussion/2008-February/031218.html
"""
s = np.linalg.svd(x, compute_uv=0)
return np.sum(np.where(s > tol, 1, 0))
def check_length(objs, n=None):
for obj in objs:
if obj is None:
pass
elif n is None:
n = len(obj)
elif n != len(obj):
err = ("%r has wrong length: %i (%i needed)." %
(obj.name, len(obj), n))
raise ValueError(err)
def isbalanced(x):
"""Determine whether x is balanced
Parameters
----------
x : categorial
Categorial Model, Factor or Interaction.
"""
if ismodel(x):
return all(isbalanced(e) for e in x.effects)
else:
return len({np.sum(x == c) for c in x.cells}) <= 1
def iscategorial(x):
"factors as well as interactions are categorial"
if isfactor(x) or isnested(x):
return True
elif isinteraction(x):
return x.is_categorial
elif ismodel(x):
return all(iscategorial(e) for e in x.effects)
else:
return False
def isdataobject(x):
return getattr(x, '_stype', None) in ("model", "var", "ndvar", "factor",
"interaction", "nonbasic", "nested", "list")
def isdataset(x):
return getattr(x, '_stype', None) == 'dataset'
def iseffect(x):
return getattr(x, '_stype', None) in ("factor", "var", "interaction", "nonbasic", "nested")
def isdatalist(x, contains=None, test_all=True):
"""Test whether x is a Datalist instance
Parameters
----------
x : object
Object to test.
contains : None | class
Test whether the content is instances of a specific class.
test_all : bool
If contains is provided, test all items' class (otherwise just test the
first item).
"""
is_dl = isinstance(x, Datalist)
if is_dl and contains:
if test_all:
is_dl = all(isinstance(item, contains) for item in x)
else:
is_dl = isinstance(x[0], contains)
return is_dl
def isfactor(x):
return getattr(x, '_stype', None) == "factor"
def isinteraction(x):
return getattr(x, '_stype', None) == "interaction"
def ismodel(x):
return getattr(x, '_stype', None) == "model"
def isnested(x):
"Determine whether x is nested"
return getattr(x, '_stype', None) == "nested"
def isnestedin(item, item2):
"Returns True if item is nested in item2, False otherwise"
if hasattr(item, 'nestedin'):
return item.nestedin and (item2 in find_factors(item.nestedin))
else:
return False
def isndvar(x):
"Determine whether x is an NDVar"
return getattr(x, '_stype', None) == "ndvar"
def isnumeric(x):
"Determine wether x is numeric (a Var or an NDVar)"
return getattr(x, '_stype', None) in ("ndvar", "var")
def isuv(x):
"Determine whether x is univariate (a Var or a Factor)"
return getattr(x, '_stype', None) in ("factor", "var")
def isvar(x):
"Determine whether x is a Var"
return getattr(x, '_stype', None) == "var"
def isboolvar(x):
"Determine whether x is a Var whose data type is boolean"
return isvar(x) and x.x.dtype.kind == 'b'
def isintvar(x):
"Determine whether x is a Var whose data type is integer"
return isvar(x) and x.x.dtype.kind in 'iu'
def is_higher_order_effect(e1, e0):
"""Determine whether e1 is a higher order term of e0
Returns True if e1 is a higher order term of e0 (i.e., if all factors in
e0 are contained in e1).
Parameters
----------
e1, e0 : effects
The effects to compare.
"""
f1s = find_factors(e1)
return all(f in f1s for f in find_factors(e0))
def hasemptycells(x):
"True iff a categorial has one or more empty cells"
if isfactor(x):
return False
elif isinteraction(x):
if x.is_categorial:
for cell in x.cells:
if not np.any(x == cell):
return True
return False
elif ismodel(x):
for e in x.effects:
if isinteraction(e) and e.is_categorial:
for cell in e.cells:
if not np.any(e == cell):
return True
return False
raise TypeError("Need categorial (got %s)" % type(x))
def hasrandom(x):
"""True if x is or contains a random effect, False otherwise"""
if isfactor(x) or isnested(x):
return x.random
elif isinteraction(x):
for e in x.base:
if isfactor(e) and e.random:
return True
elif ismodel(x):
return any(hasrandom(e) for e in x.effects)
return False
def as_case_identifier(x, sub=None, ds=None):
"Coerce input to a variable that can identify each of its cases"
if isinstance(x, basestring):
if ds is None:
err = ("Parameter was specified as string, but no Dataset was "
"specified")
raise TypeError(err)
x = ds.eval(x)
if sub is not None:
x = x[sub]
if isvar(x):
n = len(x.values)
    elif isfactor(x):
        n = x.n_cells
    elif isinteraction(x):
        n = len(set(map(tuple, x.as_effects)))
else:
raise TypeError("Need a Var, Factor or Interaction to identify cases, "
"got %s" % repr(x))
if n < len(x):
raise ValueError("%s can not serve as a case identifier because it has "
"at least one non-unique value" % x.name.capitalize())
return x
def asarray(x, kind=None):
"Coerce input to array"
if isvar(x):
x = x.x
else:
x = np.asarray(x)
if kind is not None and x.dtype.kind not in kind:
# boolean->int conversion
if 'i' in kind and x.dtype.kind == 'b':
x = x.astype(int)
else:
raise TypeError("Expected array of kind %r, got %r (%s)"
% (kind, x.dtype.kind, x.dtype))
return x
def ascategorial(x, sub=None, ds=None, n=None):
if isinstance(x, basestring):
if ds is None:
err = ("Parameter was specified as string, but no Dataset was "
"specified")
raise TypeError(err)
x = ds.eval(x)
if iscategorial(x):
pass
elif isinteraction(x):
x = Interaction([e if isfactor(e) else e.as_factor() for e in x.base])
else:
x = asfactor(x)
if sub is not None:
x = x[sub]
if n is not None and len(x) != n:
raise ValueError("Arguments have different length")
return x
def asdataobject(x, sub=None, ds=None, n=None):
"Convert to any data object or numpy array."
if isinstance(x, basestring):
if ds is None:
err = ("Data object was specified as string, but no Dataset was "
"specified")
raise TypeError(err)
x = ds.eval(x)
if isdataobject(x):
pass
elif isinstance(x, np.ndarray):
pass
else:
x = Datalist(x)
if sub is not None:
x = x[sub]
if n is not None and len(x) != n:
raise ValueError("Arguments have different length")
return x
def asepochs(x, sub=None, ds=None, n=None):
"Convert to mne Epochs object"
if isinstance(x, basestring):
if ds is None:
err = ("Epochs object was specified as string, but no Dataset was "
"specified")
raise TypeError(err)
x = ds.eval(x)
if isinstance(x, mne.Epochs):
pass
else:
raise TypeError("Need mne Epochs object, got %s" % repr(x))
if sub is not None:
x = x[sub]
if n is not None and len(x) != n:
raise ValueError("Arguments have different length")
return x
def asfactor(x, sub=None, ds=None, n=None):
if isinstance(x, basestring):
if ds is None:
err = ("Factor was specified as string, but no Dataset was "
"specified")
raise TypeError(err)
x = ds.eval(x)
if isfactor(x):
pass
elif hasattr(x, 'as_factor'):
x = x.as_factor(name=x.name)
else:
x = Factor(x)
if sub is not None:
x = x[sub]
if n is not None and len(x) != n:
raise ValueError("Arguments have different length")
return x
def asmodel(x, sub=None, ds=None, n=None):
if isinstance(x, basestring):
if ds is None:
err = ("Model was specified as string, but no Dataset was "
"specified")
raise TypeError(err)
x = ds.eval(x)
if ismodel(x):
pass
else:
x = Model(x)
if sub is not None:
x = x[sub]
if n is not None and len(x) != n:
raise ValueError("Arguments have different length")
return x
def asndvar(x, sub=None, ds=None, n=None):
if isinstance(x, basestring):
if ds is None:
err = ("Ndvar was specified as string, but no Dataset was "
"specified")
raise TypeError(err)
x = ds.eval(x)
# convert MNE objects
if isinstance(x, mne.Epochs):
from .load.fiff import epochs_ndvar
x = epochs_ndvar(x)
elif isinstance(x, _mne_Evoked):
from .load.fiff import evoked_ndvar
x = evoked_ndvar(x)
elif isinstance(x, list):
item_0 = x[0]
if isinstance(item_0, _mne_Evoked):
from .load.fiff import evoked_ndvar
x = evoked_ndvar(x)
if not isndvar(x):
raise TypeError("NDVar required, got %s" % repr(x))
if sub is not None:
x = x[sub]
if n is not None and len(x) != n:
raise ValueError("Arguments have different length")
return x
def asnumeric(x, sub=None, ds=None, n=None):
"Var, NDVar"
if isinstance(x, basestring):
if ds is None:
err = ("Numeric argument was specified as string, but no Dataset "
"was specified")
raise TypeError(err)
x = ds.eval(x)
if not isnumeric(x):
raise TypeError("Numeric argument required (Var or NDVar), got %s" % repr(x))
if sub is not None:
x = x[sub]
if n is not None and len(x) != n:
raise ValueError("Arguments have different length")
return x
def assub(sub, ds=None):
"Interpret the sub argument."
if isinstance(sub, basestring):
if ds is None:
err = ("the sub parameter was specified as string, but no Dataset "
"was specified")
raise TypeError(err)
sub = ds.eval(sub)
return sub
def asuv(x, sub=None, ds=None, n=None):
"As Var or Factor"
if isinstance(x, basestring):
if ds is None:
err = ("Parameter was specified as string, but no Dataset was "
"specified")
raise TypeError(err)
x = ds.eval(x)
if isuv(x):
pass
elif all(isinstance(v, basestring) for v in x):
x = Factor(x)
else:
x = Var(x)
if sub is not None:
x = x[sub]
if n is not None and len(x) != n:
raise ValueError("Arguments have different length")
return x
def asvar(x, sub=None, ds=None, n=None):
if isinstance(x, basestring):
if ds is None:
err = "Var was specified as string, but no Dataset was specified"
raise TypeError(err)
x = ds.eval(x)
if isvar(x):
pass
else:
x = Var(x)
if sub is not None:
x = x[sub]
if n is not None and len(x) != n:
raise ValueError("Arguments have different length")
return x
def index_ndim(index):
"""Determine the dimensionality of an index
Parameters
----------
index : numpy_index
Any valid numpy index.
Returns
-------
ndim : int
Number of index dimensions: 0 for an index to a single element, 1 for
an index to a sequence.
"""
if np.iterable(index):
return 1
elif isinstance(index, slice):
return 1
elif isinstance(index, int):
return 0
else:
raise TypeError("unknown index type: %s" % repr(index))
def _empty_like(obj, n=None, name=None):
"Create an empty object of the same type as obj"
n = n or len(obj)
name = name or obj.name
if isfactor(obj):
return Factor([''], repeat=n, name=name)
elif isvar(obj):
return Var(np.empty(n) * np.NaN, name=name)
elif isndvar(obj):
shape = (n,) + obj.shape[1:]
return NDVar(np.empty(shape) * np.NaN, dims=obj.dims, name=name)
elif isdatalist(obj):
return Datalist([None] * n, name, obj._fmt)
else:
err = "Type not supported: %s" % type(obj)
raise TypeError(err)
# --- sorting ---
def align(d1, d2, i1='index', i2=None, out='data'):
"""
Aligns two data-objects d1 and d2 based on two index variables, i1 and i2.
Before aligning, d1 and d2 describe the same cases, but their order does
not correspond. Align uses the indexes (i1 and i2) to match each case in
d2 to a case in d1 (i.e., d1 is used as the basis for the case order).
Cases that are not present in both d1 and d2 are dropped.
Parameters
----------
d1, d2 : data-object
Two data objects which are to be aligned
i1, i2 : str | Var | Factor | Interaction
Indexes for cases in d1 and d2. If d1 and d2 are Datasets, i1 and i2
can be keys for variables in d1 and d2 (if i2 is identical to i1 it can
be omitted). Indexes have to supply a unique value for each case.
out : 'data' | 'index'
**'data'**: returns the two aligned data objects. **'index'**: returns two
indices index1 and index2 which can be used to align the datasets with
``d1[index1]; d2[index2]``.
Examples
--------
See `examples/datasets/align.py <https://github.com/christianbrodbeck/
Eelbrain/blob/master/examples/datasets/align.py>`_.
"""
if i2 is None and isinstance(i1, basestring):
i2 = i1
i1 = as_case_identifier(i1, ds=d1)
i2 = as_case_identifier(i2, ds=d2)
if not ((isvar(i1) and isvar(i2))
or (isfactor(i1) and isfactor(i2))
or (isinteraction(i1) and isinteraction(i2))):
raise TypeError("i1 and i2 need to be of the same type, got: \n"
"i1=%s\ni2=%s." % (repr(i1), repr(i2)))
idx1 = []
idx2 = []
for i, case_id in enumerate(i1):
if case_id in i2:
idx1.append(i)
idx2.append(i2.index(case_id)[0])
if out == 'data':
if all(i == v for i, v in enumerate(idx1)):
return d1, d2[idx2]
else:
return d1[idx1], d2[idx2]
elif out == 'index':
return idx1, idx2
else:
raise ValueError("Invalid value for out parameter: %r" % out)
def align1(d, idx, d_idx='index', out='data'):
"""
Align a data object to an index
Parameters
----------
d : data object, n_cases = n1
Data object with cases that should be aligned to idx.
idx : Var | array_like, len = n2
Index array to which d should be aligned.
d_idx : str | index array, len = n1
Indices of cases in d. If d is a Dataset, d_idx can be a name in d.
out : 'data' | 'index'
Return a restructured copy of d or an array of numerical indices into
d.
"""
idx = asuv(idx)
if not isinstance(d_idx, basestring):
# check d_idx length
if isdataset(d):
if len(d_idx) != d.n_cases:
msg = ("d_idx does not have the same number of cases as d "
"(d_idx: %i, d: %i)" % (len(d_idx), d.n_cases))
raise ValueError(msg)
else:
if len(d_idx) != len(d):
msg = ("d_idx does not have the same number of cases as d "
"(d_idx: %i, d: %i)" % (len(d_idx), len(d)))
raise ValueError(msg)
d_idx = asuv(d_idx, ds=d)
align_idx = np.empty(len(idx), int)
for i, v in enumerate(idx):
where = d_idx.index(v)
if len(where) == 1:
align_idx[i] = where[0]
elif len(where) == 0:
raise ValueError("%s does not occur in d_idx" % v)
else:
raise ValueError("%s occurs more than once in d_idx" % v)
if out == 'data':
return d[align_idx]
elif out == 'index':
return align_idx
    else:
        raise ValueError("Invalid value for out parameter: %r" % out)
def choose(choice, sources, name=None):
"""Combine data-objects picking from a different object for each case
Parameters
----------
choice : array of int
Array specifying for each case from which of the sources the data should
be taken.
sources : list of data-objects
Data that should be combined.
name : str
Name for the new data-object (optional).
Notes
-----
Analogous to :func:`numpy.choose`. Only implemented for NDVars at this time.
"""
choice = asarray(choice, 'i')
if choice.min() < 0:
raise ValueError("Choice can not be < 0")
elif choice.max() > len(sources) - 1:
raise ValueError("choice contains values exceeding the number of sources")
s0 = sources[0]
s1 = sources[1:]
if isndvar(s0):
if not all(isndvar(s) for s in s1):
raise TypeError("Sources have different types")
elif any(s.dims != s0.dims for s in s1):
raise DimensionMismatchError("Sources have different dimensions")
x = np.empty_like(s0.x)
index_flat = np.empty(len(choice), bool)
index = index_flat.reshape(index_flat.shape + (1,) * (x.ndim - 1))
for i, s in enumerate(sources):
np.equal(choice, i, index_flat)
np.copyto(x, s.x, where=index)
return NDVar(x, s0.dims, {}, name)
else:
raise NotImplementedError
class Celltable(object):
"""Divide Y into cells defined by X.
Parameters
----------
Y : data-object
dependent measurement
X : categorial
Model (Factor or Interaction) for dividing Y.
match : categorial
Factor on which cases are matched (i.e. subject for a repeated
measures comparisons). If several data points with the same
case fall into one cell of X, they are combined using
match_func. If match is not None, Celltable.groups contains the
        {Xcell -> [match values of data points], ...} mapping corresponding
        to self.data
sub : bool array
Bool array of length N specifying which cases to include
match_func : callable
see match
cat : None | sequence of cells of X
Only retain data for these cells. Data will be sorted in the order
        of cells occurring in cat.
ds : Dataset
If a Dataset is specified, input items (Y / X / match / sub) can
be str instead of data-objects, in which case they will be
retrieved from the Dataset.
coercion : callable
        Function to convert the Y parameter to the dependent variable
        (default: asdataobject).
Examples
--------
Split a repeated-measure variable Y into cells defined by the
interaction of A and B::
>>> c = Celltable(Y, A % B, match=subject)
Attributes
----------
.Y, .X,
Y and X after sub was applied.
.sub, .match:
Input arguments.
.cells : list of (str | tuple)
List of all cells in X.
.data : dict(cell -> data)
Data (``Y[index]``) in each cell.
.data_indexes : dict(cell -> index-array)
For each cell, a boolean-array specifying the index for that cell in
``X``.
**If ``match`` is specified**:
.within : dict(cell1, cell2 -> bool)
Dictionary that specifies for each cell pair whether the corresponding
comparison is a repeated-measures or an independent measures
comparison (only available when the input argument ``match`` is
        specified).
.all_within : bool
Whether all comparison are repeated-measures comparisons or not.
.groups : dict(cell -> group)
A slice of the match argument describing the group members for each
cell.
"""
def __init__(self, Y, X=None, match=None, sub=None, match_func=np.mean,
cat=None, ds=None, coercion=asdataobject):
self.sub = sub
sub = assub(sub, ds)
if X is None:
Y = coercion(Y, sub, ds)
else:
X = ascategorial(X, sub, ds)
if cat is not None:
# determine cat
is_none = list(c is None for c in cat)
if any(is_none):
if len(cat) == len(X.cells):
if all(is_none):
cat = X.cells
else:
cells = [c for c in X.cells if c not in cat]
cat = tuple(cells.pop(0) if c is None else c
for c in cat)
else:
err = ("Categories can only be specified as None if X "
"contains exactly as many cells as categories are "
"required (%i)." % len(cat))
raise ValueError(err)
if not isinteraction(X):
cat = tuple(str(c) for c in cat)
# make sure all categories are in data
missing = [c for c in cat if c not in X.cells]
if missing:
raise ValueError("Categories not in data: %s" % ', '.join(missing))
# apply cat
sort_idx = X.sort_idx(order=cat)
X = X[sort_idx]
if sub is None:
sub = sort_idx
else:
imax = max(len(sub), np.max(sub))
sub = np.arange(imax)[sub][sort_idx]
Y = coercion(Y, sub, ds, len(X))
if match is not None:
match = ascategorial(match, sub, ds, len(Y))
cell_model = match if X is None else X % match
sort_idx = None
if len(cell_model) > len(cell_model.cells):
# need to aggregate
Y = Y.aggregate(cell_model)
match = match.aggregate(cell_model)
if X is not None:
X = X.aggregate(cell_model)
if cat is not None:
sort_idx = X.sort_idx(order=cat)
else:
sort_idx = cell_model.sort_idx()
if X is not None and cat is not None:
X_ = X[sort_idx]
sort_X_idx = X_.sort_idx(order=cat)
sort_idx = sort_idx[sort_X_idx]
if (sort_idx is not None) and (not np.all(np.diff(sort_idx) == 1)):
Y = Y[sort_idx]
match = match[sort_idx]
if X is not None:
X = X[sort_idx]
# save args
self.Y = Y
self.X = X
self.cat = cat
self.match = match
self.coercion = coercion.__name__
self.n_cases = len(Y)
# extract cell data
self.data = {}
self.data_indexes = {}
if X is None:
self.data[None] = Y
self.data_indexes[None] = full_slice
self.cells = [None]
self.n_cells = 1
self.all_within = match is not None
return
self.cells = X.cells
self.n_cells = len(self.cells)
self.groups = {}
for cell in X.cells:
idx = X.index_opt(cell)
self.data_indexes[cell] = idx
self.data[cell] = Y[idx]
if match:
self.groups[cell] = match[idx]
# determine which comparisons are within subject comparisons
if match:
self.within = {}
for cell1, cell2 in itertools.combinations(X.cells, 2):
group1 = self.groups[cell1]
if len(group1) == 0:
continue
group2 = self.groups[cell2]
if len(group2) == 0:
continue
within = np.all(group1 == group2)
self.within[cell1, cell2] = within
self.within[cell2, cell1] = within
self.any_within = any(self.within.values())
self.all_within = all(self.within.values())
else:
self.any_within = False
self.all_within = False
def __repr__(self):
args = [self.Y.name, self.X.name]
rpr = "Celltable(%s)"
if self.match is not None:
args.append("match=%s" % self.match.name)
if self.sub is not None:
if isvar(self.sub):
args.append('sub=%s' % self.sub.name)
else:
indexes = ' '.join(str(i) for i in self.sub[:4])
args.append("sub=[%s...]" % indexes)
if self.coercion != 'asdataobject':
args.append("coercion=%s" % self.coercion)
return rpr % (', '.join(args))
def __len__(self):
return self.n_cells
def cellname(self, cell, delim=' '):
"""Produce a str label for a cell.
Parameters
----------
cell : tuple | str
Cell.
delim : str
Interaction cells (represented as tuple of strings) are joined by
``delim``.
"""
return cellname(cell, delim=delim)
def cellnames(self, delim=' '):
"""Returns a list of all cell names as strings.
See Also
--------
.cellname : Produce a str label for a single cell.
"""
return [cellname(cell, delim) for cell in self.cells]
def data_for_cell(self, cell):
"""Retrieve data for a cell, allowing advanced cell combinations
Parameters
----------
cell : str | tuple of str
            Name of the cell. See notes for special cell names. After a special
            cell is retrieved for the first time it is also added to
``self.data``.
Notes
-----
Special cell names can be used to retrieve averages between different
primary cells. The names should be composed so that a case sensitive
        version of fnmatch will find the source cells. For example, if all
cells are ``[('a', '1'), ('a', '2'), ('b', '1'), ('b', '2')]``,
``('a', '*')`` will retrieve the average of ``('a', '1')`` and
``('a', '2')``.
"""
if cell in self.data:
return self.data[cell]
# find cells matched by `cell`
if isinstance(cell, basestring):
cells = [c for c in self.cells if fnmatchcase(c, cell)]
name = cell
else:
cells = [c for c in self.cells if all(fnmatchcase(c_, cp)
for c_, cp in izip(c, cell))]
name = '|'.join(cell)
# check that all are repeated measures
for cell1, cell2 in itertools.combinations(cells, 2):
if not self.within[(cell1, cell2)]:
err = ("Combinatory cells can only be formed from repeated "
"measures cells, %r and %r are not." % (cell1, cell2))
raise ValueError(err)
# combine data
cell0 = cells[0]
        x = np.zeros_like(self.data[cell0].x)
        for cell_ in cells:
            x += self.data[cell_].x
        x /= len(cells)
        out = NDVar(x, self.data[cell0].dims, {}, name)
self.data[cell] = out
return out
def get_data(self, out=list):
if out is dict:
return self.data
elif out is list:
return [self.data[cell] for cell in self.cells]
def get_statistic(self, func=np.mean, a=1, **kwargs):
"""
Returns a list with a * func(data) for each data cell.
Parameters
----------
func : callable | str
statistics function that is applied to the data. Can be string,
such as '[X]sem', '[X]std', or '[X]ci', e.g. '2sem'.
a : scalar
Multiplier (if not provided in ``function`` string).
kwargs :
Are submitted to the statistic function.
Notes
----
:py:meth:`get_statistic_dict`
See also
--------
Celltable.get_statistic_dict : return statistics in a dict
"""
if isinstance(func, basestring):
if func.endswith('ci'):
if len(func) > 2:
a = float(func[:-2])
elif a == 1:
a = .95
from ._stats.stats import confidence_interval
func = confidence_interval
elif func.endswith('sem'):
if len(func) > 3:
a = float(func[:-3])
func = scipy.stats.sem
elif func.endswith('std'):
if len(func) > 3:
a = float(func[:-3])
func = np.std
if 'ddof' not in kwargs:
kwargs['ddof'] = 1
else:
raise ValueError('unrecognized statistic: %r' % func)
Y = [a * func(self.data[cell].x, **kwargs) for cell in self.cells]
return Y
def get_statistic_dict(self, func=np.mean, a=1, **kwargs):
"""
        Same as :py:meth:`~Celltable.get_statistic`, except that the result is
        returned in a {cell: value} dictionary.
"""
        return dict(zip(self.cells, self.get_statistic(func=func, a=a, **kwargs)))
def combine(items, name=None, check_dims=True, incomplete='raise',
fill_in_missing=None):
"""Combine a list of items of the same type into one item.
Parameters
----------
items : collection
Collection (:py:class:`list`, :py:class:`tuple`, ...) of data objects
of a single type (Dataset, Var, Factor, NDVar or Datalist).
name : None | str
Name for the resulting data-object. If None, the name of the combined
item is the common prefix of all items.
check_dims : bool
For NDVars, check dimensions for consistency between items (e.g.,
channel locations in a Sensor dimension). Default is ``True``. Set to
``False`` to ignore non-fatal mismatches.
incomplete : "raise" | "drop" | "fill in"
Only applies when combining Datasets: how to handle variables that are
missing from some of the input Datasets. With ``"raise"`` (default), a
        KeyError is raised. With ``"drop"``, partially missing variables are
dropped. With ``"fill in"``, they are retained and missing values are
filled in with empty values (``""`` for factors, ``NaN`` for variables).
Notes
-----
The info dict inherits only entries that are equal (``x is y or
np.array_equal(x, y)``) for all items.
"""
if fill_in_missing is not None:
warn("The fill_in_missing argument to combine() is deprecated and will "
"be removed after version 0.19. Use the new incomplete argument "
"instead.", DeprecationWarning)
incomplete = 'fill in' if fill_in_missing else 'raise'
elif not isinstance(incomplete, basestring):
warn("The fill_in_missing argument to combine() has ben renamed to "
"`incomplete` and should be a string (got %s). After version 0.19 "
"this will raise an error" % repr(incomplete), DeprecationWarning)
incomplete = 'fill in' if incomplete else 'raise'
elif incomplete not in ('raise', 'drop', 'fill in'):
raise ValueError("incomplete=%s" % repr(incomplete))
# check input
if len(items) == 0:
raise ValueError("combine() called with empty sequence %s" % repr(items))
# find type
stypes = set(getattr(item, '_stype', None) for item in items)
if None in stypes:
raise TypeError("Can only combine data-objects, got at least one other "
"item.")
elif len(stypes) > 1:
raise TypeError("All items to be combined need to have the same type, "
"got %s." % ', '.join(tuple(stypes)))
stype = stypes.pop()
# find name
if name is None:
names = filter(None, (item.name for item in items))
name = os.path.commonprefix(names) or None
# combine objects
if stype == 'dataset':
out = Dataset(name=name, info=_merge_info(items))
item0 = items[0]
if incomplete == 'fill in':
# find all keys and data types
keys = item0.keys()
sample = dict(item0)
for item in items:
for key in item.keys():
if key not in keys:
keys.append(key)
sample[key] = item[key]
# create output
for key in keys:
pieces = [ds[key] if key in ds else
_empty_like(sample[key], ds.n_cases) for ds in items]
out[key] = combine(pieces, check_dims=check_dims)
else:
keys = set(item0)
if incomplete == 'raise':
if any(set(item) != keys for item in items[1:]):
raise KeyError("Datasets have unequal keys. Combine with "
"fill_in_missing=True to combine anyways.")
out_keys = item0
else:
keys.intersection_update(*items[1:])
out_keys = (k for k in item0 if k in keys)
for key in out_keys:
out[key] = combine([ds[key] for ds in items])
return out
elif stype == 'var':
x = np.hstack(i.x for i in items)
return Var(x, name, info=_merge_info(items))
elif stype == 'factor':
random = set(f.random for f in items)
if len(random) > 1:
raise ValueError("Factors have different values for random parameter")
random = random.pop()
item0 = items[0]
labels = item0._labels
if all(f._labels == labels for f in items[1:]):
x = np.hstack(f.x for f in items)
return Factor(x, name, random, labels=labels)
else:
x = sum((i.as_labels() for i in items), [])
return Factor(x, name, random)
elif stype == 'ndvar':
v_have_case = [v.has_case for v in items]
if all(v_have_case):
has_case = True
all_dims = (item.dims[1:] for item in items)
elif any(v_have_case):
raise DimensionMismatchError("Some items have a 'case' dimension, "
"others do not")
else:
has_case = False
all_dims = (item.dims for item in items)
dims = reduce(lambda x, y: intersect_dims(x, y, check_dims), all_dims)
idx = {d.name: d for d in dims}
items = [item.sub(**idx) for item in items]
if has_case:
x = np.concatenate([v.x for v in items], axis=0)
else:
x = np.array([v.x for v in items])
dims = ('case',) + dims
return NDVar(x, dims, _merge_info(items), name)
elif stype == 'list':
return Datalist(sum(items, []), name, items[0]._fmt)
else:
raise RuntimeError("combine with stype = %r" % stype)
def _is_equal(a, b):
"Test equality, taking into account array values"
if a is b:
return True
elif type(a) is not type(b):
return False
a_iterable = np.iterable(a)
b_iterable = np.iterable(b)
if a_iterable != b_iterable:
return False
elif not a_iterable:
return a == b
elif len(a) != len(b):
return False
elif isinstance(a, np.ndarray):
if a.shape == b.shape:
return (a == b).all()
else:
return False
elif isinstance(a, SEQUENCE_TYPES):
return all(_is_equal(a_, b_) for a_, b_ in izip(a, b))
elif isinstance(a, dict):
if a.viewkeys() == b.viewkeys():
return all(_is_equal(a[k], b[k]) for k in a)
else:
return False
else:
return a == b
def _merge_info(items):
"Merge info dicts from several objects"
info0 = items[0].info
other_infos = [i.info for i in items[1:]]
# find shared keys
info_keys = set(info0.keys())
for info in other_infos:
info_keys.intersection_update(info.keys())
# find shared values
out = {}
for key in info_keys:
v0 = info0[key]
if all(_is_equal(info[key], v0) for info in other_infos):
out[key] = v0
return out
def find_factors(obj):
"returns a list of all factors contained in obj"
if isinstance(obj, EffectList):
f = set()
for e in obj:
f.update(find_factors(e))
return EffectList(f)
elif isuv(obj):
return EffectList([obj])
elif ismodel(obj):
f = set()
for e in obj.effects:
f.update(find_factors(e))
return EffectList(f)
elif isnested(obj):
return find_factors(obj.effect)
elif isinteraction(obj):
return obj.base
else: # NonbasicEffect
try:
return EffectList(obj.factors)
except:
raise TypeError("%r has no factors" % obj)
class EffectList(list):
def __repr__(self):
return 'EffectList((%s))' % ', '.join(self.names())
def __contains__(self, item):
for f in self:
if ((f.name == item.name) and (f._stype == item._stype)
and (len(f) == len(item)) and np.all(item == f)):
return True
return False
def index(self, item):
for i, f in enumerate(self):
if (len(f) == len(item)) and np.all(item == f):
return i
raise ValueError("Factor %r not in EffectList" % item.name)
def names(self):
names = [e.name if isuv(e) else repr(e) for e in self]
return [UNNAMED if n is None else n for n in names]
class Var(object):
"""Container for scalar data.
Parameters
----------
x : array_like
Data; is converted with ``np.asarray(x)``. Multidimensional arrays
are flattened as long as only 1 dimension is longer than 1.
name : str | None
Name of the variable
repeat : int | array of int
repeat each element in ``x``, either a constant or a different number
for each element.
tile : int
Repeat ``x`` as a whole ``tile`` many times.
Attributes
----------
x : numpy.ndarray
The data stored in the Var.
name : None | str
The Var's name.
Notes
-----
While :py:class:`Var` objects support a few basic operations in a
:py:mod:`numpy`-like fashion (``+``, ``-``, ``*``, ``/``, ``//``), their
:py:attr:`Var.x` attribute provides access to the corresponding
:py:class:`numpy.array` which can be used for anything more complicated.
:py:attr:`Var.x` can be read and modified, but should not be replaced.
"""
_stype = "var"
ndim = 1
def __init__(self, x, name=None, repeat=1, tile=1, info=None):
if isinstance(x, basestring):
raise TypeError("Var can't be initialized with a string")
x = np.asarray(x)
if x.ndim > 1:
# np.count_nonzero() does not accept a generator; use sum() to count
# dimensions longer than 1
if sum(i > 1 for i in x.shape) <= 1:
x = np.ravel(x)
else:
err = ("X needs to be one-dimensional. Use NDVar class for "
"data with more than one dimension.")
raise ValueError(err)
if not (isinstance(repeat, int) and repeat == 1):
x = np.repeat(x, repeat)
if tile > 1:
x = np.tile(x, tile)
if info is None:
info = {}
self.__setstate__((x, name, info))
def __setstate__(self, state):
if len(state) == 3:
x, name, info = state
else:
x, name = state
info = {}
# raw
self.name = name
self.x = x
self.info = info
# constants
self._n_cases = len(x)
self.df = 1
self.random = False
def __getstate__(self):
return (self.x, self.name, self.info)
def __repr__(self, full=False):
n_cases = preferences['var_repr_n_cases']
if isintvar(self):
fmt = preferences['int_fmt']
elif isboolvar(self):
fmt = preferences['bool_fmt']
else:
fmt = preferences['float_fmt']
if full or len(self.x) <= n_cases:
x = [fmt % v for v in self.x]
else:
x = [fmt % v for v in self.x[:n_cases]]
x.append('... (N=%s)' % len(self.x))
args = ['[%s]' % ', '.join(x)]
if self.name is not None:
args.append('name=%r' % self.name)
if self.info:
args.append('info=%r' % self.info)
return "Var(%s)" % ', '.join(args)
def __str__(self):
return self.__repr__(True)
@property
def __array_interface__(self):
return self.x.__array_interface__
# container ---
def __len__(self):
return self._n_cases
def __getitem__(self, index):
"if Factor: return new variable with mean values per Factor category"
if isfactor(index):
f = index
x = []
for v in np.unique(f.x):
x.append(np.mean(self.x[f == v]))
return Var(x, self.name, info=self.info.copy())
elif isvar(index):
index = index.x
x = self.x[index]
if np.iterable(x):
return Var(x, self.name, info=self.info.copy())
else:
return x
def __setitem__(self, index, value):
self.x[index] = value
def __contains__(self, value):
return value in self.x
# numeric ---
def __neg__(self):
x = -self.x
info = self.info.copy()
info['longname'] = '-' + longname(self)
return Var(x, info=info)
def __pos__(self):
return self
def __abs__(self):
return self.abs()
def __add__(self, other):
if isdataobject(other):
# ??? should Var + Var return sum or Model?
return Model((self, other))
x = self.x + other
info = self.info.copy()
info['longname'] = longname(self) + ' + ' + longname(other)
return Var(x, info=info)
def __sub__(self, other):
"subtract: values are assumed to be ordered. Otherwise use .sub method."
if np.isscalar(other):
x = self.x - other
elif len(other) != len(self):
err = ("Objects have different length (%i vs "
"%i)" % (len(self), len(other)))
raise ValueError(err)
else:
x = self.x - other.x
info = self.info.copy()
info['longname'] = longname(self) + ' - ' + longname(other)
return Var(x, info=info)
def __mul__(self, other):
if iscategorial(other):
return Model((self, other, self % other))
elif isvar(other):
x = self.x * other.x
else:
x = self.x * other
info = self.info.copy()
info['longname'] = longname(self) + ' * ' + longname(other)
return Var(x, info=info)
def __floordiv__(self, other):
if isvar(other):
x = self.x // other.x
else:
x = self.x // other
info = self.info.copy()
info['longname'] = longname(self) + ' // ' + longname(other)
return Var(x, info=info)
def __mod__(self, other):
if ismodel(other):
return Model(self) % other
elif isvar(other):
x = self.x % other.x
elif isdataobject(other):
return Interaction((self, other))
else:
x = self.x % other
info = self.info.copy()
info['longname'] = longname(self) + ' % ' + longname(other)
return Var(x, info=info)
def __lt__(self, y):
return self.x < y
def __le__(self, y):
return self.x <= y
def __eq__(self, y):
return self.x == y
def __ne__(self, y):
return self.x != y
def __gt__(self, y):
return self.x > y
def __ge__(self, y):
return self.x >= y
def __truediv__(self, other):
return self.__div__(other)
def __div__(self, other):
"""
type of other:
scalar:
returns var divided by other
Factor:
returns a separate slope for each level of the Factor; needed for
ANCOVA
"""
if isvar(other):
x = self.x / other.x
elif iscategorial(other):
dummy_factor = other.as_dummy_complete
codes = dummy_factor * self.as_effects
# center
means = codes.sum(0) / dummy_factor.sum(0)
codes -= dummy_factor * means
# create effect
name = '%s per %s' % (self.name, other.name)
return NonbasicEffect(codes, [self, other], name,
beta_labels=other.dummy_complete_labels)
else:
x = self.x / other
info = self.info.copy()
info['longname'] = longname(self) + ' / ' + longname(other)
return Var(x, info=info)
def _coefficient_names(self, method):
return longname(self),
def abs(self, name=None):
"Return a Var with the absolute value."
info = self.info.copy()
info['longname'] = 'abs(' + longname(self) + ')'
return Var(np.abs(self.x), name, info=info)
def argmax(self):
""":func:`numpy.argmax`"""
return np.argmax(self.x)
def argmin(self):
""":func:`numpy.argmin`"""
return np.argmin(self.x)
def argsort(self, kind='quicksort'):
""":func:`numpy.argsort`
Parameters
----------
kind : 'quicksort' | 'mergesort' | 'heapsort'
Sorting algorithm (default 'quicksort').
Returns
-------
index_array : array of int
Array of indices that sort `a` along the specified axis.
In other words, ``a[index_array]`` yields a sorted `a`.
"""
return np.argsort(self.x, kind=kind)
@property
def as_dummy(self):
"for effect initialization"
return self.x[:, None]
@property
def as_effects(self):
"for effect initialization"
return self.centered()[:, None]
def as_factor(self, labels='%r', name=True, random=False):
"""Convert the Var into a Factor
Parameters
----------
labels : str | dict
Either a format string for converting values into labels (default:
``'%r'``) or a dictionary mapping values to labels (see examples).
In a dictionary, multiple values can be assigned the same label by
providing multiple keys in a tuple. A special key 'default' can be
used to assign values that are not otherwise specified in the
dictionary (by default this is the empty string ``''``).
name : None | True | str
Name of the output Factor, ``True`` to keep the current name
(default ``True``).
random : bool
Whether the Factor is a random Factor (default ``False``).
Examples
--------
>>> v = Var([0, 1, 2, 3])
>>> v.as_factor()
Factor(['0', '1', '2', '3'])
>>> v.as_factor({0: 'a', 1: 'b'})
Factor(['a', 'b', '', ''])
>>> v.as_factor({(0, 1): 'a', (2, 3): 'b'})
Factor(['a', 'a', 'b', 'b'])
>>> v.as_factor({0: 'a', 1: 'b', 'default': 'c'})
Factor(['a', 'b', 'c', 'c'])
"""
labels_ = {}
if isinstance(labels, dict):
# flatten
for key, v in labels.iteritems():
if isinstance(key, SEQUENCE_TYPES):
for k in key:
labels_[k] = v
else:
labels_[key] = v
default = labels_.pop('default', '')
if default is not None:
for key in np.unique(self.x):
if key not in labels_:
labels_[key] = default
else:
for value in np.unique(self.x):
labels_[value] = labels % value
if name is True:
name = self.name
return Factor(self.x, name, random, labels=labels_)
def centered(self):
return self.x - self.x.mean()
def copy(self, name=True):
"returns a deep copy of itself"
x = self.x.copy()
if name is True:
name = self.name
return Var(x, name, info=deepcopy(self.info))
def compress(self, X, func=np.mean, name=True):
"Deprecated. Use .aggregate()."
warn("Var.compress s deprecated; use Var.aggregate instead"
"(with identical functionality).", DeprecationWarning)
self.aggregate(X, func, name)
def count(self):
"""Count the number of occurrence of each value
Notes
-----
Counting starts with zero (see examples). This is to facilitate
integration with indexing.
Examples
--------
>>> v = Var([1, 2, 3, 1, 1, 1, 3])
>>> v.count()
Var([0, 0, 0, 1, 2, 3, 1])
"""
x = np.empty(len(self.x), int)
index = np.empty(len(self.x), bool)
for v in np.unique(self.x):
np.equal(self.x, v, index)
x[index] = np.arange(index.sum())
return Var(x)
def aggregate(self, X, func=np.mean, name=True):
"""Summarize cases within cells of X
Parameters
----------
X : categorial
Model defining cells in which to aggregate.
func : callable
Function that converts arrays into scalars, used to summarize data
within each cell of X.
name : None | True | str
Name of the output Var, ``True`` to keep the current name (default
``True``).
Returns
-------
aggregated_var : Var
A Var instance with a single value for each cell in X.
"""
if len(X) != len(self):
err = "Length mismatch: %i (Var) != %i (X)" % (len(self), len(X))
raise ValueError(err)
x = []
for cell in X.cells:
x_cell = self.x[X == cell]
if len(x_cell) > 0:
x.append(func(x_cell))
if name is True:
name = self.name
x = np.array(x)
return Var(x, name, info=self.info.copy())
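# Illustrative usage (hypothetical values): aggregating a Var within the
# cells of a Factor yields one summary value (by default the mean) per cell:
#     >>> v = Var([1, 2, 3, 4])
#     >>> v.aggregate(Factor('aabb'))  # -> cell means 1.5 and 3.5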
@property
def beta_labels(self):
return [self.name]
def diff(self, X, v1, v2, match):
"""
Subtract X==v2 from X==v1; sorts values according to match (ascending)
Parameters
----------
X : categorial
Model to define cells.
v1, v2 : str | tuple
Cells on X for subtraction.
match : categorial
Model that defines how to match cells in v1 to cells in v2.
"""
raise NotImplementedError
# FIXME: use celltable
assert isfactor(X)
I1 = (X == v1); I2 = (X == v2)
Y1 = self[I1]; Y2 = self[I2]
m1 = match[I1]; m2 = match[I2]
s1 = np.argsort(m1); s2 = np.argsort(m2)
y = Y1[s1] - Y2[s2]
name = "{n}({x1}-{x2})".format(n=self.name,
x1=X.cells[v1],
x2=X.cells[v2])
return Var(y, name, info=self.info.copy())
@classmethod
def from_dict(cls, base, values, name=None, default=0, info=None):
"""
Construct a Var object by mapping ``base`` to ``values``.
Parameters
----------
base : sequence
Sequence to be mapped to the new Var.
values : dict
Mapping from values in base to values in the new Var.
name : None | str
Name for the new Var.
default : scalar
Default value to supply for entries in ``base`` that are not in
``values``.
Examples
--------
>>> base = Factor('aabbcde')
>>> Var.from_dict(base, {'a': 5, 'e': 8}, default=0)
Var([5, 5, 0, 0, 0, 0, 8])
"""
return cls([values.get(b, default) for b in base], name, info=info)
@classmethod
def from_apply(cls, base, func, name=None, info=None):
"""
Construct a Var instance by applying a function to each value in a base
Parameters
----------
base : sequence, len = n
Base for the new Var. Can be an NDVar, if ``func`` is a
dimensionality reducing function such as :func:`numpy.mean`.
func : callable
A function that when applied to each element in ``base`` returns
the desired value for the resulting Var.
"""
if isvar(base) or isndvar(base):
base = base.x
if isinstance(func, np.ufunc):
x = func(base)
elif getattr(base, 'ndim', 1) > 1:
x = func(base.reshape((len(base), -1)), axis=1)
else:
x = np.array([func(val) for val in base])
return cls(x, name, info=info)
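# Illustrative usage (hypothetical values): apply a ufunc element-wise to
# construct a new Var:
#     >>> Var.from_apply(Var([1, 4, 9]), np.sqrt)  # -> values 1., 2., 3.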
def index(self, value):
"``v.index(value)`` returns an array of indices where v equals value"
return np.flatnonzero(self == value)
def isany(self, *values):
"Boolean index, True where the Var is equal to one of the values"
return np.in1d(self.x, values)
def isin(self, values):
"Boolean index, True where the Var value is in values"
return np.in1d(self.x, values)
def isnot(self, *values):
"Boolean index, True where the Var is not equal to one of the values"
return np.in1d(self.x, values, invert=True)
def isnotin(self, values):
"Boolean index, True where the Var value is not in values"
return np.in1d(self.x, values, invert=True)
def max(self):
"Returns the highest value"
return self.x.max()
def mean(self):
"Returns the mean"
return self.x.mean()
def min(self):
"Returns the smallest value"
return self.x.min()
def repeat(self, repeats, name=True):
"""
Repeat each element ``repeats`` times
Parameters
----------
repeats : int | array of int
Number of repeats, either a constant or a different number for each
element.
name : None | True | str
Name of the output Var, ``True`` to keep the current name (default
``True``).
"""
if name is True:
name = self.name
return Var(self.x.repeat(repeats), name, info=self.info.copy())
def split(self, n=2, name=None):
"""
A Factor splitting Y into ``n`` categories with an equal number of cases
Parameters
----------
n : int
number of categories
name : str
Name of the output Factor.
Examples
--------
Use n = 2 for a median split::
>>> y = Var([1,2,3,4])
>>> y.split(2)
Factor(['0', '0', '1', '1'])
>>> z = Var([7, 6, 5, 4, 3, 2])
>>> z.split(3)
Factor(['2', '2', '1', '1', '0', '0'])
"""
y = self.x
d = 100. / n
percentile = np.arange(d, 100., d)
values = [scipy.stats.scoreatpercentile(y, p) for p in percentile]
x = np.zeros(len(y), dtype=int)
for v in values:
x += y > v
return Factor(x, name)
def std(self):
"Returns the standard deviation"
return self.x.std()
def sort_idx(self, descending=False):
"""Create an index that could be used to sort the Var.
Parameters
----------
descending : bool
Sort in descending instead of an ascending order.
"""
idx = np.argsort(self.x, kind='mergesort')
if descending:
idx = idx[::-1]
return idx
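# Illustrative usage (hypothetical values): the returned index sorts the Var
# in ascending order:
#     >>> v = Var([3, 1, 2])
#     >>> v.sort_idx()
#     array([1, 2, 0])
#     >>> v[v.sort_idx()]
#     Var([1, 2, 3])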
@property
def values(self):
return np.unique(self.x)
class _Effect(object):
# numeric ---
def __add__(self, other):
return Model(self) + other
def __mul__(self, other):
# the Model and non-Model branches were identical; __mod__ already
# handles expansion when other is a Model
return Model((self, other, self % other))
def __mod__(self, other):
if isinstance(other, Model):
return Model((self % e for e in other.effects))
return Interaction((self, other))
def count(self, value, start=-1):
"""Cumulative count of the occurrences of ``value``
Parameters
----------
value : str | tuple (value in .cells)
Cell value which is to be counted.
start : int
Value at which to start counting (with the default of -1, the first
occurrence will be 0).
Returns
-------
count : array of int, len = len(self)
Cumulative count of value in self.
Examples
--------
>>> a = Factor('abc', tile=3)
>>> a
Factor(['a', 'b', 'c', 'a', 'b', 'c', 'a', 'b', 'c'])
>>> a.count('a')
array([0, 0, 0, 1, 1, 1, 2, 2, 2])
"""
count = np.cumsum(self == value) + start
return count
def enumerate_cells(self, name=None):
"""Enumerate the occurrence of each cell value throughout the data
Parameters
----------
name : None | str
Name for the returned Var.
Returns
-------
enum : Var
Result.
Examples
--------
>>> f = Factor('aabbccabc')
>>> f.enumerate_cells()
Var([0, 1, 0, 1, 0, 1, 2, 2, 2])
"""
counts = {cell: 0 for cell in self.cells}
enum = np.empty(len(self), int)
for i, value in enumerate(self):
enum[i] = counts[value]
counts[value] += 1
return Var(enum, name)
def index(self, cell):
"""``e.index(cell)`` returns an array of indices where e equals cell
Examples
--------
>>> f = Factor('abcabcabc')
>>> f
Factor(['a', 'b', 'c', 'a', 'b', 'c', 'a', 'b', 'c'])
>>> f.index('b')
array([1, 4, 7])
>>> f[f.index('b')] = 'new_b'
>>> f
Factor(['a', 'new_b', 'c', 'a', 'new_b', 'c', 'a', 'new_b', 'c'])
"""
return np.flatnonzero(self == cell)
def index_opt(self, cell):
"""Find an optimized index for a given cell.
Returns
-------
index : slice | array
If possible, a ``slice`` object is returned. Otherwise, an array
of indices (as with ``e.index(cell)``).
"""
index = np.flatnonzero(self == cell)
d_values = np.unique(np.diff(index))
if len(d_values) == 1:
start = index.min() or None
step = d_values[0]
stop = index.max() + 1
if stop > len(self) - step:
stop = None
if step == 1:
step = None
index = slice(start, stop, step)
return index
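# Illustrative usage (hypothetical values): for evenly spaced occurrences of
# a cell, a slice is returned instead of an index array:
#     >>> Factor('ababab').index_opt('a')
#     slice(None, None, 2)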
def sort_idx(self, descending=False, order=None):
"""Create an index that could be used to sort this data_object.
Parameters
----------
descending : bool
Sort in descending instead of the default ascending order.
order : None | sequence
Sequence of cells to define a custom order. Any cells that are not
present in ``order`` will be omitted in the sort_index, i.e. the
sort_index will be shorter than its source.
Returns
-------
sort_index : array of int
Array which can be used to sort a data_object in the desired order.
"""
idx = np.empty(len(self), dtype=np.uint32)
if order is None:
cells = self.cells
else:
cells = order
idx[:] = -1
for i, cell in enumerate(cells):
idx[self == cell] = i
sort_idx = np.argsort(idx, kind='mergesort')
if order is not None:
i_cut = -np.count_nonzero(idx == np.uint32(-1))
if i_cut:
sort_idx = sort_idx[:i_cut]
if descending:
sort_idx = sort_idx[::-1]
return sort_idx
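# Illustrative usage (hypothetical values): a custom cell order can be
# imposed through the ``order`` argument:
#     >>> f = Factor('bab')
#     >>> f.sort_idx(order=('b', 'a'))
#     array([0, 2, 1])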
class Factor(_Effect):
"""Container for categorial data.
Parameters
----------
x : iterator
Sequence of Factor values (see also the ``labels`` kwarg).
name : str
Name of the Factor.
random : bool
Treat Factor as random factor (for ANOVA; default is False).
repeat : int | array of int
repeat each element in ``x``, either a constant or a different number
for each element.
tile : int
Repeat ``x`` as a whole ``tile`` many times.
labels : dict | OrderedDict | tuple
An optional dictionary mapping values as they occur in ``x`` to the
Factor's cell labels. Since :class:`dict` objects are unordered, labels are
sorted alphabetically by default. In order to define cells in a
different order, use a :class:`collections.OrderedDict` object or
define labels as ``((key, value), ...)`` tuple.
Attributes
----------
.name : None | str
The Factor's name.
.cells : tuple of str
Sorted names of all cells.
.random : bool
Whether the Factor is defined as random factor (for ANOVA).
Examples
--------
The most obvious way to initialize a Factor is a list of strings::
>>> Factor(['in', 'in', 'in', 'out', 'out', 'out'])
Factor(['in', 'in', 'in', 'out', 'out', 'out'])
The same can be achieved with a list of integers plus a labels dict::
>>> Factor([1, 1, 1, 0, 0, 0], labels={1: 'in', 0: 'out'})
Factor(['in', 'in', 'in', 'out', 'out', 'out'])
Or more parsimoniously:
>>> Factor([1, 0], labels={1: 'in', 0: 'out'}, repeat=3)
Factor(['in', 'in', 'in', 'out', 'out', 'out'])
Since the Factor initialization simply iterates over the ``x``
argument, a Factor with one-character codes can also be initialized
with a single string::
>>> Factor('iiiooo')
Factor(['i', 'i', 'i', 'o', 'o', 'o'])
"""
_stype = "factor"
def __init__(self, x, name=None, random=False, repeat=1, tile=1, labels={}):
if not (np.any(repeat) or np.any(tile)):
self.__setstate__({'x': np.empty((0,), np.uint32), 'labels': {},
'name': name, 'random': random})
return
try:
n_cases = len(x)
except TypeError: # for generators:
x = tuple(x)
n_cases = len(x)
# find mapping and ordered values
if isinstance(labels, dict):
labels_dict = labels
values = labels.values()
if not isinstance(labels, OrderedDict):
values = natsorted(values)
else:
labels_dict = dict(labels)
values = [pair[1] for pair in labels]
# convert x to codes
highest_code = -1
codes = {} # {label -> code}
x_ = np.empty(n_cases, dtype=np.uint32)
for i, value in enumerate(x):
if value in labels_dict:
label = labels_dict[value]
elif isinstance(value, unicode):
label = value
else:
label = str(value)
if label in codes:
x_[i] = codes[label]
else: # new code
x_[i] = codes[label] = highest_code = highest_code + 1
if highest_code >= 2**32:
raise RuntimeError("Too many categories in this Factor")
# collect ordered_labels
ordered_labels = OrderedDict()
for label in values:
if label in codes:
ordered_labels[codes[label]] = label
for label in natsorted(codes):
if label not in values:
ordered_labels[codes[label]] = label
if not (isinstance(repeat, int) and repeat == 1):
x_ = x_.repeat(repeat)
if tile > 1:
x_ = np.tile(x_, tile)
self.__setstate__({'x': x_, 'ordered_labels': ordered_labels,
'name': name, 'random': random})
def __setstate__(self, state):
self.x = x = state['x']
self.name = state['name']
self.random = state['random']
if 'ordered_labels' in state:
# 0.13: ordered_labels replaced labels
self._labels = state['ordered_labels']
self._codes = {lbl: code for code, lbl in self._labels.iteritems()}
else:
labels = state['labels']
cells = natsorted(labels.values())
self._codes = codes = {lbl: code for code, lbl in labels.iteritems()}
self._labels = OrderedDict([(codes[label], label) for label in cells])
self._n_cases = len(x)
def __getstate__(self):
state = {'x': self.x,
'name': self.name,
'random': self.random,
'ordered_labels': self._labels}
return state
def __repr__(self, full=False):
use_labels = preferences['factor_repr_use_labels']
n_cases = preferences['factor_repr_n_cases']
if use_labels:
values = self.as_labels()
else:
values = self.x.tolist()
if full or len(self.x) <= n_cases:
x = repr(values)
else:
x = [repr(v) for v in values[:n_cases]]
x.append('<... N=%s>' % len(self.x))
x = '[' + ', '.join(x) + ']'
args = [x]
if self.name is not None:
args.append('name=%r' % self.name)
if self.random:
args.append('random=True')
if not use_labels:
args.append('labels=%s' % self._labels)
return 'Factor(%s)' % ', '.join(args)
def __str__(self):
return self.__repr__(True)
# container ---
def __len__(self):
return self._n_cases
def __getitem__(self, index):
"""
index needs to be an int or an array of bools matching the shape of self.x.
This method is valid for Factors and nonbasic effects.
"""
if isvar(index):
index = index.x
x = self.x[index]
if np.iterable(x):
return Factor(x, self.name, self.random, labels=self._labels)
else:
return self._labels[x]
def __setitem__(self, index, x):
# convert x to code
if isinstance(x, basestring):
code = self._get_code(x)
elif np.iterable(x):
# use the same dtype as self.x (np.uint32)
code = np.empty(len(x), dtype=np.uint32)
for i, v in enumerate(x):
code[i] = self._get_code(v)
# assign
self.x[index] = code
# obliterate redundant labels
codes_in_use = set(np.unique(self.x))
rm = set(self._labels) - codes_in_use
for code in rm:
label = self._labels.pop(code)
del self._codes[label]
def _get_code(self, label):
"add the label if it does not exists and return its code"
try:
return self._codes[label]
except KeyError:
code = 0
while code in self._labels:
code += 1
if code >= 2**32:
raise ValueError("Too many categories in this Factor.")
self._labels[code] = label
self._codes[label] = code
return code
def __iter__(self):
return (self._labels[i] for i in self.x)
def __contains__(self, value):
try:
code = self._codes[value]
except KeyError:
return False
return code in self.x
# numeric ---
def __eq__(self, other):
return self.x == self._encode(other)
def __ne__(self, other):
return self.x != self._encode(other)
def _encode(self, x):
if isinstance(x, basestring):
return self._encode_1(x)
else:
return self._encode_seq(x)
def _encode_1(self, value):
return self._codes.get(value, -1)
def _encode_seq(self, values):
return np.array([self._codes.get(value, -1) for value in values])
def __call__(self, other):
"""
Create a nested effect. A factor A is nested in another factor B if
each level of A only occurs together with one level of B.
"""
return NestedEffect(self, other)
def _interpret_y(self, Y, create=False):
"""
Parameters
----------
Y : str | list of str
String(s) to be converted to code values.
Returns
-------
codes : int | list of int
List of values (codes) corresponding to the categories.
"""
if isinstance(Y, basestring):
if Y in self._codes:
return self._codes[Y]
elif create:
code = 0
while code in self._labels:
code += 1
if code >= 65535:
raise ValueError("Too many categories in this Factor.")
self._labels[code] = Y
self._codes[Y] = code
return code
else:
return 65535 # code for values not present in the Factor
elif np.iterable(Y):
out = np.empty(len(Y), dtype=np.uint16)
for i, y in enumerate(Y):
out[i] = self._interpret_y(y, create=create)
return out
elif Y in self._labels:
return Y
else:
raise ValueError("unknown cell: %r" % Y)
@property
def as_dummy(self): # x_dummy_coded
shape = (self._n_cases, self.df)
codes = np.empty(shape, dtype=np.int8)
for i, cell in enumerate(self.cells[:-1]):
codes[:, i] = (self == cell)
return codes
@property
def as_dummy_complete(self):
x = self.x[:, None]
categories = np.unique(x)
codes = np.hstack([x == cat for cat in categories])
return codes.astype(np.int8)
@property
def as_effects(self): # x_deviation_coded
shape = (self._n_cases, self.df)
codes = np.empty(shape, dtype=np.int8)
for i, cell in enumerate(self.cells[:-1]):
codes[:, i] = (self == cell)
contrast = (self == self.cells[-1])
codes -= contrast[:, None]
return codes
def _coefficient_names(self, method):
if method == 'dummy':
return ["%s:%s" % (self.name, cell) for cell in self.cells[:-1]]
contrast_cell = self.cells[-1]
return ["%s:%s-%s" % (self.name, cell, contrast_cell)
for cell in self.cells[:-1]]
def as_labels(self):
"Convert the Factor to a list of str"
return [self._labels[v] for v in self.x]
def as_var(self, labels, default=None, name=None):
"""Convert the Factor into a Var
Parameters
----------
labels : dict
A ``{factor_value: var_value}`` mapping.
default : None | scalar
Default value for factor values not mentioned in ``labels``. If not
specified, factor values missing from ``labels`` will raise a
``KeyError``.
name : None | True | str
Name of the output Var, ``True`` to keep the current name (default
``None``).
"""
if default is None:
x = [labels[v] for v in self]
else:
x = [labels.get(v, default) for v in self]
if name is True:
name = self.name
return Var(x, name)
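# Illustrative usage (hypothetical values): map factor labels to numbers:
#     >>> Factor('aabb').as_var({'a': 0, 'b': 1})
#     Var([0, 0, 1, 1])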
@property
def beta_labels(self):
cells = self.cells
txt = '{0}=={1}'
return [txt.format(cells[i], cells[-1]) for i in range(len(cells) - 1)]
@property
def cells(self):
return tuple(self._labels.values())
def _cellsize(self):
"-1 if cell size is not equal"
codes = self._labels.keys()
buf = self.x == codes[0]
n = buf.sum()
for code in codes[1:]:
n_ = np.equal(self.x, code, buf).sum()
if n_ != n:
return -1
return n
def compress(self, X, name=None):
"Deprecated. Use .aggregate()."
warn("Factor.compress s deprecated; use Factor.aggregate instead"
"(with identical functionality).", DeprecationWarning)
self.aggregate(X, name)
def aggregate(self, X, name=True):
"""
Summarize the Factor by collapsing within cells in `X`.
Raises an error if there are cells that contain more than one value.
Parameters
----------
X : categorial
A categorial model defining cells to collapse.
name : None | True | str
Name of the output Factor, ``True`` to keep the current name
(default ``True``).
Returns
-------
f : Factor
A copy of self with only one value for each cell in X
"""
if len(X) != len(self):
err = "Length mismatch: %i (Var) != %i (X)" % (len(self), len(X))
raise ValueError(err)
x = []
for cell in X.cells:
idx = (X == cell)
if np.sum(idx):
x_i = np.unique(self.x[idx])
if len(x_i) > 1:
labels = tuple(self._labels[code] for code in x_i)
err = ("Can not determine aggregated value for Factor %r "
"in cell %r because the cell contains multiple "
"values %r. Set drop_bad=True in order to ignore "
"this inconsistency and drop the Factor."
% (self.name, cell, labels))
raise ValueError(err)
else:
x.append(x_i[0])
if name is True:
name = self.name
out = Factor(x, name, self.random, labels=self._labels)
return out
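# Illustrative usage (hypothetical values): collapsing within the cells of X
# keeps one value per cell (and raises an error if any cell contains more
# than one distinct value):
#     >>> Factor('aabb').aggregate(Factor('xxyy'))
#     Factor(['a', 'b'])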
def copy(self, name=True, repeat=1, tile=1, rep=None):
"returns a deep copy of itself"
if rep is not None:
if repeat != 1:
raise TypeError("Specified rep and repeat")
repeat = rep
warn("The rep argument has been renamed to repeat", DeprecationWarning)
if name is True:
name = self.name
return Factor(self.x.copy(), name, self.random, repeat, tile, self._labels)
@property
def df(self):
return max(0, len(self._labels) - 1)
def endswith(self, substr):
"""Create an index that is true for all cases whose name ends with
``substr``
Parameters
----------
substr : str
String for selecting cells that end with substr.
Returns
-------
idx : boolean array, len = len(self)
Index that is true wherever the value ends with ``substr``.
Examples
--------
>>> a = Factor(['a1', 'a2', 'b1', 'b2'])
>>> a.endswith('1')
array([True, False, True, False], dtype=bool)
"""
values = [v for v in self.cells if v.endswith(substr)]
return self.isin(values)
def get_index_to_match(self, other):
"""
Assuming that ``other`` is a shuffled version of self, this method
returns ``index`` to transform from the order of self to the order of
``other``. To guarantee exact matching, each value can only occur once
in self.
Example::
>>> index = factor1.get_index_to_match(factor2)
>>> all(factor1[index] == factor2)
True
"""
assert self._labels == other._labels
index = []
for v in other.x:
where = np.where(self.x == v)[0]
if len(where) == 1:
index.append(where[0])
else:
msg = "%r contains several cases of %r" % (self, v)
raise ValueError(msg)
return np.array(index)
def isany(self, *values):
"""Find the index of entries matching one of the ``*values``
Returns
-------
index : array of bool
For each case True if the value is in values, else False.
Examples
--------
>>> a = Factor('aabbcc')
>>> a.isany('b', 'c')
array([False, False, True, True, True, True], dtype=bool)
"""
return self.isin(values)
def isin(self, values):
"""Find the index of entries matching one of the ``values``
Returns
-------
index : array of bool
For each case True if the value is in values, else False.
Examples
--------
>>> f = Factor('aabbcc')
>>> f.isin(('b', 'c'))
array([False, False, True, True, True, True], dtype=bool)
"""
return np.in1d(self.x, self._encode_seq(values))
def isnot(self, *values):
"""Find the index of entries not in ``values``
Returns
-------
index : array of bool
For each case False if the value is in values, else True.
"""
return self.isnotin(values)
def isnotin(self, values):
"""Find the index of entries not in ``values``
Returns
-------
index : array of bool
For each case False if the value is in values, else True.
"""
return np.in1d(self.x, self._encode_seq(values), invert=True)
def label_length(self, name=None):
"""Create Var with the length of each label string
Parameters
----------
name : str
Name of the output Var (default ``None``).
Examples
--------
>>> f = Factor(['a', 'ab', 'long_label'])
>>> f.label_length()
Var([1, 2, 10])
"""
label_lengths = {code: len(label) for code, label in self._labels.iteritems()}
x = np.empty(len(self), np.uint16)
for i, code in enumerate(self.x):
x[i] = label_lengths[code]
return Var(x, name)
@property
def n_cells(self):
return len(self._labels)
def relabel(self, labels):
"""Deprecated, use Factor.update_labels"""
warn("Factor.relabel() is deprecated, use Factor.update_labels()",
DeprecationWarning)
self.update_labels(labels)
def update_labels(self, labels):
"""Change one or more labels in place
Parameters
----------
labels : dict
Mapping from old labels to new labels. Existing labels that are not
in ``labels`` are kept.
Examples
--------
>>> f = Factor('aaabbbccc')
>>> f
Factor(['a', 'a', 'a', 'b', 'b', 'b', 'c', 'c', 'c'])
>>> f.update_labels({'a': 'v1', 'b': 'v2'})
>>> f
Factor(['v1', 'v1', 'v1', 'v2', 'v2', 'v2', 'c', 'c', 'c'])
In order to create a copy of the Factor with different labels just
use the labels argument when initializing a new Factor:
>>> Factor(f, labels={'c': 'v3'})
Factor(['v1', 'v1', 'v1', 'v2', 'v2', 'v2', 'v3', 'v3', 'v3'])
Notes
-----
If ``labels`` contains a key that is not a label of the Factor, a
``KeyError`` is raised.
"""
missing = tuple(old for old in labels if old not in self._codes)
if missing:
if len(missing) == 1:
msg = ("Factor does not contain label %r" % missing[0])
else:
msg = ("Factor does not contain labels %s"
% ', '.join(repr(m) for m in missing))
raise KeyError(msg)
# check for merged labels
new_labels = {c: labels.get(l, l) for c, l in self._labels.iteritems()}
codes_ = sorted(new_labels)
labels_ = tuple(new_labels[c] for c in codes_)
for i, label in enumerate(labels_):
if label in labels_[:i]:
old_code = codes_[i]
new_code = codes_[labels_.index(label)]
self.x[self.x == old_code] = new_code
del new_labels[old_code]
self._labels = new_labels
self._codes = {l: c for c, l in new_labels.iteritems()}
def startswith(self, substr):
"""Create an index that is true for all cases whose name starts with
``substr``
Parameters
----------
substr : str
String for selecting cells that start with substr.
Returns
-------
idx : boolean array, len = len(self)
Index that is true wherever the value starts with ``substr``.
Examples
--------
>>> a = Factor(['a1', 'a2', 'b1', 'b2'])
>>> a.startswith('b')
array([False, False, True, True], dtype=bool)
"""
values = [v for v in self.cells if v.startswith(substr)]
return self.isin(values)
def table_categories(self):
"returns a table containing information about categories"
table = fmtxt.Table('rll')
table.title(self.name)
for title in ['i', 'Label', 'n']:
table.cell(title)
table.midrule()
for code, label in self._labels.iteritems():
table.cell(code)
table.cell(label)
table.cell(np.sum(self.x == code))
return table
def repeat(self, repeats, name=True):
"""
Repeat each element ``repeats`` times
Parameters
----------
repeats : int | array of int
Number of repeats, either a constant or a different number for each
element.
name : None | True | str
Name of the output Var, ``True`` to keep the current name (default
``True``).
"""
if name is True:
name = self.name
return Factor(self.x, name, self.random, repeats, labels=self._labels)
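# Illustrative usage (hypothetical values):
#     >>> Factor('ab').repeat(2)
#     Factor(['a', 'a', 'b', 'b'])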
class NDVar(object):
"""Container for n-dimensional data.
Parameters
----------
x : array_like
The data.
dims : tuple
The dimensions characterizing the axes of the data. If present,
``'case'`` should be provided as a :py:class:`str`, and should
always occupy the first position.
info : dict
A dictionary with data properties (can contain arbitrary
information that will be accessible in the info attribute).
name : None | str
Name for the NDVar.
Notes
-----
``x`` and ``dims`` are stored without copying. A shallow
copy of ``info`` is stored. Make sure the relevant objects
are not modified externally later.
Examples
--------
Importing 600 epochs of data for 80 time points:
>>> data.shape
(600, 80)
>>> time = UTS(-.2, .01, 80)
>>> dims = ('case', time)
>>> Y = NDVar(data, dims=dims)
"""
_stype = "ndvar"
def __init__(self, x, dims=('case',), info={}, name=None):
# check data shape
dims = tuple(dims)
ndim = len(dims)
x = np.asarray(x)
if ndim != x.ndim:
err = ("Unequal number of dimensions (data: %i, dims: %i)" %
(x.ndim, ndim))
raise DimensionMismatchError(err)
# check dimensions
d0 = dims[0]
if isinstance(d0, basestring):
if d0 == 'case':
has_case = True
else:
err = ("The only dimension that can be specified as a string"
"is 'case' (got %r)" % d0)
raise ValueError(err)
else:
has_case = False
for dim, n in zip(dims, x.shape)[has_case:]:
if isinstance(dim, basestring):
err = ("Invalid dimension: %r in %r. First dimension can be "
"'case', other dimensions need to be Dimension "
"subclasses." % (dim, dims))
raise TypeError(err)
n_dim = len(dim)
if n_dim != n:
err = ("Dimension %r length mismatch: %i in data, "
"%i in dimension %r" % (dim.name, n, n_dim, dim.name))
raise DimensionMismatchError(err)
state = {'x': x, 'dims': dims, 'info': dict(info),
'name': name}
self.__setstate__(state)
def __setstate__(self, state):
self.dims = dims = state['dims']
self.has_case = (dims[0] == 'case')
self._truedims = truedims = dims[self.has_case:]
# dimnames
self.dimnames = tuple(dim.name for dim in truedims)
if self.has_case:
self.dimnames = ('case',) + self.dimnames
self.x = x = state['x']
self.name = state['name']
if 'info' in state:
self.info = state['info']
else:
self.info = state['properties']
# derived
self.ndim = len(dims)
self.shape = x.shape
self._len = len(x)
self._dim_2_ax = dict(zip(self.dimnames, xrange(self.ndim)))
# attr
for dim in truedims:
if hasattr(self, dim.name):
err = ("invalid dimension name: %r (already present as NDVar"
" attr)" % dim.name)
raise ValueError(err)
else:
setattr(self, dim.name, dim)
def __getstate__(self):
state = {'dims': self.dims,
'x': self.x,
'name': self.name,
'info': self.info}
return state
@property
def __array_interface__(self):
return self.x.__array_interface__
# numeric ---
def __neg__(self):
x = -self.x
info = self.info.copy()
return NDVar(x, self.dims, info)
def __pos__(self):
return self
def __abs__(self):
return self.abs()
def __lt__(self, other):
y = self._ialign(other)
x = self.x < y
return NDVar(x, self.dims, self.info.copy())
def __le__(self, other):
y = self._ialign(other)
x = self.x <= y
return NDVar(x, self.dims, self.info.copy())
def __eq__(self, other):
y = self._ialign(other)
x = self.x == y
return NDVar(x, self.dims, self.info.copy())
def __ne__(self, other):
y = self._ialign(other)
x = self.x != y
return NDVar(x, self.dims, self.info.copy())
def __gt__(self, other):
y = self._ialign(other)
x = self.x > y
return NDVar(x, self.dims, self.info.copy())
def __ge__(self, other):
y = self._ialign(other)
x = self.x >= y
return NDVar(x, self.dims, self.info.copy())
def _align(self, other):
"""Align data from 2 NDVars.
Notes
-----
For unequal but overlapping dimensions, the intersection is used.
"""
if isvar(other):
return self.dims, self.x, self._ialign(other)
elif isndvar(other):
dimnames = list(self.dimnames)
i_add = 0
for dimname in other.dimnames:
if dimname not in dimnames:
dimnames.append(dimname)
i_add += 1
# find data axes
self_axes = self.dimnames
if i_add:
self_axes += (None,) * i_add
other_axes = tuple(name if name in other.dimnames else None
for name in dimnames)
# find dims
dims = []
crop = False
crop_self = []
crop_other = []
for name, other_name in izip(self_axes, other_axes):
if name is None:
dim = other.get_dim(other_name)
cs = co = full_slice
elif other_name is None:
dim = self.get_dim(name)
cs = co = full_slice
else:
self_dim = self.get_dim(name)
other_dim = other.get_dim(other_name)
if self_dim == other_dim:
dim = self_dim
cs = co = full_slice
else:
dim = self_dim.intersect(other_dim)
crop = True
cs = self_dim.dimindex(dim)
os = other_dim.dimindex(dim)
dims.append(dim)
crop_self.append(cs)
crop_other.append(co)
x_self = self.get_data(self_axes)
x_other = other.get_data(other_axes)
if crop:
x_self = x_self[tuple(crop_self)]
x_other = x_other[tuple(crop_other)]
return dims, x_self, x_other
else:
raise TypeError("Need Var or NDVar")
def _ialign(self, other):
"align for self-modifying operations (+=, ...)"
if np.isscalar(other):
return other
elif isvar(other):
assert self.has_case
n = len(other)
shape = (n,) + (1,) * (self.x.ndim - 1)
return other.x.reshape(shape)
elif isndvar(other):
assert all(dim in self.dimnames for dim in other.dimnames)
i_other = []
for dim in self.dimnames:
if dim in other.dimnames:
i_other.append(dim)
else:
i_other.append(None)
return other.get_data(i_other)
else:
raise TypeError
def __add__(self, other):
if isnumeric(other):
dims, x_self, x_other = self._align(other)
x = x_self + x_other
elif np.isscalar(other):
x = self.x + other
dims = self.dims
else:
raise ValueError("can't add %r" % other)
info = self.info.copy()
return NDVar(x, dims, info)
def __iadd__(self, other):
self.x += self._ialign(other)
return self
def __div__(self, other):
if isnumeric(other):
dims, x_self, x_other = self._align(other)
x = x_self / x_other
elif np.isscalar(other):
x = self.x / other
dims = self.dims
else:
raise ValueError("can't subtract %r" % other)
info = self.info.copy()
return NDVar(x, dims, info)
def __mul__(self, other):
if isnumeric(other):
dims, x_self, x_other = self._align(other)
x = x_self * x_other
elif np.isscalar(other):
x = self.x * other
dims = self.dims
else:
raise ValueError("can't subtract %r" % other)
info = self.info.copy()
return NDVar(x, dims, info)
def __sub__(self, other):
if isnumeric(other):
dims, x_self, x_other = self._align(other)
x = x_self - x_other
elif np.isscalar(other):
x = self.x - other
dims = self.dims
else:
raise ValueError("can't subtract %r" % other)
info = self.info.copy()
return NDVar(x, dims, info)
def __isub__(self, other):
self.x -= self._ialign(other)
return self
def __rsub__(self, other):
x = other - self.x
info = self.info.copy()
return NDVar(x, self.dims, info, self.name)
# container ---
def __getitem__(self, index):
'''Options for NDVar indexing:
- First element only: numpy-like case index (int, array).
- All elements: 1d boolean NDVar.
'''
if isinstance(index, tuple):
return self.sub(*index)
else:
return self.sub(index)
def __len__(self):
return self._len
def __repr__(self):
rep = '<NDVar %(name)r: %(dims)s>'
if self.has_case:
dims = [(self._len, 'case')]
else:
dims = []
dims.extend([(len(dim), dim.name) for dim in self._truedims])
dims = ' X '.join('%i (%s)' % fmt for fmt in dims)
args = dict(dims=dims, name=self.name or '')
return rep % args
def abs(self, name=None):
"""Compute the absolute values"""
x = np.abs(self.x)
dims = self.dims
info = self.info.copy()
return NDVar(x, dims, info, name)
def any(self, dims=None):
"""Compute presence of any value other than zero over given dimensions
Parameters
----------
dims : None | str | tuple of str | boolean NDVar
Dimensions over which to operate. A str is used to specify a single
dimension, a tuple of str to specify several dimensions, None to
compute whether there are any nonzero values at all.
A boolean NDVar with the same dimensions as the data can be used
to check for nonzero values in specific elements (if the data has a
case dimension, the check is performed for each case).
Returns
-------
any : NDVar | Var | float
Boolean data indicating presence of nonzero value over specified
dimensions. Returns a Var if only the case dimension remains, and a
float if the function collapses over all data.
"""
return self._aggregate_over_dims(dims, np.any)
def assert_dims(self, dims):
if self.dimnames != dims:
err = "Dimensions of %r do not match %r" % (self, dims)
raise DimensionMismatchError(err)
def compress(self, X, func=np.mean, name=None):
"Deprecated. Use .aggregate()."
warn("NDVar.compress s deprecated; use NDVar.aggregate instead"
"(with identical functionality).", DeprecationWarning)
self.aggregate(X, func, name)
def aggregate(self, X, func=np.mean, name=True):
"""
Summarize data in each cell of ``X``.
Parameters
----------
X : categorial
Categorial whose cells define which cases to aggregate.
func : function with axis argument
Function that is used to create a summary of the cases falling
into each cell of X. The function needs to accept the data as
first argument and ``axis`` as keyword-argument. Default is
``numpy.mean``.
name : None | True | str
Name of the output NDVar, ``True`` to keep the current name
(default ``True``).
Returns
-------
aggregated_ndvar : NDVar
An NDVar with one case for each cell in ``X``.
"""
if not self.has_case:
raise DimensionMismatchError("%r has no case dimension" % self)
if len(X) != len(self):
err = "Length mismatch: %i (Var) != %i (X)" % (len(self), len(X))
raise ValueError(err)
x = []
for cell in X.cells:
idx = (X == cell)
if np.sum(idx):
x_cell = self.x[idx]
x.append(func(x_cell, axis=0))
# update info for summary
info = self.info.copy()
if 'summary_info' in info:
info.update(info.pop('summary_info'))
if name is True:
name = self.name
x = np.array(x)
out = NDVar(x, self.dims, info, name)
return out
def _aggregate_over_dims(self, axis, func):
if axis is None:
return func(self.x)
elif isndvar(axis):
if axis.ndim == 1:
dim = axis.dims[0]
dim_axis = self.get_axis(dim.name)
if self.get_dim(dim.name) != dim:
msg = "Index dimension does not match data dimension"
raise DimensionMismatchError(msg)
index = (full_slice,) * dim_axis + (axis.x,)
x = func(self.x[index], dim_axis)
dims = (dim_ for dim_ in self.dims if not dim_ == dim)
else:
# if the index does not contain all dimensions, numpy indexing
# is weird
if self.ndim - self.has_case != axis.ndim - axis.has_case:
msg = ("If the index is not one dimensional, it needs to "
"have the same dimensions as the data.")
raise NotImplementedError(msg)
dims, self_x, index = self._align(axis)
if self.has_case:
if axis.has_case:
x = np.array([func(x_[i]) for x_, i in izip(self_x, index)])
else:
index = index[0]
x = np.array([func(x_[index]) for x_ in self_x])
return Var(x, self.name, info=self.info.copy())
elif axis.has_case:
msg = ("Index with case dimension can not be applied to "
"data without case dimension")
raise IndexError(msg)
else:
return func(self_x[index])
elif isinstance(axis, basestring):
axis = self._dim_2_ax[axis]
x = func(self.x, axis=axis)
dims = (self.dims[i] for i in xrange(self.ndim) if i != axis)
else:
axes = tuple(self._dim_2_ax[dim_name] for dim_name in axis)
x = func(self.x, axes)
dims = (self.dims[i] for i in xrange(self.ndim) if i not in axes)
dims = tuple(dims)
name = self.name
if len(dims) == 0:
return x
elif dims == ('case',):
return Var(x, name, info=self.info.copy())
else:
return NDVar(x, dims, self.info.copy(), name)
def bin(self, tstep, tstart=None, tstop=None, func=None):
"""Bin the data along the time axis
Parameters
----------
tstep : scalar
Time step between bins.
tstart : None | scalar
Earliest time point (default is from the beginning).
tstop : None | scalar
End of the data to use (default is to the end).
func : callable
Function to summarize data, needs axis argument (default is the
mean)
Returns
-------
binned_ndvar : NDVar
NDVar with data binned along the time axis (i.e., each time point
reflects one time bin).
"""
time = self.get_dim('time')
time_axis = self.get_axis('time')
# summary-func
if func is None:
meas = self.info.get('meas', '').lower()
if meas == 'p':
func = np.min
elif meas == 'f':
func = np.max
elif meas in ('t', 'r'):
func = extrema
else:
func = np.mean
# find time bin boundaries
if tstart is None:
tstart = time.tmin
if tstop is None:
tstop = time.tmax # -> avoid adding 1 sample bins
times = [tstart]
t = tstart + tstep
while t < tstop:
times.append(t)
t += tstep
times.append(min(t, time.tstop))
n_bins = len(times) - 1
out_shape = list(self.shape)
out_shape[time_axis] = n_bins
x = np.empty(out_shape)
bins = []
idx_prefix = (full_slice,) * time_axis
for i in xrange(n_bins):
t0 = times[i]
t1 = times[i + 1]
src_idx = idx_prefix + (time.dimindex((t0, t1)),)
dst_idx = idx_prefix + (i,)
x[dst_idx] = func(self.x[src_idx], axis=time_axis)
if t1 is None:
t1 = time.tmax + time.tstep
bins.append((t0, t1))
out_time = UTS(tstart + tstep / 2, tstep, n_bins)
dims = list(self.dims)
dims[time_axis] = out_time
info = self.info.copy()
info['bins'] = bins
return NDVar(x, dims, info)
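# Illustrative sketch (hypothetical data, assuming a UTS time dimension as in
# the NDVar docstring example): binning 100 samples at 10 ms into 100 ms bins
# yields 10 time points, each summarizing one bin:
#     >>> x = NDVar(np.random.randn(100), (UTS(0, 0.01, 100),))
#     >>> x.bin(0.1).shape
#     (10,)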
def copy(self, name=True):
"""returns an NDVar with a deep copy of its data
Parameters
----------
name : None | True | str
Name of the output NDVar, ``True`` to keep the current name
(default ``True``).
Returns
-------
ndvar_copy : NDVar
A copy of the NDVar with a deep copy of the data.
Notes
-----
The info dictionary is still a shallow copy.
"""
x = self.x.copy()
info = self.info.copy()
if name is True:
name = self.name
return NDVar(x, self.dims, info, name)
def diminfo(self, str_out=False):
"""Information about the dimensions
Parameters
----------
str_out : bool
Return a string with the information (as opposed to the default
which is to print the information).
Returns
-------
info : None | str
If str_out is True, the dimension description as str.
"""
ns = []
dim_info = ["<NDVar %r" % self.name]
if self.has_case:
ns.append(len(self))
dim_info.append("cases")
for dim in self._truedims:
ns.append(len(dim))
dim_info.append(dim._diminfo())
dim_info[-1] += '>'
n_digits = int(max(ceil(log10(n)) for n in ns))
info = '\n '.join('{:{}d} {:s}'.format(n, n_digits, desc) for n, desc
in izip(ns, dim_info))
if str_out:
return info
else:
print info
def get_axis(self, name):
if self.has_dim(name):
i = self._dim_2_ax[name]
else:
msg = "%r has no dimension named %r" % (self, name)
raise DimensionMismatchError(msg)
return i
def get_data(self, dims):
"""Retrieve the NDVar's data with a specific axes order.
Parameters
----------
dims : str | sequence of str
Sequence of dimension names (or single dimension name). The array
that is returned will have axes in this order. To insert a new
axis with size 1 use ``numpy.newaxis``/``None``.
Notes
-----
A shallow copy of the data is returned. To retrieve the data with the
stored axes order use the .x attribute.
"""
if isinstance(dims, str):
dims = (dims,)
dims_ = tuple(d for d in dims if d is not np.newaxis)
if set(dims_) != set(self.dimnames) or len(dims_) != len(self.dimnames):
err = "Requested dimensions %r from %r" % (dims, self)
raise DimensionMismatchError(err)
# transpose
axes = tuple(self.dimnames.index(d) for d in dims_)
x = self.x.transpose(axes)
# insert axes
if len(dims) > len(dims_):
for ax, dim in enumerate(dims):
if dim is np.newaxis:
x = np.expand_dims(x, ax)
return x
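# Illustrative sketch (hypothetical data): retrieve the data with a requested
# axis order, e.g. time before case:
#     >>> y = NDVar(np.zeros((5, 20)), ('case', UTS(0, 0.01, 20)))
#     >>> y.get_data(('time', 'case')).shape
#     (20, 5)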
def get_dim(self, name):
"Returns the Dimension object named ``name``"
i = self.get_axis(name)
dim = self.dims[i]
return dim
def get_dims(self, names):
"Returns a tuple with the requested Dimension objects"
return tuple(self.get_dim(name) for name in names)
def has_dim(self, name):
return name in self._dim_2_ax
def max(self, dims=None):
"""Compute the maximum over given dimensions
Parameters
----------
dims : None | str | tuple of str | boolean NDVar
Dimensions over which to operate. A str is used to specify a single
dimension, a tuple of str to specify several dimensions, None to
compute the maximum over all dimensions.
A boolean NDVar with the same dimensions as the data can be used
to compute the maximum in specific elements (if the data has a case
dimension, the maximum is computed for each case).
Returns
-------
max : NDVar | Var | float
The maximum over specified dimensions. Returns a Var if only the
case dimension remains, and a float if the function collapses over
all data.
"""
return self._aggregate_over_dims(dims, np.max)
def mean(self, dims=None):
"""Compute the mean over given dimensions
Parameters
----------
dims : None | str | tuple of str | boolean NDVar
Dimensions over which to operate. A str is used to specify a single
dimension, a tuple of str to specify several dimensions, None to
compute the mean over all dimensions.
A boolean NDVar with the same dimensions as the data can be used
to compute the mean in specific elements (if the data has a case
dimension, the mean is computed for each case).
Returns
-------
mean : NDVar | Var | float
The mean over specified dimensions. Returns a Var if only the case
dimension remains, and a float if the function collapses over all
data.
"""
return self._aggregate_over_dims(dims, np.mean)
def min(self, dims=None):
"""Compute the minimum over given dimensions
Parameters
----------
dims : None | str | tuple of str | boolean NDVar
Dimensions over which to operate. A str is used to specify a single
dimension, a tuple of str to specify several dimensions, None to
compute the minimum over all dimensions.
A boolean NDVar with the same dimensions as the data can be used
to compute the minimum in specific elements (if the data has a case
dimension, the minimum is computed for each case).
Returns
-------
min : NDVar | Var | float
The minimum over specified dimensions. Returns a Var if only the
case dimension remains, and a float if the function collapses over
all data.
"""
return self._aggregate_over_dims(dims, np.min)
def ols(self, x, name=None):
"""Sample-wise ordinary least squares regressions
Parameters
----------
x : Model
Predictor or predictors. Can also be supplied as argument that can
be converted to a Model, for example ``Var`` or list of ``Var``.
name : str
Name for the output NDVar.
Returns
-------
beta : NDVar
Per sample beta weights. The case dimension reflects the predictor
variables in the same order as the Model's effects.
Notes
-----
The intercept is generated internally, and betas for the intercept are
not returned.
See Also
--------
.ols_t : T-values for regression coefficients
"""
from ._stats import stats
if not self.has_case:
msg = ("Can only apply regression to NDVar with case dimension")
raise DimensionMismatchError(msg)
x = asmodel(x)
if len(x) != len(self):
msg = ("Predictors do not have same number of cases (%i) as the "
"dependent variable (%i)" % (len(x), len(self)))
raise DimensionMismatchError(msg)
betas = stats.betas(self.x, x)[1:] # drop intercept
info = self.info.copy()
info.update(meas='beta', unit=None)
if 'summary_info' in info:
del info['summary_info']
return NDVar(betas, self.dims, info, name)
def ols_t(self, x, name=None):
"""T-values for sample-wise ordinary least squares regressions
Parameters
----------
x : Model
Predictor or predictors. Can also be supplied as argument that can
be converted to a Model, for example ``Var`` or list of ``Var``.
name : str
Name for the output NDVar.
Returns
-------
t : NDVar
Per sample t-values. The case dimension reflects the predictor
variables in the same order as the Model's effects.
Notes
-----
Betas for the intercept are not returned.
See Also
--------
.ols : Regression coefficients
"""
from ._stats import stats
if not self.has_case:
msg = ("Can only apply regression to NDVar with case dimension")
raise DimensionMismatchError(msg)
x = asmodel(x)
if len(x) != len(self):
msg = ("Predictors do not have same number of cases (%i) as the "
"dependent variable (%i)" % (len(x), len(self)))
raise DimensionMismatchError(msg)
t = stats.lm_t(self.x, x)[1:] # drop intercept
info = self.info.copy()
return NDVar(t, self.dims, info, name)
def repeat(self, repeats, dim='case', name=True):
"""
Analogous to :py:func:`numpy.repeat`
Parameters
----------
repeats : int | array of ints
The number of repetitions for each element. `repeats` is
broadcasted to fit the shape of the given dimension.
dim : str
The dimension along which to repeat values (default 'case').
name : None | True | str
Name of the output NDVar, ``True`` to keep the current name
(default ``True``).
"""
ax = self.get_axis(dim)
x = self.x.repeat(repeats, axis=ax)
repdim = self.dims[ax]
if not isinstance(repdim, str):
repdim = repdim.repeat(repeats)
dims = self.dims[:ax] + (repdim,) + self.dims[ax + 1:]
info = self.info.copy()
if name is True:
name = self.name
return NDVar(x, dims, info, name)
def residuals(self, x, name=None):
"""
Residuals of sample-wise ordinary least squares regressions
Parameters
----------
x : Model
Predictor or predictors. Can also be supplied as argument that can
be converted to a Model, for example ``Var`` or list of ``Var``.
name : str
Name for the output NDVar.
Returns
-------
residuals : NDVar
Residual for each case and sample (same dimensions as data).
"""
if not self.has_case:
msg = ("Can only apply regression to NDVar with case dimension")
raise DimensionMismatchError(msg)
x = asmodel(x)
if len(x) != len(self):
msg = ("Predictors do not have same number of cases (%i) as the "
"dependent variable (%i)" % (len(x), len(self)))
raise DimensionMismatchError(msg)
from ._stats import stats
res = stats.residuals(self.x, x)
info = self.info.copy()
return NDVar(res, self.dims, info, name)
def rms(self, axis=None):
"""Compute the root mean square over given dimensions
Parameters
----------
axis : None | str | tuple of str | boolean NDVar
Dimensions over which to operate. A str is used to specify a single
dimension, a tuple of str to specify several dimensions, None to
compute the standard deviation over all values.
            A boolean NDVar with the same dimensions as the data can be used
to compute the RMS in specific elements (if the data has a case
dimension, the RMS is computed for each case).
Returns
-------
rms : NDVar | Var | float
The root mean square over specified dimensions. Returns a Var if
only the case dimension remains, and a float if the function
collapses over all data.
"""
from ._stats.stats import rms
return self._aggregate_over_dims(axis, rms)
def std(self, dims=None):
"""Compute the standard deviation over given dimensions
Parameters
----------
dims : None | str | tuple of str | boolean NDVar
Dimensions over which to operate. A str is used to specify a single
dimension, a tuple of str to specify several dimensions, None to
compute the standard deviation over all values.
            A boolean NDVar with the same dimensions as the data can be used
to compute the standard deviation in specific elements (if the data
has a case dimension, the standard deviation is computed for each
case).
Returns
-------
std : NDVar | Var | float
The standard deviation over specified dimensions. Returns a Var if
only the case dimension remains, and a float if the function
collapses over all data.
"""
return self._aggregate_over_dims(dims, np.std)
def summary(self, *dims, **regions):
r"""
Returns a new NDVar with specified dimensions collapsed.
.. warning::
Data is collapsed over the different dimensions in turn using the
provided function with an axis argument. For certain functions
this is not equivalent to collapsing over several axes concurrently
(e.g., np.var).
dimension:
A whole dimension is specified as string argument. This
dimension is collapsed over the whole range.
range:
A range within a dimension is specified through a keyword-argument.
Only the data in the specified range is included. Use like the
:py:meth:`.sub` method.
**additional kwargs:**
func : callable
Function used to collapse the data. Needs to accept an "axis"
kwarg (default: np.mean)
name : str
Name for the new NDVar.
Examples
--------
Assuming ``data`` is a normal time series. Get the average in a time
window::
>>> Y = data.summary(time=(.1, .2))
Get the peak in a time window::
>>> Y = data.summary(time=(.1, .2), func=np.max)
Assuming ``meg`` is an NDVar with dimensions time and sensor. Get the
average across sensors 5, 6, and 8 in a time window::
>>> roi = [5, 6, 8]
>>> Y = meg.summary(sensor=roi, time=(.1, .2))
        Get the peak in the same data::
>>> roi = [5, 6, 8]
>>> peak = meg.summary(sensor=roi, time=(.1, .2), func=np.max)
        Get the RMS over all sensors::
>>> meg_rms = meg.summary('sensor', func=rms)
"""
if 'func' in regions:
func = regions.pop('func')
elif 'summary_func' in self.info:
func = self.info['summary_func']
else:
func = np.mean
name = regions.pop('name', None)
if len(dims) + len(regions) == 0:
dims = ('case',)
if regions:
dims = list(dims)
data = self.sub(**regions)
dims.extend(dim for dim in regions if data.has_dim(dim))
return data.summary(*dims, func=func, name=name)
else:
x = self.x
axes = [self._dim_2_ax[dim] for dim in dims]
dims = list(self.dims)
for axis in sorted(axes, reverse=True):
x = func(x, axis=axis)
dims.pop(axis)
# update info for summary
info = self.info.copy()
if 'summary_info' in info:
info.update(info.pop('summary_info'))
if len(dims) == 0:
return x
elif dims == ['case']:
return Var(x, name, info=info)
else:
return NDVar(x, dims, info, name)
def sub(self, *args, **kwargs):
"""Retrieve a slice through the NDVar.
Returns an NDVar object with a slice of the current NDVar's data.
The slice is specified using arguments and keyword arguments. Indexes
        for dimensions can either be specified in order, or with dimension names
as keywords, e.g.::
>>> Y.sub(time = 1)
returns a slice for time point 1 (second). If time is the first
dimension, this is equivalent::
>>> Y.sub(1)
For dimensions whose values change monotonically, a tuple can be used
to specify a window::
>>> Y.sub(time = (.2, .6))
returns a slice containing all values for times .2 seconds to .6
seconds.
The name of the new NDVar can be set with a ``name`` parameter. The
default is the name of the current NDVar.
"""
var_name = kwargs.pop('name', self.name)
info = self.info.copy()
dims = list(self.dims)
n_axes = len(dims)
index = [full_slice] * n_axes
index_args = [None] * n_axes
# sequence args
for i, arg in enumerate(args):
if isndvar(arg):
if arg.has_case:
raise ValueError("NDVar with case dimension can not serve"
"as NDVar index")
dimax = self.get_axis(arg.dims[0].name)
if index_args[dimax] is None:
index_args[dimax] = arg
else:
raise IndexError("Index for %s dimension specified twice."
% arg.dims[0].name)
else:
index_args[i] = arg
# sequence kwargs
for dimname, arg in kwargs.iteritems():
dimax = self.get_axis(dimname)
if index_args[dimax] is None:
index_args[dimax] = arg
else:
raise RuntimeError("Index for %s dimension specified twice." % dimname)
# process indexes
for dimax, idx in enumerate(index_args):
if idx is None:
continue
dim = self.dims[dimax]
# find index
if dimax >= self.has_case:
idx = dim.dimindex(idx)
else:
idx = dimindex_case(idx)
index[dimax] = idx
# find corresponding dim
if np.isscalar(idx):
dims[dimax] = None
elif dimax >= self.has_case:
dims[dimax] = dim[idx]
else:
dims[dimax] = dim
# adjust index dimension
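        # When more than one dimension is indexed with an array, numpy's
        # advanced indexing would broadcast the arrays against each other
        # along a single axis. To keep the dimensions separate, each array
        # index (from right to left) gets trailing singleton axes appended so
        # the index arrays broadcast orthogonally; intervening slices and
        # boolean arrays are converted to integer index arrays first.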
if sum(isinstance(idx, np.ndarray) for idx in index) > 1:
ndim_increment = 0
for i in xrange(n_axes - 1, -1, -1):
idx = index[i]
if ndim_increment and isinstance(idx, (slice, np.ndarray)):
if isinstance(idx, slice):
idx = slice_to_arange(idx, len(dims[i]))
elif idx.dtype.kind == 'b':
idx = np.flatnonzero(idx)
index[i] = idx[(full_slice,) + (None,) * ndim_increment]
if isinstance(idx, np.ndarray):
ndim_increment += 1
# create NDVar
dims = tuple(dim for dim in dims if dim is not None)
if dims == ('case',):
return Var(self.x[tuple(index)], var_name, info=info)
elif dims:
return NDVar(self.x[tuple(index)], dims, info, var_name)
else:
return self.x[tuple(index)]
def subdata(self, **kwargs):
"Deprecated. Use .sub() method (with identical functionality)."
warn("NDVar.subdata is deprecated; use NDVar.sub instead "
"(with identical functionality).", DeprecationWarning)
return self.sub(**kwargs)
def sum(self, dims=None):
"""Compute the sum over given dimensions
Parameters
----------
dims : None | str | tuple of str | boolean NDVar
Dimensions over which to operate. A str is used to specify a single
dimension, a tuple of str to specify several dimensions, None to
compute the sum over all dimensions.
            A boolean NDVar with the same dimensions as the data can be used
to compute the sum in specific elements (if the data has a case
dimension, the sum is computed for each case).
Returns
-------
sum : NDVar | Var | float
The sum over specified dimensions. Returns a Var if only the
case dimension remains, and a float if the function collapses over
all data.
"""
return self._aggregate_over_dims(dims, np.sum)
def extrema(x, axis=0):
"Extract the extreme values in x"
    x_max = np.max(x, axis)
    x_min = np.min(x, axis)
    return np.where(np.abs(x_max) > np.abs(x_min), x_max, x_min)
class Datalist(list):
""":py:class:`list` subclass for including lists in in a Dataset.
Parameters
----------
items : sequence
Content for the Datalist.
name : str
Name of the Datalist.
fmt : 'repr' | 'str' | 'strlist'
How to format items when converting Datasets to tables (default 'repr'
uses the normal object representation).
Notes
-----
Modifications:
    - adds certain methods that make indexing behavior more similar to numpy
      and other data objects
    - blocks methods for in-place modifications that would change the list's
      length
Examples
--------
Concise string representation:
>>> l = [['a', 'b'], [], ['a']]
>>> print Datalist(l)
[['a', 'b'], [], ['a']]
>>> print Datalist(l, fmt='strlist')
[[a, b], [], [a]]
"""
_stype = 'list'
_fmt = 'repr' # for backwards compatibility with old pickles
def __init__(self, items=None, name=None, fmt='repr'):
if fmt not in ('repr', 'str', 'strlist'):
raise ValueError("fmt=%s" % repr(fmt))
self.name = name
self._fmt = fmt
if items:
super(Datalist, self).__init__(items)
else:
super(Datalist, self).__init__()
def __repr__(self):
args = super(Datalist, self).__repr__()
if self.name is not None:
args += ', %s' % repr(self.name)
if self._fmt != 'repr':
args += ', fmt=%s' % repr(self._fmt)
return "Datalist(%s)" % args
def __str__(self):
return "[%s]" % ', '.join(self._item_repr(i) for i in self)
def _item_repr(self, item):
if self._fmt == 'str':
return str(item)
elif self._fmt == 'repr':
return repr(item)
elif self._fmt == 'strlist':
return "[%s]" % ', '.join(item)
else:
raise RuntimeError("Datalist._fmt=%s" % repr(self._fmt))
def __getitem__(self, index):
if isinstance(index, int):
return list.__getitem__(self, index)
elif isinstance(index, slice):
return Datalist(list.__getitem__(self, index), fmt=self._fmt)
index = np.asarray(index)
if index.dtype.kind == 'b':
if len(index) != len(self):
raise ValueError("Boolean index needs to have same length as "
"Datalist")
return Datalist((self[i] for i in np.flatnonzero(index)), fmt=self._fmt)
elif index.dtype.kind == 'i':
return Datalist((self[i] for i in index), fmt=self._fmt)
else:
err = ("Unsupported type of index for Datalist: %r" % index)
raise TypeError(err)
def __getslice__(self, i, j):
return Datalist(list.__getslice__(self, i, j), fmt=self._fmt)
def __add__(self, other):
return Datalist(super(Datalist, self).__add__(other), fmt=self._fmt)
def compress(self, X, merge='mean'):
"Deprecated. Use .aggregate()."
warn("Var.compress s deprecated; use Var.aggregate instead"
"(with identical functionality).", DeprecationWarning)
self.aggregate(X, merge)
def aggregate(self, X, merge='mean'):
"""
Summarize cases for each cell in X
Parameters
----------
X : categorial
Cells which to aggregate.
merge : str
How to merge entries.
            ``'mean'``: sum elements and divide by cell length
"""
if len(X) != len(self):
err = "Length mismatch: %i (Var) != %i (X)" % (len(self), len(X))
raise ValueError(err)
x = []
for cell in X.cells:
x_cell = self[X == cell]
n = len(x_cell)
if n == 1:
x.append(x_cell)
elif n > 1:
if merge == 'mean':
xc = reduce(lambda x, y: x + y, x_cell)
xc /= n
else:
raise ValueError("Invalid value for merge: %r" % merge)
x.append(xc)
return Datalist(x, fmt=self._fmt)
def __iadd__(self, other):
return self + other
def append(self, p_object):
raise TypeError("Datalist has fixed length to conform to Dataset")
def extend(self, iterable):
raise TypeError("Datalist has fixed length to conform to Dataset")
def insert(self, index, p_object):
raise TypeError("Datalist has fixed length to conform to Dataset")
def pop(self, index=None):
raise TypeError("Datalist has fixed length to conform to Dataset")
def remove(self, value):
raise TypeError("Datalist has fixed length to conform to Dataset")
legal_dataset_key_re = re.compile("[_A-Za-z][_a-zA-Z0-9]*$")
def assert_is_legal_dataset_key(key):
if iskeyword(key):
msg = ("%r is a reserved keyword and can not be used as variable name "
"in a Dataset" % key)
raise ValueError(msg)
elif not legal_dataset_key_re.match(key):
msg = ("%r is not a valid keyword and can not be used as variable name "
"in a Dataset" % key)
raise ValueError(msg)
def as_legal_dataset_key(key):
"Convert str to a legal dataset key"
if iskeyword(key):
return "%s_" % key
elif legal_dataset_key_re.match(key):
return key
else:
if ' ' in key:
key = key.replace(' ', '_')
for c in string.punctuation:
if c in key:
key = key.replace(c, '_')
if key == '':
key = '_'
elif key[0].isdigit():
key = "_%s" % key
if legal_dataset_key_re.match(key):
return key
else:
raise RuntimeError("Could not convert %r to legal dataset key")
class Dataset(OrderedDict):
"""
Stores multiple variables pertaining to a common set of measurement cases
Superclass: :class:`collections.OrderedDict`
Parameters
----------
items : iterator
Items contained in the Dataset. Items can be either named
data-objects or ``(name, data_object)`` tuples. The Dataset stores
the input items themselves, without making a copy().
name : str
Name for the Dataset.
caption : str
Caption for the table.
info : dict
Info dictionary, can contain arbitrary entries and can be accessed
as ``.info`` attribute after initialization. The Dataset makes a
shallow copy.
n_cases : int
Specify the number of cases in the Dataset if no items are added
upon initialization (by default the number is inferred when the
        first item is added).
Attributes
----------
n_cases : None | int
The number of cases in the Dataset (corresponding to the number of
rows in the table representation). None if no variables have been
added.
n_items : int
The number of items (variables) in the Dataset (corresponding to the
number of columns in the table representation).
Notes
-----
A Dataset represents a data table as a ``{variable_name: value_list}``
dictionary. Each variable corresponds to a column, and each index in the
value list corresponds to a row, or case.
The Dataset class inherits most of its behavior from its superclass
:py:class:`collections.OrderedDict`.
Dictionary keys are enforced to be :py:class:`str` objects and should
correspond to the variable names.
    As for a dictionary, the Dataset's length (``len(ds)``) reflects the number
    of variables in the Dataset (i.e., the number of columns).
**Accessing Data**
Standard indexing with :class:`str` is used to access the contained Var
and Factor objects:
- ``ds['var1']`` --> ``var1``.
- ``ds['var1',]`` --> ``Dataset([var1])``.
- ``ds['var1', 'var2']`` --> ``Dataset([var1, var2])``
When indexing numerically, the first index defines cases (rows):
- ``ds[1]`` --> row 1
- ``ds[1:5]`` or ``ds[1,2,3,4]`` --> rows 1 through 4
- ``ds[1, 5, 6, 9]`` or ``ds[[1, 5, 6, 9]]`` --> rows 1, 5, 6 and 9
The second index accesses columns, so case indexing can be combined with
column indexing:
- ``ds[:4, :2]`` --> first 4 rows of first 2 columns
    Indexing with a single integer retrieves an individual case as a
    ``{name: value}`` dictionary:
- ``ds[1]`` --> ``{'var': 1, 'factor': 'value', ...}``
The :meth:`.itercases` method can be used to iterate over cases as
:class:`dict`.
**Naming**
While Var and Factor objects themselves need not be named, they need
to be named when added to a Dataset. This can be done by a) adding a
name when initializing the Dataset::
>>> ds = Dataset((('v1', var1), ('v2', var2)))
or b) by adding the Var or Factor with a key::
>>> ds['v3'] = var3
If a Var/Factor that is added to a Dataset does not have a name, the new
key is automatically assigned to the Var/Factor's ``.name`` attribute.
Examples
--------
    Datasets can be initialized with data-objects, or with
('name', data-object) tuples::
>>> ds = Dataset((var1, var2))
>>> ds = Dataset((('v1', var1), ('v2', var2)))
Alternatively, variables can be added after initialization::
>>> ds = Dataset(n_cases=3)
>>> ds['var', :] = 0
>>> ds['factor', :] = 'a'
>>> print ds
var factor
-------------
0 a
0 a
0 a
"""
_stype = "dataset"
@staticmethod
def _args(items=(), name=None, caption=None, info={}, n_cases=None):
return items, name, caption, info, n_cases
def __init__(self, *args, **kwargs):
# backwards compatibility
if args:
fmt_1 = isdataobject(args[0])
fmt_2 = isinstance(args[0], tuple) and isinstance(args[0][0], str)
if fmt_1:
warn("Initializing Datasets with multiple data-objects is "
"deprecated. Provide a list of data-objects instead.",
DeprecationWarning)
if fmt_1 or fmt_2:
items, name, caption, info, n_cases = self._args(args, **kwargs)
else:
items, name, caption, info, n_cases = self._args(*args, **kwargs)
else:
items, name, caption, info, n_cases = self._args(**kwargs)
# collect initial items
args = []
for item in items:
if isdataobject(item):
if item.name:
args.append((item.name, item))
else:
err = ("items need to be named in a Dataset; use "
"Dataset(('name', item), ...), or ds = Dataset(); "
"ds['name'] = item")
raise ValueError(err)
else:
                # use a separate variable to avoid shadowing the Dataset name
                key, v = item
                if not v.name:
                    v.name = key
args.append(item)
if n_cases is not None:
assert isinstance(n_cases, int)
self.n_cases = n_cases
super(Dataset, self).__init__(args)
# set state
self.name = name
self.info = info.copy()
self._caption = caption
def __setstate__(self, state):
# for backwards compatibility
self.name = state['name']
self.info = state['info']
self._caption = state.get('caption', None)
def __reduce__(self):
return self.__class__, (self.items(), self.name, self._caption,
self.info, self.n_cases)
def __getitem__(self, index):
"""
possible::
>>> ds[9] (int) -> dictionary for one case
>>> ds[9:12] (slice) -> subset with those cases
>>> ds[[9, 10, 11]] (list) -> subset with those cases
>>> ds['MEG1'] (strings) -> Var
>>> ds['MEG1', 'MEG2'] (list of strings) -> list of vars; can be nested!
"""
if isinstance(index, int):
return self.get_case(index)
elif isinstance(index, slice):
return self.sub(index)
if isinstance(index, basestring):
return super(Dataset, self).__getitem__(index)
if not np.iterable(index):
raise KeyError("Invalid index for Dataset: %r" % index)
if all(isinstance(item, basestring) for item in index):
return Dataset(((item, self[item]) for item in index))
if isinstance(index, tuple):
if len(index) != 2:
raise KeyError("Invalid index for Dataset: %s" % repr(index))
i0, i1 = index
if isinstance(i0, basestring):
return self[i1, i0]
elif isinstance(i1, basestring):
return self[i1][i0]
elif np.iterable(i0) and isinstance(i0[0], basestring):
return self[i1, i0]
elif np.iterable(i1) and all(isinstance(item, basestring) for item
in i1):
keys = i1
else:
keys = Datalist(self.keys())[i1]
if isinstance(keys, basestring):
return self[i1][i0]
subds = Dataset(((k, self[k][i0]) for k in keys))
return subds
return self.sub(index)
def __repr__(self):
class_name = self.__class__.__name__
if self.n_cases is None:
items = []
if self.name:
items.append('name=%r' % self.name)
if self.info:
info = repr(self.info)
if len(info) > 60:
info = '<...>'
items.append('info=%s' % info)
return '%s(%s)' % (class_name, ', '.join(items))
rep_tmp = "<%(class_name)s %(name)s%(N)s{%(items)s}>"
fmt = {'class_name': class_name}
fmt['name'] = '%r ' % self.name if self.name else ''
fmt['N'] = 'n_cases=%i ' % self.n_cases
items = []
for key in self:
v = self[key]
if isinstance(v, Var):
lbl = 'V'
elif isinstance(v, Factor):
lbl = 'F'
elif isinstance(v, NDVar):
lbl = 'Vnd'
else:
lbl = type(v).__name__
if getattr(v, 'name', key) == key:
item = '%r:%s' % (key, lbl)
else:
item = '%r:<%s %r>' % (key, lbl, v.name)
items.append(item)
fmt['items'] = ', '.join(items)
return rep_tmp % fmt
def __setitem__(self, index, item, overwrite=True):
if isinstance(index, basestring):
# test if name already exists
if (not overwrite) and (index in self):
raise KeyError("Dataset already contains variable of name %r" % index)
assert_is_legal_dataset_key(index)
# coerce item to data-object
            if isdataobject(item) or isinstance(item, Datalist):
if not item.name:
item.name = index
elif isinstance(item, (list, tuple)):
item = Datalist(item, name=index)
else:
pass
# make sure the item has the right length
if isndvar(item) and not item.has_case:
N = 0
else:
N = len(item)
if self.n_cases is None:
self.n_cases = N
elif self.n_cases != N:
msg = ("Can not assign item to Dataset. The item`s length "
"(%i) is different from the number of cases in the "
"Dataset (%i)." % (N, self.n_cases))
raise ValueError(msg)
super(Dataset, self).__setitem__(index, item)
elif isinstance(index, tuple):
if len(index) != 2:
err = ("Dataset indexes can have only two components; direct "
"access to NDVars is not implemented")
raise NotImplementedError(err)
key, idx = index
if isinstance(idx, basestring):
key, idx = idx, key
elif not isinstance(key, basestring):
TypeError("Dataset indexes need variable specified as string")
if key in self:
self[key][idx] = item
elif isinstance(idx, slice):
if idx.start is None and idx.stop is None:
if isinstance(item, basestring):
self[key] = Factor([item], repeat=self.n_cases)
else:
self[key] = Var([item] * self.n_cases)
else:
err = ("Can only add Factor with general value for all "
"cases (ds['name',:] = ...")
raise NotImplementedError(err)
else:
raise NotImplementedError("Advanced Dataset indexing")
else:
raise NotImplementedError("Advanced Dataset indexing")
def __str__(self):
return unicode(self).encode('utf-8')
def __unicode__(self):
if sum(isuv(i) or isdatalist(i) for i in self.values()) == 0:
return self.__repr__()
maxn = preferences['dataset_str_n_cases']
txt = unicode(self.as_table(maxn, '%.5g', midrule=True, lfmt=True))
if self.n_cases > maxn:
note = "... (use .as_table() method to see the whole Dataset)"
txt = os.linesep.join((txt, note))
return txt
def _check_n_cases(self, X, empty_ok=True):
"""Check that an input argument has the appropriate length.
Also raise an error if empty_ok is False and the Dataset is empty.
"""
if self.n_cases is None:
            if empty_ok:
                return
            else:
                raise RuntimeError("Dataset is empty.")
n = len(X)
if self.n_cases != n:
name = getattr(X, 'name', "the argument")
err = ("The Dataset has a different length (%i) than %s "
"(%i)" % (self.n_cases, name, n))
raise ValueError(err)
def add(self, item, replace=False):
"""``ds.add(item)`` -> ``ds[item.name] = item``
unless the Dataset already contains a variable named item.name, in
which case a KeyError is raised. In order to replace existing
variables, set ``replace`` to True::
>>> ds.add(item, True)
"""
if not isdataobject(item):
raise ValueError("Not a valid data-object: %r" % item)
elif (item.name in self) and not replace:
raise KeyError("Dataset already contains variable named %r" % item.name)
else:
self[item.name] = item
def add_empty_var(self, name, dtype=np.float64):
"""Create an empty variable in the dataset
Parameters
----------
name : str
Name for the new variable.
dtype : numpy dtype
Data type of the new variable (default is float64).
Returns
-------
var : Var
The new variable.
"""
if self.n_cases is None:
err = "Can't add variable to a Dataset without length"
raise RuntimeError(err)
x = np.empty(self.n_cases, dtype=dtype)
v = Var(x)
self[name] = v
return v
def as_table(self, cases=0, fmt='%.6g', sfmt='%s', sort=False, header=True,
midrule=False, count=False, title=None, caption=None,
ifmt='%s', bfmt='%s', lfmt=False):
r"""
Create an fmtxt.Table containing all Vars and Factors in the Dataset.
Can be used for exporting in different formats such as csv.
Parameters
----------
cases : int | iterator of int
Cases to include (int includes that many cases from the beginning,
0 includes all; negative number works like negative indexing).
fmt : str
Format string for float variables (default ``'%.6g'``).
sfmt : str | None
Formatting for strings (None -> code; default ``'%s'``).
sort : bool
Sort the columns alphabetically.
        header : bool
            Include the variable names as a header row.
        midrule : bool
            Print a midrule after the table header.
count : bool
Add an initial column containing the case number.
title : None | str
Title for the table.
caption : None | str
Caption for the table (default is the Dataset's caption).
ifmt : str
Formatting for integers (default ``'%s'``).
bfmt : str
Formatting for booleans (default ``'%s'``).
lfmt : bool
Include Datalists.
"""
if isinstance(cases, int):
if cases < 1:
cases = self.n_cases + cases
if cases < 0:
raise ValueError("Can't get table for fewer than 0 cases")
else:
cases = min(cases, self.n_cases)
cases = xrange(cases)
keys = [k for k, v in self.iteritems() if isuv(v) or (lfmt and isdatalist(v))]
if sort:
keys = sorted(keys)
if caption is None:
caption = self._caption
values = [self[key] for key in keys]
fmts = []
for v in values:
if isfactor(v):
fmts.append(sfmt)
elif isintvar(v):
fmts.append(ifmt)
elif isboolvar(v):
fmts.append(bfmt)
elif isdatalist(v):
fmts.append('dl')
else:
fmts.append(fmt)
columns = 'l' * (len(keys) + count)
table = fmtxt.Table(columns, True, title, caption)
if header:
if count:
table.cell('#')
for name in keys:
table.cell(name)
if midrule:
table.midrule()
for i in cases:
if count:
table.cell(i)
for v, fmt_ in izip(values, fmts):
if fmt_ is None:
table.cell(v.x[i])
elif fmt_ == 'dl':
table.cell(v._item_repr(v[i]))
elif fmt_.endswith(('r', 's')):
table.cell(fmt_ % v[i])
else:
table.cell(fmtxt.Number(v[i], fmt=fmt_))
return table
def _asfmtext(self):
return self.as_table()
def export(self, fn=None, fmt='%.10g', header=True, sort=False):
"""This method is deprecated. Use .save(), .save_pickled(),
.save_txt() or .save_tex() instead.
"""
msg = ("The Dataset.export() method is deprecated. Use .save(), "
".save_pickled(), .save_txt() or .save_tex() instead.")
warn(msg, DeprecationWarning)
if not isinstance(fn, basestring):
fn = ui.ask_saveas(filetypes=[("Tab-separated values", '*.txt'),
("Tex table", '*.tex'),
("Pickle", '*.pickled')])
if fn:
print 'saving %r' % fn
else:
return
ext = os.path.splitext(fn)[1][1:]
if ext == 'pickled':
with open(fn, 'wb') as fid:
pickle.dump(self, fid)
else:
table = self.as_table(fmt=fmt, header=header, sort=sort)
if ext in ['txt', 'tsv']:
table.save_tsv(fn, fmt=fmt)
elif ext == 'tex':
table.save_tex(fn)
else:
table.save_tsv(fn, fmt=fmt)
def eval(self, expression):
"""
Evaluate an expression involving items stored in the Dataset.
Parameters
----------
expression : str
Python expression to evaluate, with scipy constituting the global
namespace and the current Dataset constituting the local namespace.
Notes
-----
``ds.eval(expression)`` is equivalent to
``eval(expression, scipy, ds)``.
Examples
--------
In a Dataset containing factors 'A' and 'B'::
>>> ds.eval('A % B')
A % B
"""
if not isinstance(expression, basestring):
err = ("Eval needs expression of type unicode or str. Got "
"%s" % type(expression))
raise TypeError(err)
return eval(expression, vars(scipy), self)
@classmethod
def from_caselist(cls, names, cases):
"""Create a Dataset from a list of cases
Parameters
----------
names : sequence of str
Names for the variables.
cases : sequence
A sequence of cases, whereby each case is itself represented as a
sequence of values (str or scalar). Variable type (Factor or Var)
is inferred from whether values are str or not.
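        Examples
        --------
        A hedged sketch with made-up values; string columns become Factors and
        numeric columns become Vars::
            >>> ds = Dataset.from_caselist(['word', 'n'], [('a', 1), ('b', 2)])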
"""
ds = cls()
for i, name in enumerate(names):
values = [case[i] for case in cases]
if any(isinstance(v, basestring) for v in values):
ds[name] = Factor(values)
else:
ds[name] = Var(values)
return ds
@classmethod
def from_r(cls, name):
"""Create a Dataset from an R data frame through ``rpy2``
Parameters
----------
name : str
Name of the dataframe in R.
Examples
--------
Getting an example dataset from R:
>>> from rpy2.robjects import r
>>> r('data(sleep)')
>>> ds = Dataset.from_r('sleep')
>>> print ds
extra group ID
------------------
0.7 1 1
-1.6 1 2
-0.2 1 3
-1.2 1 4
-0.1 1 5
3.4 1 6
3.7 1 7
0.8 1 8
0 1 9
2 1 10
1.9 2 1
0.8 2 2
1.1 2 3
0.1 2 4
-0.1 2 5
4.4 2 6
5.5 2 7
1.6 2 8
4.6 2 9
3.4 2 10
"""
from rpy2 import robjects as ro
df = ro.r[name]
if not isinstance(df, ro.DataFrame):
raise ValueError("R object %r is not a DataFrame")
ds = cls(name=name)
for item_name, item in df.items():
if isinstance(item, ro.FactorVector):
x = np.array(item)
labels = {i:l for i, l in enumerate(item.levels, 1)}
ds[item_name] = Factor(x, labels=labels)
elif isinstance(item, (ro.FloatVector, ro.IntVector)):
x = np.array(item)
ds[item_name] = Var(x)
else:
raise NotImplementedError(str(type(item)))
return ds
def get_case(self, i):
"returns the i'th case as a dictionary"
return dict((k, v[i]) for k, v in self.iteritems())
def get_subsets_by(self, X, exclude=[], name='{name}[{cell}]'):
"""
        Split the Dataset by the cells of a Factor and return a
        dictionary of subsets.
"""
if isinstance(X, basestring):
X = self[X]
out = {}
for cell in X.cells:
if cell not in exclude:
setname = name.format(name=self.name, cell=cell)
index = (X == cell)
out[cell] = self.sub(index, setname)
return out
def compress(self, X, drop_empty=True, name='{name}', count='n',
drop_bad=False, drop=()):
"Deprecated. Use .aggregate()."
warn("Dataset.compress s deprecated; use Dataset.aggregate instead"
"(with identical functionality).", DeprecationWarning)
return self.aggregate(X, drop_empty, name, count, drop_bad, drop)
def aggregate(self, x=None, drop_empty=True, name='{name}', count='n',
drop_bad=False, drop=(), equal_count=False, never_drop=()):
"""
Return a Dataset with one case for each cell in X.
Parameters
----------
x : None | str | categorial
Model defining cells to which to reduce cases. By default (``None``)
the Dataset is reduced to a single case.
drop_empty : bool
Drops empty cells in X from the Dataset. This is currently the only
option.
name : str
Name of the new Dataset.
count : None | str
Add a variable with this name to the new Dataset, containing the
number of cases in each cell in X.
drop_bad : bool
Drop bad items: silently drop any items for which compression
raises an error. This concerns primarily factors with non-unique
values for cells in X (if drop_bad is False, an error is raised
when such a Factor is encountered)
drop : sequence of str
Additional data-objects to drop.
equal_count : bool
Make sure the same number of rows go into each average. First, the
cell with the smallest number of rows is determined. Then, for each
cell, rows beyond that number are dropped.
never_drop : sequence of str
If the drop_bad=True setting would lead to dropping a variable
whose name is in never_drop, raise an error instead.
Notes
-----
Handle mne Epoch objects by creating a list with an mne Evoked object
for each cell.
"""
if not drop_empty:
raise NotImplementedError('drop_empty = False')
if x:
if equal_count:
self = self.equalize_counts(x)
x = ascategorial(x, ds=self)
else:
x = Factor('a' * self.n_cases)
ds = Dataset(name=name.format(name=self.name), info=self.info)
if count:
n_cases = filter(None, (np.sum(x == cell) for cell in x.cells))
ds[count] = Var(n_cases)
for k, v in self.iteritems():
if k in drop:
continue
try:
if hasattr(v, 'aggregate'):
ds[k] = v.aggregate(x)
else:
from mne import Epochs
if isinstance(v, Epochs):
evokeds = []
for cell in x.cells:
idx = (x == cell)
if idx.sum():
evokeds.append(v[idx].average())
ds[k] = evokeds
else:
err = ("Unsupported value type: %s" % type(v))
raise TypeError(err)
except:
if drop_bad and k not in never_drop:
pass
else:
raise
return ds
def copy(self, name=True):
"ds.copy() returns an shallow copy of ds"
if name is True:
name = self.name
return Dataset(self.items(), name, self._caption, self.info,
self.n_cases)
def equalize_counts(self, X):
"""Create a copy of the Dataset with equal counts in each cell of X
Parameters
----------
X : categorial
Model which defines the cells in which to equalize the counts.
Returns
-------
equalized_ds : Dataset
Dataset with equal number of cases in each cell of X.
Notes
-----
First, the cell with the smallest number of rows is determined (empty
cells are ignored). Then, for each cell, rows beyond that number are
dropped.
"""
X = ascategorial(X, ds=self)
self._check_n_cases(X, empty_ok=False)
indexes = np.array([X == cell for cell in X.cells])
n_by_cell = indexes.sum(1)
n = np.setdiff1d(n_by_cell, [0]).min()
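        # trim each cell to its first n cases; the third argument makes
        # np.logical_and write the result back into the index array in place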
for index in indexes:
np.logical_and(index, index.cumsum() <= n, index)
index = indexes.any(0)
return self[index]
def head(self, n=10):
"Table with the first n cases in the Dataset"
return self.as_table(n, '%.5g', midrule=True, lfmt=True)
def index(self, name='index', start=0):
"""
Add an index to the Dataset (i.e., `range(n_cases)`), e.g. for later
alignment.
Parameters
----------
name : str
Name of the new index variable.
start : int
Number at which to start the index.
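        Examples
        --------
        A minimal sketch, assuming ``ds`` is a Dataset that already contains
        cases::
            >>> ds.index()
            >>> ds.index('case_id', start=1)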
"""
self[name] = Var(np.arange(start, self.n_cases + start))
def itercases(self, start=None, stop=None):
"iterate through cases (each case represented as a dict)"
if start is None:
start = 0
if stop is None:
stop = self.n_cases
elif stop < 0:
            stop = self.n_cases + stop
for i in xrange(start, stop):
yield self.get_case(i)
@property
def n_items(self):
return super(Dataset, self).__len__()
def rename(self, old, new):
"""Shortcut to rename a data-object in the Dataset.
Parameters
----------
old : str
Current name of the data-object.
new : str
New name for the data-object.
"""
if old not in self:
raise KeyError("No item named %r" % old)
if new in self:
raise ValueError("Dataset already has variable named %r" % new)
assert_is_legal_dataset_key(new)
# update map
node = self._OrderedDict__map.pop(old)
node[2] = new
self._OrderedDict__map[new] = node
# update dict entry
obj = self[old]
dict.__delitem__(self, old)
dict.__setitem__(self, new, obj)
# update object name
if hasattr(obj, 'name'):
obj.name = new
self[new] = obj
def repeat(self, repeats, name='{name}'):
"""
Returns a new Dataset with each row repeated ``n`` times.
Parameters
----------
repeats : int | array of int
Number of repeats, either a constant or a different number for each
element.
name : str
Name for the new Dataset.
"""
if self.n_cases is None:
raise RuntimeError("Can't repeat Dataset with unspecified n_cases")
if isinstance(repeats, int):
n_cases = self.n_cases * repeats
else:
n_cases = sum(repeats)
return Dataset(((k, v.repeat(repeats)) for k, v in self.iteritems()),
name.format(name=self.name), self._caption, self.info,
n_cases)
@property
def shape(self):
return (self.n_cases, self.n_items)
def sort(self, order, descending=False):
"""Sort the Dataset in place.
Parameters
----------
order : str | data-object
Data object (Var, Factor or interactions) according to whose values
to sort the Dataset, or its name in the Dataset.
descending : bool
Sort in descending instead of an ascending order.
See Also
--------
.sort_idx : Create an index that could be used to sort the Dataset
.sorted : Create a sorted copy of the Dataset
"""
idx = self.sort_idx(order, descending)
for k in self:
self[k] = self[k][idx]
def sort_idx(self, order, descending=False):
"""Create an index that could be used to sort the Dataset.
Parameters
----------
order : str | data-object
Data object (Var, Factor or interactions) according to whose values
to sort the Dataset, or its name in the Dataset.
descending : bool
Sort in descending instead of an ascending order.
See Also
--------
.sort : sort the Dataset in place
.sorted : Create a sorted copy of the Dataset
"""
if isinstance(order, basestring):
order = self.eval(order)
if not len(order) == self.n_cases:
err = ("Order must be of same length as Dataset; got length "
"%i." % len(order))
raise ValueError(err)
idx = order.sort_idx(descending=descending)
return idx
def save(self):
"""Shortcut to save the Dataset, will display a system file dialog
Notes
-----
Use specific save methods for more options.
See Also
--------
.save_pickled : Pickle the Dataset
.save_txt : Save as text file
        .save_tex : Save as TeX table
.as_table : Create a table with more control over formatting
"""
title = "Save Dataset"
if self.name:
title += ' %s' % self.name
msg = ""
filetypes = [_pickled_ds_wildcard, _tsv_wildcard, _tex_wildcard]
path = ui.ask_saveas(title, msg, filetypes, defaultFile=self.name)
_, ext = os.path.splitext(path)
if ext == '.pickled':
self.save_pickled(path)
elif ext == '.txt':
self.save_txt(path)
elif ext == '.tex':
self.save_tex(path)
else:
err = ("Unrecognized extension: %r. Needs to be .pickled, .txt or "
".tex." % ext)
raise ValueError(err)
def save_rtf(self, path=None, fmt='%.3g'):
"""Save the Dataset as TeX table.
Parameters
----------
path : None | str
Target file name (if ``None`` is supplied, a save file dialog is
            displayed).
fmt : format string
Formatting for scalar values.
"""
table = self.as_table(fmt=fmt)
table.save_rtf(path)
def save_tex(self, path=None, fmt='%.3g', header=True, midrule=True):
"""Save the Dataset as TeX table.
Parameters
----------
path : None | str
Target file name (if ``None`` is supplied, a save file dialog is
displayed). If no extension is specified, '.tex' is appended.
fmt : format string
Formatting for scalar values.
        header : bool
            Include the variable names as a header row.
        midrule : bool
            Print a midrule after the table header.
"""
if not isinstance(path, basestring):
title = "Save Dataset"
if self.name:
title += ' %s' % self.name
title += " as TeX Table"
msg = ""
path = ui.ask_saveas(title, msg, [_tex_wildcard],
defaultFile=self.name)
_, ext = os.path.splitext(path)
if not ext:
path += '.tex'
table = self.as_table(fmt=fmt, header=header, midrule=midrule)
table.save_tex(path)
def save_txt(self, path=None, fmt='%s', delim='\t', header=True):
"""Save the Dataset as text file.
Parameters
----------
path : None | str
Target file name (if ``None`` is supplied, a save file dialog is
displayed). If no extension is specified, '.txt' is appended.
fmt : format string
Formatting for scalar values.
delim : str
Column delimiter (default is tab).
header : bool
write the variables' names in the first line
"""
if not isinstance(path, basestring):
title = "Save Dataset"
if self.name:
title += ' %s' % self.name
title += " as Text"
msg = ""
path = ui.ask_saveas(title, msg, [_tsv_wildcard],
defaultFile=self.name)
_, ext = os.path.splitext(path)
if not ext:
path += '.txt'
table = self.as_table(fmt=fmt, header=header)
table.save_tsv(path, fmt=fmt, delimiter=delim)
def save_pickled(self, path=None):
"""Pickle the Dataset.
Parameters
----------
path : None | str
Target file name (if ``None`` is supplied, a save file dialog is
displayed). If no extension is specified, '.pickled' is appended.
"""
if not isinstance(path, basestring):
title = "Pickle Dataset"
if self.name:
title += ' %s' % self.name
msg = ""
path = ui.ask_saveas(title, msg, [_pickled_ds_wildcard],
defaultFile=self.name)
_, ext = os.path.splitext(path)
if not ext:
path += '.pickled'
with open(path, 'wb') as fid:
pickle.dump(self, fid, pickle.HIGHEST_PROTOCOL)
def sorted(self, order, descending=False):
"""Create an sorted copy of the Dataset.
Parameters
----------
order : str | data-object
Data object (Var, Factor or interactions) according to whose values
to sort the Dataset, or its name in the Dataset.
descending : bool
Sort in descending instead of an ascending order.
See Also
--------
.sort : sort the Dataset in place
.sort_idx : Create an index that could be used to sort the Dataset
"""
idx = self.sort_idx(order, descending)
ds = self[idx]
return ds
def sub(self, index, name='{name}'):
"""
Returns a Dataset containing only the cases selected by `index`.
Parameters
----------
index : int | array | str
            Index for selecting a subset of cases. Can be a valid numpy index
or a string (the name of a variable in Dataset, or an expression
to be evaluated in the Dataset's namespace).
name : str
name for the new Dataset.
Notes
-----
Keep in mind that index is passed on to numpy objects, which means
that advanced indexing always returns a copy of the data, whereas
basic slicing (using slices) returns a view.
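        Examples
        --------
        A hedged sketch, assuming the Dataset contains a Factor named ``A``
        (the name and cell label are illustrative)::
            >>> ds_a1 = ds.sub("A == 'a1'")
            >>> ds_a1 = ds.sub(ds['A'] == 'a1', name='a1_cases')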
"""
if isinstance(index, int):
if index == -1:
index = slice(-1, None)
else:
index = slice(index, index + 1)
elif isinstance(index, str):
index = self.eval(index)
if isvar(index):
index = index.x
return Dataset(((k, v[index]) for k, v in self.iteritems()),
name.format(name=self.name), self._caption, self.info)
def subset(self, index, name='{name}'):
"Deprecated: use .sub() method with identical functionality."
warn("Dataset.subset is deprecated; use Dataset.sub instead"
"(with identical functionality).", DeprecationWarning)
return self.sub(index, name)
def tail(self, n=10):
"Table with the last n cases in the Dataset"
return self.as_table(xrange(-n, 0), '%.5g', midrule=True, lfmt=True)
def to_r(self, name=None):
"""Place the Dataset into R as dataframe using rpy2
Parameters
----------
name : str
Name for the R dataframe (default is self.name).
Examples
--------
>>> from rpy2.robjects import r
>>> ds = datasets.get_uv()
>>> print ds[:6]
A B rm intvar fltvar fltvar2 index
-----------------------------------------------------
a1 b1 s000 13 0.25614 0.7428 True
a1 b1 s001 8 -1.5174 -0.75498 True
a1 b1 s002 11 -0.5071 -0.13828 True
a1 b1 s003 11 2.1491 -2.1249 True
a1 b1 s004 15 -0.19358 -1.03 True
a1 b1 s005 17 2.141 -0.51745 True
>>> ds.to_r('df')
>>> print r("head(df)")
A B rm intvar fltvar fltvar2 index
1 a1 b1 s000 13 0.2561439 0.7427957 TRUE
2 a1 b1 s001 8 -1.5174371 -0.7549815 TRUE
3 a1 b1 s002 11 -0.5070960 -0.1382827 TRUE
4 a1 b1 s003 11 2.1490761 -2.1249203 TRUE
5 a1 b1 s004 15 -0.1935783 -1.0300188 TRUE
6 a1 b1 s005 17 2.1410424 -0.5174519 TRUE
"""
import rpy2.robjects as ro
if name is None:
name = self.name
if name is None:
raise TypeError('Need a valid name for the R data frame')
items = OrderedDict()
for k, v in self.iteritems():
if isvar(v):
if v.x.dtype.kind == 'b':
item = ro.BoolVector(v.x)
elif v.x.dtype.kind == 'i':
item = ro.IntVector(v.x)
else:
item = ro.FloatVector(v.x)
elif isfactor(v):
x = ro.IntVector(v.x)
codes = sorted(v._labels)
levels = ro.IntVector(codes)
labels = ro.StrVector(tuple(v._labels[c] for c in codes))
item = ro.FactorVector(x, levels, labels)
else:
continue
items[k] = item
df = ro.DataFrame(items)
ro.globalenv[name] = df
def update(self, ds, replace=False, info=True):
"""Update the Dataset with all variables in ``ds``.
Parameters
----------
ds : dict-like
A dictionary like object whose keys are strings and whose values
are data-objects.
replace : bool
If a variable in ds is already present, replace it. If False,
duplicates raise a ValueError (unless they are equivalent).
info : bool
Also update the info dictionary.
Notes
-----
By default, if a key is present in both Datasets, and the corresponding
variables are not equal on all cases, a ValueError is raised. If all
values are equal, the variable in ds is copied into the Dataset that is
being updated (the expected behavior of .update()).
"""
if not replace:
unequal = []
for key in set(self).intersection(ds):
if not np.all(self[key] == ds[key]):
unequal.append(key)
if unequal:
err = ("The following variables are present twice but are not "
"equal: %s" % unequal)
raise ValueError(err)
super(Dataset, self).update(ds)
if info:
self.info.update(ds.info)
class Interaction(_Effect):
"""Represents an Interaction effect.
Usually not initialized directly but through operations on Factors/Vars.
Parameters
----------
base : list
List of data-objects that form the basis of the interaction.
Attributes
----------
factors :
List of all factors (i.e. nonbasic effects are broken up into
factors).
base :
All effects.
"""
_stype = "interaction"
def __init__(self, base):
# FIXME: Interaction does not update when component factors update
self.base = EffectList()
self.is_categorial = True
self.nestedin = EffectList()
for b in base:
if isuv(b):
                self.base.append(b.copy())
if isvar(b):
if self.is_categorial:
self.is_categorial = False
else:
raise TypeError("No Interaction between two Var objects")
elif isinteraction(b):
if (not b.is_categorial) and (not self.is_categorial):
raise TypeError("No Interaction between two Var objects")
else:
self.base.extend(b.base)
self.is_categorial = (self.is_categorial and b.is_categorial)
elif b._stype == "nested": # TODO: nested effects
self.base.append(b)
if b.nestedin not in self.nestedin:
self.nestedin.append(b.nestedin)
else:
raise TypeError("Invalid type for Interaction: %r" % type(b))
if len(self.base) < 2:
raise ValueError("Interaction needs a base of at least two Factors "
"(got %s)" % repr(base))
self._n_cases = N = len(self.base[0])
if not all([len(f) == N for f in self.base[1:]]):
err = ("Interactions only between effects with the same number of "
"cases")
raise ValueError(err)
self.base_names = [str(f.name) for f in self.base]
self.name = ' x '.join(self.base_names)
self.random = False
self.df = reduce(operator.mul, [f.df for f in self.base])
# determine cells:
factors = EffectList(filter(isfactor, self.base))
self.cells = tuple(itertools.product(*(f.cells for f in factors)))
self.cell_header = tuple(f.name for f in factors)
self.beta_labels = ['?'] * self.df # TODO:
def __repr__(self):
names = [UNNAMED if f.name is None else f.name for f in self.base]
if preferences['short_repr']:
return ' % '.join(names)
else:
return "Interaction({n})".format(n=', '.join(names))
# container ---
def __len__(self):
return self._n_cases
def __getitem__(self, index):
if isvar(index):
index = index.x
out = tuple(f[index] for f in self.base)
if index_ndim(index) == 1:
return Interaction(out)
else:
return out
def __contains__(self, item):
if isinstance(item, tuple):
return item in self._value_set
return self.base.__contains__(item)
def __iter__(self):
for i in xrange(len(self)):
yield tuple(b[i] for b in self.base)
# numeric ---
def __eq__(self, other):
if isinteraction(other) and len(other.base) == len(self.base):
x = np.vstack((b == bo for b, bo in izip(self.base, other.base)))
return np.all(x, 0)
elif isinstance(other, tuple) and len(other) == len(self.base):
x = np.vstack(factor == level for factor, level in izip(self.base, other))
return np.all(x, 0)
else:
return np.zeros(len(self), bool)
def __ne__(self, other):
if isinteraction(other) and len(other.base) == len(self.base):
x = np.vstack((b != bo for b, bo in izip(self.base, other.base)))
return np.any(x, 0)
elif isinstance(other, tuple) and len(other) == len(self.base):
x = np.vstack(factor != level for factor, level in izip(self.base, other))
return np.any(x, 0)
return np.ones(len(self), bool)
def as_factor(self, delim=' ', name=None):
"""Convert the Interaction to a factor
Parameters
----------
delim : str
Delimiter to join factor cell values (default ``" "``).
name : str
Name for the Factor (default is None).
Examples
--------
>>> print ds[::20, 'A']
Factor(['a1', 'a1', 'a2', 'a2'], name='A')
>>> print ds[::20, 'B']
Factor(['b1', 'b2', 'b1', 'b2'], name='B')
>>> i = ds.eval("A % B")
>>> print i.as_factor()[::20]
Factor(['a1 b1', 'a1 b2', 'a2 b1', 'a2 b2'], name='AxB')
>>> print i.as_factor("_")[::20]
Factor(['a1_b1', 'a1_b2', 'a2_b1', 'a2_b2'], name='AxB')
"""
return Factor(self.as_labels(delim), name)
def as_cells(self):
"""All values as a list of tuples."""
return [case for case in self]
@LazyProperty
def as_dummy(self):
codelist = [f.as_dummy for f in self.base]
return reduce(_effect_interaction, codelist)
@LazyProperty
def as_effects(self):
"effect coding"
codelist = [f.as_effects for f in self.base]
return reduce(_effect_interaction, codelist)
def _coefficient_names(self, method):
return ["%s %i" % (self.name, i) for i in xrange(self.df)]
def as_labels(self, delim=' '):
"""All values as a list of strings.
Parameters
----------
delim : str
Delimiter with which to join the elements of cells.
"""
return [delim.join(filter(None, map(str, case))) for case in self]
def compress(self, X):
"Deprecated. Use .aggregate()."
warn("Interaction.compress s deprecated; use Interaction.aggregate "
"instead (with identical functionality).", DeprecationWarning)
self.aggregate(X)
def aggregate(self, X):
return Interaction(f.aggregate(X) for f in self.base)
def isin(self, cells):
"""An index that is true where the Interaction equals any of the cells.
Parameters
----------
cells : sequence of tuples
Cells for which the index will be true. Cells described as tuples
of strings.
"""
is_v = [self == cell for cell in cells]
return np.any(is_v, 0)
@LazyProperty
def _value_set(self):
return set(self)
class diff(object):
"""
helper to create difference values for correlation.
"""
def __init__(self, X, c1, c2, match, sub=None):
"""
X: Factor providing categories
c1: category 1
c2: category 2
match: Factor matching values between categories
"""
raise NotImplementedError
# FIXME: use celltable
sub = X.isany(c1, c2)
# ct = celltable
# ...
i1 = X.code_for_label(c1)
i2 = X.code_for_label(c2)
self.I1 = X == i1
self.I2 = X == i2
if sub is not None:
self.I1 = self.I1 * sub
self.I2 = self.I2 * sub
m1 = match.x[self.I1]
m2 = match.x[self.I2]
self.s1 = np.argsort(m1)
self.s2 = np.argsort(m2)
assert np.all(np.unique(m1) == np.unique(m2))
self.name = "{n}({x1}-{x2})".format(n='{0}', x1=X.cells[i1], x2=X.cells[i2])
def subtract(self, Y):
""
assert type(Y) is Var
# if self.sub is not None:
# Y = Y[self.sub]
Y1 = Y[self.I1]
Y2 = Y[self.I2]
y = Y1[self.s1] - Y2[self.s2]
name = self.name.format(Y.name)
# name = Y.name + '_DIFF'
return Var(y, name)
def extract(self, Y):
""
y1 = Y[self.I1].x[self.s1]
y2 = Y[self.I2].x[self.s2]
assert np.all(y1 == y2), Y.name
if type(Y) is Factor:
return Factor(y1, Y.name, random=Y.random, labels=Y.cells)
else:
return Var(y1, Y.name)
@property
def N(self):
return np.sum(self.I1)
def box_cox_transform(X, p, name=None):
"""
:returns: a variable with the Box-Cox transform applied to X. With p==0,
this is the log of X; otherwise (X**p - 1) / p
:arg Var X: Source variable
:arg float p: Parameter for Box-Cox transform
"""
if isvar(X):
X = X.x
if p == 0:
y = np.log(X)
else:
y = (X ** p - 1) / p
return Var(y, name)
class NestedEffect(object):
_stype = "nested"
def __init__(self, effect, nestedin):
if not iscategorial(nestedin):
raise TypeError("Effects can only be nested in categorial base")
self.effect = effect
self.nestedin = nestedin
self.random = effect.random
self.cells = effect.cells
self._n_cases = len(effect)
if isfactor(self.effect):
e_name = self.effect.name
else:
e_name = '(%s)' % self.effect
self.name = "%s(%s)" % (e_name, nestedin.name)
if len(nestedin) != self._n_cases:
err = ("Unequal lengths: effect %r len=%i, nestedin %r len=%i" %
(e_name, len(effect), nestedin.name, len(nestedin)))
raise ValueError(err)
def __repr__(self):
return self.name
def __iter__(self):
return self.effect.__iter__()
def __len__(self):
return self._n_cases
@property
def df(self):
return len(self.effect.cells) - len(self.nestedin.cells)
@property
def as_effects(self):
"create effect codes"
codes = np.zeros((self._n_cases, self.df))
ix = 0
for outer_cell in self.nestedin.cells:
outer_idx = (self.nestedin == outer_cell)
inner_model = self.effect[outer_idx]
n = len(inner_model.cells)
inner_codes = _effect_eye(n)
for i, cell in enumerate(inner_model.cells):
codes[self.effect == cell, ix:ix + n - 1] = inner_codes[i]
ix += n - 1
return codes
def _coefficient_names(self, method):
return ["%s %i" % (self.name, i) for i in xrange(self.df)]
class NonbasicEffect(object):
_stype = "nonbasic"
def __init__(self, effect_codes, factors, name, nestedin=[],
beta_labels=None):
if beta_labels is not None and len(beta_labels) != effect_codes.shape[1]:
raise ValueError("beta_labels need one entry per model column "
"(%s); got %s"
% (effect_codes.shape[1], repr(beta_labels)))
self.nestedin = nestedin
self.name = name
self.random = False
self.as_effects = effect_codes
self._n_cases, self.df = effect_codes.shape
self.factors = factors
self.beta_labels = beta_labels
def __repr__(self):
txt = "<NonbasicEffect: {n}>"
return txt.format(n=self.name)
# container ---
def __len__(self):
return self._n_cases
def _coefficient_names(self, method):
if self.beta_labels is None:
return ["%s %i" % (self.name, i) for i in xrange(self.df)]
else:
return self.beta_labels
class Model(object):
"""A list of effects.
Parameters
----------
x : effect | iterator of effects
Effects to be included in the model (Var, Factor, Interaction ,
...). Can also contain models, in which case all the model's
effects will be added.
Notes
-----
    A Model's data is fully described by its :attr:`.effects` list; everything
    else is computed from it through properties.
Accessing effects:
- as list in Model.effects
- with name as Model[name]
"""
_stype = "model"
def __init__(self, x):
effects = EffectList()
# find effects in input
if iseffect(x):
effects.append(x)
n_cases = len(x)
elif ismodel(x):
effects += x.effects
n_cases = len(x)
else:
n_cases = None
for e in x:
# check n_cases
if n_cases is None:
n_cases = len(e)
elif len(e) != n_cases:
e0 = effects[0]
err = ("All effects contained in a Model need to describe"
" the same number of cases. %r has %i cases, %r has"
" %i cases." % (e0.name, len(e0), e.name, len(e)))
raise ValueError(err)
# find effects
if iseffect(e):
effects.append(e)
elif ismodel(e):
effects += e.effects
else:
err = ("Model needs to be initialized with effect (Var, "
"Factor, Interaction, ...) and/or Model objects "
"(got %s)" % type(e))
raise TypeError(err)
self.effects = effects
self._n_cases = n_cases
# beta indices
self.beta_index = beta_index = {}
i = 1
for e in effects:
if isfactor(e) and len(e.cells) == 1:
raise ValueError("The Factor %s has only one level (%s). The "
"intercept is implicit in each model and "
"should not be specified explicitly."
% (e.name, e.cells[0]))
k = i + e.df
beta_index[e] = slice(i, k)
i = k
# dfs
self.df_total = df_total = n_cases
self.df = df = sum(e.df for e in effects) + 1 # intercept
self.df_error = df_error = df_total - df
if df_error < 0:
raise ValueError("Model overspecified")
# names
self.name = ' + '.join([str(e.name) for e in self.effects])
def __repr__(self):
names = self.effects.names()
if preferences['short_repr']:
return ' + '.join(names)
else:
x = ', '.join(names)
return "Model((%s))" % x
def __str__(self):
return str(self.get_table(cases=50))
# container ---
def __len__(self):
return self._n_cases
def __getitem__(self, sub):
if isinstance(sub, str):
for e in self.effects:
if e.name == sub:
return e
raise ValueError("No effect named %r" % sub)
else:
return Model((x[sub] for x in self.effects))
def __contains__(self, effect):
return id(effect) in map(id, self.effects)
def sorted(self):
"""
returns sorted Model, interactions last
"""
out = []
i = 1
while len(out) < len(self.effects):
for e in self.effects:
if len(e.factors) == i:
out.append(e)
i += 1
return Model(out)
# numeric ---
def __add__(self, other):
return Model((self, other))
def __mul__(self, other):
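        # full factorial expansion: ``A * B`` corresponds to ``A + B + A % B``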
return Model((self, other, self % other))
def __mod__(self, other):
out = []
for e_self in self.effects:
for e_other in Model(other).effects:
out.append(e_self % e_other)
return Model(out)
def __eq__(self, other):
if not isinstance(other, Model):
return False
elif not len(self) == len(other):
return False
elif not len(self.effects) == len(other.effects):
return False
for e, eo in izip(self.effects, other.effects):
if not np.all(e == eo):
return False
return True
def __ne__(self, other):
return not self.__eq__(other)
# repr ---
@property
def model_eq(self):
return self.name
def get_table(self, cases=None):
"""Return a table with the model codes
Parameters
----------
cases : int
Number of cases (lines) after which to truncate the table (default
is all cases).
Returns
        -------
table : FMText Table
The full model as a table.
"""
full_model = self.full
if cases is None:
cases = len(full_model)
else:
cases = min(cases, len(full_model))
n_cols = full_model.shape[1]
table = fmtxt.Table('l' * n_cols)
table.cell("Intercept")
for e in self.effects:
table.cell(e.name, width=e.df)
# rules
i = 2
for e in self.effects:
j = i + e.df - 1
if e.df > 1:
table.midrule((i, j))
i = j + 1
# data
for line in full_model[:cases]:
for i in line:
table.cell(i)
if cases < len(full_model):
table.cell('...')
return table
# coding ---
@LazyProperty
def _effect_to_beta(self):
"""An array indicating for each effect which beta weights it occupies
Returns
-------
effects_to_beta : np.ndarray (n_effects, 2)
For each effect, indicating the first index in betas and df
"""
out = np.empty((len(self.effects), 2), np.int16)
beta_start = 1
for i, e in enumerate(self.effects):
out[i, 0] = beta_start
out[i, 1] = e.df
beta_start += e.df
return out
@LazyProperty
def as_effects(self):
return np.hstack((e.as_effects for e in self.effects))
def fit(self, Y):
"""
Find the beta weights by fitting the model to data
Parameters
----------
Y : Var | array, shape = (n_cases,)
Data to fit the model to.
Returns
-------
beta : array, shape = (n_regressors, )
The beta weights.
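        Examples
        --------
        A minimal sketch with hypothetical data-objects ``A`` (a Factor) and
        ``y`` (a Var of matching length)::
            >>> model = Model(A)
            >>> betas = model.fit(y)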
"""
Y = asvar(Y)
beta = dot(self.xsinv, Y.x)
return beta
@LazyProperty
def full(self):
"returns the full model including an intercept"
out = np.empty((self._n_cases, self.df))
# intercept
out[:, 0] = 1
self.full_index = {'I': slice(0, 1)}
# effects
i = 1
for e in self.effects:
j = i + e.df
out[:, i:j] = e.as_effects
self.full_index[e] = slice(i, j)
i = j
return out
# checking model properties
def check(self, v=True):
"shortcut to check linear independence and orthogonality"
return self.lin_indep(v) + self.orthogonal(v)
def lin_indep(self, v=True):
"Checks the Model for linear independence of its factors"
msg = []
ne = len(self.effects)
codes = [e.as_effects for e in self.effects]
for i in range(ne):
for j in range(i + 1, ne):
e1 = self.effects[i]
e2 = self.effects[j]
X = np.hstack((codes[i], codes[j]))
if rank(X) < X.shape[1]:
if v:
errtxt = "Linear Dependence Warning: {0} and {1}"
msg.append(errtxt.format(e1.name, e2.name))
return msg
def orthogonal(self, v=True):
"Checks the Model for orthogonality of its factors"
msg = []
ne = len(self.effects)
codes = [e.as_effects for e in self.effects]
# allok = True
for i in range(ne):
for j in range(i + 1, ne):
ok = True
e1 = self.effects[i]
e2 = self.effects[j]
e1e = codes[i]
e2e = codes[j]
for i1 in range(e1.df):
for i2 in range(e2.df):
dotp = np.dot(e1e[:, i1], e2e[:, i2])
if dotp != 0:
ok = False
# allok = False
if v and (not ok):
errtxt = "Not orthogonal: {0} and {1}"
msg.append(errtxt.format(e1.name, e2.name))
return msg
def _parametrize(self, method='effect'):
"Create a design matrix"
return Parametrization(self, method)
def repeat(self, n):
"Analogous to numpy repeat method"
effects = [e.repeat(n) for e in self.effects]
return Model(effects)
@LazyProperty
def xsinv(self):
x = self.full
x_t = x.T
return dot(inv(dot(x_t, x)), x_t)
class Parametrization(object):
"""Parametrization of a statistical model
Parameters
----------
model : Model
Model to be parametrized.
Attributes
----------
x : array (n_cases, n_coeffs)
Design matrix.
Notes
-----
A :class:`Model` is a list of effects. A :class:`Parametrization` contains
a realization of those effects in a model matrix with named columns.
"""
def __init__(self, model, method):
model = asmodel(model)
x = np.empty((model._n_cases, model.df))
x[:, 0] = 1
column_names = ['intercept']
higher_level_effects = {}
terms = {'intercept': slice(0, 1)}
i = 1
for e in model.effects:
j = i + e.df
if method == 'effect':
x[:, i:j] = e.as_effects
elif method == 'dummy':
x[:, i:j] = e.as_dummy
else:
raise ValueError("method=%s" % repr(method))
name = longname(e)
if name in terms:
raise KeyError("Duplicate term name: %s" % repr(name))
terms[name] = slice(i, j)
col_names = e._coefficient_names(method)
column_names.extend(col_names)
for col, col_name in enumerate(col_names, i):
terms[col_name] = slice(col, col + 1)
i = j
# find comparison models
higher_level_effects[name] = [e_ for e_ in model.effects
if e_ is not e
and is_higher_order_effect(e_, e)]
# model basics
self.model = model
self.x = x
self.terms = terms
self.column_names = column_names
self._higher_level_effects = higher_level_effects
# projector
x_t = x.T
self.projector = inv(x_t.dot(x)).dot(x_t)
def reduced_model_index(self, term):
"Boolean index into model columns for model comparison"
out = np.ones(self.x.shape[1], bool)
out[self.terms[term]] = False
for e in self._higher_level_effects[term]:
out[self.terms[e.name]] = False
return out
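# Editor's note (hedged): a Parametrization is normally obtained through
# Model._parametrize(). ``p.x`` holds the design matrix including the intercept
# column, ``p.column_names`` and ``p.terms`` name the columns, and
# ``p.reduced_model_index(term)`` selects the columns of the comparison model,
# i.e. the full model without ``term`` and without any higher-order effect that
# contains it.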
# ---NDVar dimensions---
DIMINDEX_RAW_TYPES = (int, slice, list)
def dimindex_case(arg):
if isinstance(arg, DIMINDEX_RAW_TYPES):
return arg
elif isvar(arg):
return arg.x
elif isinstance(arg, np.ndarray) and arg.dtype.kind in 'bi':
return arg
else:
raise TypeError("Unknown index type for case dimension: %s"
% repr(arg))
def find_time_point(times, time, rnd='closest'):
"""
Returns (index, time) for the closest point to ``time`` in ``times``
Parameters
----------
times : array, 1d
Monotonically increasing time values.
time : scalar
Time point for which to find a match.
rnd : 'down' | 'closest' | 'up'
Rounding: how to handle time values that do not have an exact match in
times. Round 'up', 'down', or to the 'closest' value.
"""
if time in times:
i = np.where(times == time)[0][0]
else:
gr = (times > time)
if np.all(gr):
if times[1] - times[0] > times[0] - time:
return 0, times[0]
else:
name = repr(times.name) if hasattr(times, 'name') else ''
raise ValueError("time=%s lies outside array %r" % (time, name))
elif np.any(gr):
pass
elif times[-1] - times[-2] >= time - times[-1]:
return len(times) - 1, times[-1]
else:
name = repr(times.name) if hasattr(times, 'name') else ''
raise ValueError("time=%s lies outside array %r" % (time, name))
i_next = np.where(gr)[0][0]
t_next = times[i_next]
if rnd == 'up':
return i_next, t_next
sm = times < time
i_prev = np.where(sm)[0][-1]
t_prev = times[i_prev]
if rnd == 'down':
return i_prev, t_prev
elif rnd != 'closest':
raise ValueError("Invalid argument rnd=%r" % rnd)
if (t_next - time) < (time - t_prev):
i = i_next
time = t_next
else:
i = i_prev
time = t_prev
return i, time
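# Hedged usage sketch (editor addition): illustrates the rounding behavior of
# find_time_point on a hypothetical, exactly representable time grid.
def _example_find_time_point():
    times = np.arange(64) / 8.                     # 0, 0.125, ... 7.875 (exact binary fractions)
    exact = find_time_point(times, 2.0)            # exact match -> (16, 2.0)
    down = find_time_point(times, 2.3, 'down')     # -> (18, 2.25)
    up = find_time_point(times, 2.3, 'up')         # -> (19, 2.375)
    closest = find_time_point(times, 2.3)          # 2.25 is closer than 2.375 -> (18, 2.25)
    return exact, down, up, closest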
def _subgraph_edges(connectivity, int_index):
"Extract connectivity for a subset of a graph"
if connectivity is None:
return None
idx = np.logical_and(np.in1d(connectivity[:, 0], int_index),
np.in1d(connectivity[:, 1], int_index))
if np.any(idx):
new_c = connectivity[idx]
# remap to new vertex indices
if np.any(np.diff(int_index) < 1): # non-monotonic index
argsort = np.argsort(int_index)
flat_conn_ = np.digitize(new_c.ravel(), int_index[argsort], True)
flat_conn = argsort[flat_conn_]
else:
flat_conn = np.digitize(new_c.ravel(), int_index, True)
return flat_conn.reshape(new_c.shape).astype(np.uint32)
else:
return np.empty((0, 2), dtype=np.uint32)
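# Hedged usage sketch (editor addition): _subgraph_edges on a small hypothetical
# graph; vertices are renumbered relative to the subset that is kept.
def _example_subgraph_edges():
    connectivity = np.array([[0, 1], [1, 2], [2, 3]], np.uint32)  # chain 0-1-2-3
    keep = np.array([1, 2, 3])                     # drop vertex 0
    return _subgraph_edges(connectivity, keep)     # -> [[0, 1], [1, 2]] in the new numbering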
class Dimension(object):
"""Base class for dimensions.
Attributes
----------
x : array_like
Numerical values (e.g. for locating categories on an axis).
values : sequence
Meaningful point descriptions (e.g. time points, sensor names, ...).
"""
name = 'Dimension'
adjacent = True
def __getstate__(self):
raise NotImplementedError
def __setstate__(self, state):
raise NotImplementedError
def __len__(self):
raise NotImplementedError
def __eq__(self, other):
if isinstance(other, basestring):
return False
return self.name == other.name
def __ne__(self, other):
return not self == other
def __getitem__(self, index):
"""
- int -> label or value for that location
- [int] -> Dimension object with 1 location
- [int, ...] -> Dimension object
"""
raise NotImplementedError
def _diminfo(self):
"Return a str describing the dimension in on line (79 chars)"
return str(self.name)
def dimindex(self, arg):
"""Process index parameter
Notes
-----
Boolean and int arrays are always considered indexing self.values.
"""
if isndvar(arg):
return self._dimindex_for_ndvar(arg)
elif isvar(arg):
return arg.x
elif isinstance(arg, np.ndarray) and arg.dtype.kind in 'bi':
return arg
elif isinstance(arg, (slice, int)):
return arg
elif isinstance(arg, SEQUENCE_TYPES):
if len(arg) == 0:
return np.empty(0, np.int8)
return np.array([self.dimindex(a) for a in arg])
else:
raise TypeError("Unknown index type for %s dimension: %s"
% (self.name, repr(arg)))
def _dimindex_for_ndvar(self, arg):
"Dimindex for NDVar index"
if arg.x.dtype.kind != 'b':
raise IndexError("Only NDVars with boolean data can serve "
"as indexes. Got %s." % repr(arg))
elif arg.ndim != 1:
raise IndexError("Only NDVars with ndim 1 can serve as "
"indexes. Got %s." % repr(arg))
elif arg.dims[0] != self:
raise IndexError("Index dimension %s is different from data "
"dimension" % arg.dims[0].name)
else:
return arg.x
def intersect(self, dim, check_dims=True):
"""Create a Dimension that is the intersection with dim
Parameters
----------
dim : Dimension
Dimension to intersect with.
check_dims : bool
            Check dimensions for consistency (not applicable).
Returns
-------
intersection : Dimension
The intersection with dim (returns itself if dim and self are
equal)
"""
raise NotImplementedError
def _cluster_properties(self, x):
"""Find cluster properties for this dimension
Parameters
----------
x : array of bool, (n_clusters, len(self))
The cluster extents, with different clusters stacked along the
first axis.
Returns
-------
cluster_properties : None | Dataset
A dataset with variables describing cluster properties.
"""
return None
class Categorial(Dimension):
"""Simple categorial dimension
Parameters
----------
name : str
Dimension name.
values : list of str
Names of the entries.
"""
def __init__(self, name, values):
if len(set(values)) < len(values):
raise ValueError("Dimension can not have duplicate values")
values = np.asarray(values)
if values.dtype.kind not in 'SU':
raise ValueError("All Categorial values must be strings")
self.name = name
self.values = values
def __getstate__(self):
state = {'name': self.name,
'values': self.values}
return state
def __setstate__(self, state):
name = state['name']
values = state['values']
self.__init__(name, values)
def __repr__(self):
args = (repr(self.name), str(self.values))
return "%s(%s)" % (self.__class__.__name__, ', '.join(args))
def __len__(self):
return len(self.values)
def __eq__(self, other):
is_equal = (Dimension.__eq__(self, other)
and np.all(self.values == other.values))
return is_equal
def __getitem__(self, index):
if isinstance(index, int):
return self.values[index]
values = self.values[index]
return Categorial(self.name, values)
def dimindex(self, arg):
if isinstance(arg, self.__class__):
s_idx, a_idx = np.nonzero(self.values[:, None] == arg.values)
return s_idx[np.argsort(a_idx)]
elif isinstance(arg, basestring):
return np.flatnonzero(self.values == arg)[0]
else:
return super(Categorial, self).dimindex(arg)
def _diminfo(self):
return "%s" % self.name.capitalize()
def intersect(self, dim, check_dims=False):
"""Create a dimension object that is the intersection with dim
Parameters
----------
dim : type(self)
Dimension to intersect with.
check_dims : bool
            Check dimensions for consistency (not applicable to this subclass).
Returns
-------
intersection : type(self)
The intersection with dim (returns itself if dim and self are
equal)
"""
if self.name != dim.name:
raise DimensionMismatchError("Dimensions don't match")
if np.array_equal(self.values, dim.values):
return self
values = np.intersect1d(self.values, dim.values)
if np.array_equal(self.values, values):
return self
elif np.array_equal(dim.values, values):
return dim
return self.__class__(self.name, values)
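# Hedged usage sketch (editor addition): indexing a Categorial dimension by
# value, by another Categorial, and by a list of positions.
def _example_categorial_dimindex():
    cat = Categorial('cat', ['a', 'b', 'c'])
    i_b = cat.dimindex('b')                 # -> 1
    sub = Categorial('cat', ['c', 'a'])
    order = cat.dimindex(sub)               # -> array([2, 0]), following sub's order
    return i_b, order, cat[[0, 2]]          # cat[[0, 2]] is a new Categorial with 'a' and 'c'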
class Scalar(Dimension):
"Simple scalar dimension"
def __init__(self, name, values, unit=None):
self.x = self.values = values = np.asarray(values)
if len(np.unique(values)) < len(values):
raise ValueError("Dimension can not have duplicate values")
self.name = name
self.unit = unit
def __getstate__(self):
state = {'name': self.name,
'values': self.values,
'unit': self.unit}
return state
def __setstate__(self, state):
name = state['name']
values = state['values']
unit = state.get('unit', None)
self.__init__(name, values, unit)
def __repr__(self):
args = [repr(self.name), str(self.values)]
if self.unit is not None:
args.append(repr(self.unit))
return "%s(%s)" % (self.__class__.__name__, ', '.join(args))
def __len__(self):
return len(self.values)
def __eq__(self, other):
is_equal = (Dimension.__eq__(self, other)
and np.array_equal(self.values, other.values))
return is_equal
def __getitem__(self, index):
if isinstance(index, int):
return self.values[index]
values = self.values[index]
return Scalar(self.name, values, self.unit)
def dimindex(self, arg):
if isinstance(arg, self.__class__):
s_idx, a_idx = np.nonzero(self.values[:, None] == arg.values)
return s_idx[np.argsort(a_idx)]
elif np.isscalar(arg):
return np.argmin(np.abs(self.values - arg))
else:
return super(Scalar, self).dimindex(arg)
def _diminfo(self):
return "%s" % self.name.capitalize()
def intersect(self, dim, check_dims=False):
"""Create a dimension object that is the intersection with dim
Parameters
----------
dim : type(self)
Dimension to intersect with.
check_dims : bool
            Check dimensions for consistency (not applicable to this subclass).
Returns
-------
intersection : type(self)
The intersection with dim (returns itself if dim and self are
equal)
"""
if self.name != dim.name:
raise DimensionMismatchError("Dimensions don't match")
if np.all(self.values == dim.values):
return self
values = np.intersect1d(self.values, dim.values)
if np.all(self.values == values):
return self
elif np.all(dim.values == values):
return dim
return self.__class__(self.name, values)
class Ordered(Scalar):
"""Scalar with guarantee that values are ordered"""
def __init__(self, name, values, unit=None):
values = np.sort(values)
Scalar.__init__(self, name, values, unit=unit)
def dimindex(self, arg):
if isinstance(arg, tuple):
if len(arg) != 2:
raise ValueError("Tuple indexes for the %s dimension signify "
"intervals and need to be exactly of length "
"2 (got %s)" % (self.name, repr(arg)))
start, stop = arg
return np.logical_and(self.values >= start, self.values < stop)
else:
return super(Ordered, self).dimindex(arg)
def _diminfo(self):
        name = self.name.capitalize()
vmin = self.x.min()
vmax = self.x.max()
return "%s [%s, %s]" % (name, vmin, vmax)
def _cluster_properties(self, x):
"""Find cluster properties for this dimension
Parameters
----------
x : array of bool, (n_clusters, len(self))
The cluster extents, with different clusters stacked along the
first axis.
Returns
-------
cluster_properties : None | Dataset
A dataset with variables describing cluster properties.
"""
ds = Dataset()
where = [np.flatnonzero(cluster) for cluster in x]
ds['%s_min' % self.name] = Var([self.values[w[0]] for w in where])
ds['%s_max' % self.name] = Var([self.values[w[-1]] for w in where])
return ds
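# Hedged usage sketch (editor addition): Ordered stores its values sorted and
# supports interval indexing with a (start, stop) tuple.
def _example_ordered_dimindex():
    freq = Ordered('frequency', [10, 2, 5], 'Hz')   # stored as [2, 5, 10]
    band = freq.dimindex((3, 11))                   # interval -> [False, True, True]
    nearest = freq.dimindex(4.9)                    # scalar -> index of the closest value (1)
    return band, nearest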
class Sensor(Dimension):
"""Dimension class for representing sensor information
Parameters
----------
locs : array-like
list of (x, y, z) coordinates;
``x``: anterior - posterior,
``y``: left - right,
``z``: top - bottom
names : list of str | None
sensor names, same order as locs (optional)
groups : None | dict
Named sensor groups.
sysname : None | str
Name of the sensor system (only used for information purposes).
    proj2d : str
        Default 2d projection. For options, see the class documentation.
connectivity : array (n_edges, 2)
Sensor connectivity (optional).
Attributes
----------
channel_idx : dict
Dictionary mapping channel names to indexes.
locs : array, shape = (n_sensors, 3)
Spatial position of all sensors.
names : list of str
Ordered list of sensor names.
x, y, z : array, len = n_sensors
X, y and z positions of the sensors.
Notes
-----
The following are possible 2d-projections:
``None``:
Just use horizontal coordinates
``'z root'``:
the radius of each sensor is set to equal the root of the vertical
distance from the top of the net.
``'cone'``:
derive x/y coordinate from height based on a cone transformation
``'lower cone'``:
only use cone for sensors with z < 0
Examples
--------
>>> sensors = [(0, 0, 0),
    ...            (0, -.25, -.45)]
>>> sensor_dim = Sensor(sensors, names=["Cz", "Pz"])
"""
name = 'sensor'
adjacent = False
_proj_aliases = {'left': 'x-', 'right': 'x+', 'back': 'y-', 'front': 'y+',
'top': 'z+', 'bottom': 'z-'}
def __init__(self, locs, names=None, groups=None, sysname=None,
proj2d='z root', connectivity=None):
self.sysname = sysname
self.default_proj2d = self._interpret_proj(proj2d)
self._connectivity = connectivity
# 'z root' transformation fails with 32-bit floats
self.locs = locs = np.asarray(locs, dtype=np.float64)
self.x = locs[:, 0]
self.y = locs[:, 1]
self.z = locs[:, 2]
self.n = len(locs)
if names is None:
self.names_dist = names = [str(i) for i in xrange(self.n)]
self.names = Datalist(names)
self.channel_idx = {name: i for i, name in enumerate(self.names)}
pf = os.path.commonprefix(self.names)
if pf:
n_pf = len(pf)
short_names = {name[n_pf:]: i for i, name in enumerate(self.names)}
self.channel_idx.update(short_names)
# cache for transformed locations
self._transformed = {}
# groups
self.groups = groups
def __getstate__(self):
state = {'proj2d': self.default_proj2d,
'groups': self.groups,
'locs': self.locs,
'names': self.names,
'sysname': self.sysname,
'connectivity': self._connectivity}
return state
def __setstate__(self, state):
locs = state['locs']
names = state['names']
groups = state['groups']
sysname = state['sysname']
proj2d = state['proj2d']
connectivity = state.get('connectivity', None)
self.__init__(locs, names, groups, sysname, proj2d, connectivity)
def __repr__(self):
return "<Sensor n=%i, name=%r>" % (self.n, self.sysname)
def __len__(self):
return self.n
def __eq__(self, other):
"Based on having same sensor names"
is_equal = (Dimension.__eq__(self, other)
and len(self) == len(other)
and all(n == no for n, no in zip(self.names, other.names)))
return is_equal
def __getitem__(self, index):
index = self.dimindex(index)
if np.isscalar(index):
return self.names[index]
else:
int_index = np.arange(len(self))[index]
if len(int_index) == 0:
return None
locs = self.locs[index]
names = self.names[index]
# TODO: groups
return Sensor(locs, names, None, self.sysname, self.default_proj2d,
_subgraph_edges(self._connectivity, int_index))
def _cluster_properties(self, x):
"""Find cluster properties for this dimension
Parameters
----------
x : array of bool, (n_clusters, len(self))
The cluster extents, with different clusters stacked along the
first axis.
Returns
-------
cluster_properties : None | Dataset
A dataset with variables describing cluster properties.
"""
return Dataset(('n_sensors', Var(x.sum(1))))
def dimindex(self, arg):
"Convert dimension indexes into numpy indexes"
if isinstance(arg, basestring):
return self.channel_idx[arg]
elif isinstance(arg, Sensor):
return np.array([self.names.index(name) for name in arg.names])
else:
return super(Sensor, self).dimindex(arg)
def connectivity(self):
"""Retrieve the sensor connectivity
Returns
-------
        connectivity : array of int, (n_pairs, 2)
            array of sorted [src, dst] pairs, with all src < dst.
See Also
--------
.set_connectivity() : define the connectivity
.neighbors() : Neighboring sensors for each sensor in a dictionary.
"""
if self._connectivity is None:
raise RuntimeError("Sensor connectivity is not defined. Use "
"Sensor.set_connectivity().")
else:
return self._connectivity
@classmethod
def from_xyz(cls, path=None, **kwargs):
"""Create a Sensor instance from a text file with xyz coordinates
"""
locs = []
names = []
with open(path) as f:
l1 = f.readline()
n = int(l1.split()[0])
for line in f:
elements = line.split()
if len(elements) == 4:
x, y, z, name = elements
x = float(x)
y = float(y)
z = float(z)
locs.append((x, y, z))
names.append(name)
assert len(names) == n
return cls(locs, names, **kwargs)
@classmethod
def from_sfp(cls, path=None, **kwargs):
"""Create a Sensor instance from an sfp file
"""
locs = []
names = []
for line in open(path):
elements = line.split()
if len(elements) == 4:
name, x, y, z = elements
x = float(x)
y = float(y)
z = float(z)
locs.append((x, y, z))
names.append(name)
return cls(locs, names, **kwargs)
@classmethod
def from_lout(cls, path=None, transform_2d=None, **kwargs):
"""Create a Sensor instance from a *.lout file
"""
        if transform_2d is not None:
            kwargs['proj2d'] = transform_2d  # Sensor.__init__ takes proj2d, not transform_2d
locs = []
names = []
with open(path) as fileobj:
fileobj.readline()
for line in fileobj:
w, x, y, t, f, name = line.split('\t')
x = float(x)
y = float(y)
locs.append((x, y, 0))
                names.append(name.strip())  # drop the trailing newline from the last field
return cls(locs, names, **kwargs)
def _interpret_proj(self, proj):
if proj == 'default':
return self.default_proj2d
elif proj in self._proj_aliases:
return self._proj_aliases[proj]
elif proj is None:
return 'z+'
else:
return proj
def get_locs_2d(self, proj='default', extent=1, frame=0, invisible=True):
"""
        Returns an array of 2d sensor locations, with the first column
        containing the x and the second column the y coordinate of each sensor.
Parameters
----------
proj : str
How to transform 3d coordinates into a 2d map; see class
documentation for options.
extent : int
coordinates will be scaled with minimum value 0 and maximum value
defined by the value of ``extent``.
frame : scalar
Distance of the outermost points from 0 and ``extent`` (default 0).
invisible : bool
Return invisible sensors (sensors that would be hidden behind the
head; default True).
"""
proj = self._interpret_proj(proj)
index = (proj, extent, frame)
if index in self._transformed:
locs2d = self._transformed[index]
else:
locs2d = self._make_locs_2d(proj, extent, frame)
self._transformed[index] = locs2d
if not invisible:
visible = self._visible_sensors(proj)
if visible is not None:
return locs2d[visible]
return locs2d
@LazyProperty
def _sphere_fit(self):
"""Fit the 3d sensor locations to a sphere
Returns
-------
params : tuple
Radius and center (r, cx, cy, cz).
"""
locs = self.locs
# error function
def err(params):
# params: [r, cx, cy, cz]
out = np.sum((locs - params[1:]) ** 2, 1)
out -= params[0] ** 2
return out
# initial guess of sphere parameters (radius and center)
center_0 = np.mean(locs, 0)
r_0 = np.mean(np.sqrt(np.sum((locs - center_0) ** 2, axis=1)))
start_params = np.hstack((r_0, center_0))
# do fit
estimate, _ = leastsq(err, start_params)
return tuple(estimate)
def _make_locs_2d(self, proj, extent, frame):
if proj in ('cone', 'lower cone', 'z root'):
r, cx, cy, cz = self._sphere_fit
# center the sensor locations based on the sphere and scale to
# radius 1
sphere_center = np.array((cx, cy, cz))
locs3d = self.locs - sphere_center
locs3d /= r
# implement projection
locs2d = np.copy(locs3d[:, :2])
if proj == 'cone':
locs2d[:, [0, 1]] *= (1 - locs3d[:, [2]])
elif proj == 'lower cone':
lower_half = locs3d[:, 2] < 0
if any(lower_half):
locs2d[lower_half] *= (1 - locs3d[lower_half][:, [2]])
elif proj == 'z root':
z = locs3d[:, 2]
                z_dist = (z.max() + 0.01) - z  # distance from top, buffer so that top points don't stick together
r = np.sqrt(z_dist) # desired 2d radius
r_xy = np.sqrt(np.sum(locs3d[:, :2] ** 2, 1)) # current radius in xy
idx = (r_xy != 0) # avoid zero division
F = r[idx] / r_xy[idx] # stretching Factor accounting for current r
locs2d[idx, :] *= F[:, None]
else:
match = re.match('([xyz])([+-])', proj)
if match:
ax, sign = match.groups()
if ax == 'x':
locs2d = np.copy(self.locs[:, 1:])
if sign == '-':
locs2d[:, 0] *= -1
elif ax == 'y':
locs2d = np.copy(self.locs[:, [0, 2]])
if sign == '+':
locs2d[:, 0] *= -1
elif ax == 'z':
locs2d = np.copy(self.locs[:, :2])
if sign == '-':
locs2d[:, 1] *= -1
else:
raise ValueError("invalid proj kwarg: %r" % proj)
# correct extent
if extent:
locs2d -= np.min(locs2d, axis=0) # move to bottom left
locs2d /= (np.max(locs2d) / extent) # scale to extent
locs2d += (extent - np.max(locs2d, axis=0)) / 2 # center
if frame:
locs2d *= (1 - 2 * frame)
locs2d += frame
return locs2d
def _topomap_outlines(self, proj):
"outline argument for mne-python topomaps"
proj = self._interpret_proj(proj)
if proj in ('cone', 'lower cone', 'z root', 'z+'):
return 'top'
else:
return None
def _visible_sensors(self, proj):
"Create an index for sensors that are visible under a given proj"
proj = self._interpret_proj(proj)
match = re.match('([xyz])([+-])', proj)
if match:
# logger.debug("Computing sensors visibility for %s" % proj)
ax, sign = match.groups()
# depth: + = closer
depth = self.locs[:, 'xyz'.index(ax)]
if sign == '-':
depth = -depth
locs2d = self.get_locs_2d(proj)
n_vertices = len(locs2d)
all_vertices = np.arange(n_vertices)
out = np.ones(n_vertices, bool)
# find duplicate points
# TODO OPT: use pairwise distance
x, y = np.where(cdist(locs2d, locs2d) == 0)
duplicate_vertices = ((v1, v2) for v1, v2 in izip(x, y) if v1 < v2)
for v1, v2 in duplicate_vertices:
if depth[v1] > depth[v2]:
out[v2] = False
# logger.debug("%s is hidden behind %s" % (self.names[v2], self.names[v1]))
else:
out[v1] = False
# logger.debug("%s is hidden behind %s" % (self.names[v1], self.names[v2]))
use_vertices = all_vertices[out] # use for hull check
hull = ConvexHull(locs2d[use_vertices])
hull_vertices = use_vertices[hull.vertices]
# for each point:
# find the closest point on the hull
# determine whether it's in front or behind
non_hull_vertices = np.setdiff1d(use_vertices, hull_vertices, True)
hull_locs = locs2d[hull_vertices]
non_hull_locs = locs2d[non_hull_vertices]
dists = cdist(non_hull_locs, hull_locs)
closest = np.argmin(dists, 1)
hide_non_hull_vertices = depth[non_hull_vertices] < depth[hull_vertices][closest]
hide_vertices = non_hull_vertices[hide_non_hull_vertices]
# logger.debug("%s are hidden behind convex hull" % ' '.join(self.names[hide_vertices]))
out[hide_vertices] = False
return out
else:
return None
def get_ROIs(self, base):
"""
        Returns a list of lists of sensor ids, grouping each sensor with the
        closest member of ``base`` (a list of sensor ids).
"""
locs3d = self.locs
# print loc3d
base_locs = locs3d[base]
ROI_dic = dict((i, [Id]) for i, Id in enumerate(base))
for i, loc in enumerate(locs3d):
if i not in base:
dist = np.sqrt(np.sum((base_locs - loc) ** 2, 1))
min_i = np.argmin(dist)
ROI_dic[min_i].append(i)
out = ROI_dic.values()
return out
def get_subnet_ROIs(self, ROIs, loc='first'):
"""
returns new Sensor instance, combining groups of sensors in the old
instance into single sensors in the new instance. All sensors for
each element in ROIs are the basis for one new sensor.
! Only implemented for numeric indexes, not for boolean indexes !
        Parameters
        ----------
ROIs : list of lists of sensor ids
each ROI defines one sensor in the new net
loc : str
'first': use the location of the first sensor of each ROI (default);
'mean': use the mean location
"""
names = []
        locs = np.empty((len(ROIs), 3))
        for i, ROI in enumerate(ROIs):
            i0 = ROI[0]
            names.append(self.names[i0])
            if loc == 'first':
                ROI_loc = self.locs[i0]
            elif loc == 'mean':
                ROI_loc = self.locs[ROI].mean(0)
            else:
                raise ValueError("invalid value for loc (%s)" % loc)
            locs[i] = ROI_loc
return Sensor(locs, names, sysname=self.sysname)
def index(self, exclude=None, names=False):
"""Construct an index for specified sensors
Parameters
----------
        exclude : None | list of str, int
            Sensors to exclude (by name or index).
        names : bool
            Return the names of the retained sensors instead of a boolean index
            (default False).
Returns
-------
index : numpy index
Numpy index indexing good channels.
"""
if exclude is None:
return full_slice
index = np.ones(len(self), dtype=bool)
for idx in exclude:
if isinstance(idx, str):
idx = self.channel_idx[idx]
else:
idx = int(idx)
index[idx] = False
if names:
index = self.names[index]
return index
def _normalize_sensor_names(self, names):
"Process a user-input list of sensor names"
valid_chs = set()
missing_chs = set()
for name in names:
if isinstance(name, int):
name = '%03i' % name
if name.isdigit():
if name in self.names:
valid_chs.add(name)
continue
else:
name = 'MEG %s' % name
if name in self.names:
valid_chs.add(name)
else:
missing_chs.add(name)
if missing_chs:
msg = ("The following channels are not in the raw data: "
"%s" % ', '.join(sorted(missing_chs)))
raise ValueError(msg)
return sorted(valid_chs)
def intersect(self, dim, check_dims=True):
"""Create a Sensor dimension that is the intersection with dim
Parameters
----------
dim : Sensor
Sensor dimension to intersect with.
check_dims : bool
Check dimensions for consistency (e.g., channel locations). Default
is ``True``. Set to ``False`` to intersect channels based on names
only and ignore mismatch between locations for channels with the
same name.
Returns
-------
sensor : Sensor
The intersection with dim (returns itself if dim and self are
equal)
"""
if self.name != dim.name:
raise DimensionMismatchError("Dimensions don't match")
n_self = len(self)
names = set(self.names)
names.intersection_update(dim.names)
n_intersection = len(names)
if n_intersection == n_self:
return self
elif n_intersection == len(dim.names):
return dim
names = sorted(names)
idx = map(self.names.index, names)
locs = self.locs[idx]
if check_dims:
idxd = map(dim.names.index, names)
if not np.all(locs == dim.locs[idxd]):
err = "Sensor locations don't match between dimension objects"
raise ValueError(err)
new = Sensor(locs, names, sysname=self.sysname,
proj2d=self.default_proj2d)
return new
def neighbors(self, connect_dist):
"""Find neighboring sensors.
Parameters
----------
connect_dist : scalar
For each sensor, neighbors are defined as those sensors within
``connect_dist`` times the distance of the closest neighbor.
Returns
-------
neighbors : dict
Dictionaries whose keys are sensor indices, and whose values are
lists of neighbors represented as sensor indices.
"""
nb = {}
pd = pdist(self.locs)
pd = squareform(pd)
n = len(self)
for i in xrange(n):
d = pd[i, np.arange(n)]
d[i] = d.max()
idx = np.nonzero(d < d.min() * connect_dist)[0]
nb[i] = idx
return nb
def set_connectivity(self, neighbors=None, connect_dist=None):
"""Define the sensor connectivity through neighbors or distance
Parameters
----------
neighbors : sequence of (str, str)
A list of connections, all assumed to be bidirectional.
connect_dist : None | scalar
For each sensor, neighbors are defined as those sensors within
``connect_dist`` times the distance of the closest neighbor.
e.g., 1.75 or 1.6
"""
pairs = set()
if neighbors is not None and connect_dist is not None:
raise TypeError("Can only specify either neighbors or connect_dist")
elif connect_dist is None:
for src, dst in neighbors:
a = self.names.index(src)
b = self.names.index(dst)
if a < b:
pairs.add((a, b))
else:
pairs.add((b, a))
else:
nb = self.neighbors(connect_dist)
for k, vals in nb.iteritems():
for v in vals:
if k < v:
pairs.add((k, v))
else:
pairs.add((v, k))
self._connectivity = np.array(sorted(pairs), np.uint32)
def set_sensor_positions(self, pos, names=None):
"""Set the sensor positions
Parameters
----------
pos : array (n_locations, 3) | MNE Montage
Array with 3 columns describing sensor locations (x, y, and z), or
an MNE Montage object describing the sensor layout.
names : None | list of str
If locations is an array, names should specify a name
corresponding to each entry.
"""
# MNE Montage
if hasattr(pos, 'pos') and hasattr(pos, 'ch_names'):
if names is not None:
raise TypeError("Can't specify names parameter with Montage")
names = pos.ch_names
pos = pos.pos
elif names is not None and len(names) != len(pos):
raise ValueError("Mismatch between number of locations (%i) and "
"number of names (%i)" % (len(pos), len(names)))
if names is not None:
missing = [name for name in self.names if name not in names]
if missing:
raise ValueError("The following sensors are missing: %r" % missing)
index = np.array([names.index(name) for name in self.names])
pos = pos[index]
elif len(pos) != len(self.locs):
raise ValueError("If names are not specified pos must specify "
"exactly one position per channel")
self.locs[:] = pos
@property
def values(self):
return self.names
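# Hedged usage sketch (editor addition), mirroring the class docstring example.
# The sensor locations below are hypothetical; Datalist and the other helpers
# used by Sensor.__init__ are assumed to be defined earlier in this module.
def _example_sensor_indexing():
    sensor_dim = Sensor([(0, 0, 0), (0, -.25, -.45)], names=["Cz", "Pz"])
    i_cz = sensor_dim.dimindex("Cz")            # index by channel name -> 0
    good = sensor_dim.index(exclude=["Pz"])     # boolean index [True, False]
    return i_cz, good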
def as_sensor(obj):
"Coerce to Sensor instance"
if isinstance(obj, Sensor):
return obj
elif isinstance(obj, NDVar) and obj.has_dim('sensor'):
return obj.sensor
elif hasattr(obj, 'pos') and hasattr(obj, 'ch_names') and hasattr(obj, 'kind'):
return Sensor(obj.pos, obj.ch_names, sysname=obj.kind)
else:
raise TypeError("Can't get sensors from %r" % (obj,))
def _point_graph(coords, dist_threshold):
"Connectivity graph for points based on distance"
n = len(coords)
dist = pdist(coords)
# construct vertex pairs corresponding to dist
graph = np.empty((len(dist), 2), np.uint32)
i0 = 0
for vert, di in enumerate(xrange(n - 1, 0, -1)):
i1 = i0 + di
graph[i0:i1, 0] = vert
graph[i0:i1, 1] = np.arange(vert + 1, n)
i0 = i1
return graph[dist < dist_threshold]
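# Hedged usage sketch (editor addition): _point_graph connects all point pairs
# closer than the threshold; the coordinates below are hypothetical.
def _example_point_graph():
    coords = np.array([[0., 0., 0.],
                       [0., 0., 1.],
                       [0., 0., 3.]])
    return _point_graph(coords, 1.5)    # only points 0 and 1 are closer than 1.5 -> [[0, 1]]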
def _matrix_graph(matrix):
"Create connectivity from matrix"
coo = matrix.tocoo()
assert np.all(coo.data)
edges = {(min(a, b), max(a, b)) for a, b in izip(coo.col, coo.row) if a != b}
return np.array(sorted(edges), np.uint32)
def _tri_graph(tris):
"""Create connectivity graph from triangles
Parameters
----------
tris : array_like, (n_tris, 3)
Triangles.
Returns
-------
edges : array (n_edges, 2)
All edges between vertices of tris.
"""
pairs = set()
for tri in tris:
a, b, c = sorted(tri)
pairs.add((a, b))
pairs.add((a, c))
pairs.add((b, c))
return np.array(sorted(pairs), np.uint32)
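# Hedged usage sketch (editor addition): edges of two triangles sharing the
# edge 1-2.
def _example_tri_graph():
    tris = [(0, 1, 2), (1, 2, 3)]
    return _tri_graph(tris)    # -> [[0, 1], [0, 2], [1, 2], [1, 3], [2, 3]]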
def _mne_tri_soure_space_graph(source_space, vertices_list):
"Connectivity graph for a triangulated mne source space"
i = 0
graphs = []
for ss, verts in izip(source_space, vertices_list):
if len(verts) == 0:
continue
# graph for the whole source space
src_vertices = ss['vertno']
tris = ss['use_tris']
graph = _tri_graph(tris)
# select relevant edges
if not np.array_equal(verts, src_vertices):
if not np.all(np.in1d(verts, src_vertices)):
raise RuntimeError("Not all vertices are in the source space")
edge_in_use = np.logical_and(np.in1d(graph[:, 0], verts),
np.in1d(graph[:, 1], verts))
graph = graph[edge_in_use]
# reassign vertex ids based on present vertices
if len(verts) != verts.max() + 1:
graph = (np.digitize(graph.ravel(), verts, True)
.reshape(graph.shape).astype(np.uint32))
# account for index of previous source spaces
if i > 0:
graph += i
i += len(verts)
graphs.append(graph)
return np.vstack(graphs)
class SourceSpace(Dimension):
"""MNE source space dimension.
Parameters
----------
vertno : list of array
The vertex identities of the dipoles in the source space (left and
right hemisphere separately).
subject : str
The mri-subject name.
src : str
The kind of source space used (e.g., 'ico-4').
subjects_dir : str
The path to the subjects_dir (needed to locate the source space
file).
parc : None | str
Add a parcellation to the source space to identify vertex location.
Only applies to ico source spaces, default is 'aparc'.
connectivity : None | sparse matrix
Cached source space connectivity.
Notes
-----
besides numpy indexing, the following indexes are possible:
- mne Label objects
- 'lh' or 'rh' to select an entire hemisphere
"""
name = 'source'
adjacent = False
_src_pattern = os.path.join('{subjects_dir}', '{subject}', 'bem',
'{subject}-{src}-src.fif')
def __init__(self, vertno, subject=None, src=None, subjects_dir=None,
parc='aparc', connectivity=None):
match = re.match("(ico|vol)-(\d)", src)
if match:
kind, grade = match.groups()
grade = int(grade)
else:
raise ValueError("Unrecognized src value %r" % src)
self.vertno = vertno
self.subject = subject
self.src = src
self.kind = kind
self.grade = grade
self.subjects_dir = subjects_dir
self._connectivity = connectivity
self._n_vert = sum(len(v) for v in vertno)
if kind == 'ico':
self.lh_vertno = vertno[0]
self.rh_vertno = vertno[1]
self.lh_n = len(self.lh_vertno)
self.rh_n = len(self.rh_vertno)
self.set_parc(parc)
def __getstate__(self):
state = {'vertno': self.vertno, 'subject': self.subject,
'src': self.src, 'subjects_dir': self.subjects_dir,
'parc': self.parc}
return state
def __setstate__(self, state):
vertno = state['vertno']
subject = state['subject']
src = state.get('src', None)
parc = state.get('parc', None)
subjects_dir = state.get('subjects_dir', None)
self.__init__(vertno, subject, src, subjects_dir, parc)
def __repr__(self):
ns = ', '.join(str(len(v)) for v in self.vertno)
return "<SourceSpace [%s], %r, %r>" % (ns, self.subject, self.src)
def __len__(self):
return self._n_vert
def __eq__(self, other):
is_equal = (Dimension.__eq__(self, other)
and self.subject == other.subject
and len(self) == len(other)
and all(np.array_equal(s, o) for s, o in
izip(self.vertno, other.vertno)))
return is_equal
def __getitem__(self, index):
arange = np.arange(len(self))
int_index = arange[index]
bool_index = np.in1d(arange, int_index, True)
# vertno
boundaries = np.cumsum(tuple(chain((0,), (len(v) for v in self.vertno))))
vertno = [v[bool_index[boundaries[i]:boundaries[i + 1]]]
for i, v in enumerate(self.vertno)]
# parc
if self.parc is None:
parc = None
else:
parc = self.parc[index]
dim = SourceSpace(vertno, self.subject, self.src, self.subjects_dir,
parc, _subgraph_edges(self._connectivity, int_index))
return dim
def _cluster_properties(self, x):
"""Find cluster properties for this dimension
Parameters
----------
x : array of bool, (n_clusters, len(self))
The cluster extents, with different clusters stacked along the
first axis.
Returns
-------
cluster_properties : Dataset
A dataset with variables describing cluster properties along this
dimension: "n_sources".
"""
if np.any(np.sum(x, 1) == 0):
raise ValueError("Empty cluster")
ds = Dataset()
# no clusters
if len(x) == 0:
ds['n_sources'] = Var([])
ds['hemi'] = Factor([])
if self.parc is not None:
ds['location'] = Factor([])
return ds
# n sources
ds['n_sources'] = Var(x.sum(1))
if self.kind == 'vol':
return ds
# hemi
hemis = []
for x_ in x:
where = np.nonzero(x_)[0]
src_in_lh = (where < self.lh_n)
if np.all(src_in_lh):
hemis.append('lh')
elif np.any(src_in_lh):
hemis.append('bh')
else:
hemis.append('rh')
ds['hemi'] = Factor(hemis)
# location
if self.parc is not None:
locations = []
for x_ in x:
parc_entries = self.parc[x_]
argmax = np.argmax(np.bincount(parc_entries.x))
location = parc_entries[argmax]
locations.append(location)
ds['location'] = Factor(locations)
return ds
def _diminfo(self):
ns = ', '.join(str(len(v)) for v in self.vertno)
return "SourceSpace (MNE) [%s], %r, %r>" % (ns, self.subject, self.src)
def connectivity(self, disconnect_parc=False):
"""Create source space connectivity
Parameters
----------
disconnect_parc : bool
Reduce connectivity to label-internal connections.
Returns
-------
        connectivity : array of int, (n_pairs, 2)
            array of sorted [src, dst] pairs, with all src < dst.
"""
if self._connectivity is None:
if self.src is None or self.subject is None or self.subjects_dir is None:
err = ("In order for a SourceSpace dimension to provide "
"connectivity information it needs to be initialized with "
"src, subject and subjects_dir parameters")
raise ValueError(err)
src = self.get_source_space()
if self.kind == 'vol':
coords = src[0]['rr'][self.vertno[0]]
dist_threshold = self.grade * 0.0011
connectivity = _point_graph(coords, dist_threshold)
elif self.kind == 'ico':
connectivity = _mne_tri_soure_space_graph(src, self.vertno)
else:
msg = "Connectivity for %r source space" % self.kind
raise NotImplementedError(msg)
if connectivity.max() >= len(self):
raise RuntimeError("SourceSpace connectivity failed")
self._connectivity = connectivity
else:
connectivity = self._connectivity
if disconnect_parc:
parc = self.parc
if parc is None:
raise RuntimeError("SourceSpace has no parcellation (use "
".set_parc())")
idx = np.array([parc[s] == parc[d] for s, d in connectivity])
connectivity = connectivity[idx]
return connectivity
def circular_index(self, seeds, extent=0.05, name="globe"):
"""Returns an index into all vertices within extent of seed
Parameters
----------
seeds : array_like, (3,) | (n, 3)
Seed location(s) around which to build index.
        extent : scalar
            Maximum distance from a seed for a source to be included (in the
            units of the source-space coordinates; default 0.05).
Returns
-------
roi : NDVar, ('source',)
Index into the spherical area around seeds.
"""
seeds = np.atleast_2d(seeds)
dist = cdist(self.coordinates, seeds)
mindist = np.min(dist, 1)
x = mindist < extent
dims = (self,)
info = {'seeds': seeds, 'extent': extent}
return NDVar(x, dims, info, name)
@LazyProperty
def coordinates(self):
sss = self.get_source_space()
coords = (ss['rr'][v] for ss, v in izip(sss, self.vertno))
coords = np.vstack(coords)
return coords
def dimindex(self, arg):
if isinstance(arg, (mne.Label, mne.label.BiHemiLabel)):
return self._dimindex_label(arg)
elif isinstance(arg, basestring):
if arg == 'lh':
if self.lh_n:
return slice(None, self.lh_n)
else:
raise IndexError("lh is empty")
elif arg == 'rh':
if self.rh_n:
return slice(self.lh_n, None)
else:
raise IndexError("rh is empty")
else:
return self._dimindex_label(arg)
elif isinstance(arg, SourceSpace):
sv = self.vertno
ov = arg.vertno
if all(np.array_equal(s, o) for s, o in izip(sv, ov)):
return full_slice
else:
idxs = tuple(np.in1d(s, o, True) for s, o in izip(sv, ov))
index = np.hstack(idxs)
return index
elif isinstance(arg, SEQUENCE_TYPES):
return self.parc.isin(arg)
else:
return super(SourceSpace, self).dimindex(arg)
def _dimindex_label(self, label):
if isinstance(label, basestring):
if self.parc is None:
raise RuntimeError("SourceSpace has no parcellation")
elif label not in self.parc:
err = ("SourceSpace parcellation has no label called %r"
% label)
raise KeyError(err)
idx = self.parc == label
elif label.hemi == 'both':
lh_idx = self._dimindex_hemilabel(label.lh)
rh_idx = self._dimindex_hemilabel(label.rh)
idx = np.hstack((lh_idx, rh_idx))
else:
idx = np.zeros(len(self), dtype=np.bool8)
idx_part = self._dimindex_hemilabel(label)
if label.hemi == 'lh':
idx[:self.lh_n] = idx_part
elif label.hemi == 'rh':
idx[self.lh_n:] = idx_part
else:
err = "Unknown value for label.hemi: %s" % repr(label.hemi)
raise ValueError(err)
return idx
def _dimindex_hemilabel(self, label):
if label.hemi == 'lh':
stc_vertices = self.vertno[0]
else:
stc_vertices = self.vertno[1]
idx = np.in1d(stc_vertices, label.vertices, True)
return idx
def get_source_space(self):
"Read the corresponding MNE source space"
path = self._src_pattern.format(subjects_dir=self.subjects_dir,
subject=self.subject, src=self.src)
src = mne.read_source_spaces(path)
return src
def index_for_label(self, label):
"""Returns the index for a label
Parameters
----------
label : str | Label | BiHemiLabel
The name of a region in the current parcellation, or a Label object
(as created for example by mne.read_label). If the label does not
match any sources in the SourceEstimate, a ValueError is raised.
Returns
-------
index : NDVar of bool
Index into the source space dim that corresponds to the label.
"""
idx = self._dimindex_label(label)
if isinstance(label, basestring):
name = label
else:
name = label.name
return NDVar(idx, (self,), {}, name)
def intersect(self, other, check_dims=True):
"""Create a Source dimension that is the intersection with dim
Parameters
----------
        other : SourceSpace
            Dimension to intersect with.
        check_dims : bool
            Check dimensions for consistency (not applicable to this subclass).
        Returns
        -------
        intersection : SourceSpace
            The intersection with other (returns itself if other and self are
            equal)
"""
if self.subject != other.subject:
raise ValueError("Source spaces can not be compared because they "
"are defined on different MRI subjects (%s, %s). "
"Consider using eelbrain.morph_source_space()."
% (self.subject, other.subject))
elif self.src != other.src:
raise ValueError("Source spaces can not be compared because they "
"are defined with different spatial decimation "
"parameters (%s, %s)." % (self.src, other.src))
elif self.subjects_dir != other.subjects_dir:
raise ValueError("Source spaces can not be compared because they "
"have differing subjects_dir parameters:\n%s\n%s"
% (self.subjects_dir, other.subjects_dir))
        index = np.hstack([np.in1d(s, o) for s, o
                           in izip(self.vertno, other.vertno)])
return self[index]
def _mask_label(self):
"Create a Label that masks the areas not covered in this SourceSpace"
lh = rh = None
sss = self.get_source_space()
if self.lh_n:
lh_verts = np.setdiff1d(sss[0]['vertno'], self.lh_vertno)
if len(lh_verts):
lh = mne.Label(lh_verts, hemi='lh', color=(0, 0, 0)).fill(sss, 'unknown')
if self.rh_n:
rh_verts = np.setdiff1d(sss[1]['vertno'], self.rh_vertno)
if len(rh_verts):
rh = mne.Label(rh_verts, hemi='rh', color=(0, 0, 0)).fill(sss, 'unknown')
return lh, rh
def set_parc(self, parc):
"""Set the source space parcellation
Parameters
----------
parc : None | str | Factor
Add a parcellation to the source space to identify vertex location.
Can be specified as Factor assigning a label to each source, or a
string specifying a freesurfer parcellation (stored as *.annot
files with the MRI). Only applies to ico source spaces, default is
'aparc'.
"""
if parc is None:
parc_ = None
elif isfactor(parc):
if len(parc) != len(self):
raise ValueError("Wrong length (%i)" % len(parc))
parc_ = parc
elif isinstance(parc, basestring):
if self.kind == 'ico':
fname = os.path.join(self.subjects_dir, self.subject, 'label', '%%s.%s.annot' % parc)
vert_codes_lh, ctab_lh, names_lh = read_annot(fname % 'lh')
vert_codes_rh, ctab_rh, names_rh = read_annot(fname % 'rh')
x_lh = vert_codes_lh[self.lh_vertno]
x_rh = vert_codes_rh[self.rh_vertno]
x_rh += x_lh.max() + 1
names = chain(('%s-lh' % name for name in names_lh),
('%s-rh' % name for name in names_rh))
parc_ = Factor(np.hstack((x_lh, x_rh)), parc,
labels={i: name for i, name in enumerate(names)})
else:
raise NotImplementedError
else:
raise ValueError("Parc needs to be string, got %s" % repr(parc))
self.parc = parc_
@property
def values(self):
raise NotImplementedError
_uts_tol = 0.000001 # tolerance for deciding if time values are equal
class UTS(Dimension):
"""Dimension object for representing uniform time series
Parameters
----------
tmin : scalar
First time point (inclusive).
tstep : scalar
Time step between samples.
nsamples : int
Number of samples.
Notes
-----
Special indexing:
(tstart, tstop) : tuple
Restrict the time to the indicated window (either end-point can be
None).
"""
name = 'time'
unit = 's'
def __init__(self, tmin, tstep, nsamples):
self.tmin = tmin
self.tstep = tstep
self.nsamples = nsamples = int(nsamples)
self.x = self.times = tmin + np.arange(nsamples) * tstep
self.tmax = self.times[-1]
self.tstop = self.tmin + tstep * nsamples
@classmethod
def from_int(cls, first, last, sfreq):
"""Create a UTS dimension from sample index and sampling frequency
Parameters
----------
first : int
Index of the first sample, relative to 0.
last : int
Index of the last sample, relative to 0.
sfreq : scalar
Sampling frequency, in Hz.
"""
tmin = first / sfreq
nsamples = last - first + 1
tstep = 1. / sfreq
return cls(tmin, tstep, nsamples)
def __getstate__(self):
state = {'tmin': self.tmin,
'tstep': self.tstep,
'nsamples': self.nsamples}
return state
def __setstate__(self, state):
tmin = state['tmin']
tstep = state['tstep']
nsamples = state['nsamples']
self.__init__(tmin, tstep, nsamples)
def __repr__(self):
return "UTS(%s, %s, %s)" % (self.tmin, self.tstep, self.nsamples)
def _diminfo(self):
name = self.name.capitalize()
tmax = self.times[-1] + self.tstep
sfreq = 1. / self.tstep
info = '%s %.3f - %.3f s, %s Hz' % (name, self.tmin, tmax, sfreq)
return info
def __len__(self):
return len(self.times)
def __eq__(self, other):
is_equal = (Dimension.__eq__(self, other)
and self.tmin == other.tmin
and self.tstep == other.tstep
and self.nsamples == other.nsamples)
return is_equal
def __getitem__(self, index):
if isinstance(index, int):
return self.times[index]
elif not isinstance(index, slice):
# convert index to slice
index = np.arange(len(self))[index]
start = index[0]
steps = np.unique(np.diff(index))
if len(steps) > 1:
raise NotImplementedError("non-uniform time series")
step = steps[0]
stop = index[-1] + step
index = slice(start, stop, step)
if isinstance(index, slice):
if index.start is None:
start = 0
else:
start = index.start
if index.stop is None:
stop = len(self)
else:
stop = index.stop
tmin = self.times[start]
nsamples = stop - start
if index.step is None:
tstep = self.tstep
else:
tstep = self.tstep * index.step
else:
err = ("Unupported index: %r" % index)
raise TypeError(err)
return UTS(tmin, tstep, nsamples)
def _cluster_bounds(self, x):
"""Cluster start and stop in samples
Parameters
----------
x : array of bool, (n_clusters, len(self))
The cluster extents, with different clusters stacked along the
first axis.
"""
# find indices of cluster extent
row, col = np.nonzero(x)
try:
ts = [col[row == i][[0, -1]] for i in xrange(len(x))]
except IndexError:
raise ValueError("Empty cluster")
ts = np.array(ts)
return ts
def _cluster_properties(self, x):
"""Find cluster properties for this dimension
Parameters
----------
x : array of bool, (n_clusters, len(self))
The cluster extents, with different clusters stacked along the
first axis.
Returns
-------
cluster_properties : Dataset
A dataset with variables describing cluster properties along this
dimension: "tstart", "tstop", "duration".
"""
ds = Dataset()
# no clusters
if len(x) == 0:
ds['tstart'] = Var([])
ds['tstop'] = Var([])
ds['duration'] = Var([])
return ds
# create time values
bounds = self._cluster_bounds(x)
tmin = self.times[bounds[:, 0]]
tmax = self.times[bounds[:, 1]]
ds['tstart'] = Var(tmin)
ds['tstop'] = Var(tmax + self.tstep)
ds['duration'] = ds.eval("tstop - tstart")
return ds
def dimindex(self, arg):
if np.isscalar(arg):
i = int(round((arg - self.tmin) / self.tstep))
if i < 0 or i >= self.nsamples:
raise ValueError("Time index %s out of range (%s, %s)"
% (arg, self.tmin, self.tmax))
return i
elif isinstance(arg, UTS):
if self.tmin == arg.tmin:
start = None
stop = arg.nsamples
elif arg.tmin < self.tmin:
err = ("The index time dimension starts before the reference "
"time dimension")
raise DimensionMismatchError(err)
else:
start_float = (arg.tmin - self.tmin) / self.tstep
start = int(round(start_float))
if abs(start_float - start) > _uts_tol:
err = ("The index time dimension contains values not "
"contained in the reference time dimension")
raise DimensionMismatchError(err)
stop = start + arg.nsamples
if self.tstep == arg.tstep:
step = None
elif self.tstep > arg.tstep:
err = ("The index time dimension has a higher sampling rate "
"than the reference time dimension")
raise DimensionMismatchError(err)
else:
step_float = arg.tstep / self.tstep
step = int(round(step_float))
if abs(step_float - step) > _uts_tol:
err = ("The index time dimension contains values not "
"contained in the reference time dimension")
raise DimensionMismatchError(err)
if stop == self.nsamples:
stop = None
return slice(start, stop, step)
elif isinstance(arg, tuple):
if len(arg) != 2:
raise ValueError("Tuple indexes signify intervals for uniform "
"time-series (UTS) dimension and need to be "
"exactly of length 2 (got %s)" % repr(arg))
tstart, tstop = arg
return self._slice(tstart, tstop)
else:
return super(UTS, self).dimindex(arg)
def index(self, time, rnd='closest'):
"""Find the index for a time point
Parameters
----------
time : scalar
Time point for which to find an index.
rnd : 'down' | 'closest' | 'up'
Rounding: how to handle time values that do not have an exact
match. Round 'up', 'down', or to the 'closest' neighbor.
Returns
-------
i : int
Index of ``time``, rounded according to ``rnd``.
"""
if rnd == 'closest':
return int(round((time - self.tmin) / self.tstep))
else:
i, _ = find_time_point(self.times, time, rnd)
return i
def intersect(self, dim, check_dims=True):
"""Create a UTS dimension that is the intersection with dim
Parameters
----------
dim : UTS
Dimension to intersect with.
check_dims : bool
            Check dimensions for consistency (not applicable to this subclass).
Returns
-------
intersection : UTS
The intersection with dim (returns itself if dim and self are
equal)
"""
if self.tstep == dim.tstep:
tstep = self.tstep
else:
raise NotImplementedError("Intersection of UTS with unequal tstep :(")
tmin_diff = abs(self.tmin - dim.tmin) / tstep
if abs(tmin_diff - round(tmin_diff)) > _uts_tol:
raise DimensionMismatchError("UTS dimensions have different times")
tmin = max(self.tmin, dim.tmin)
tmax = min(self.tmax, dim.tmax)
nsamples = int(round((tmax - tmin) / tstep)) + 1
if nsamples <= 0:
raise DimensionMismatchError("UTS dimensions don't overlap")
return UTS(tmin, tstep, nsamples)
def _slice(self, tstart, tstop):
"Create a slice into the time axis"
if (tstart is not None) and (tstop is not None) and (tstart >= tstop):
raise ValueError("tstart must be smaller than tstop")
if tstart is None:
start = None
elif tstart <= self.tmin - self.tstep:
raise ValueError("Value out of range: tstart=%s" % tstart)
else:
start_float = (tstart - self.tmin) / self.tstep
start = int(start_float)
if start_float - start > 0.000001:
start += 1
if tstop is None:
stop = None
elif tstop > self.tstop:
raise ValueError("Value out of range: tstop=%s" % tstop)
else:
stop_float = (tstop - self.tmin) / self.tstep
stop = int(stop_float)
if stop_float - stop > 0.000001:
stop += 1
s = slice(start, stop)
return s
@property
def values(self):
return self.times
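# Hedged usage sketch (editor addition): building a UTS time axis and using the
# special (tstart, tstop) tuple index described in the class docstring.
def _example_uts_indexing():
    time = UTS.from_int(-10, 49, 100.)    # 100 Hz, samples -10..49 -> tmin = -0.1 s, 60 samples
    i = time.index(0.123)                 # closest sample -> 22
    window = time.dimindex((0, 0.2))      # half-open interval -> slice(10, 30)
    return i, window, time[window]        # time[window] is a new UTS of 20 samples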
def intersect_dims(dims1, dims2, check_dims=True):
"""Find the intersection between two multidimensional spaces
Parameters
----------
dims1, dims2 : tuple of dimension objects
Two spaces involving the same dimensions with overlapping values.
check_dims : bool
Check dimensions for consistency (e.g., channel locations in a Sensor
dimension). Default is ``True``. Set to ``False`` to ignore non-fatal
mismatches.
Returns
-------
dims : tuple of Dimension objects
Intersection of dims1 and dims2.
"""
return tuple(d1.intersect(d2, check_dims=check_dims) for d1, d2 in zip(dims1, dims2))
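# Hedged usage sketch (editor addition): intersecting two overlapping time axes.
def _example_intersect_dims():
    time1 = UTS(0.0, 0.01, 100)     # 0.00 .. 0.99 s
    time2 = UTS(0.05, 0.01, 100)    # 0.05 .. 1.04 s
    (common,) = intersect_dims((time1,), (time2,))
    return common                   # UTS(0.05, 0.01, 95), the overlapping window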
# ---NDVar functions---
def corr(x, dim='sensor', obs='time', name=None):
"""Calculate Neighbor correlation
    Parameters
    ----------
    x : NDVar
        The data.
    dim : str
        Dimension over which to correlate neighbors (requires connectivity).
    obs : str
        Dimension that provides the observations used to compute the
        correlation (default 'time').
    name : str | None
        Name for the returned NDVar.
"""
dim_obj = x.get_dim(dim)
# find neighbors
neighbors = defaultdict(list)
for a, b in dim_obj.connectivity():
neighbors[a].append(b)
neighbors[b].append(a)
# for each point, find the average correlation with its neighbors
data = x.get_data((dim, obs))
cc = np.corrcoef(data)
y = np.empty(len(dim_obj))
for i in xrange(len(dim_obj)):
y[i] = np.mean(cc[i, neighbors[i]])
info = cs.set_info_cs(x.info, cs.stat_info('r'))
return NDVar(y, (dim_obj,), info, name)
def cwt_morlet(Y, freqs, use_fft=True, n_cycles=3.0, zero_mean=False,
out='magnitude'):
"""Time frequency decomposition with Morlet wavelets (mne-python)
Parameters
----------
Y : NDVar with time dimension
Signal.
freqs : scalar | array
Frequency/ies of interest. For a scalar, the output will not contain a
frequency dimension.
use_fft : bool
Compute convolution with FFT or temporal convolution.
n_cycles: float | array of float
Number of cycles. Fixed number or one per frequency.
zero_mean : bool
Make sure the wavelets are zero mean.
out : 'complex' | 'magnitude' | 'phase'
Format of the data in the returned NDVar.
Returns
-------
tfr : NDVar
Time frequency decompositions.
"""
from mne.time_frequency.tfr import cwt_morlet
if not Y.get_axis('time') == Y.ndim - 1:
raise NotImplementedError
x = Y.x
x = x.reshape((np.prod(x.shape[:-1]), x.shape[-1]))
Fs = 1. / Y.time.tstep
if np.isscalar(freqs):
freqs = [freqs]
fdim = None
else:
fdim = Ordered("frequency", freqs, 'Hz')
freqs = fdim.values
x = cwt_morlet(x, Fs, freqs, use_fft, n_cycles, zero_mean)
    if out == 'magnitude':
        x = np.abs(x)
    elif out == 'phase':
        x = np.angle(x)
    elif out == 'complex':
        pass
    else:
        raise ValueError("out = %r" % out)
new_shape = Y.x.shape[:-1]
dims = Y.dims[:-1]
if fdim is not None:
new_shape += (len(freqs),)
dims += (fdim,)
new_shape += Y.x.shape[-1:]
dims += Y.dims[-1:]
x = x.reshape(new_shape)
info = cs.set_info_cs(Y.info, cs.default_info('A'))
out = NDVar(x, dims, info, Y.name)
return out
def resample(data, sfreq, npad=100, window='boxcar'):
"""Resample an NDVar with 'time' dimension after properly filtering it
Parameters
----------
data : NDVar
        NDVar to resample.
sfreq : scalar
New sampling frequency.
npad : int
Number of samples to use at the beginning and end for padding.
window : string | tuple
See scipy.signal.resample for description.
Notes
-----
requires mne-python
"""
axis = data.get_axis('time')
old_sfreq = 1.0 / data.time.tstep
x = mne.filter.resample(data.x, sfreq, old_sfreq, npad, axis, window)
tstep = 1. / sfreq
time = UTS(data.time.tmin, tstep, x.shape[axis])
dims = data.dims[:axis] + (time,) + data.dims[axis + 1:]
return NDVar(x, dims=dims, info=data.info, name=data.name)
|
import sys
import os
import numpy as np
path_dir = '../data/' # CHANGE HERE!
file_list = os.listdir(path_dir)
file_list.sort()
for fname in file_list:
    tmp = np.fromfile(os.path.join(path_dir, fname), dtype=np.float32)
    # keep the first 3 of every 4 values (see the vectorized sketch below)
    prev = tmp[0:3]
    for i in range(1, len(tmp) // 4):
        prev = np.vstack([prev, tmp[4*i:4*i + 3]])
'''
###############concatenate method######################
array_1 = np.array([1,2,3,4,5,6,7,8,9,10,11,12])
prev = array_1[0:3]
for i in range(1,int(len(array_1)/4)):
prev = np.vstack([prev,array_1[4*i:4*i+3]])
print(prev)
########################################################
'''
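# Editor's note (hedged sketch): the vstack loop above can be replaced by a
# single vectorized expression that keeps the first three of every complete
# group of four values:
#
#     records = tmp[:len(tmp) // 4 * 4].reshape(-1, 4)[:, :3]
#
# For the 12-element example in the comment block this yields
# [[1, 2, 3], [5, 6, 7], [9, 10, 11]], the same rows the loop builds.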
|
# -*- python -*-
import re
def get_matching_words( regex ):
words = [
"aimlessness",
"assassin",
"baby",
"beekeeper",
"belladonna",
"cannonball",
"crybaby",
"denver",
"embraceable",
"facetious",
"flashbulb",
"gaslight",
"hobgoblin",
"iconoclast",
"issue",
"kebab",
"kilo",
"laundered",
"mattress",
"millennia",
"natural",
"obsessive",
"paranoia",
"queen",
"rabble",
"reabsorb",
"sacrilegious",
"schoolroom",
"tabby",
"tabloid",
"unbearable",
"union",
"videotape",
]
matches = []
for word in words:
if re.search( regex, word ):
matches.append( word )
return matches
print 'Test: All words that contain a "v"'
print get_matching_words( r'v' )
#=> ['denver', 'obsessive', 'videotape']
print 'Test: All words that contain a double-"s"'
print get_matching_words( r's{2}' )
#=> ['aimlessness', 'assassin', 'issue', 'mattress', 'obsessive']
print 'Test: All words that end with an "e"'
print get_matching_words( r'e$' )
#=> ['embraceable', 'issue', 'obsessive', 'rabble', 'unbearable', 'videotape']
print 'Test: All words that contain an "b", any character, then another "b"'
print get_matching_words( r'b.b' )
#=> ['baby', 'crybaby', 'kebab']
print 'Test: All words that contain an "b", at least one character, then another "b"'
print get_matching_words( r'b.+b' )
#=> ['baby', 'crybaby', 'embraceable', 'flashbulb', 'hobgoblin', 'kebab', 'reabsorb', 'unbearable']
print 'Test: All words that contain an "b", any number of characters (including zero), then another "b"'
print get_matching_words( r'b.*b' )
#=> ['baby', 'crybaby', 'embraceable', 'flashbulb', 'hobgoblin', 'kebab', 'rabble', 'reabsorb', 'tabby', 'unbearable']
print 'Test: All words that include all five vowels in order'
print get_matching_words( r'a.*e.*i.*o.*u' )
#=> ['facetious', 'sacrilegious']
print 'Test: All words that only use the letters in "regular expression" (each letter can appear any number of times)'
print get_matching_words( r'^[regular expression]+$' )
#=> ['assassin', 'issue', 'paranoia', 'union']
print 'Test: All words that contain a double letter'
print get_matching_words( r'([a-z])\1' )
#=> ['aimlessness', 'assassin', 'beekeeper', 'belladonna', 'cannonball', 'issue', 'mattress', 'millennia', 'obsessive', 'queen', 'rabble', 'schoolroom', 'tabby']
|
def debug(s):
return s.replace('bugs','=').replace('bug','').replace('=','bugs')
'''
Take debugging to a whole new level:
Given a string, remove every single bug.
This means you must remove all instances of the word 'bug' from within a
given string, unless the word is plural ('bugs').
For example, given 'obugobugobuoobugsoo', you should return 'ooobuoobugsoo'.
Another example: given 'obbugugo', you should return 'obugo'.
Note that all characters will be lowercase and letters.
Happy squishing!
'''
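# Quick sanity checks using the two examples quoted above:
assert debug('obugobugobuoobugsoo') == 'ooobuoobugsoo'
assert debug('obbugugo') == 'obugo'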
|
# -*- coding: utf-8 -*-
"""
This is an implementation of Amazon Product Advertising API in Python.
Thanks to following.
- PyAWS
http://pyaws.sourceforge.net/
- python-amazon-product-api
http://pypi.python.org/pypi/python-amazon-product-api
- ryo_abe
http://d.hatena.ne.jp/ryo_abe/20100416/1271384372
"""
from base64 import b64encode
import hmac
from time import strftime, gmtime
from urllib2 import quote, urlopen
from xml2obj import Xml2Obj
try:
from hashlib import sha256
except ImportError:
from Crypto.Hash import SHA256 as sha256
__author__ = "Yuya Takeyama <sign.of.the.wolf.pentagram@gmail.com>"
__version__ = "0.0.1"
DEFAULT_API_VERSION = '2009-11-01'
LOCALE_DOMAINS = {
None : "ecs.amazonaws.com",
"ca" : "ecs.amazonaws.ca",
"de" : "ecs.amazonaws.de",
"fr" : "ecs.amazonaws.fr",
"jp" : "ecs.amazonaws.jp",
"uk" : "ecs.amazonaws.co.uk",
"us" : "ecs.amazonaws.us"
}
class ProductAdvertising(object):
def __init__(self, licenseKey, secretLicenseKey, locale=None):
self.setLicenseKey(licenseKey)
self.setSecretLicenseKey(secretLicenseKey)
self.setLocale(locale)
self.parser = Xml2Obj()
def call(self, operation, **kwds):
kwds['Operation'] = operation
url = self.makeUrl(**kwds)
return self.parser.parse(urlopen(url).read())
def makeUrl(self, **kwds):
param = self.makeParam(kwds)
signature = self.makeSignature(kwds)
return "http://" + LOCALE_DOMAINS[self.getLocale()] + "/onca/xml?" + param + "&Signature=" + signature
def setLicenseKey(self, licenseKey=None):
self.__licenseKey = licenseKey
return self
def getLicenseKey(self):
return self.__licenseKey
def setSecretLicenseKey(self, secretLicenseKey=None):
self.__secretLicenseKey = secretLicenseKey
return self
def getSecretLicenseKey(self):
return self.__secretLicenseKey
def setLocale(self, locale=None):
self.__locale = locale
return self
def getLocale(self):
return self.__locale
def getTimestamp(self):
return strftime("%Y-%m-%dT%H:%M:%SZ", gmtime())
def makeParam(self, args):
for key, val in args.items():
if val is None:
del args[key]
if 'Version' not in args:
args['Version'] = DEFAULT_API_VERSION
if 'Service' not in args:
args['Service'] = 'AWSECommerceService'
if 'Timestamp' not in args:
args['Timestamp'] = self.getTimestamp()
args['AWSAccessKeyId'] = self.getLicenseKey()
keys = sorted(args.keys())
return '&'.join('%s=%s' % (key, quote(str(args[key]))) for key in keys)
def makeSignature(self, args):
param = self.makeParam(args)
msg = 'GET'
msg += '\n' + LOCALE_DOMAINS[self.getLocale()]
msg += '\n' + "/onca/xml"
msg += '\n' + param.encode('utf-8')
return quote(b64encode(hmac.new(self.getSecretLicenseKey(), msg, sha256).digest()))
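# Minimal usage sketch (the keys, locale and ASIN below are placeholders, not
# real credentials; ItemLookup is a standard Product Advertising API operation):
#
# api = ProductAdvertising('YOUR-ACCESS-KEY', 'YOUR-SECRET-KEY', locale='jp')
# result = api.call('ItemLookup', ItemId='B000000000', ResponseGroup='Small')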
|
from distutils.core import setup
from Cython.Build import cythonize
setup(
name='fast_bcf_parser',
ext_modules=cythonize("lib/parsers/unbcf_fast.pyx",
#gdb_debug=True
),
)
|
import numpy as np
from scipy.integrate import odeint
from matplotlib import pyplot as plt
import matplotlib as mpl
class ModeloAsintomaticos():
def __init__(self, N_0, S_0, I_0, A_0, R_0, lambd, mu, mu_star, gamma,
gamma_star, beta_1, beta_2, beta_3, beta_4):
assert(N_0 == S_0 + I_0 + A_0 + R_0)
linspace = range(61)
y_0 = (N_0, S_0, I_0, A_0, R_0)
sol = odeint(self.f, y_0, linspace, args=(lambd, mu, mu_star, gamma,
gamma_star, beta_1, beta_2, beta_3, beta_4))
N, S, I, A, R = np.split(sol, range(1, 5), axis=1)
N = N[:, 0]
S = S[:, 0]
I = I[:, 0]
A = A[:, 0]
R = R[:, 0]
plt.figure(1)
plt.title('Predicciones evolución epidemia')
plt.subplot(2, 2, 1, title='Totales')
plt.plot(linspace, sol[:, 0], color='k')
ax = plt.gca()
ax.ticklabel_format(useOffset=False)
plt.subplot(2, 2, 2, title='Susceptibles')
plt.plot(linspace, sol[:, 1], color='b')
ax = plt.gca()
ax.ticklabel_format(useOffset=False)
mpl.rcParams['axes.prop_cycle'] = mpl.cycler(color=['r', 'y'])
plt.subplot(2, 2, 3, title='Infectados y asintomáticos')
plt.plot(linspace, sol[:, 2:4])
plt.legend(['Infectados', 'Asintomáticos'], loc='upper right')
plt.subplot(2, 2, 4, title='Recuperados')
plt.plot(linspace, sol[:, 4], color='g')
fallecidos = mu_star*I
contagiados_por_infectado = beta_1*np.multiply(S, I)
contagiados_por_asintomatico = beta_3*np.multiply(S, A)
plt.figure(2)
plt.title('Curvas diarias')
plt.subplot(1, 2, 1, title='Fallecimientos diarios')
plt.plot(fallecidos, color='k')
plt.subplot(1, 2, 2, title='Contagios diarios')
plt.plot(contagiados_por_infectado, color='r')
plt.plot(contagiados_por_asintomatico, color='y')
plt.plot(contagiados_por_infectado + contagiados_por_asintomatico, color='b')
plt.legend(['Contagios por infectado', 'Contagios por asintomático', 'Contagios totales'])
fallecidos_totales = np.sum(fallecidos)
contagios_totales = np.sum(contagiados_por_infectado + contagiados_por_asintomatico)
ratio_contagios_asintomaticos = np.sum(contagiados_por_asintomatico)/contagios_totales
print('Fallecidos en los siguiente 60 días:', fallecidos_totales)
print('Contagios en los siguiente 60 días:', contagios_totales)
print('Porcentaje de contagios por asintomático:', ratio_contagios_asintomaticos*100)
print('Fallecidos por asintomático:', ratio_contagios_asintomaticos*fallecidos_totales)
def f(self, y, t, lambd, mu, mu_star, gamma,
gamma_star, beta_1, beta_2, beta_3, beta_4):
N, S, I, A, R = y
assert (abs(N - (S + I + A + R)) < 1e-3)
dN = N*lambd - mu_star*I - N*mu
dS = - (beta_1 + beta_2)*S*I - (beta_3 + beta_4)*S*A - S*mu + N*lambd
dI = beta_1*S*I + beta_3*S*A - I*mu_star - gamma*I - mu*I
dA = beta_2*S*I + beta_4*S*A - gamma_star*A - mu*A
dR = I*gamma + A*gamma_star - R*mu
assert (abs(dN - (dS + dI + dA + dR)) < 1e-3)
return dN, dS, dI, dA, dR
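# Illustrative instantiation sketch (the parameter values are made up for
# demonstration, not calibrated to any real epidemic; note that N_0 must equal
# S_0 + I_0 + A_0 + R_0 or the assert in __init__ fails):
#
# ModeloAsintomaticos(N_0=1000., S_0=900., I_0=50., A_0=50., R_0=0.,
#                     lambd=0., mu=0., mu_star=0.01, gamma=0.1,
#                     gamma_star=0.1, beta_1=3e-4, beta_2=3e-4,
#                     beta_3=2e-4, beta_4=2e-4)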
|
"""
Build the function separaPal(string), which receives as input a text containing any document.
The function must return a list with the words present in the text. A word is a sequence of
characters between characters called separators. The following characters are considered
separators: period, exclamation mark, question mark, colon, semicolon, comma and space.
"""
def retorna_separadores(separadores):
# Return every separator character as a list entry
return list(separadores)
#
#
def separaPal(pTexto):
strSeparadores = ' ,.:;!?'
strBuffer = ""
lstPalavras = []
for i in range(len(pTexto)):
if pTexto[i] not in strSeparadores:
strBuffer += pTexto[i]
elif strBuffer != "":
lstPalavras.append(strBuffer)
strBuffer = ""
#
#
if strBuffer != "":
lstPalavras.append(strBuffer)
#
return lstPalavras
#
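# Example of the expected behaviour (illustrative input):
# separaPal("Ola, mundo! Tudo bem?") -> ['Ola', 'mundo', 'Tudo', 'bem']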
def main():
texto = input("Digite um texto: ")
print(separaPal(texto))
return 0
if __name__ == '__main__':
main()
|
"""
MIT License
Copyright (c) 2018 Rafael Felix Alves
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
def merge_array(x, y, axis=0):
from numpy import size,atleast_1d, concatenate
if not size(x):
return atleast_1d(y)
elif size(x) and size(y):
return concatenate([x, atleast_1d(y)], axis)
elif size(y):
return atleast_1d(y)
else:
return atleast_1d([])
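# Illustrative behaviour of merge_array (values are examples only):
# merge_array(np.array([1, 2]), 3) -> array([1, 2, 3])
# merge_array([], 5) -> array([5])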
def merge_dict(x, y):
ans = {}
for _key in (x.keys() & y.keys()):
if isinstance(x[_key], dict):
ans[_key] = merge_dict(x[_key], y[_key])
else:
ans[_key] = merge_array(x[_key], y[_key])
return ans
def normalize(x, ord=1,axis=-1):
'''
Normalize is a function that performs unit normalization
Please, see http://mathworld.wolfram.com/UnitVector.html
:param x: Vector
:return: normalized x
'''
from numpy import atleast_2d, linalg
return (atleast_2d(x) / atleast_2d(linalg.norm(atleast_2d(x), ord=ord, axis=axis)).T).astype(float)
import numpy as np
import json
class NpJSONEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, np.integer):
return int(obj)
elif isinstance(obj, np.floating):
return float(obj)
elif isinstance(obj, np.ndarray):
return obj.tolist()
else:
return super(NpJSONEncoder, self).default(obj)
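# Usage sketch: pass the encoder class to json.dumps so numpy scalars and arrays
# serialize cleanly (the payload below is illustrative):
#
# payload = {'score': np.float32(0.5), 'ids': np.arange(3)}
# json.dumps(payload, cls=NpJSONEncoder)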
|
from django.urls import path, re_path, include
from . import views
urlpatterns = [
path('', views.NewsView.as_view({'get': 'list'})),
re_path(r'detail/(?P<pk>\d+)', views.NewsDetailView.as_view({'get': 'retrieve'})),
# path('comment', views.CommentView.as_view({'get':'list','post':'create'}))
path('comment', views.CommentView.as_view({'post':'create'}))
]
|
import json
from django.contrib.auth.mixins import LoginRequiredMixin
from django.http import JsonResponse
from django.shortcuts import get_object_or_404
from django.views import View
from recipes.models import (
Favorite, Follow, Ingredient, Recipe, ShoppingList, User)
class Favorites(LoginRequiredMixin, View):
""" Функция добавления/ удаления рецепта из "Избранного"
"""
def post(self, request):
req_ = json.loads(request.body)
recipe_id = req_.get('id')
if recipe_id is not None:
recipe = get_object_or_404(Recipe, id=recipe_id)
_, created = Favorite.objects.get_or_create(
user=request.user, recipe=recipe
)
return JsonResponse({'success': created})
return JsonResponse({'success': False}, status=400)
def delete(self, request, recipe_id):
recipe = get_object_or_404(
Favorite, recipe=recipe_id, user=request.user
)
recipe.delete()
return JsonResponse({'success': True})
class Follows(LoginRequiredMixin, View):
""" Функция добавления/ удаления подписок
"""
def post(self, request):
req_ = json.loads(request.body)
author_id = req_.get('id')
if author_id is not None:
author = get_object_or_404(User, id=author_id)
if request.user == author:
return JsonResponse({'success': False})
_, created = Follow.objects.get_or_create(
user=request.user, author=author
)
return JsonResponse({'success': created})
return JsonResponse({'success': False}, status=400)
def delete(self, request, author_id):
author = get_object_or_404(User, id=author_id)
removed, _ = Follow.objects.filter(
user=request.user, author=author
).delete()
if removed:
return JsonResponse({'success': True})
return JsonResponse({'success': False})
class Purchases(LoginRequiredMixin, View):
""" Функция добавления/ удаления рецептов в список покупок
"""
def post(self, request):
req_ = json.loads(request.body)
recipe_id = req_.get('id')
if recipe_id is not None:
recipe = get_object_or_404(Recipe, id=recipe_id)
_, created = ShoppingList.objects.get_or_create(
user=request.user, recipe=recipe
)
return JsonResponse({'success': created})
return JsonResponse({'success': False}, status=400)
def delete(self, request, recipe_id):
recipe = get_object_or_404(Recipe, id=recipe_id)
removed, _ = ShoppingList.objects.filter(
user=request.user, recipe=recipe
).delete()
if removed:
return JsonResponse({'success': True})
return JsonResponse({'success': False})
class Ingredients(LoginRequiredMixin, View):
""" Функция получения списка ингредиентов
"""
def get(self, request):
text = request.GET['query']
ingredients = list(
Ingredient.objects.filter(title__icontains=text).values(
'title', 'unit'
)
)
return JsonResponse(ingredients, safe=False)
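# Hypothetical URL wiring for the views above (route strings are illustrative;
# the project's actual urls.py may differ):
#
# from django.urls import path
# urlpatterns = [
#     path('favorites', Favorites.as_view()),
#     path('favorites/<int:recipe_id>', Favorites.as_view()),
#     path('subscriptions', Follows.as_view()),
#     path('subscriptions/<int:author_id>', Follows.as_view()),
#     path('purchases', Purchases.as_view()),
#     path('purchases/<int:recipe_id>', Purchases.as_view()),
#     path('ingredients', Ingredients.as_view()),
# ]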
|
import matplotlib.pyplot as plt
import math
s = math.e
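# The loop below follows a curlicue-style construction: the turning increment
# eps grows by 2*pi*s (mod 2*pi) at every step, the heading phi accumulates eps,
# and a unit-length segment is drawn in that direction.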
seg_length = 1.
Nmax = 15000
eps = [0.]
phi = [0.]
X = [0.]
Y = [0.]
for step in range(Nmax):
epsn = eps[-1]
phin = phi[-1]
epsnp1 = (epsn+2.*math.pi*s) % (2.*math.pi)
phinp1 = epsn + (phin%(2.*math.pi))
eps.append(epsnp1)
phi.append(phinp1)
X.append(X[-1]+seg_length*math.cos(phinp1))
Y.append(Y[-1]+seg_length*math.sin(phinp1))
plt.plot(X,Y,'g')
plt.gca().set_aspect('equal', adjustable='box')
plt.title("s="+str(s))
plt.show()
|
import calendar
import time
import hdbfs.db
TYPE_NILL = 0
TYPE_FILE = 1000
TYPE_FILE_DUP = 1001
TYPE_FILE_VAR = 1002
TYPE_GROUP = 2000
TYPE_ALBUM = 2001
TYPE_CLASSIFIER = 2002
ORDER_VARIENT = -1
ORDER_DUPLICATE = -2
LEGACY_REL_CHILD = 0
LEGACY_REL_DUPLICATE = 1000
LEGACY_REL_VARIANT = 1001
LEGACY_REL_CLASS = 2000
def upgrade_from_0_to_1( log, session ):
log.info( 'Database upgrade from VER 0 -> VER 1' )
session.execute( 'ALTER TABLE mfl ADD COLUMN parent INTEGER' )
session.execute( 'ALTER TABLE mfl ADD COLUMN gorder INTEGER' )
return 1, 0
def upgrade_from_1_to_2( log, session ):
log.info( 'Database upgrade from VER 1 -> VER 2' )
session.execute( 'CREATE TABLE objl ( '
' id INTEGER PRIMARY KEY, '
' type INTEGER NOT NULL )' )
session.execute( 'CREATE TABLE rell ( '
' id INTEGER NOT NULL, '
' parent INTEGER NOT NULL, '
' rel INTEGER NOT NULL, '
' sort INTEGER )' )
session.execute( 'CREATE TABLE fchk ( '
' id INTEGER PRIMARY KEY, '
' len INTEGER, '
' crc32 TEXT, '
' md5 TEXT, '
' sha1 TEXT )' )
coltbl = {}
collst = {}
session.execute( 'INSERT INTO objl ( id, type ) '
'SELECT id, :file_type AS type FROM mfl ORDER BY id ASC',
{ 'file_type' : TYPE_FILE } )
session.execute( 'INSERT INTO fchk ( id, len, crc32, md5, sha1 ) '
'SELECT id, len, crc32, md5, sha1 FROM mfl ORDER BY id ASC' )
session.execute( 'INSERT INTO rell ( id, parent, rel ) '
'SELECT id, parent, CASE gorder '
' WHEN :order_dup THEN :rel_dup '
' ELSE :rel_var '
' END rel '
' FROM mfl '
' WHERE gorder IN ( :order_dup, :order_var ) ORDER BY id ASC',
{ 'order_dup' : ORDER_DUPLICATE,
'order_var' : ORDER_VARIENT,
'rel_dup' : LEGACY_REL_DUPLICATE,
'rel_var' : LEGACY_REL_VARIANT } )
# In schema ver 1.0, the album object is the first image in the album.
# Create a mapping table that will add album objects into the objl table
# and map them back to the first image in the album
session.execute( 'CREATE TEMPORARY TABLE album_map ('
' old_parent INTEGER PRIMARY KEY, '
' album_id INTEGER )' )
session.execute( 'CREATE TEMPORARY TRIGGER update_album_map AFTER INSERT ON album_map '
' BEGIN '
' INSERT INTO objl ( type ) VALUES ( %d ); '
' UPDATE album_map SET album_id = (SELECT id FROM objl WHERE rowid = last_insert_rowid()) '
' WHERE old_parent = NEW.old_parent; '
' END' % ( TYPE_ALBUM, ) )
# Add all the albums into the album map
session.execute( 'INSERT INTO album_map ( old_parent ) '
'SELECT DISTINCT parent FROM mfl WHERE parent NOT NULL and gorder >= 0' )
# Add all the first images to the albums
session.execute( 'INSERT INTO rell ( id, parent, rel, sort ) '
'SELECT old_parent, album_id, :child_rel, 0 FROM album_map',
{ 'child_rel' : LEGACY_REL_CHILD } )
# Add all the subsequent images to the albums
session.execute( 'INSERT INTO rell ( id, parent, rel, sort ) '
'SELECT o.id, m.album_id, :child_rel AS rel, o.gorder + 1'
' FROM mfl o '
' INNER JOIN album_map m ON o.parent == m.old_parent',
{ 'child_rel' : LEGACY_REL_CHILD } )
session.execute( 'DROP TRIGGER update_album_map' )
session.execute( 'DROP TABLE album_map' )
return 2, 0
def upgrade_from_2_to_3( log, session ):
log.info( 'Database upgrade from VER 2 -> VER 3' )
session.execute( 'ALTER TABLE objl ADD COLUMN name TEXT' )
session.execute( 'CREATE TABLE meta ( '
' id INTEGER NOT NULL, '
' tag TEXT NOT NULL, '
' value TEXT )' )
session.execute( 'INSERT INTO meta ( id, tag, value ) '
'SELECT id, "altname" AS tag, name FROM naml' )
session.execute( 'CREATE TEMPORARY TABLE single_names ('
' rid INTEGER PRIMARY KEY, '
' id INTEGER NOT NULL, '
' name TEXT NOT NULL )' )
session.execute( 'INSERT INTO single_names '
'SELECT min( rowid ), id, name FROM naml GROUP BY id' )
session.execute( 'UPDATE objl SET name = ('
' SELECT s.name from single_names s WHERE s.id = objl.id)'
' WHERE EXISTS (SELECT * FROM single_names s WHERE s.id = objl.id)' )
session.execute( 'DROP TABLE single_names' )
session.execute( 'DELETE FROM meta '
' WHERE meta.tag = "altname" '
' AND EXISTS ('
' SELECT * FROM objl o WHERE o.id = meta.id '
' AND o.name = meta.value)' )
session.execute( 'DROP TABLE naml' )
session.execute( 'UPDATE dbi SET ver = 3, rev = 0' )
return 3, 0
def upgrade_from_3_to_4( log, session ):
log.info( 'Database upgrade from VER 3 -> VER 4' )
session.execute( 'INSERT INTO objl ( type, name ) '
'SELECT DISTINCT :tag_type AS type, tag from tagl',
{ 'tag_type' : TYPE_CLASSIFIER } )
session.execute( 'INSERT INTO rell ( id, parent, rel ) '
'SELECT t.id, o.id, :tag_rel AS rel '
' FROM tagl t INNER JOIN objl o ON o.type = :tag_type '
' AND o.name = t.tag',
{ 'tag_rel' : LEGACY_REL_CLASS,
'tag_type' : TYPE_CLASSIFIER } )
session.execute( 'DROP TABLE tagl' )
session.execute( 'UPDATE dbi SET ver = 4, rev = 0' )
return 4, 0
def upgrade_from_4_to_5( log, session ):
log.info( 'Database upgrade from VER 4 -> VER 5' )
# Step 1, create new tables
session.execute( 'ALTER TABLE objl ADD COLUMN dup INTEGER' )
session.execute( 'CREATE TABLE rel2 ( '
' child INTEGER NOT NULL, '
' parent INTEGER NOT NULL, '
' sort INTEGER )' )
session.execute( 'CREATE TABLE mtda ( '
' id INTEGER NOT NULL, '
' key TEXT NOT NULL, '
' value TEXT )' )
# Step 2, convert relations
session.execute( 'INSERT INTO rel2 ( child, parent, sort ) '
'SELECT id, parent, sort FROM rell '
' WHERE rel = :child_type OR rel = :class_type',
{ 'child_type' : LEGACY_REL_CHILD,
'class_type' : LEGACY_REL_CLASS } )
for result in session.execute( 'SELECT id, parent, rel, sort FROM rell'
' WHERE rel = :dup_type OR rel = :var_type',
{ 'dup_type' : LEGACY_REL_DUPLICATE,
'var_type' : LEGACY_REL_VARIANT } ):
if( result['rel'] == LEGACY_REL_DUPLICATE ):
target_type = TYPE_FILE_DUP
elif( result['rel'] == LEGACY_REL_VARIANT ):
target_type = TYPE_FILE_VAR
else:
assert False
session.execute( 'UPDATE objl SET type = :type, dup = :parent WHERE id = :child',
{ 'child' : result['id'],
'parent' : result['parent'],
'type' : target_type } )
# Step 3, collapse meta into mtda
for result in session.execute( 'SELECT DISTINCT id, tag FROM meta' ):
values = [ r['value'] for r in session.execute( 'SELECT value FROM meta where id = :id AND tag = :tag', result ) ]
if( len( values ) == 1 ):
value = values[0]
else:
assert( result['tag'] == 'altname' )
value = ':'.join( values )
session.execute( 'INSERT INTO mtda ( id, key, value ) VALUES ( :id, :key, :value )',
{ 'id' : result['id'],
'key' : result['tag'],
'value' : value } )
# Step 4, drop old tables
session.execute( 'DROP TABLE rell' )
session.execute( 'DROP TABLE meta' )
# Step 5, update the database file
session.execute( 'UPDATE dbi SET ver = 5, rev = 0' )
return 5, 0
def upgrade_from_5_to_6( log, session ):
log.info( 'Database upgrade from VER 5 -> VER 6' )
session.execute( 'ALTER TABLE dbi ADD COLUMN imgdb_ver INTEGER' )
session.execute( 'UPDATE dbi SET ver = 6, rev = 0, imgdb_ver = 0' )
return 6, 0
def upgrade_from_6_to_7( log, session ):
log.info( 'Database upgrade from VER 6 -> VER 7' )
# Note, I normally wouldn't want to add a default, because having
# an exception thrown if we ever try to insert an empty time is
# a good way to catch errors. However, SQLite doesn't provide
# any good mechanisms to add a not-null column, then revoke the
# default.
session.execute( 'ALTER TABLE objl ADD COLUMN create_ts INTEGER NOT NULL DEFAULT 0' )
session.execute( 'UPDATE objl SET create_ts = :now',
{ 'now' : calendar.timegm( time.gmtime() ) } )
session.execute( 'UPDATE dbi SET ver = 7, rev = 0' )
return 7, 0
def upgrade_from_7_to_8( log, session ):
log.info( 'Database upgrade from VER 7 -> VER 8' )
session.execute( 'ALTER TABLE mtda ADD COLUMN num INTEGER' )
session.execute( 'UPDATE dbi SET ver = 8, rev = 0' )
return 8, 0
|
sportsList = open('sports.txt')
for index in range(1,11):
sp = sportsList.readline()
if len(sp) >= 8 :
print(sp.rstrip())
|
#
# -*- Mode: Python; indent-tabs-mode: nil; tab-width: 4 -*-
#
################################################################################
# HEADING
################################################################################
#
# Author
# The author of the script.
# Feature (Optional)
# The 'Feature' contains the reference to the related Jira Nr. and
# description to which this test case belongs. If the test case belongs to
# more than one feature, the features have to be separated by a comma.
# Description
# Description of the test case and the test procedure with the test goal.
# Modified by
# N.A.
# Version
# Current version of the file. Should follow the project versioning standards
# for documents.
# History (Optional)
# History of the changes of each version. Should follow the project versioning
# standards for documents.
# Requirements
# List of requirements covered by the test case.
# Database
# Database used for INSTANCE.
# Comments (Optional)
# Any comments for the test case.
# Instance Table
# The INSTANCE table should be choose and/or created in a way that simplifies
# the script and allows an easy verification of the number of permutations for
# the tests. For example, instead of using the "Itineraire" table and
# iterating for each point or condition, an "Itineraire_ADV" or
# "Itineraire_condition" should be used.
#
################################################################################
Author = "Author"
Feature = ""
Description = """Sample Description with some "extra" features."""
Modifiedby = ""
Version = "1.0"
History = """"""
Requirements = ""
Database = ""
Comments = """"""
Table = ""
################################################################################
# IMPORT
################################################################################
# Import the model-reading functions
from Interface_Tesia import *
# Import the instantiation functions
from Interface_Simenv import *
# Import the other functions
from Interface_Database import GetInstanceTest
from Dico_Var import __mode__,__Debug__,__NoDebug__
from Librairie import *
import sys
import inspect
################################################################################
# Test scenario
################################################################################
def Scenario(instance):
############################################################################
# INITIALIZATION
############################################################################
# Define the name of the instantiated scenario
FileName = inspect.getsourcefile(Scenario)
FileName = FileName[(FileName.rfind("\\")+1):]
FileName = FileName.replace(".py",".scn")
SetPath(FileName)
# Retrieve the instances associated with the test
MaBaseDeDonneesEst(instance)
INSTANCE = GetInstanceTest(FileName)
# Start generating the instantiated scenario
Debug("==== Début de génération du %s ===========" % FileName.replace(".scn",""))
# Create the instantiated scenario
CreationFichier()
EcrireEntete(Author, Modifiedby, Description, Version)
# Check that at least one instance is available to test
if INSTANCE != [] :
# Test each row of the instance table
for PARAMETRE in INSTANCE :
####################################################################
# PRECONDITIONS
####################################################################
# Retrieve the names of the elements that make up the tested instance (i.e. the contents of the columns of the instance table)
# With respect to the DVSS, every element shown in bold under "Objets utilisés" must be retrieved this way
# Example: retrieve the name of the tested route from the column named "Itineraire":
#
# Itineraire1 = PARAMETRE['Itineraire']
#
ITI = PARAMETRE['Itineraire']
# Retrieve the elements that make up the tested instance from their names
# To be done for every element name retrieved above
# Example: retrieve the tested route from its name:
#
# Itineraire1 = ItineraireNomme(Itineraire1)
#
# (see the Librairie.py file for the list of "xxxNomme(e)"-style functions)
ITI = ItineraireNomme(ITI)
# Retrieve from the model the elements that are not part of the instance but are needed to run the test
# With respect to the DVSS, every element not shown in bold under "Objets utilisés" must be retrieved this way
# Example: retrieve a switch commanded to the right by the tested route:
#
# Aiguille1 = ListeAigCommandeesParIti(Itineraire1)[0]
#
# (see Interface_Tesia.py and Librairie.py for the direct and macro model-reading functions, respectively)
SIG = ITI_SIG_origine_Concernant(ITI)[0]
# Display the instance in the scenario
Instance(PARAMETRE)
####################################################################
# EXECUTION
####################################################################
# TODO
# If there is no instance to test
else :
print "Pas d'instances pour ce scénario"
Commentaire("Pas d'instances pour ce scénario")
# End of generation of the instantiated scenario
FermetureFichier()
Debug ("==== %s généré ===========" % FileName.replace(".scn",""))
################################################################################
# Auto-instantiation
################################################################################
def main():
import CModelCore
import Dico_Var
instance = Dico_Var.BasePathInstanceTesia
modele = Dico_Var.BasePathModelTesia
core = CModelCore.CModelCore()
core.OpenModel(modele)
core.OpenInstance(instance)
MonGestionnaireEst(core)
Scenario(instance)
core.Close()
################################################################################
# Run
################################################################################
if __name__ == '__main__':
main()
|
"""
Tests for captions resources.
"""
import io
import pytest
import responses
import pyyoutube.models as mds
from .base import BaseTestCase
from pyyoutube.error import PyYouTubeException
from pyyoutube.media import Media
class TestCaptionsResource(BaseTestCase):
RESOURCE = "captions"
def test_list(self, helpers, key_cli):
with responses.RequestsMock() as m:
m.add(
method="GET",
url=self.url,
json=self.load_json("captions/captions_by_video.json", helpers),
)
res = key_cli.captions.list(parts=["snippet"], video_id="oHR3wURdJ94")
assert res.items[0].id == "SwPOvp0r7kd9ttt_XhcHdZthMwXG7Z0I"
def test_insert(self, helpers, authed_cli):
video_id = "zxTVeyG1600"
body = mds.Caption(
snippet=mds.CaptionSnippet(
name="日文字幕", language="ja", videoId=video_id, isDraft=True
)
)
media = Media(
io.StringIO(
"""
1
00:00:00,036 --> 00:00:00,703
ジメジメした天気
"""
)
)
upload = authed_cli.captions.insert(
body=body,
media=media,
)
assert upload.resumable_progress == 0
def test_update(self, helpers, authed_cli):
caption_id = "AUieDabWmL88_xoRtxyxjTMtmvdoF9dLTW3WxfJvaThUXkNptljUijDFS-kDjyA"
new_body = mds.Caption(
id=caption_id,
snippet=mds.CaptionSnippet(videoId="zxTVeyG1600", isDraft=False),
)
media = Media(
io.StringIO(
"""
1
00:00:00,036 --> 00:00:00,703
ジメジメした天気
"""
),
)
upload = authed_cli.captions.update(
body=new_body,
media=media,
)
assert upload.resumable_progress == 0
with responses.RequestsMock() as m:
m.add(
method="PUT",
url=self.url,
json=self.load_json("captions/update_response.json", helpers),
)
caption = authed_cli.captions.update(body=new_body)
assert not caption.snippet.isDraft
def test_download(self, authed_cli):
caption_id = "AUieDabWmL88_xoRtxyxjTMtmvdoF9dLTW3WxfJvaThUXkNptljUijDFS-kDjyA"
with responses.RequestsMock() as m:
m.add(
method="GET",
url=f"{self.url}/{caption_id}",
)
res = authed_cli.captions.download(caption_id=caption_id)
assert res.status_code == 200
def test_delete(self, helpers, authed_cli):
caption_id = "AUieDabWmL88_xoRtxyxjTMtmvdoF9dLTW3WxfJvaThUXkNptljUijDFS-kDjyA"
with responses.RequestsMock() as m:
m.add(method="DELETE", url=self.url)
assert authed_cli.captions.delete(caption_id=caption_id)
with pytest.raises(PyYouTubeException):
with responses.RequestsMock() as m:
m.add(
method="DELETE",
url=self.url,
status=403,
json=self.load_json("error_permission_resp.json", helpers),
)
authed_cli.captions.delete(caption_id=caption_id)
|
# Different occurrences of a substring
# can overlap one another ex. ATA in CGATATATC
# This algorithm is a foolproof way to find all substrings
# INPUT - full genome string and substring we are looking for
# ALGO - reads from input, slices every possible contiguous
# window of the substring's length; if a window equals our substring
# the starting position of that window is printed
f = open('algoIII.txt', 'r')
x = f.readline()
y = f.readline()
# readline() keeps the trailing newline on each line, which made len()
# report one extra character; strip it off instead
x = x.rstrip('\n')
y = y.rstrip('\n')
a = len(x)
b = len(y)
for i in range (b - a+1):
answer = y[i:i+a]
if answer == x:
print i
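# Worked example (matching the overlap note above): with x = 'ATA' and
# y = 'CGATATATC' this prints 2 and 4, the 0-based start positions of the
# two overlapping matches.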
|
from setuptools import setup, find_packages
with open("README.md", "r") as readme_file:
long_description = readme_file.read()
setup(
name='SpoofDogg',
version='1.0',
description='A tool that does ARP poisoning and DNS spoofing.',
long_description=long_description,
long_description_content_type="text/markdown",
packages=find_packages(),
python_requires="~=3.5",
url='',
license='Apache',
classifiers=[
'Environment :: Console',
'License :: OSI Approved :: Apache Software License',
'Natural Language :: English',
'Operating System :: OS Independent',
'Programming Language :: Python',
],
author='Ada Donder',
author_email='adadonderr@gmail.com',
keywords="ARP, DNS, spoof, poison, gateway",
install_requires=['scapy', 'netfilterqueue']
)
|
from django.conf.urls import url,include
from django.contrib import admin
from django.urls import path
from . import views
urlpatterns = [
path('', views.job_list),
path('<int:id>',views.job_details),
]
|
import logging
import fmcapi
def test__application(fmc):
logging.info("Testing Application class.")
obj1 = fmcapi.Applications(fmc=fmc)
logging.info("All Application -- >")
result = obj1.get(limit=1000)
logging.info(result)
logging.info(f"Total items: {len(result['items'])}")
del obj1
obj1 = fmcapi.Applications(fmc=fmc, name="WD softwares Download/Update")
logging.info("One Application -- >")
logging.info(obj1.get(limit=1000))
logging.info("Testing Application class done.\n")
|
# You are given an m x n grid of positive numbers. Starting from the top-left cell,
# move to the bottom-right cell and return the smallest possible sum of the elements
# along the way. From any cell you may only move right or down.
# Input: [ [1,3,1], [1,5,1], [4,2,1] ]
# Output: 7
# Explanation: the path 1→3→1→1→1 gives the smallest sum
def min_path_sum(grid):
x = len(grid)
y = len(grid[0])
# accumulate the first row and the first column separately (they can differ in length)
for i in range(1, y):
grid[0][i] += grid[0][i-1]
for i in range(1, x):
grid[i][0] += grid[i-1][0]
for i in range(1,x):
for j in range(1,y):
grid[i][j] += min(grid[i][j-1], grid[i-1][j])
return grid[-1][-1]
print(min_path_sum([[1,1,3,1],[1,1,5,1],[1,4,2,1],[1,2,3,4]]))
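# Sanity check against the documented example above (expected output: 7):
print(min_path_sum([[1,3,1],[1,5,1],[4,2,1]]))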
|
# create your solar system animation here!
import turtle
import math
class SolarSystem:
def __init__(self, height, width):
self.sun = None
self.planets = []
self.window = turtle.Screen()
self.window.tracer(0)
self.window.setup(900, 900)
self.window.bgcolor("black")
self.sst = turtle.Turtle()
self.sst.hideturtle()
self.window.setworldcoordinates(
-width / 2.0, -height / 2.0, width / 2.0, height / 2.0
)
def add_planet(self, planet):
self.planets.append(planet)
def add_sun(self, sun):
self.sun = sun
def show_planets(self):
for planet in self.planets:
print(planet.name)
def freeze(self):
self.window.exitonclick()
def move_planets(self):
G = 0.1
dt = 0.0005
for p in self.planets:
p.move_to(p.x + dt * p.x_vel, p.y + dt * p.y_vel)
rx = self.sun.x - p.x
ry = self.sun.y - p.y
r = math.sqrt(rx ** 2 + ry ** 2)
x_accel = G * self.sun.mass * rx / r ** 3
y_accel = G * self.sun.mass * ry / r ** 3
p.x_vel += dt * x_accel
p.y_vel += dt * y_accel
class Sun:
def __init__(self, name, radius, mass, temp):
self.name = name
self.radius = radius
self.mass = mass
self.temp = temp
self.x = 0
self.y = 0
self.st = turtle.Turtle()
self.st.up()
self.st.goto(self.x, self.y)
self.st.down()
self.st.color("yellow")
self.st.begin_fill()
self.st.shape("circle")
self.st.shapesize(radius, radius, 1)
# self.st.circle(radius)
self.st.end_fill()
class Planet:
def __init__(self, name, radius, mass, distance, x_vel, y_vel, color):
self.name = name
self.radius = radius
self.mass = mass
self.distance = distance
self.x = distance
self.y = 0
self.color = color
self.x_vel = x_vel
self.y_vel = y_vel
self.t = turtle.Turtle()
self.t.up()
self.t.goto(self.x, self.y)
self.t.down()
self.t.color(color)
self.t.begin_fill()
self.t.shape("circle")
self.t.shapesize(radius, radius, 1)
# self.t.circle(radius)
self.t.end_fill()
def move_to(self, new_x, new_y):
self.x = new_x
self.y = new_y
self.t.up()
self.t.goto(self.x, self.y)
self.t.down()
def create_animate():
ss = SolarSystem(2, 2)
sun = Sun("SUN", 1, 25, 5800)
ss.add_sun(sun)
planet = Planet("MERCURY", 2, 1000, 0.7, -0.5, 0.5, "blue")
ss.add_planet(planet)
planet_2 = Planet("EARTH", 4, 5000, 0.8, -0.5, 0.6, "green")
ss.add_planet(planet_2)
planet_3 = Planet("JUPITER", 8, 8000, 1, -0.5, 0.8, "brown")
ss.add_planet(planet_3)
while True:
ss.move_planets()
ss.window.update()
# i couldn't get ontimer to work for the life of me :(
# ss.move_planets()
# ss.window.ontimer(create_animate, 500)
# ss.window.update()
# ss.window.exitonclick()
# ss.window.mainloop()
create_animate()
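# A possible ontimer-based alternative to the while-loop above (untested sketch;
# it assumes access to the SolarSystem instance `ss` and schedules the next
# frame instead of busy-looping):
#
# def tick():
#     ss.move_planets()
#     ss.window.update()
#     ss.window.ontimer(tick, 10)
# tick()
# ss.window.mainloop()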
|
# -*- coding: utf-8 -*-
"""
Created on Wed Jan 24 13:41:52 2018
@author: Rafael Rocha
"""
import sys
import time
import os
import numpy as np
import keras
import matplotlib.pyplot as plt
#import my_utils as ut
from sklearn.metrics import classification_report, confusion_matrix
from keras.optimizers import SGD, Adam, Adagrad, RMSprop
from keras.losses import categorical_crossentropy
from keras.layers import Dense, Dropout, Flatten
from keras.layers import Conv2D, MaxPooling2D
from keras.layers import Input
from keras.models import Model
from keras import backend as K
data_set_name = 'train_test_splits_2.npz'
#data_set_name = 'dataset_pad_128x256_aug.npz'
data = np.load(data_set_name)
x = data['x']
y = data['y']
x_test = data['x_test']
y_test = data['y_test']
samples = data['samples']
x_train = x[samples[9]]
y_train = y[samples[9]]
lr = 0.01
momentum = 0.3
batch_size = 5
epochs = 50
#x_train = data['x_train_1']
#y_train = data['y_train_1']
#x_test = data['x_test_1']
#y_test = data['y_test_1']
x_train = x_train.reshape(x_train.shape[0], 128, 256, 1)
x_test = x_test.reshape(x_test.shape[0], 128, 256, 1)
y_train = keras.utils.to_categorical(y_train, 3)
y_test = keras.utils.to_categorical(y_test, 3)
input_shape = (np.size(x_train, 1), np.size(x_train, 2), 1)
# ==============================================================================
# Create deep network
# ==============================================================================
K.clear_session()
inputs = Input(input_shape)
conv0 = Conv2D(32, kernel_size=(11, 11), strides=5, activation='relu',
input_shape=input_shape)(inputs)
conv1 = Conv2D(64, (3,3), activation='relu')(conv0)
pool0 = MaxPooling2D(pool_size=(2, 2))(conv1)
pool0 = Dropout(0.25)(pool0)
flatt0 = Flatten()(pool0)
dense0 = Dense(128, activation='relu')(flatt0)
#dense0 = Dropout(0.25)(dense0)
outputs = Dense(3, activation='softmax')(dense0)
model = Model(inputs=inputs, outputs=outputs)
model.compile(loss=categorical_crossentropy,
optimizer=SGD(lr=lr, momentum=momentum),
metrics=['accuracy'])
# ==============================================================================
# ==============================================================================
# Training deep network
# ==============================================================================
start_time = time.time()
history = model.fit(x_train, y_train,
batch_size=batch_size,
epochs=epochs,
verbose=1,
# validation_split=validation_split)
# validation_data=(x_validation, y_validation))
validation_data=(x_test, y_test))
training_time = time.time() - start_time
score = model.evaluate(x_train,
y_train,
verbose=0)
#
print("\n--- Training time: %s seconds ---" % training_time)
print('Training loss:', score[0])
print('Training accuracy:', score[1])
# ==============================================================================
start_time = time.time()
score = model.evaluate(x_test,
y_test,
verbose=0)
test_time = time.time() - start_time
print("\n--- Test time: %s seconds ---" % test_time)
print('Test loss:', score[0])
print('Test accuracy:', score[1])
y_pred = model.predict(x_test)
#y_pred = model.predict_classes(x_test)
target_names = ['Absent', 'Undamaged', 'Damaged']
print('\n')
cr = classification_report(np.argmax(y_test, axis=1),
np.argmax(y_pred, axis=1),
target_names=target_names,
digits=4)
print(cr)
print('\nConfusion matrix:\n')
cm = confusion_matrix(np.argmax(y_test, axis=1),
np.argmax(y_pred, axis=1))
print(cm)
str_acc = "%.2f" % (100*score[1])
model_name = data_set_name + '_' + 'epochs_' + str(epochs) + '_' + 'acc_' + str_acc + '.h5'
model_path = os.path.join('model', model_name)
#model.save(model_path)
acc = history.history['acc']
loss = history.history['loss']
val_acc = history.history['val_acc']
val_loss = history.history['val_loss']
#np.savez(dataset_name+'_'+str(epochs)+ '_' + str_acc, x_train=x_train, y_train=y_train, x_test=x_test, y_test=y_test,acc=acc, loss=loss, val_acc=val_acc, val_loss=val_loss)
#ut.plot_acc_loss(acc, val_acc, loss, val_loss)
|
import os
from os.path import join
# Messy counting files in directory
# def messy_file_count(directory):
# for dirpath, dirs, files in os.walk(directory):
# filelist = []
# for f in files:
# filelist.append(f)
# if len(filelist) == 0:
# continue
# else:
# print("{0}: {1} Files".format(dirpath,len(filelist)))
# return directory
#
def clean_file_count(directory):
# iterate through all files and dir names in path
def topDir(topFolder):
count = 0
#exts = ['.BAK', '.SAM', '.DOC', '.GAR', '.NTS','.LEA']
ext = "."
for files in os.listdir(topFolder):
filePath = join(topFolder, files)
if os.path.isdir(filePath):
# if dir, skip it (subdirectories are not recursed into)
continue
elif os.path.isfile(filePath):
# if file, increment
count += 1
else:
if ext in filePath:
print(filePath)
count +=1
if count > 1:
print("{0}: {1} Files".format(topFolder,count))
return count
path = 0
for paths in os.listdir(directory):
path += 1
topFolder = join(directory, paths)
if os.path.isdir(topFolder):
topDir(topFolder)
return path
path = r'C:\Path\On\Windows'
coll_input = input("Enter the Directory Name: ")
coll = str(coll_input)
#rcoll = "<PREFIX>"+coll #Add a directory prefix if necessary
#directory = join(path,rcoll) #If you added a directory prefix
directory = join(path,coll)
#messy_file_count(directory)
clean_file_count(directory)
|
#!/usr/bin/python
import sys
import httplib
# loop over sites; if anything but 200 or 301 comes back, sound the alarm
for site_name in ["prolinuxhub.com", "prolinuxhub.com", "site2.com", # REPLACE WITH YOUR WEBSITES
]:
try:
conn = httplib.HTTPConnection(site_name)
conn.request("HEAD", "/")
r1 = conn.getresponse()
conn.close()
website_return = r1.status
if website_return != 200 and website_return != 301:
print "Problem with " + site_name
sys.exit(2)
except Exception as e:
print "Possibly server down " + site_name
sys.exit(2)
|
s=input()
a=[int(i) for i in s]
sum=0
for i in a:
sum+=(i**3)
if(sum==int(s)):
print("yes")
else:
print("no")
|