from django.http import HttpResponse
from django.shortcuts import render, redirect
from django.contrib.auth import login, authenticate
from django.contrib.sites.shortcuts import get_current_site
from django.utils.encoding import force_bytes, force_text
from django.utils.http import urlsafe_base64_encode, urlsafe_base64_decode
from django.template.loader import render_to_string
from .token import account_activation_token
from django.contrib.auth import get_user_model
from .forms import EmailValidationForm, ConfirmForm, RequestForm
from django.contrib.auth.tokens import PasswordResetTokenGenerator
from residents.models import Request
from django.views.decorators.clickjacking import xframe_options_exempt
from django.views.decorators.csrf import csrf_exempt
# Create your views here.
def activate(request, uidb64, token):
try:
uid = force_text(urlsafe_base64_decode(uidb64))
user = get_user_model().objects.get(pk=uid)
except(TypeError, ValueError, OverflowError, get_user_model().DoesNotExist):
user = None
if user is not None and account_activation_token.check_token(user, token):
form = EmailValidationForm()
if request.method == 'POST':
f = EmailValidationForm(request.POST)
if f.is_valid():
if f.cleaned_data['email'] == user.email:
return redirect('user-terms',uidb64=uidb64,token=token)
else:
form = f
return render(request, 'users/emailValidation.html', {'form': form})
else:
return render(request, 'users/invalid.html')
def confirm_terms(request, uidb64, token):
try:
uid = force_text(urlsafe_base64_decode(uidb64))
user = get_user_model().objects.get(pk=uid)
except(TypeError, ValueError, OverflowError, get_user_model().DoesNotExist):
user = None
if user is not None and account_activation_token.check_token(user, token):
form = ConfirmForm()
if request.method == 'POST':
f = ConfirmForm(request.POST)
if f.is_valid():
user.is_active = True
user.save()
tkn = PasswordResetTokenGenerator().make_token(user)
return redirect('password_reset_confirm',uidb64=uidb64,token=tkn)
else:
form = f
return render(request, 'users/tou.html', {'form': form, 'user':user})
else:
return HttpResponse('Activation link is invalid!')
@csrf_exempt
@xframe_options_exempt
def request_resident(request):
if request.method == "POST":
form = RequestForm(request.POST, request.FILES)
if form.is_valid():
form.save()
return redirect('register_complete')
else:
form = RequestForm()
return render(request,'users/register.html',{'form':form,})
@xframe_options_exempt
def request_complete(request):
return render(request,'users/register_complete.html')
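# Illustrative sketch (not part of this file): the views above import
# `account_activation_token` from the sibling `.token` module, which is not shown
# here. A minimal generator of the kind these views assume subclasses
# PasswordResetTokenGenerator and mixes `is_active` into the hash, so the link
# stops working once the account has been activated:
#
# from django.contrib.auth.tokens import PasswordResetTokenGenerator
#
# class AccountActivationTokenGenerator(PasswordResetTokenGenerator):
#     def _make_hash_value(self, user, timestamp):
#         return f"{user.pk}{timestamp}{user.is_active}"
#
# account_activation_token = AccountActivationTokenGenerator()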
|
# Generated by Django 3.1.2 on 2020-10-31 13:49
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('testingSystem', '0002_auto_20201031_1136'),
]
operations = [
migrations.CreateModel(
name='CheckedTest',
fields=[
('test_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='testingSystem.test')),
('status', models.CharField(choices=[('OK', 'Решение зачтено'), ('CE', 'Ошибка компиляции'), ('WA', 'Неверный ответ'), ('TL', 'Превышен лимит времени'), ('ML', 'Превышен лимит памяти'), ('RE', 'Ошибка выполнения'), ('IL', 'Превышен лимит ожидания'), ('SE', 'Ошибка сервера'), ('RJ', 'Решение отклонено')], max_length=2)),
('memory_used', models.IntegerField()),
('time_used', models.IntegerField()),
],
bases=('testingSystem.test',),
),
migrations.RemoveField(
model_name='attempt',
name='failed_test_index',
),
migrations.RemoveField(
model_name='attempt',
name='memory_used',
),
migrations.RemoveField(
model_name='attempt',
name='status',
),
migrations.RemoveField(
model_name='attempt',
name='time_used',
),
migrations.AddField(
model_name='attempt',
name='score',
field=models.FloatField(default=0),
preserve_default=False,
),
migrations.AddField(
model_name='attempt',
name='checked_tests',
field=models.ManyToManyField(to='testingSystem.CheckedTest'),
),
]
|
# Importing libraries
import csv
import random
# DEBUG
#task_number = random.randint(0, 10) #Giving me random lines
# Importing routine file
raw_morning_data = open('morn_data.csv') #importing the data raw
read_morning_data = csv.reader(raw_morning_data) #reading the data raw
row = list(read_morning_data) #creating each data row
row_count = len(row) #the total number of rows in the list
#THIS IS THE MONEY SHOT BABY
#print(row) #clearing the line
#string_row = str(row[task_number])[2:-2]
#print(string_row)
# Setting initiated variables / RANDOM NUMBERS
total_tasks = random.randint(0, 10) #choosing between 0 - 10
task_number = random.randint(0, total_tasks) #choosing between 0 and total_tasks
task_counter = total_tasks
#setting constant variables
x = 1
total_time = 0
'''
def routines(x):
print(x)
x = x -1
#print("ROUTINE RUNNING!")
print(x)
global task_counter = int(x)
#string_row = str(row[task_number])[2:-2]
#print(string_row)
'''
'''
def mainloop(task_counter):
if(task_counter != 0):
while task_counter != 0:
routines(task_counter)
print("does not equal")
print(task_counter)
else:
print("All done!")
'''
'''
while task_counter != int(0):
routines(task_counter)
#task_counter = int(x)
#print("does not equal")
print(task_counter)
break
else:
print("All done!")
'''
# START - program introduction
print()
print(r"Good morning, todays morning routine consists of", total_tasks, "tasks.")
print()
#print(task_counter)
#mainloop(task_counter)
#if(task_counter != 0):
while task_counter != 0:
time = random.randint(1, total_tasks) #choosing between 1 and total_tasks
total_time = total_time + time
str_time = "(" + str(time) + 'min)'
task_counter = task_counter -1
#print(task_counter)
y = str(x) + "."
#THIS IS THE MONEY SHOT BABY
#print(row) #clearing the line
task_number = random.randint(0, row_count - 1) #pick any line from the CSV so the index stays in range
string_row = str(row[task_number])[2:-2]
#print(string_row)
print(y, string_row, str_time)
x = x +1
#break
else:
#print("All done!")
print(" ")
final_total_time = "Todays morning routine will take: " + str(total_time) + " min's"
print(final_total_time)
print(" ")
|
# Write a Python program to convert a pair of values into a sorted unique array
def sortValues(values):
print(sorted(set(values)))  # set() removes duplicates so the result is a sorted unique array
values = [3,4,2,3,5,6,4,3]
sortValues(values)
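# Illustrative alternative (not part of the original exercise): numpy.unique
# returns the values already de-duplicated and sorted, which matches the
# "sorted unique array" wording of the task.
import numpy as np
print(np.unique([3, 4, 2, 3, 5, 6, 4, 3]))  # -> [2 3 4 5 6]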
|
# -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://doc.scrapy.org/en/latest/topics/item-pipeline.html
import pymysql
from Crawler.Edinburgh.Edinburgh import settings
class EdinburghPipeline(object):
'''
# Connect to the database
def __init__(self):
self.connect = pymysql.connect(
host = settings.Mysql_host,
db = settings.Mysql_dbname,
user = settings.Mysql_user,
passwd = settings.Mysql_passwd
)
# Use a cursor to operate on the database
self.cursor = self.connect.cursor()
def process_item(self, item, spider):
# Define the SQL statements
#sql_1 = "create table {} (description, structure, learning_outcomes, career_opportunities, bachelor_requirements, language_requirements, application_deadlines, full_time_fee);"
sql = "insert into {}(description, structure, learning_outcomes, career_opportunities, bachelor_requirements, language_requirements, application_deadlines, full_time_fee) values(%s,%s,%s,%s,%s,%s,%s,%s);"
content = (item['description'],
item['structure'],
item['learning_outcomes'],
item['career_opportunities'],
item['bachelor_requirements'],
item['language_requirements'],
item['application_deadlines'],
item['full_time_fee'],)
# Execute the SQL statement
self.cursor.execute(sql.format(input("Enter table name: ")), content)
# Commit the transaction
self.connect.commit()
return item
'''
# Write the output to a txt file
def process_item(self, item, spider):
# The file path can be changed (the path below is where files are stored on my machine)
file_name = '/Users/digger/anaconda3/lib/python3.6/site-packages/Crawler/Edinburgh/{}.txt'
with open(file_name.format(input("Please enter the file name: ")), 'a') as file:
file.write(item['programme'] + '\n')
file.write('Description:'+'\n'+item['description']+'\n\n')
file.write('Compulsory_Courses:'+'\n'+item['Compulsory_Courses'] + '\n\n')
file.write('Option_Courses:'+'\n'+item['Option_Courses'] + '\n\n')
file.write('learning_outcomes:'+'\n'+item['learning_outcomes'] + '\n\n')
file.write('career_opportunities:'+'\n'+item['career_opportunities'] + '\n\n')
file.write('bachelor_requirements:'+'\n'+item['bachelor_requirements'] + '\n\n')
file.write('language_requirements:'+'\n'+item['language_requirements'] + '\n\n')
file.write('application_deadlines:'+'\n'+item['application_deadlines'] + '\n\n')
file.write('full_time_fee:'+'\n'+item['full_time_fee'] + '\n\n')
return item
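# Illustrative sketch (not part of the original file): as the header comment says,
# the pipeline only runs once it is registered under ITEM_PIPELINES in the
# project's settings.py. The dotted path below is an assumption based on the
# import path used above.
#
# ITEM_PIPELINES = {
#     'Edinburgh.pipelines.EdinburghPipeline': 300,
# }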
|
import pandas as pd
path = input("type your path:")
name = input("type your file name:")
rst = name + 'negf.fasta'
aim= path+ '/' + name + "neg.csv"
resu=path+ '/' + rst
df1 = pd.read_csv(aim)
with open(resu,'a') as file_handle:
n2=0
for row in df1.itertuples():
aimid = row[2]
aimseq = row[4]
if len(aimseq)==51 and aimseq[25]=='C':
n2 = n2+1
seq= f">seq_{name}neg_{n2}_{aimid}"+ "\n" + aimseq
file_handle.write("{}\n".format(seq))
|
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This file contains the grpc channel decorator to have metadata set per call
"""
class GRPCWithMetadataCallable:
def __init__(self, callable, getMetadata):
"""Constructor.
Args:
callable: Underlying channel
getMetadata: method to call to get metadata for the grpc request.
"""
self.callable = callable
self.getMetadata = getMetadata
def __call__(self,
request,
timeout=None,
metadata=None,
credentials=None):
call_metadata = self.getMetadata()
if metadata is not None:
call_metadata = metadata if call_metadata is None else metadata + call_metadata
return self.callable(request, timeout, metadata=call_metadata, credentials=credentials)
class GRPCWithMetadataChannel:
"""This class provides a decorator for grpc channels where the caller can set up
metadata to be attached to all calls to the underlying stub.
"""
def __init__(self, channel, getMetadata):
self.channel = channel
self.getMetadata = getMetadata
def unary_unary(self,
method,
request_serializer=None,
response_deserializer=None):
return GRPCWithMetadataCallable(
self.channel.unary_unary(method, request_serializer, response_deserializer),
self.getMetadata)
def unary_stream(self,
method,
request_serializer=None,
response_deserializer=None):
return GRPCWithMetadataCallable(
self.channel.unary_stream(method, request_serializer, response_deserializer),
self.getMetadata)
def stream_unary(self,
method,
request_serializer=None,
response_deserializer=None):
return GRPCWithMetadataCallable(
self.channel.stream_unary(method, request_serializer, response_deserializer),
self.getMetadata)
def stream_stream(self,
method,
request_serializer=None,
response_deserializer=None):
return GRPCWithMetadataCallable(
self.channel.stream_stream(method, request_serializer, response_deserializer),
self.getMetadata)
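if __name__ == "__main__":
    # Illustrative usage sketch (not part of the original file): wrap a plain gRPC
    # channel so every call made through generated stubs carries extra metadata.
    # The target address and header contents here are placeholder assumptions.
    import grpc

    def get_metadata():
        return [("authorization", "Bearer <token>")]

    wrapped = GRPCWithMetadataChannel(
        grpc.insecure_channel("localhost:50051"), get_metadata)
    # A generated stub, e.g. FooStub(wrapped), would now attach the metadata on
    # every unary/stream call without changes at each call site.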
|
#####################################
# Imports
#####################################
# Python native imports
from PyQt5 import QtCore, QtGui, QtWidgets
import time
import logging
import socket
import rospy
# Custom Imports
#####################################
# Global Variables
#####################################
#####################################
# ROSMasterChecker Class Definition
#####################################
class ROSMasterChecker(QtCore.QThread):
def __init__(self):
super(ROSMasterChecker, self).__init__()
# ########## Class Variables ##########
self.ros_master_present = False
self.start_time = time.time()
self.start()
def run(self):
try:
master = rospy.get_master()
master.getPid()
self.ros_master_present = True
except socket.error:
return
def master_present(self, timeout):
while self.isRunning() and (time.time() - self.start_time) < timeout:
self.msleep(100)
return self.ros_master_present
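if __name__ == "__main__":
    # Illustrative usage sketch (not part of the original file): the checker starts
    # its own QThread in __init__, so a caller only constructs it and then polls
    # master_present() with a timeout in seconds (the 5 s value is an assumption).
    checker = ROSMasterChecker()
    if not checker.master_present(5):
        logging.error("No ROS master reachable within 5 seconds")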
|
# Copyright (c) 2020. Yul HR Kang. hk2699 at caa dot columbia dot edu.
import numpy as np
from matplotlib import pyplot as plt
from pprint import pprint
import time
import numpy_groupies as npg
from collections import OrderedDict as odict
from typing import Union, Iterable, Sequence, Callable, Tuple, Any
import torch
from torch.nn import functional as F
from torch import nn
from lib.pylabyk import numpytorch as npt, yktorch as ykt
from lib.pylabyk.numpytorch import npy
from lib.pylabyk import argsutil, plt2, np2
npt.device0 = torch.device('cpu')
ykt.default_device = torch.device('cpu')
from a0_dtb import a1_dtb_1D_sim as sim1d
from a0_dtb.a1_dtb_1D_sim import TimedModule
from a0_dtb.a1_dtb_1D_sim \
import simulate_p_cond__rt_ch, rt_sec2fr
from data_2d.consts import get_kw_plot
from data_2d import consts
# run "tensorboard --logdir=runs" in the terminal to see log
def ____Utils____():
pass
def get_demo_ev(n_cond=9, nt=consts.NT) -> torch.Tensor:
levels = 0.5 * np.exp(-np.log(2)
* np.arange(n_cond // 2))
levels = set(-levels).union(set(levels))
if n_cond % 2 == 1:
levels = levels.union({0.})
levels = torch.tensor(np.sort(list(levels), 0))
# levelss[cond12, dim]
levelss = torch.stack([
v.flatten() for v in torch.meshgrid([levels, levels])
], -1)
# ev[cond12, fr, dim]
ev = levelss[:, None, :].expand(-1, nt, -1)
return ev
def unique_conds(cond_by_dim):
"""
:param cond_by_dim: [tr, dim]
:return: ev_conds[cond, dim], dcond[tr]
"""
ev_conds, dcond = np.unique(cond_by_dim, return_inverse=True, axis=0)
return ev_conds, dcond
class Data2D:
def __init__(
self,
nt0=consts.NT,
dt0=consts.DT,
subsample_factor=1,
**kwargs
):
"""
:param nt0:
:param dt0:
:param subsample_factor:
:param kwargs:
"""
self.nt0 = nt0
self.dt0 = dt0
self.subsample_factor = subsample_factor
self.n_ch = consts.N_CH_FLAT
# Placeholders
self.ev_cond_dim = np.empty([0, 0], dtype=float)  # type: np.ndarray
self.ch_tr_dim = np.empty([0, 0], dtype=int)  # type: np.ndarray
self.dcond_tr = np.empty([0], dtype=int)  # type: np.ndarray
@property
def nt(self):
return int(self.nt0 // self.subsample_factor)
@property
def dt(self):
return self.dt0 * self.subsample_factor
@property
def n_dcond0(self):
return self.ev_cond_dim.shape[0]
@property
def n_tr0(self):
return self.ch_tr_dim.shape[0]
def get_incl(self, mode, fold_valid, mode_train, n_fold_valid,
i_fold_test=0, n_fold_test=1,
to_debug=False):
in_tr = np.ones(self.n_tr0, dtype=bool)
in_dcond = np.ones(self.n_dcond0, dtype=bool)
in_tr_train_valid = np.ones(self.n_tr0, dtype=bool)
if mode == 'all':
pass
elif mode in ['train', 'valid', 'train_valid', 'test']:
# --- Choose training + validation set using mode_train
def is_in_train_valid(mode_train):
if mode_train == 'all':
if n_fold_test > 1:
in_tr_train_valid = (np.floor(
(np.arange(self.n_tr0) / self.n_tr0) * n_fold_test
) != i_fold_test)
else:
in_tr_train_valid = np.ones(self.n_tr0, dtype=bool)
in_dcond_train_valid = np.ones(
len(self.ev_cond_dim), dtype=bool)
elif mode_train == 'easiest':
if n_fold_test > 1:
raise ValueError()
ev_cond_dim = self.ev_cond_dim
dcond_tr = self.dcond_tr
easiest_cond_in_dim = np.abs(ev_cond_dim).max(0)
in_dcond_train_valid = np.any(
np.abs(ev_cond_dim) == easiest_cond_in_dim[None, :],
axis=1)
ix_dcond_train_valid = np.nonzero(in_dcond_train_valid)[0]
in_tr_train_valid = (
dcond_tr[:, None]
== ix_dcond_train_valid[None, :]
).any(1) # noqa
if to_debug: # CHECKED mode_train == 'easiest'
print("==== Checking mode_train == 'easiest'")
print('ev_cond included:')
dcond_tr_incl0 = np.unique(dcond_tr[in_tr_train_valid])
print(ev_cond_dim[in_dcond_train_valid, :])
print(ev_cond_dim[dcond_tr_incl0, :])
does_ev_cond_agree = np.all(
ev_cond_dim[in_dcond_train_valid, :]
== ev_cond_dim[dcond_tr_incl0, :]
)
print('Does ev_cond agree with expected: %d'
% does_ev_cond_agree)
assert does_ev_cond_agree
print('easiest_in_dim:')
print(easiest_cond_in_dim)
n_cond_incl0 = np.sum(in_dcond_train_valid)
print('len(dcond_incl0): %d' % n_cond_incl0)
n_cond_incl1 = (
len(np.unique(ev_cond_dim[:, 0])) * 2
+ len(np.unique(ev_cond_dim[:, 1])) * 2 - 4
)
print('# dcond along the boundary of the matrix: %d'
% n_cond_incl1)
does_n_cond_incl_agree = (n_cond_incl0 == n_cond_incl1)
print('Does len(dcond_incl0) agree with expected: %d'
% does_n_cond_incl_agree)
assert does_n_cond_incl_agree
print('====')
else:
raise ValueError()
return in_tr_train_valid, in_dcond_train_valid
in_tr_train_valid, in_dcond_train_valid = is_in_train_valid(
mode_train)
# -- Choose test set by flipping training + validation set
if mode == 'test':
if mode_train == 'all':
if n_fold_test > 1:
in_tr = ~in_tr_train_valid
else:
in_tr = in_tr_train_valid
in_dcond = in_dcond_train_valid
else:
in_tr = ~in_tr_train_valid
in_dcond = ~in_dcond_train_valid
elif mode == 'train_valid' or n_fold_valid == 1:
in_tr = in_tr_train_valid
in_dcond = in_dcond_train_valid
else: # mode in ['train', 'valid']
in_dcond = in_dcond_train_valid
assert n_fold_valid > 1
# get in_tr_valid
n_tr = in_tr_train_valid.sum()
ix_tr = np.full(self.n_tr0, np.nan)
ix_tr[in_tr_train_valid] = np.arange(n_tr)
in_tr_valid = np.floor(
ix_tr / n_tr * n_fold_valid
) == fold_valid
if mode == 'valid':
in_tr = in_tr_valid
else: # mode == 'train':
in_tr = ~in_tr_valid & in_tr_train_valid
else:
raise ValueError()
if to_debug: # CHECKED n_fold_valid
if mode != 'all':
print('==== Checking n_fold_valid')
n_tr_incl = in_tr.sum()
n_tr_incl0 = in_tr_train_valid.sum() # noqa
n_tr = len(in_tr_train_valid)
print(
'#tr_total: %d\n'
'#in_tr_train_valid: %d (%1.1f%% of all)\n'
'#in_tr: %d (%1.1f%% of tr_incl0)\n'
'fold_valid: %d/%d, mode: %s, mode_train: %s\n'
'===='
% (
n_tr,
n_tr_incl0, n_tr_incl0 / n_tr * 100,
n_tr_incl, n_tr_incl / n_tr_incl0 * 100,
fold_valid, n_fold_valid, mode, mode_train
)
)
plt.subplot(1, 2, 1)
if mode != 'all':
plt.plot(in_tr_train_valid, 'yo')
plt.plot(in_tr, 'k.')
plt.xlabel('trial')
plt.title('Included in %s fold %d/%d'
% (mode, fold_valid, n_fold_valid))
plt.subplot(1, 2, 2)
plt.plot(*self.ev_cond_dim.T, 'yo')
plt.plot(*self.ev_cond_dim[in_dcond].T, 'k.')
plt.xlabel(consts.DIM_NAMES_LONG[0])
plt.ylabel(consts.DIM_NAMES_LONG[1])
plt.title('Included in %s among %s' % (mode, mode_train))
plt.show()
print('====')
return in_dcond, in_tr, in_tr_train_valid
class Data2DRT(Data2D):
def __init__(
self,
ev_tr_dim: np.ndarray,
ch_tr_dim: np.ndarray,
rt_tr: np.ndarray,
**kwargs
):
super().__init__(**kwargs)
self.ev_tr_dim = ev_tr_dim
self.ch_tr_dim = ch_tr_dim
self.rt_tr = rt_tr
self.n_ch = consts.N_CH_FLAT
self.ev_cond_dim = np.empty(())
self.dcond_tr = np.empty(())
self.update_data()
# # self.n_cond_rt_ch, self.ev_cond_fr_dim_meanvar,
# self.ev_cond_dim, self.dcond_tr = self.dat2p_dat(
# ch_tr_dim, rt_tr, ev_tr_dim)[2:4]
def simulate_data(self, pPred_cond_rt_ch: torch.Tensor, seed=0,
rt_only=False):
torch.random.manual_seed(seed)
if rt_only:
dcond_tr = self.dcond_tr
chSim_tr_dim = self.ch_tr_dim
chSimFlat_tr = consts.ch_by_dim2ch_flat(chSim_tr_dim)
pPred_tr_rt = pPred_cond_rt_ch[dcond_tr, :, chSimFlat_tr]
rtSim_tr = npy(npt.categrnd(
probs=npt.sumto1(pPred_tr_rt, -1)
) * self.dt)
else:
dcond_tr = self.dcond_tr
pPred_tr_rt_ch = pPred_cond_rt_ch[dcond_tr, :, :]
n_tr, nt, n_ch = pPred_tr_rt_ch.shape
chSim_tr_rt_ch = npy(npt.categrnd(
probs=pPred_tr_rt_ch.reshape([n_tr, -1])))
rtSim_tr = npy((chSim_tr_rt_ch // n_ch) * self.dt)
chSim_tr = npy(chSim_tr_rt_ch % n_ch)
chs = np.array(consts.CHS)
chSim_tr_dim = np.stack([
chs[dim][chSim_tr]
for dim in range(consts.N_DIM)
], -1)
self.update_data(ch_tr_dim=chSim_tr_dim, rt_tr=rtSim_tr)
def update_data(
self,
ch_tr_dim: np.ndarray = None,
rt_tr: np.ndarray = None,
ev_tr_dim: np.ndarray = None
):
if ch_tr_dim is None:
ch_tr_dim = self.ch_tr_dim
else:
self.ch_tr_dim = ch_tr_dim
if rt_tr is None:
rt_tr = self.rt_tr
else:
self.rt_tr = rt_tr
if ev_tr_dim is None:
ev_tr_dim = self.ev_tr_dim
else:
self.ev_tr_dim = ev_tr_dim
self.ev_cond_dim, self.dcond_tr = self.dat2p_dat(
npy(ch_tr_dim), npy(rt_tr), ev_tr_dim)[2:4]
def get_data_by_cond(
self, mode='all', i_fold_valid=0, epoch=0,
mode_train='all', n_fold_valid=1,
i_fold_test=0, n_fold_test=1,
upsample_ev=1,
to_debug=False
) -> (torch.Tensor, torch.Tensor, np.ndarray, np.ndarray, np.ndarray):
"""
:param mode: 'all'|'train'|'valid'|'train_valid'|'test'
:param i_fold_valid:
:param epoch:
:param mode_train: 'all'|'easiest'
:param n_fold_valid:
:param upsample_ev: 1 to disable upsampling; ~5 looks nice
:param to_debug:
:return: ev_cond_fr_dim_meanvar, n_cond_rt_ch, in_tr, in_dcond, \
ev_cond_dim
"""
in_dcond, in_tr, in_tr_train_valid = self.get_incl(
mode, i_fold_valid, mode_train, n_fold_valid,
i_fold_test=i_fold_test, n_fold_test=n_fold_test,
to_debug=to_debug
)
n_cond_rt_ch, ev_cond_fr_dim_meanvar, ev_cond_dim, dcond_tr = \
self.dat2p_dat(
self.ch_tr_dim[in_tr],
self.rt_tr[in_tr],
self.ev_tr_dim[in_tr],
)
ev_cond_dim = self.ev_cond_dim[in_dcond]
return ev_cond_fr_dim_meanvar, n_cond_rt_ch, in_tr, in_dcond, \
ev_cond_dim
def dat2p_dat(
self,
ch_tr_dim: np.ndarray,
rt_sec: np.ndarray,
ev_tr_dim: np.ndarray,
) -> (torch.Tensor, torch.Tensor, np.ndarray, np.ndarray):
"""
:param ch_tr_dim: [tr, dim]
:param rt_sec: [tr]
:param ev_tr_dim: [tr, dim]
:return: n_cond_rt_ch[cond, rt, ch],
ev_cond_fr_dim_meanvar[dcond, fr, dim, (mean, var)],
ev_cond_dim[dcond, dim], dcond_tr[tr]
"""
nt0 = self.nt0
dt0 = self.dt0
n_ch_flat = self.n_ch
subsample_factor = self.subsample_factor
nt = int(nt0 // subsample_factor)
dt = dt0 * subsample_factor
drt = rt_sec2fr(rt_sec=rt_sec, dt=dt, nt=nt)
ch_flat = consts.ch_by_dim2ch_flat(ch_tr_dim)
ev_cond_dim, dcond_tr = unique_conds(ev_tr_dim)
n_cond_flat = len(ev_cond_dim)
ev_cond_fr_dim = torch.tensor(ev_cond_dim)[:, None, :].expand(
[-1, nt, -1])
ev_cond_fr_dim_meanvar = torch.stack([
ev_cond_fr_dim, torch.zeros_like(ev_cond_fr_dim)
], -1)
n_cond_rt_ch = torch.tensor(npg.aggregate(
np.stack([dcond_tr, drt, ch_flat.astype(int)]),
1., 'sum', [n_cond_flat, nt, n_ch_flat]
))
return n_cond_rt_ch, ev_cond_fr_dim_meanvar, ev_cond_dim, dcond_tr
def upsample_ev(ev_cond_fr_dim_meanvar: torch.Tensor,
dim_rel: int,
steps=51 # CHECKING: revert to 51
) -> torch.Tensor:
ev0 = ev_cond_fr_dim_meanvar
ev_dim_cond = ev0[:, 0, :, 0].T
evs_dim_cond = [v.unique() for v in ev_dim_cond]
dim_irr = consts.get_odim(dim_rel)
ev_rel = torch.linspace(
evs_dim_cond[dim_rel].min(), evs_dim_cond[dim_rel].max(), steps=steps
)
ev_irr = evs_dim_cond[dim_irr]
ev_rel, ev_irr = torch.meshgrid([ev_rel, ev_irr])
ev = torch.stack([v.flatten() for v in [ev_rel, ev_irr]], -1)
if dim_rel == 1:
ev = ev.flip(-1)
ev = ev[:, None, :].expand([-1, ev0.shape[1], -1])
ev = torch.stack([ev, torch.zeros_like(ev)], -1)
return ev
def ____Model_Classes____():
pass
class Dtb2DRT(TimedModule):
kind = 'None'
def __init__(self, **kwargs):
super().__init__(**kwargs)
def forward(self, ev: torch.Tensor, **kwargs) -> torch.Tensor:
"""
Note that cond is ravelled across both dims, as is necessary to
model, e.g., the other-dim-dependent bias.
:param ev: [condition, frame, dim]
:return: p_absorbed[condition, frame, ch]
"""
raise NotImplementedError()
def expand_ev(self, ev: torch.Tensor) -> torch.Tensor:
"""
:param ev: [cond, fr, dim, (mean, var)] or [cond, dim]
:return: ev[cond, fr, dim, (mean, var)]
"""
if ev.ndim == 4:
return ev
elif ev.ndim == 2:
ev_cond_dim_meanvar = torch.stack([
ev, torch.zeros_like(ev)
], -1)
ev_cond_fr_dim_meanvar = ev_cond_dim_meanvar.unsqueeze(1).expand(
[-1, self.nt, -1, -1]
)
return ev_cond_fr_dim_meanvar
else:
raise ValueError("ev dimensions must be [cond, fr, dim, (mean, "
"var)] or [cond, dim]")
class Dtb2DRTDimensionWise(Dtb2DRT):
kind = 'dimwise'
def __init__(
self,
dtb1ds: Union[sim1d.Dtb1D, Sequence[sim1d.Dtb1D]] = sim1d.Dtb1D,
to_allow_irr_ixn=False,
**kwargs
):
"""
:param dtb1ds:
:param kwargs:
"""
super().__init__(**kwargs)
if type(dtb1ds) is type:
kw = argsutil.kwdef(kwargs, {
'timer': self.timer
})
dtb1ds = [
dtb1ds(**kw)
for dim in range(consts.N_DIM)
]
self.dtb1ds = nn.ModuleList(dtb1ds) # type: nn.ModuleList[sim1d.Dtb1D]
self.to_allow_irr_ixn = to_allow_irr_ixn
if to_allow_irr_ixn:
self.kappa_rel_odim = ykt.BoundedParameter(
[0., 0.], -0.1, 0.1
)
self.kappa_rel_abs_odim = ykt.BoundedParameter(
[0., 0.], -0.1, 0.1
)
else:
self.kappa_rel_odim = ykt.BoundedParameter(
[0., 0.], 0., 0.
)
self.kappa_rel_abs_odim = ykt.BoundedParameter(
[0., 0.], 0., 0.
)
# self.kappa_odim = ykt.BoundedParameter(
# [0., 0.], -0.1, 0.1
# )
# self.kappa_abs_odim = ykt.BoundedParameter(
# [0., 0.], -0.1, 0.1
# )
def ev_bins(self):
return [dtb.ev_bin for dtb in self.dtb1ds]
# def forward(self, ev: torch.Tensor, **kwargs
# ) -> Tuple[torch.Tensor, torch.Tensor]:
# """
# :param ev: [cond, fr, dim, (mean, var)]
# :return: p_cond__ch_td[cond, ch_flat, td],
# unabs_dim_td_cond_ev[dim, td, cond, ev]
# where ch_dim = self.chs[dim, ch_flat]
# """
# ev = self.expand_ev(ev)
#
# # p_dim_cond__ch_td[dim, cond, ch, td] = P(ch_dim, td_dim | cond)
# p_dim_cond__ch_td = torch.stack([
# dtb.forawrd(ev1).permute([0, 2, 1])
# for ev1, dtb in zip(npt.p2st(ev), self.dtb1ds)
# ])
# p_dim_cond__ch_td_par = torch.stack([
# F.interpolate(p_td0, # p_td0[cond, ch, fr]
# p_td0.shape, 1./speed,
# mode='linear')
# for speed, p_td0 in zip(self.speed_par, p_dim_cond__ch_td)
# ])
#
# # return p_cond_td_ch
def get_out_dtb1ds(self, ev: torch.Tensor, return_unabs: bool):
"""
:param ev: [cond, fr, dim, (mean, var)]
:param return_unabs:
:return: p_dim_cond_td_ch, unabs_dim_td_cond_ev
"""
ev1 = ev.clone()
if self.to_allow_irr_ixn:
for dim in range(consts.N_DIM):
odim = consts.get_odim(dim)
kappa = self.dtb1ds[dim].kappa[:]
# ko = self.kappa_odim[odim] / kappa
# kao = self.kappa_abs_odim[odim] / kappa
okappa = self.dtb1ds[odim].kappa[:]
ko = self.kappa_rel_odim[odim] / kappa * okappa
kao = self.kappa_rel_abs_odim[odim] / kappa * okappa
# ko = self.kappa_odim[odim]
# kao = self.kappa_abs_odim[odim]
ev1[:, :, dim, 0] = (
ev[:, :, dim, 0]
+ ev[:, :, odim, 0] * ko
+ ev[:, :, odim, 0].abs() * kao
)
# ev[:, :, dim, 1] = (
# ev0[:, :, dim, 1]
# + (ev0[:, :, odim, 1] * ko) ** 2
# + ((
# ev0[:, :, odim, 1]
# # + ev0[:, :, odim, 0] ** 2
# # - ev0[:, :, odim, 0].abs() ** 2 # see NOTE
# ) * kao) ** 2
# )
# NOTE: in fact we need to subtract mean(abs(ev)) instead of
# abs(mean(ev)), so this is
# incorrect when the evidence ever changes sign within the
# subsampled time step. The best way around this is to give
# subsample_factor directly to the Dtb module, and cache the
# subsampled evidence to avoid additional time cost.
# See: https://stats.stackexchange.com/a/89909/121823
outs = [
dtb(ev11, return_unabs=return_unabs)
for ev11, dtb in zip(ev1.permute([2, 0, 1, 3]), self.dtb1ds)
]
if return_unabs:
p_dim_cond_td_ch = torch.stack([v[0] for v in outs])
unabs_dim_td_cond_ev = torch.stack([v[1] for v in outs])
else:
# p_dim_cond__ch_td[dim, cond, ch, td] = P(ch_dim, td_dim | cond)
p_dim_cond_td_ch = torch.stack(outs)
unabs_dim_td_cond_ev = None
return p_dim_cond_td_ch, unabs_dim_td_cond_ev
class Dtb2DRTSpeed(Dtb2DRTDimensionWise):
kind = 'speed'
def __init__(
self,
speed_par0=(0.5, 0.5),
speed_par_lb=(0.01, 0.01),
speed_par_ub=(1., 1.),
freeze_speed=(False, False),
**kwargs
):
"""
Ser: Just freeze one of the dim's speed_par at 1, the other at 0.
Par: Just freeze both dims' speed_par at 1.
:param speed_par0:
:param speed_par_lb:
:param speed_par_ub:
:param freeze_speed:
:param kwargs:
"""
super().__init__(**kwargs)
self.speed_par = nn.ModuleList([
ykt.BoundedParameter(th0, lb, ub)
for dim, (th0, lb, ub)
in enumerate(zip(speed_par0, speed_par_lb, speed_par_ub))
])
for dim, frz in enumerate(freeze_speed):
if frz:
npt.freeze(self.speed_par[dim])
class Dtb2DRTSerial(Dtb2DRTDimensionWise):
"""
"""
kind = 'serial'
def __init__(self, **kwargs):
super().__init__(**kwargs)
def forward(self, ev: torch.Tensor, return_unabs=False, **kwargs
) -> Tuple[torch.Tensor, torch.Tensor]:
"""
:param ev: [cond, fr, dim, (mean, var)]
:return:
"""
ev = self.expand_ev(ev)
p_dim_cond_td_ch, unabs_dim_td_cond_ev = self.get_out_dtb1ds(
ev, return_unabs)
p_cond_td_ch = self.get_p_cond_td_ch(p_dim_cond_td_ch)
return p_cond_td_ch, unabs_dim_td_cond_ev
@staticmethod
def get_p_cond_td_ch(p_dim_cond_td_ch: torch.Tensor) -> torch.Tensor:
p_dim_cond_ch_td = p_dim_cond_td_ch.permute([0, 1, 3, 2])
nt = p_dim_cond_ch_td.shape[-1]
n_cond = p_dim_cond_ch_td.shape[1]
p_cond_ch_td = torch.empty(n_cond, consts.N_CH_FLAT, nt)
for ch_flat, ch_dims in enumerate(consts.CHS_TENSOR.T):
p_cond_ch_td[:, ch_flat, :] = npt.conv_t(
p_dim_cond_ch_td[0, :, ch_dims[0], :].unsqueeze(0),
p_dim_cond_ch_td[1, :, ch_dims[1], :].unsqueeze(1),
groups=n_cond
)
# # CHECKED
# plt.imshow(npy(p_cond__ch_td[:, 3, :].sum([1])).reshape([9, 9]))
# plt.colorbar()
# plt.show()
p_cond_td_ch = p_cond_ch_td.permute([0, 2, 1])
return p_cond_td_ch
class Dtb2DRTParallel(Dtb2DRTDimensionWise):
"""
"""
kind = 'parallel'
def __init__(self, **kwargs):
super().__init__(**kwargs)
def forward(self, ev: torch.Tensor, return_unabs=False, **kwargs
) -> Tuple[torch.Tensor, torch.Tensor]:
"""
:param ev: [cond, fr, dim, (mean, var)] or [cond, dim]
:return: p_cond_td_ch[cond, td, ch],
unabs_dim_td_cond_ev[dim, td, cond, ev]
"""
ev = self.expand_ev(ev)
p_dim_cond_td_ch, unabs_dim_td_cond_ev = self.get_out_dtb1ds(
ev, return_unabs)
p_cond_td_ch = self.get_p_cond_td_ch(p_dim_cond_td_ch)
return p_cond_td_ch, unabs_dim_td_cond_ev
@staticmethod
def get_p_cond_td_ch(p_dim_cond_td_ch: torch.Tensor) -> torch.Tensor:
p_dim_cond_ch_td = p_dim_cond_td_ch.permute([0, 1, 3, 2])
nt = p_dim_cond_ch_td.shape[-1]
n_cond = p_dim_cond_ch_td.shape[1]
p_cond_ch_td = torch.empty(n_cond, consts.N_CH_FLAT, nt)
for ch_flat, ch_dims in enumerate(consts.CHS_TENSOR.T):
out = npt.max_distrib(
p_dim_cond_ch_td[
[0, 1], :, ch_dims[[0, 1]], :
].transpose(1, 2)
)[0]
p_cond_ch_td[:, ch_flat, :] = out.transpose(0, 1)
# # CHECKED
# cond12 = 2 * 9 + 4
# ps = []
# for dim1, color in zip([0, 1], ['r', 'b']):
# p1 = p_dim_cond__ch_td[dim1, cond12, ch_dims[dim1], :]
# ps.append(p1)
# plt.plot(npy(npt.sumto1(p1)), color)
# p2 = p_cond__ch_td[cond12, ch_flat, :]
# plt.plot(npy(npt.sumto1(p2)), 'k--')
# plt.show()
#
# print(
# (ps[0].sum(-1) * ps[1].sum(-1) - p2.sum(-1)).abs().max()
# )
#
# # CHECKED
# plt.imshow(npy(p_cond__ch_td[:, 3, :].sum([1])).reshape([9, 9]))
# plt.colorbar()
# plt.show()
p_cond_td_ch = p_cond_ch_td.permute([0, 2, 1])
return p_cond_td_ch
class Dtb2DRTTargetwise(Dtb2DRT):
pass
class FitRT2D(TimedModule):
"""
Put DTB, Tnd, and Lase together to fit the data.
"""
def __init__(
self,
dtb2d: Dtb2DRT = Dtb2DRTSerial,
tnds: Union[sim1d.Tnd, Sequence[sim1d.Tnd]] = sim1d.TndLogNorm,
lapse: sim1d.LapseUniform = sim1d.LapseUniform,
to_normalize_within_cond=None,
**kwargs
):
super().__init__(**kwargs)
if 'sumto1_wi_cond' in kwargs:
assert to_normalize_within_cond is None, \
'give either sumto1_wi_cond or to_normalize_within_cond, ' \
'but not both!'
to_normalize_within_cond = kwargs['sumto1_wi_cond']
elif to_normalize_within_cond is None:
to_normalize_within_cond = True
self.chs = torch.tensor(consts.CHS) # [dim, ch_flat] = ch_dim
if type(dtb2d) is type:
dtb2d = dtb2d(**{**kwargs, 'timer': self.timer})
self.dtb = dtb2d
n_tnd = consts.N_CH_FLAT
if type(tnds) is type:
tnds = [
tnds(**{**kwargs, 'timer': self.timer})
for i in range(n_tnd)
]
else:
assert len(tnds) == n_tnd
self.tnds = nn.ModuleList(tnds) # the order is the same as chs
if type(lapse) is type:
lapse = lapse(**{**kwargs, 'timer': self.timer})
self.lapse = lapse
self.to_normalize_within_cond = to_normalize_within_cond
def forward(self, ev: torch.Tensor):
"""
:param ev: [condition, frame, dim, (mean, var)]
:return: p_cond__rt_ch[condition, frame, ch_flat]
"""
nt = ev.shape[1]
t_all = self.get_t_all(nt=nt)
n_cond = ev.shape[0]
p_ch_cond_td = self.dtb(ev=ev)[0].permute([2, 0, 1])
p_ch_cond_rt = []
p_tnds = []
for ch, (p_cond_td, tnd) in enumerate(zip(p_ch_cond_td, self.tnds)):
p_tnd = tnd.get_p_tnd(t_all)
p_cond_rt = npt.conv_t(
p_cond_td[None], # [1, cond, fr]
p_tnd[None, None, :].expand([n_cond, 1, nt]), # [cond, 1, fr]
groups=n_cond
)
# CHECKED
# plt.plot(*npys(t_all, p_cond_td[0, :]))
# plt.plot(*npys(t_all, p_cond_rt[0, 0, :]));
# plt.show()
p_tnds.append(p_tnd)
p_ch_cond_rt.append(p_cond_rt)
p_ch_cond_rt = torch.stack(p_ch_cond_rt)
p_cond_rt_ch = p_ch_cond_rt.permute([1, 2, 0])
p_cond_rt_ch = self.lapse(p_cond_rt_ch)
# CHECKING: to_normalize_within_cond will have to be True - should
# make RT prediction lines from serial parallel each other - if not
# fix something
if self.to_normalize_within_cond:
p_cond_rt_ch = p_cond_rt_ch / p_cond_rt_ch.sum([1, 2], keepdim=True)
p_cond_rt_ch[torch.isnan(p_cond_rt_ch)] = 0.
return p_cond_rt_ch
def load_state_dict(self, state_dict, *args, strict=False, **kwargs):
state_dict = sim1d.Dtb1D.update_state_dict(state_dict)
super().load_state_dict(state_dict, *args, strict=strict, **kwargs)
class RTNonparam2D(Dtb2DRT):
"""
Fit 1D RT distrib per 1D condition; take choice from data
See also:
Decision2D_bef_Cosyne2019/+Fit/+D2/+RT/+Td2Tnd/main_td2tnd.m
Decision2D_bef_Cosyne2019/+Fit/+D2/+RT/+Td2Tnd/Main.m
"""
kind = 'none'
n_dim = 2
def __init__(
self,
ev_cond_dim: np.ndarray,
# n_cond_ch: np.ndarray,
n_cond_rt_ch: np.ndarray,
loc0=0.4,
disper0=0.1,
disper_ub=0.95,
thres_n_tr=10,
correct_only=False,
exclude_0coh=False,
sumto1_wi_cond=True,
distrib_kind='lognorm',
disper_kind='cv',
**kwargs
):
"""
:param ev_cond_dim: [condFlat, dim] = ev_cond_dim
:param n_cond_ch: [condFlat, chFlat] = N(chFlat | condFlat)
"""
super().__init__(**kwargs)
n_cond_ch = npy(n_cond_rt_ch).sum(1)
self.distrib_kind = distrib_kind
assert disper_kind in ('cv', 'sd')
self.disper_kind = disper_kind
self.ev_cond_dim = ev_cond_dim
self.nCondFlat = self.ev_cond_dim.shape[0]
self.thres_n_tr = thres_n_tr
self.sumto1_wi_cond = sumto1_wi_cond
self.correct_only = correct_only
self.exclude_0coh = exclude_0coh
self.n_cond_ch = n_cond_ch
self.p_cond__ch = torch.tensor(np2.sumto1(self.n_cond_ch, -1))
self.conds_dim = []
self.dCond_dim = []
self.nCond_dim = []
for dim in range(self.n_dim):
conds, dcond = np.unique(self.ev_cond_dim[:, dim],
return_inverse=True)
self.conds_dim.append(conds)
self.dCond_dim.append(dcond)
self.nCond_dim.append(len(conds))
self.dCond_dim = np.stack(self.dCond_dim, -1)
if self.disper_kind == 'cv':
self.loc_dim_cond = nn.ModuleList([
ykt.BoundedParameter(
np.zeros([nCond, consts.N_CH]) + loc0, 0.05, 3.0
) for nCond in self.nCond_dim])
self.disper_dim_cond = nn.ModuleList([
ykt.BoundedParameter(
np.zeros([nCond, consts.N_CH]) + disper0, 0.01, disper_ub
) for nCond in self.nCond_dim])
elif self.disper_kind == 'sd':
# Determine mean and SD of RT in the data
n_cond_rt_ch = npt.tensor(n_cond_rt_ch)
n_cond0_cond1_rt_ch0_ch1 = n_cond_rt_ch.reshape([
*self.nCond_dim, -1, 2, 2
])
n_cond0_cond1_ch0_ch1 = n_cond0_cond1_rt_ch0_ch1.sum(2)
# n_cond0_cond1_rt_ch0_ch1[
# n_cond0_cond1_ch0_ch1[:, :, None, :, :].expand_as(
# n_cond0_cond1_rt_ch0_ch1
# ) < self.thres_n_tr] = 1. / self.nt
meanRt_cond0_cond1_ch0_ch1 = npy(npt.nan2v(npt.mean_distrib(
npt.sumto1(n_cond0_cond1_rt_ch0_ch1, 2),
self.t_all[None, None, :, None, None],
axis=2
), 0.051).clamp_min(0.051))
sdRt_cond0_cond1_ch0_ch1 = npy(npt.nan2v(npt.std_distrib(
npt.sumto1(n_cond0_cond1_rt_ch0_ch1, 2),
self.t_all[None, None, :, None, None],
axis=2
), 0.051).clamp_min(0.051))
to_excl = n_cond0_cond1_ch0_ch1 < self.thres_n_tr
meanRt_cond0_cond1_ch0_ch1[to_excl] = np.nan
sdRt_cond0_cond1_ch0_ch1[to_excl] = np.nan
minMeanRt_dim_condDim_chDim = [
np2.nan2v(np.nanmin(meanRt_cond0_cond1_ch0_ch1, (1, 3)),
0.051),
np2.nan2v(np.nanmin(meanRt_cond0_cond1_ch0_ch1, (0, 2)),
0.051)
]
minSdRt_dim_condDim_chDim = [
np2.nan2v(np.nanmin(sdRt_cond0_cond1_ch0_ch1, (1, 3)),
0.051),
np2.nan2v(np.nanmin(sdRt_cond0_cond1_ch0_ch1, (0, 2)),
0.051)
]
self.loc_dim_cond = nn.ModuleList([
ykt.BoundedParameter(
v / 2, 0.05, v * disper_ub
) for v in minMeanRt_dim_condDim_chDim])
self.disper_dim_cond = nn.ModuleList([
ykt.BoundedParameter(
np.sqrt((v ** 2) / 2), 0.02, np.sqrt((v ** 2) * disper_ub)
) for v in minSdRt_dim_condDim_chDim])
# n_dim_condDim_rt_chDim = [
# n_cond0_cond1_rt_ch0_ch1.sum((1, 4)),
# n_cond0_cond1_rt_ch0_ch1.sum((0, 3))
# ]
# meanRt_dim_condDim_chDim = [
# npt.nan2v(
# npt.mean_distrib(
# npt.sumto1(v, 1),
# self.t_all[None, :, None], axis=1),
# 0.051
# ).clamp_min(0.051) for v in n_dim_condDim_rt_chDim
# ]
# sdRt_dim_condDim_chDim = [
# npt.nan2v(
# npt.std_distrib(
# npt.sumto1(v, 1),
# self.t_all[None, :, None], axis=1),
# 0.051
# ).clamp_min(0.051) for v in n_dim_condDim_rt_chDim
# ]
# self.loc_dim_cond = nn.ModuleList([
# ykt.BoundedParameter(
# v, 0.05, v * 2
# ) for v in meanRt_dim_condDim_chDim])
# self.disper_dim_cond = nn.ModuleList([
# ykt.BoundedParameter(
# v, 0.05, ((v ** 2) * 2).sqrt()
# ) for v in sdRt_dim_condDim_chDim])
# print([v[:] for v in self.disper_dim_cond])
# print('--')
class RTNonparam2DSer(RTNonparam2D):
kind = 'ser_np'
def get_p_cond_td_ch(self, p_dim_cond_td_ch: torch.Tensor) -> torch.Tensor:
return Dtb2DRTSerial.get_p_cond_td_ch(p_dim_cond_td_ch)
def forward(self, ev: torch.Tensor, **kwargs
) -> Tuple[torch.Tensor, Any]:
"""
Only uses ev of the first frame.
:param ev: [condition, frame, dim, (meanvar)]
:return: p_cond__rt_ch[condition, frame, ch_flat], None
"""
nt = ev.shape[1]
ev = ev[:, 0, :, 0] # ev[cond, dim]
n_tr = ev.shape[0]
dCond_dim = np.empty([n_tr, self.n_dim], dtype=int)
for dim in range(self.n_dim):
for icond in range(self.nCond_dim[dim]):
incl = ev[:, dim] == self.ev_cond_dim[icond, dim]
dCond_dim[incl, dim] = icond
t_all = self.get_t_all(nt=nt)
# p_dim_cond_td_ch[dim, tr, td, ch]
p_dim_cond_td_ch = torch.zeros(
self.n_dim, self.nCondFlat, nt, consts.N_CH)
for dim, (loc, disper) in enumerate(zip(
self.loc_dim_cond, self.disper_dim_cond)):
if self.disper_kind == 'cv':
std = loc[:] * disper[:]
elif self.disper_kind == 'sd':
std = disper[:]
else:
raise ValueError()
if self.distrib_kind == 'lognorm':
p_cond_td_ch1 = npt.lognorm_pmf(
t_all[:, None, None],
loc[None, :, :], std[None, :, ]).transpose(0, 1)
elif self.distrib_kind == 'invnorm':
p_cond_td_ch1 = npt.inv_gaussian_pmf_mean_stdev(
t_all[:, None, None],
loc[None, :, :], std[None, :, ]).transpose(0, 1)
else:
raise ValueError()
# p_cond_td_ch = npt.inv_gaussian_pmf_mean_stdev(
# t_all[None, :, None],
# loc[:, None, :], std[:, None, :])
for iCond, cond in enumerate(self.conds_dim[dim]):
incl = self.ev_cond_dim[:, dim] == cond
p_dim_cond_td_ch[dim, incl, :, :] = p_cond_td_ch1[[iCond], :]
p_cond_td_ch = self.get_p_cond_td_ch(p_dim_cond_td_ch)
if self.sumto1_wi_cond:
p_cond_td_ch = (
npt.sumto1(p_cond_td_ch, 1)
* self.p_cond__ch[:, None, :])
else:
p_cond_td_ch = p_cond_td_ch * self.p_cond__ch[:, None, :]
p_cond_ch_td = p_cond_td_ch.permute([0, 2, 1])
# incl[condFlat, chFlat]
incl = self.n_cond_ch >= self.thres_n_tr
if self.exclude_0coh:
incl = incl & ((self.ev_cond_dim != 0).all(-1)[:, None])
if self.correct_only:
ch_sign = consts.ch_bool2sign(consts.CHS_ARRAY)
for dim in range(consts.N_DIM):
incl = incl & (
np.sign(self.ev_cond_dim[:, [dim]]) != -ch_sign[[dim], :])
# axs = plt2.GridAxes(1, 4)
# for col in range(4):
# plt.sca(axs[0, col])
# plt.imshow(incl[:, col].reshape([9, 9]))
# plt.show() # CHECKED
p_cond_ch_td[~incl] = self.p_cond__ch[~incl][:, None] / nt
p_cond_td_ch = p_cond_ch_td.permute([0, 2, 1])
# p_cond_td_ch = p_cond_td_ch * torch.tensor(
# incl[:, None, :], dtype=torch.float)
# for cond in range(self.nCondFlat):
# for ch in range(consts.N_CH_FLAT):
# p_cond__ch
p_tr__td_ch = torch.empty(n_tr, nt, consts.N_CH_FLAT)
for iCond, ev_cond1 in enumerate(self.ev_cond_dim):
incl = torch.all(ev == torch.tensor(ev_cond1[None, :]), dim=1)
p_tr__td_ch[incl, :] = p_cond_td_ch[iCond, :]
# if not self.sumto1_wi_cond:
# print(p_tr__td_ch.sum([1, 2])) # CHECKED sum within cond
return p_tr__td_ch, None
class RTNonparam2DPar(RTNonparam2DSer):
kind = 'par_np'
def get_p_cond_td_ch(self, p_dim_cond_td_ch: torch.Tensor) -> torch.Tensor:
return Dtb2DRTParallel.get_p_cond_td_ch(p_dim_cond_td_ch)
def ____Plot____():
pass
def plot_p_ch_vs_ev(ev_cond: Union[torch.Tensor, np.ndarray],
n_ch: Union[torch.Tensor, np.ndarray],
style='pred',
ax: plt.Axes = None,
dim_rel=0,
group_dcond_irr : Iterable[Iterable[int]] = None,
cmap: Union[str, Callable] = 'cool',
kw_plot=(),
) -> Iterable[plt.Line2D]:
"""
@param ev_cond: [condition, dim] or [condition, frame, dim, (mean, var)]
@type ev_cond: torch.Tensor
@param n_ch: [condition, ch] or [condition, rt_frame, ch]
@type n_ch: torch.Tensor
@return: hs[cond_irr][0] = Line2D, conds_irr
"""
if ax is None:
ax = plt.gca()
if ev_cond.ndim != 2:
assert ev_cond.ndim == 4
ev_cond = npt.p2st(ev_cond.mean(1))[0]
if n_ch.ndim != 2:
assert n_ch.ndim == 3
n_ch = n_ch.sum(1)
ev_cond = npy(ev_cond)
n_ch = npy(n_ch)
n_cond_all = n_ch.shape[0]
ch_rel = np.repeat(
np.array(consts.CHS[dim_rel])[None, :], n_cond_all, 0)
n_ch = n_ch.reshape([-1])
ch_rel = ch_rel.reshape([-1])
dim_irr = consts.get_odim(dim_rel)
conds_rel, dcond_rel = np.unique(ev_cond[:, dim_rel], return_inverse=True)
conds_irr, dcond_irr = np.unique(
np.abs(ev_cond[:, dim_irr]), return_inverse=True)
if group_dcond_irr is not None:
conds_irr, dcond_irr = group_conds(conds_irr, dcond_irr,
group_dcond_irr)
n_conds = [len(conds_rel), len(conds_irr)]
n_ch_rel = npg.aggregate(
np.stack([
ch_rel,
np.repeat(dcond_irr[:, None], consts.N_CH_FLAT, 1).flatten(),
np.repeat(dcond_rel[:, None], consts.N_CH_FLAT, 1).flatten(),
]),
n_ch, 'sum', [consts.N_CH, n_conds[1], n_conds[0]]
)
p_ch_rel = n_ch_rel[1] / n_ch_rel.sum(0)
hs = []
for dcond_irr1, p_ch1 in enumerate(p_ch_rel):
if type(cmap) is str:
color = plt.get_cmap(cmap, n_conds[1])(dcond_irr1)
else:
color = cmap(n_conds[1])(dcond_irr1)
kw1 = get_kw_plot(style, color=color, **dict(kw_plot))
h = ax.plot(conds_rel, p_ch1, **kw1)
hs.append(h)
plt2.box_off(ax=ax)
x_lim = ax.get_xlim()
plt2.detach_axis('x', amin=x_lim[0], amax=x_lim[1], ax=ax)
plt2.detach_axis('y', amin=0, amax=1, ax=ax)
ax.set_yticks([0, 0.5, 1])
ax.set_yticklabels(['0', '', '1'])
ax.set_xlabel('evidence')
ax.set_ylabel(r"$\mathrm{P}(z=1 \mid c)$")
return hs, conds_irr
def group_conds(conds_irr, dcond_irr, group_dcond_irr):
dcond_irr1 = np.empty_like(dcond_irr)
conds_irr1 = []
for group, dconds_in_group in enumerate(group_dcond_irr):
incl = np.any(dcond_irr[:, None]
== np.array(dconds_in_group)[None, :], 1)
dcond_irr1[incl] = group
conds_irr1.append(conds_irr[dconds_in_group])
dcond_irr = dcond_irr1
conds_irr = conds_irr1
return conds_irr, dcond_irr
def plot_rt_vs_ev(
ev_cond: Union[torch.Tensor, np.ndarray],
n_cond__rt_ch: Union[torch.Tensor, np.ndarray],
dim_rel=0,
group_dcond_irr: Iterable[Iterable[int]] = None,
correct_only=True,
cmap: Union[str, Callable] = 'cool',
kw_plot=(),
**kwargs
) -> Tuple[Sequence[Sequence[plt.Line2D]], Sequence[Sequence[float]]]:
"""
@param ev_cond: [condition]
@type ev_cond: torch.Tensor
@param n_cond__rt_ch: [condition, frame, ch]
@type n_cond__rt_ch: torch.Tensor
@return:
"""
if ev_cond.ndim != 2:
assert ev_cond.ndim == 4
ev_cond = npt.p2st(ev_cond.mean(1))[0]
assert n_cond__rt_ch.ndim == 3
ev_cond = npy(ev_cond)
n_cond__rt_ch = npy(n_cond__rt_ch)
nt = n_cond__rt_ch.shape[1]
n_ch = n_cond__rt_ch.shape[2]
n_cond_all = n_cond__rt_ch.shape[0]
ix_sort = np.argsort(ev_cond[:, dim_rel])
ev_cond = ev_cond[ix_sort]
n_cond__rt_ch = n_cond__rt_ch[ix_sort]
dim_irr = consts.get_odim(dim_rel)
conds_rel, dcond_rel = np.unique(ev_cond[:, dim_rel], return_inverse=True)
conds_irr, dcond_irr = np.unique(
np.abs(ev_cond[:, dim_irr]), return_inverse=True)
if group_dcond_irr is not None:
conds_irr, dcond_irr = group_conds(conds_irr, dcond_irr,
group_dcond_irr)
n_conds = [len(conds_rel), len(conds_irr)]
if correct_only: # simply split at zero
hs = []
rts = []
for dcond_irr1 in range(n_conds[1]):
for ch1 in range(consts.N_CH):
ch_sign = consts.ch_bool2sign(ch1)
incl = dcond_irr == dcond_irr1
ev_cond1 = conds_rel
n_cond__rt_ch1 = np.empty([n_conds[0], nt, n_ch])
for dcond_rel1 in range(n_conds[0]):
incl1 = incl & (dcond_rel1 == dcond_rel)
n_cond__rt_ch1[dcond_rel1] = n_cond__rt_ch[incl1].sum(0)
if type(cmap) is str:
color = plt.get_cmap(cmap, n_conds[1])(dcond_irr1)
else:
color = cmap(n_conds[1])(dcond_irr1)
chs = np.array(consts.CHS)
n_cond__rt_ch11 = np.zeros(n_cond__rt_ch1.shape[:2]
+ (consts.N_CH,))
# # -- Pool across ch_irr
for ch_rel in range(consts.N_CH):
incl = chs[dim_rel] == ch_rel
n_cond__rt_ch11[:, :, ch_rel] = n_cond__rt_ch1[
:, :, incl].sum(-1)
hs1, rts1 = sim1d.plot_rt_vs_ev(
ev_cond1, n_cond__rt_ch11,
color=color,
correct_only=correct_only,
kw_plot=kw_plot,
**kwargs,
)
hs.append(hs1)
rts.append(rts1)
else:
raise NotImplementedError()
return hs, rts
def plot_rt_distrib(
n_cond_rt_ch: np.ndarray, ev_cond_dim: np.ndarray,
abs_cond=True,
lump_wrong=True,
dt=consts.DT,
colors=None, alpha=1.,
alpha_face=0.5,
smooth_sigma_sec=0.05,
to_normalize_max=False,
to_cumsum=False,
to_exclude_last_frame=True,
to_skip_zero_trials=False,
label='',
# to_exclude_bins_wo_trials=10,
kw_plot=(),
fig=None,
axs=None,
to_use_sameaxes=True,
):
"""
:param n_cond_rt_ch:
:param ev_cond_dim:
:param abs_cond:
:param lump_wrong:
:param dt:
:param gs:
:param colors:
:param alpha:
:param smooth_sigma_sec:
:param kw_plot:
:param axs:
:return: axs, p_cond01__rt_ch01, p_cond01__rt_ch01_sm, hs
"""
if colors is None:
colors = ['red', 'blue']
elif type(colors) is str:
colors = [colors] * 2
else:
assert len(colors) == 2
nt = n_cond_rt_ch.shape[1]
t_all = np.arange(nt) * dt + dt
out = np.meshgrid(
np.unique(ev_cond_dim[:, 0]),
np.unique(ev_cond_dim[:, 1]),
np.arange(nt), np.arange(2), np.arange(2),
indexing='ij'
)
cond0, cond1, fr, ch0, ch1 = [v.flatten() for v in out]
from copy import deepcopy
n0 = deepcopy(n_cond_rt_ch)
if to_exclude_last_frame:
n0[:, -1, :] = 0.
n0 = n0.flatten()
def sign_cond(v):
v1 = np.sign(v)
v1[v == 0] = 1
return v1
if abs_cond:
ch0 = consts.ch_bool2sign(ch0)
ch1 = consts.ch_bool2sign(ch1)
# 1 = correct, -1 = wrong
ch0 = sign_cond(cond0) * ch0
ch1 = sign_cond(cond1) * ch1
cond0 = np.abs(cond0)
cond1 = np.abs(cond1)
ch0 = consts.ch_sign2bool(ch0).astype(int)
ch1 = consts.ch_sign2bool(ch1).astype(int)
else:
raise ValueError()
if lump_wrong:
# treat all choices as correct when cond == 0
ch00 = ch0 | (cond0 == 0)
ch10 = ch1 | (cond1 == 0)
ch0 = (ch00 & ch10)
ch1 = np.ones_like(ch00, dtype=int)
cond_dim = np.stack([cond0, cond1], -1)
conds = []
dcond_dim = []
for cond in cond_dim.T:
conds1, dcond1 = np.unique(cond, return_inverse=True)
conds.append(conds1)
dcond_dim.append(dcond1)
dcond_dim = np.stack(dcond_dim)
n_cond01_rt_ch01 = npg.aggregate([
*dcond_dim, fr, ch0, ch1
], n0, 'sum', [*(np.amax(dcond_dim, 1) + 1), nt, consts.N_CH, consts.N_CH])
p_cond01__rt_ch01 = np2.nan2v(n_cond01_rt_ch01
/ n_cond01_rt_ch01.sum((2, 3, 4),
keepdims=True))
n_conds = p_cond01__rt_ch01.shape[:2]
# # CHECKED
# print(p_cond01__rt_ch01.sum((2, 3))[:, :, 1])
if axs is None:
axs = plt2.GridAxes(
n_conds[1], n_conds[0],
left=0.6, right=0.3,
bottom=0.45, top=0.74,
widths=[1], heights=[1],
wspace=0.04, hspace=0.04,
)
# if fig is None:
# fig = plt.figure(figsize=[6, 6])
# gs = plt.GridSpec(
# nrows=n_conds[1], ncols=n_conds[0],
# left=0.1, right=0.95,
# bottom=0.075, top=0.9,
# figure=fig,
# )
# # fig = plt.figure(figsize=[7.5, 7.5])
# # gs = plt.GridSpec(
# # nrows=n_conds[1], ncols=n_conds[0],
# # left=0.05, right=0.98,
# # bottom=0.07, top=0.95,
# # hspace=0.04
# # )
#
# axs = np.empty(n_conds[1::-1], dtype=np.object)
# for row in range(axs.shape[0]):
# for col in range(axs.shape[1]):
# axs[row, col] = plt.subplot(gs[row, col])
kw_label = {
'fontsize': 12,
}
pad = 8
axs[0, 0].set_title('strong\nmotion', pad=pad, **kw_label)
axs[0, -1].set_title('weak\nmotion', pad=pad, **kw_label)
axs[0, 0].set_ylabel('strong\ncolor', labelpad=pad, **kw_label)
axs[-1, 0].set_ylabel('weak\ncolor', labelpad=pad, **kw_label)
if smooth_sigma_sec > 0:
from scipy import signal, stats
sigma_fr = smooth_sigma_sec / dt
width = np.ceil(sigma_fr * 2.5).astype(int)
kernel = stats.norm.pdf(np.arange(-width, width+1), 0, sigma_fr)
kernel = np2.vec_on(kernel, 2, 5)
p_cond01__rt_ch01_sm = signal.convolve(
p_cond01__rt_ch01, kernel,
mode='same'
)
else:
p_cond01__rt_ch01_sm = p_cond01__rt_ch01.copy()
if to_cumsum:
p_cond01__rt_ch01_sm = np.cumsum(p_cond01__rt_ch01_sm, axis=2)
if to_normalize_max:
p_cond01__rt_ch01_sm = np2.nan2v(
p_cond01__rt_ch01_sm
/ np.amax(np.abs(p_cond01__rt_ch01_sm),
(2, 3, 4), keepdims=True)
)
n_row = n_conds[1]
n_col = n_conds[0]
for dcond0 in range(n_conds[0]):
for dcond1 in range(n_conds[1]):
row = n_row - 1 - dcond1
col = n_col - 1 - dcond0
ax = axs[row, col] # type: plt.Axes
for ch0 in [0, 1]:
for ch1 in [0, 1]:
if lump_wrong and ch1 == 0:
continue
p1 = p_cond01__rt_ch01_sm[dcond0, dcond1, :, ch0, ch1]
kw = {
'linewidth': 1,
'color': colors[ch1],
'alpha': alpha,
'zorder': 1,
**dict(kw_plot)
}
y = p1 * consts.ch_bool2sign(ch0)
p_cond01__rt_ch01_sm[dcond0, dcond1, :, ch0, ch1] = y
# if 'linestyle' in kw_plot.keys(): # CHECKED
# if kw_plot['linestyle'] == ':':
# print(np.sum(np.abs(y)))
# print('--')
if to_skip_zero_trials and np.sum(np.abs(y) ) < 1e-2:
h = None
else:
h = ax.plot(
t_all, y,
label=label if ch0 == 1 and ch1 == 1 else None,
**kw)
ax.fill_between(
t_all, 0, y,
ec='None',
fc=kw['color'],
alpha=alpha_face,
zorder=-1
)
plt2.box_off(ax=ax)
ax.axhline(0, color='k', linewidth=0.5)
ax.set_yticklabels([])
if row < n_row - 1 or col > 0:
ax.set_xticklabels([])
ax.set_xticks([])
plt2.box_off(['bottom'], ax=ax)
else:
ax.set_xlabel('RT (s)')
# if col > 0:
ax.set_yticks([])
ax.set_yticklabels([])
plt2.box_off(['left'], ax=ax)
plt2.detach_axis('x', 0, 5, ax=ax)
if to_use_sameaxes:
plt2.sameaxes(axs)
axs[-1, 0].set_xlabel('RT (s)')
return axs, p_cond01__rt_ch01, p_cond01__rt_ch01_sm, h
def plot_p_tnd1(model, d=None, data_mode=None):
fig = plt.figure('p_tnd', figsize=[4, 3.5])
gs = plt.GridSpec(
nrows=2, ncols=2,
left=0.2, right=0.95, bottom=0.25, top=0.95,
)
for ch0 in range(consts.N_CH):
for ch1 in range(consts.N_CH):
ch_flat = consts.ch_by_dim2ch_flat(np.array([ch0, ch1]))
ax = plt.subplot(gs[ch1, ch0]) # type: plt.Axes # noqa
model.tnds[ch_flat].plot_p_tnd()
ax.set_ylim(top=1)
ax.set_yticks([0, 1])
if ch1 == 0:
ax.set_xticklabels([])
ax.set_xlabel('')
if ch0 == 1:
ax.set_yticklabels([])
ax.set_ylabel('')
return fig, d
def plot_bound(model, d=None, data_mode=None):
fig = plt.figure('bound', figsize=[4, 2])
gs = plt.GridSpec(
nrows=1, ncols=2,
left=0.2, right=0.95, bottom=0.25, top=0.95
)
for dim_rel in range(consts.N_DIM):
plt.subplot(gs[0, dim_rel])
if hasattr(model.dtb, 'dtb1ds'):
model.dtb.dtb1ds[dim_rel].plot_bound(color='k')
return fig, d
def plot_ch(model, d, data_mode='train_valid'):
data = d['data_' + data_mode]
out = d['out_' + data_mode]
target = d['target_' + data_mode]
fig = plt.figure('ch', figsize=[4, 2])
gs = plt.GridSpec(
nrows=1, ncols=2,
left=0.2, right=0.95, bottom=0.25, top=0.95
)
for dim_rel in range(consts.N_DIM):
plt.subplot(gs[0, dim_rel])
plot_p_ch_vs_ev(data, out, style='pred',
dim_rel=dim_rel)
plot_p_ch_vs_ev(data, target, style='data',
dim_rel=dim_rel)
return fig, d
def plot_rt(model, d, data_mode='train_valid'):
data = d['data_' + data_mode]
out = d['out_' + data_mode]
target = d['target_' + data_mode]
fig = plt.figure('rt', figsize=[4, 2])
gs = plt.GridSpec(
nrows=1, ncols=2,
left=0.2, right=0.95, bottom=0.25, top=0.95
)
for dim_rel in range(consts.N_DIM):
plt.subplot(gs[0, dim_rel])
plot_rt_vs_ev(data, out, style='pred',
dim_rel=dim_rel, dt=model.dt)
plot_rt_vs_ev(data, target, style='data',
dim_rel=dim_rel, dt=model.dt)
return fig, d
def plot_rt_distrib1(model, d, data_mode='train_valid'):
data = d['data_' + data_mode]
out = d['out_' + data_mode]
target = d['target_' + data_mode]
fig = plt.figure('rtdstr', figsize=[4, 4])
ev_cond_dim = npy(data[:, :, :, 0].sum(1))
axs = plot_rt_distrib(
npy(out),
ev_cond_dim,
alpha_face=0.,
colors=['b', 'b'],
fig=fig
)[0]
axs = plot_rt_distrib(
npy(target),
ev_cond_dim,
alpha_face=0.,
colors=['k', 'k'],
axs=axs,
)[0]
return fig, d
def plot_params(model, d=None, data_mode=None):
fig = plt.figure('params', figsize=(6, 12))
gs = plt.GridSpec(nrows=1, ncols=1, left=0.35)
ax = plt.subplot(gs[0, 0])
model.plot_params(ax=ax)
return fig, d
def ____Fit____():
pass
def fun_data(data: Data2DRT, mode='all', i_fold_valid=0, epoch=0,
n_fold_valid=1, i_fold_test=0, n_fold_test=1,
mode_train='all', to_debug=False):
ev_cond_fr_dim_meanvar, n_cond_rt_ch = data.get_data_by_cond(
mode=mode, i_fold_valid=i_fold_valid, epoch=epoch,
n_fold_valid=n_fold_valid,
i_fold_test=i_fold_test, n_fold_test=n_fold_test,
mode_train=mode_train, to_debug=to_debug
)[:2]
return ev_cond_fr_dim_meanvar, n_cond_rt_ch
def fun_loss(
p_cond__rt_ch_pred: torch.Tensor,
n_cond__rt_ch_data: torch.Tensor,
ignore_hard_RT=False,
conds: Union[torch.Tensor, np.ndarray] = None,
**kwargs
) -> torch.Tensor:
"""
:param conds: [cond, dim]
"""
if ignore_hard_RT:
conds = npy(conds)
ix_conds_to_ignore_rt = np.any(
conds == np.amax(np.abs(conds), axis=0),
axis=1
)
else:
ix_conds_to_ignore_rt = None
return sim1d.fun_loss(p_cond__rt_ch_pred,
n_cond__rt_ch_data,
ix_conds_to_ignore_rt=ix_conds_to_ignore_rt,
**kwargs)
def fit_dtb(model: FitRT2D,
data: Data2DRT,
n_fold_valid=1,
n_fold_test=1,
i_fold_test=0,
mode_train='all',
ignore_hard_RT=False,
to_debug=False,
**kwargs
) -> (float, dict, dict, ykt.PlotFunsType):
"""
Provide functions fun_data() and plot_*() to ykt.optimize().
See ykt.optimize() for details about fun_data and plot_*
:param model:
:param data:
:param n_fold_valid
:param mode_train: 'all'|'easiest' - which conditions to use in training
:param kwargs: fed to ykt.optimize()
:return: best_loss, best_state
"""
def fun_data1(mode='all', i_fold_valid=0, epoch=0, n_fold_valid=1):
ev_cond_fr_dim_meanvar, n_cond_rt_ch = fun_data(
data, mode=mode, i_fold_valid=i_fold_valid, epoch=epoch,
n_fold_valid=n_fold_valid,
i_fold_test=i_fold_test,
n_fold_test=n_fold_test,
mode_train=mode_train, to_debug=to_debug
)
return ev_cond_fr_dim_meanvar, n_cond_rt_ch
def fun_loss1(
p_cond__rt_ch_pred: torch.Tensor,
n_cond__rt_ch_data: torch.Tensor,
**kwargs
):
return fun_loss(
p_cond__rt_ch_pred,
n_cond__rt_ch_data,
ignore_hard_RT=ignore_hard_RT,
conds=data.ev_cond_dim,
**kwargs
)
kw_optim = {**{
**{
'n_fold_valid': n_fold_valid
},
**kwargs}, **{
'optimizer_kind': 'Adam',
'learning_rate': .1,
'patience': 100,
'reduce_lr_after': 25,
'thres_patience': 1e-4,
'to_print_grad': False, # CHECKED
}}
plotfuns = [
('ch', plot_ch),
('rt', plot_rt),
('bound', plot_bound),
('tnd', plot_p_tnd1),
# ('rtdstr', plot_rt_distrib1),
('params', plot_params)
]
best_loss, best_state, d = ykt.optimize(
model, fun_data1, fun_loss1,
plotfuns=plotfuns,
**kw_optim
)[:3]
with torch.no_grad():
for data_mode in ['train_valid', 'test', 'all']:
inp, target = fun_data1(data_mode)
out = model(inp)
for loss_kind in ['CE', 'NLL', 'BIC']:
if loss_kind == 'CE':
loss = fun_loss1(out, target, to_average=True,
base_n_bin=True)
elif loss_kind in ['NLL', 'BIC']:
loss = fun_loss1(out, target, to_average=False,
base_n_bin=False)
if loss_kind == 'BIC':
n = npy(target.sum())
k = np.sum([
v.numel() if v.requires_grad else 0
for v in model.parameters()
])
loss = loss * 2 + k * np.log(n)
d['loss_ndata_%s' % data_mode] = n
d['loss_nparam'] = k
d['loss_%s_%s' % (loss_kind, data_mode)] = loss
return best_loss, best_state, d, plotfuns
def ____Demo____():
pass
def demo_interpolate():
"""
Test 1D interpolation along the time axis.
NOTE: F.interpolate is not suitable for my purpose since it's restricted to
rational ratios (output shape should be int); use F.grid_sample instead.
F.grid_sample is for 2D but I can just add one dimension for that.
"""
src = torch.tensor([[[0., 1., 0., 0.]]])
dst = F.interpolate(src, scale_factor=2.152, mode='linear')
print(dst)
print(dst.shape)
print(np.array(dst.shape) / np.array(src.shape))
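# Illustrative sketch (not part of the original file), following the note in
# demo_interpolate(): F.grid_sample allows arbitrary (non-rational) resampling
# ratios along the time axis by treating the 1D signal as a 1-pixel-high image.
def demo_grid_sample_resample(scale_factor=2.152):
    src = torch.tensor([[[0., 1., 0., 0.]]])  # [batch, channel, time]
    nt_out = int(round(src.shape[-1] * scale_factor))
    src4d = src[:, :, None, :]  # [N, C, 1, T]: add a singleton height dimension
    x = torch.linspace(-1., 1., nt_out)  # normalized coordinates along time
    grid = torch.stack([x, torch.zeros_like(x)], -1)[None, None]  # [N, 1, T_out, 2]
    dst = F.grid_sample(src4d, grid, mode='bilinear', align_corners=True)[:, :, 0]
    print(dst, dst.shape)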
def ____Main____():
pass
def main():
subsample_factor = 5
ev0 = get_demo_ev()
model = FitRT2D(dtb2d=Dtb2DRTParallel)
# ev_cond subsampled by subsample_factor
# stack mean_ev and var_ev
ev = torch.stack(list(sim1d.subsample_ev(ev0, subsample_factor)), -1)
t_st = time.time()
p_cond__rt_ch0 = model(ev)
t_el = time.time() - t_st
print('Elapsed time: %g sec.' % t_el)
# p_cond__rt_ch
n_cond_rt_ch_dat = simulate_p_cond__rt_ch(p_cond__rt_ch0, n_sample=100)
model.zero_grad()
loss = fun_loss(p_cond__rt_ch0, n_cond_rt_ch_dat)
loss.backward()
print(loss)
grads = odict([
(k, v.grad) for (k, v) in model.named_parameters()
])
pprint(grads)
plt.subplot(2, 1, 1)
plot_p_ch_vs_ev(ev, n_cond_rt_ch_dat, style='data')
plot_p_ch_vs_ev(ev, p_cond__rt_ch0, style='pred')
plt.subplot(2, 1, 2)
plot_rt_vs_ev(ev, n_cond_rt_ch_dat, style='data')
plot_rt_vs_ev(ev, p_cond__rt_ch0, style='pred')
plt.show()
print('--')
if __name__ == '__main__':
if torch.cuda.is_available():
torch.set_default_tensor_type(torch.cuda.FloatTensor)
torch.set_num_threads(6)
torch.set_default_dtype(torch.double)
main()
|
#jci5kb Justin Ingram
#mgb5db Megan Bishop
from random import shuffle
from negotiator_base import BaseNegotiator
class CareBearBot(BaseNegotiator):
iteration_limit = 500
def __init__(self):
super().__init__()
# History of offers
self.our_offer_history = []
self.enemy_offer_history = []
# The enemy's best offer so far
self.enemy_max_offer = []
# Utility Histories
# How much we will get from our own offers
self.our_offer_utility_history = []
# How much the enemy would get from our own offer, scaled to be directly comparable
self.our_offer_enemy_utility_history = []
# Incoming, randomly scaled utility of enemy's offers
self.enemy_offer_rawutility_history = []
# What we expect the enemy to get from their offer, scaled to be comparable to our utilities
self.enemy_utility_from_enemy_offer_history = []
# What we expect to get from the enemy's offer
self.our_utility_from_enemy_offer_history = []
# The actual results (WIN, NUM TURNS)
self.resultHistory = []
# Score history
self.ourScoreHistory = []
self.enemyScoreHistory = []
# Numerical items
self.turnsTaken = 0
self.max_utility = 0
self.enemy_max_utility = 0
self.our_preferences_on_enemy_scale = 0
self.goingFirst = None
self.acceptOffer = False
# initialize(self : BaseNegotiator, preferences : list(String), iter_limit : Int)
# Performs per-round initialization - takes in a list of items, ordered by the item's
# preferability for this negotiator
# You can do other work here, but still need to store the preferences
def initialize(self, preferences, iter_limit):
super().initialize(preferences, iter_limit)
#print("Our preferences " + str(self.preferences))
# Reset all our fields that do not carry over from the past run
        self.goingFirst = None
        self.turnsTaken = 0
        self.acceptOffer = False
# Offer histories
self.enemy_offer_history.clear()
self.our_offer_history.clear()
# Utility histories
self.our_offer_utility_history.clear()
self.our_offer_enemy_utility_history.clear()
self.enemy_offer_rawutility_history.clear()
self.enemy_utility_from_enemy_offer_history.clear()
self.our_utility_from_enemy_offer_history.clear()
self.enemy_max_utility = float("-inf")
self.our_preferences_on_enemy_scale = 0
self.enemy_max_offer.clear()
# Set our max utility to be the value of the preference utility
self.max_utility = self.calculate_offer_utility(preferences)
# make_offer(self : BaseNegotiator, offer : list(String)) --> list(String)
# Given the opposing negotiator's last offer (represented as an ordered list),
# return a new offer. If you wish to accept an offer & end negotiations, return the same offer
# Note: Store a copy of whatever offer you make in self.offer at the end of this method.
def make_offer(self, offer):
# ## All the calculations up here
print("Turn #" + str(self.turnsTaken))
if offer:
#print("Enemy offer: " + str(offer))
self.enemy_offer_history.append(offer)
#print("Reported utility for this offer " + str(self.enemy_offer_rawutility_history[-1]))
if self.enemy_offer_rawutility_history[-1] > self.enemy_max_utility:
#print("This is the new best guess for their preferences!")
self.enemy_max_utility = self.enemy_offer_rawutility_history[-1]
#print("New max enemy utility is " + str(self.enemy_max_utility))
# recalculate...
self.enemy_max_offer = offer[:]
# Our preferences on the enemy's estimated scale
self.our_preferences_on_enemy_scale = self.calculate_our_offer_on_enemy_scale(self.preferences)
#print("Our preferences on the enemy's estimated utility: " + str(self.our_preferences_on_enemy_scale))
# Previous offers now based on this new preferred ordering
#print("Recalculating previous utility estimates")
self.enemy_utility_from_enemy_offer_history.clear()
                for past_offer in self.enemy_offer_history:
                    self.enemy_utility_from_enemy_offer_history.append(
                        self.calculate_our_offer_on_enemy_scale(past_offer))
#print(self.enemy_utility_from_enemy_offer_history)
# Now that we have reset the best estimate
# Get our utility from this offer
self.our_utility_from_enemy_offer_history.append(self.calculate_offer_utility(offer))
#print("Our utility from enemy's offer: " + str(self.our_utility_from_enemy_offer_history[-1]))
# Estimate enemy's utility from the offer
self.enemy_utility_from_enemy_offer_history.append(self.calculate_our_offer_on_enemy_scale(offer))
#print("Estimated utility enemy receives from their offer: " + str(
# self.enemy_utility_from_enemy_offer_history[-1]))
if self.goingFirst is None:
self.goingFirst = False
#print("I'm not going first!")
else:
if self.goingFirst is None:
self.goingFirst = True
#print("I'm going first!")
        ### Decision to accept/reject in here
        # accept_offer() sets self.acceptOffer as a side effect; calling it once
        # per incoming offer is enough.
        if offer:
            self.accept_offer(offer)
        if self.acceptOffer and offer:
            self.offer = offer[:]
### Making Offers ###
# Only make an offer if we are not accepting
else:
# Let's always begin by making our ideal offer
if self.turnsTaken == 0:
self.offer = self.preferences[:]
else:
self.offer = self.generate_offer()
# This is the last offer!! Person going first has to choose whether to
# accept or not
#if not self.goingFirst and self.turnsTaken == self.iter_limit - 1:
#print("make last offer!")
####### Storing the history of the offer we have decided to make #######
# store our offer history
self.our_offer_history.append(self.offer)
# store the utility of the offer we are making
self.our_offer_utility_history.append(self.utility())
print("Offer utility " + str(self.our_offer_utility_history[-1]))
# turns taken increases
self.turnsTaken += 1
# return the offer
return self.offer
def generate_offer(self):
#our utility from our last offer
ourUtil = self.our_offer_utility_history[-1]
#our utility from their last offer
enemyUtil = self.our_utility_from_enemy_offer_history[-1]
        # copy the preferences
        ordering = self.preferences[:]
        # target window: 75% of our last offer's utility, +/- 10%
        x = 0.75 * ourUtil
        y = 0.1 * ourUtil
        high1 = x + y
        low1 = x - y
i = 0
while i < CareBearBot.iteration_limit:
# lets get a new ordering
shuffle(ordering)
#calculate its utility
utility = self.calculate_offer_utility(ordering)
# is it above the threshold?
if low1 <= utility <= high1:
return ordering
i += 1
# we failed to generate one in the number of iterations specified
#just put forth the last offer
return self.our_offer_history[-1]
def calculate_offer_utility(self, offer):
backup = self.offer[:]
self.offer = offer
utility = self.utility()
self.offer = backup[:]
return utility
def calculate_our_offer_on_enemy_scale(self, offer):
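        # Temporarily treat the enemy's best offer so far as the preference
        # ordering, so that utility() scores `offer` on the enemy's (estimated)
        # scale; everything is restored before returning.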
backuppref = self.preferences[:]
self.preferences = self.enemy_max_offer[:]
backup = self.offer[:]
self.offer = offer
utility = self.utility()
self.offer = backup[:]
self.preferences = backuppref[:]
return utility
def convert_enemy_scaled_to_utility(self, ordering):
pass
# receive_utility(self : BaseNegotiator, utility : Float)
# Store the utility the other negotiator received from their last offer
def receive_utility(self, utility):
self.enemy_offer_rawutility_history.append(utility)
# receive_results(self : BaseNegotiator, results : (Boolean, Float, Float, Int))
# Store the results of the last series of negotiation (points won, success, etc.)
def receive_results(self, results):
# Always from the same opponent
self.resultHistory.append((results[0], results[3]))
if self.goingFirst:
self.ourScoreHistory.append(results[1])
self.enemyScoreHistory.append(results[2])
else:
self.ourScoreHistory.append(results[2])
self.enemyScoreHistory.append(results[1])
print(self.our_total_score())
print(self.enemy_total_score())
def our_total_score(self):
return sum(self.ourScoreHistory)
def enemy_total_score(self):
return sum(self.enemyScoreHistory)
    def accept_offer(self, offer):
        threshold = 0.5 * self.max_utility
        util = self.calculate_offer_utility(offer)
        # Last Turn! Final Offer! Always accept
        if self.goingFirst and self.turnsTaken == self.iter_limit:
            self.acceptOffer = True
        else:
            if util >= threshold:
                self.acceptOffer = True
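# Illustrative driver sketch only (not part of the assignment framework): shows
# the call order the framework is assumed to follow. Item names and the reported
# utility value below are made up for the example.
if __name__ == '__main__':
    bot = CareBearBot()
    bot.initialize(['apple', 'book', 'camera'], 5)
    opening = bot.make_offer(None)            # we open with our own preferences
    bot.receive_utility(7.5)                  # enemy reports utility of *their* offer
    counter = bot.make_offer(['camera', 'book', 'apple'])
    print(opening, counter)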
|
import random
def get_rand_list(b,e,N):
r_list=random.sample(range(b,e),N)
return (r_list)
def get_overlap(L1, L2):
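    # Return the items of L1 that also appear in L2 (order follows L1;
    # duplicates in L1 are kept).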
L3=[]
for num in L1:
if num in L2:
L3.append(num)
return (L3)
def main():
list1=get_rand_list(0,10,5)
list2=get_rand_list(0,10,5)
print(list1)
print(list2)
    list3 = get_overlap(list1, list2)
print(list3)
main()
|
from utils.io import *
def export_inferred_stance():
predicted_stance_path = ""
input_stance_path = ""
|
"""
Given a binary tree, return the sum of values of its deepest leaves.
"""
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution:
def deepestLeavesSum(self, root) -> int:
if not root:
return 0
height = self.get_height(root)
leaves = []
self._get_deepest_leaves(root, 0, height, leaves)
return sum(leaves)
def _get_deepest_leaves(self, root, curr_level, deep_level, leaves):
# if leaves is None:
# leaves = []
if root is None:
return None
# print(root.val)
self._get_deepest_leaves(root.left, curr_level+1, deep_level, leaves)
self._get_deepest_leaves(root.right, curr_level+1, deep_level, leaves)
if root.left is None and root.right is None:
if curr_level + 1 == deep_level:
# print("cur level: ",curr_level)
# print("deep_level", deep_level)
# print("node:", root.val)
leaves.append(root.val)
def get_height(self, root):
if root is None:
return 0
left = self.get_height(root.left)
right = self.get_height(root.right)
height = max(left, right) + 1
return height
def _level_sum(self, root):
"""
        We perform BFS to traverse the tree and calculate each level's sum;
        after the loop, level_sum holds the sum of the deepest level.
"""
queue = [root]
level_sum = 0
while len(queue) != 0:
level_sum = 0
for i in range(len(queue)):
# pop the value
print("size:", len(queue))
node = queue.pop(0)
level_sum += node.val
# the we add the children
if node.left:
queue.append(node.left)
if node.right:
queue.append(node.right)
return level_sum
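# Quick usage sketch (TreeNode here mirrors the commented-out definition above):
if __name__ == '__main__':
    class TreeNode:
        def __init__(self, x):
            self.val = x
            self.left = None
            self.right = None
    #     1
    #    / \
    #   2   3
    #  /     \
    # 4       5    -> deepest leaves are 4 and 5, expected sum 9
    root = TreeNode(1)
    root.left, root.right = TreeNode(2), TreeNode(3)
    root.left.left, root.right.right = TreeNode(4), TreeNode(5)
    print(Solution().deepestLeavesSum(root))  # 9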
|
# -*- coding: utf-8 -*-
import unittest
import sys
sys.path.append('../../python')
from testecono.TestUser import TestUserPersist
from testecono.TestUser import TestUserDelete
from testecono.TestUser import TestUserFindById
from testecono.TestUser import TestUserFindAll
from testecono.TestJustification import *
def suite():
"""
Gather all the tests from this module in a test suite.
"""
test_suite = unittest.TestSuite()
test_suite.addTest(TestUserPersist('test_persist'))
test_suite.addTest(TestUserDelete('test_delete'))
test_suite.addTest(TestUserFindById('test_find_by_id'))
test_suite.addTest(TestUserFindAll('test_find_all'))
##### ART 102 #####
test_suite.addTest(TestJustificationArt102('test_persist'))
test_suite.addTest(TestJustificationArt102('test_find_by_id'))
test_suite.addTest(TestJustificationArt102('test_find_by_user_id'))
##### ART #####
test_suite.addTest(TestJustificationArt('test_persist'))
test_suite.addTest(TestJustificationArt('test_find_by_id'))
test_suite.addTest(TestJustificationArt('test_find_by_user_id'))
##### AUTHORITY #####
test_suite.addTest(TestJustificationAuthority('test_persist'))
test_suite.addTest(TestJustificationAuthority('test_find_by_id'))
test_suite.addTest(TestJustificationAuthority('test_find_by_user_id'))
    ##### BIRTHDAY #####
test_suite.addTest(TestJustificationBirthday('test_persist'))
test_suite.addTest(TestJustificationBirthday('test_find_by_id'))
test_suite.addTest(TestJustificationBirthday('test_find_by_user_id'))
##### BLOOD DONATION #####
test_suite.addTest(TestJustificationBloodDonation('test_persist'))
test_suite.addTest(TestJustificationBloodDonation('test_find_by_id'))
test_suite.addTest(TestJustificationBloodDonation('test_find_by_user_id'))
##### COMPENSATORY #####
test_suite.addTest(TestJustificationCompensatory('test_persist'))
test_suite.addTest(TestJustificationCompensatory('test_find_by_id'))
test_suite.addTest(TestJustificationCompensatory('test_find_by_user_id'))
##### EVALUATION #####
test_suite.addTest(TestJustificationEvaluation('test_persist'))
test_suite.addTest(TestJustificationEvaluation('test_find_by_id'))
test_suite.addTest(TestJustificationEvaluation('test_find_by_user_id'))
##### FAMILY ATTENTION #####
test_suite.addTest(TestJustificationFamilyAttention('test_persist'))
test_suite.addTest(TestJustificationFamilyAttention('test_find_by_id'))
test_suite.addTest(TestJustificationFamilyAttention('test_find_by_user_id'))
##### HOLIDAY #####
test_suite.addTest(TestJustificationHoliday('test_persist'))
test_suite.addTest(TestJustificationHoliday('test_find_by_id'))
test_suite.addTest(TestJustificationHoliday('test_find_by_user_id'))
##### INFORMED ABSENCE #####
test_suite.addTest(TestJustificationInformedAbsence('test_persist'))
test_suite.addTest(TestJustificationInformedAbsence('test_find_by_id'))
test_suite.addTest(TestJustificationInformedAbsence('test_find_by_user_id'))
##### LATE ARRIVAL #####
test_suite.addTest(TestJustificationLateArrival('test_persist'))
test_suite.addTest(TestJustificationLateArrival('test_find_by_id'))
test_suite.addTest(TestJustificationLateArrival('test_find_by_user_id'))
##### LEAVE WITHOUT SALARY #####
test_suite.addTest(TestJustificationLeaveWithoutSalary('test_persist'))
test_suite.addTest(TestJustificationLeaveWithoutSalary('test_find_by_id'))
test_suite.addTest(TestJustificationLeaveWithoutSalary('test_find_by_user_id'))
##### LIBRARIAN DAY #####
test_suite.addTest(TestJustificationLibrarianDay('test_persist'))
test_suite.addTest(TestJustificationLibrarianDay('test_find_by_id'))
test_suite.addTest(TestJustificationLibrarianDay('test_find_by_user_id'))
##### LONG DURATION #####
test_suite.addTest(TestJustificationLongDuration('test_persist'))
test_suite.addTest(TestJustificationLongDuration('test_find_by_id'))
test_suite.addTest(TestJustificationLongDuration('test_find_by_user_id'))
##### MARRIAGE #####
test_suite.addTest(TestJustificationMarriage('test_persist'))
test_suite.addTest(TestJustificationMarriage('test_find_by_id'))
test_suite.addTest(TestJustificationMarriage('test_find_by_user_id'))
##### MATERNITY #####
test_suite.addTest(TestJustificationMaternity('test_persist'))
test_suite.addTest(TestJustificationMaternity('test_find_by_id'))
test_suite.addTest(TestJustificationMaternity('test_find_by_user_id'))
##### MEDICAL BOARD #####
test_suite.addTest(TestJustificationMedicalBoard('test_persist'))
test_suite.addTest(TestJustificationMedicalBoard('test_find_by_id'))
test_suite.addTest(TestJustificationMedicalBoard('test_find_by_user_id'))
##### MEDICAL CERTIFICATE JUSTIFICATION #####
test_suite.addTest(TestJustificationMedicalCertificate('test_persist'))
test_suite.addTest(TestJustificationMedicalCertificate('test_find_by_id'))
test_suite.addTest(TestJustificationMedicalCertificate('test_find_by_user_id'))
##### MOURNING FIRST GRADE #####
test_suite.addTest(TestJustificationMourningFirstGrade('test_persist'))
test_suite.addTest(TestJustificationMourningFirstGrade('test_find_by_id'))
test_suite.addTest(TestJustificationMourningFirstGrade('test_find_by_user_id'))
##### MOURNING SECOND GRADE #####
test_suite.addTest(TestJustificationMourningSecondGrade('test_persist'))
test_suite.addTest(TestJustificationMourningSecondGrade('test_find_by_id'))
test_suite.addTest(TestJustificationMourningSecondGrade('test_find_by_user_id'))
##### MOURNING RELATIVE #####
test_suite.addTest(TestJustificationMourningRelative('test_persist'))
test_suite.addTest(TestJustificationMourningRelative('test_find_by_id'))
test_suite.addTest(TestJustificationMourningRelative('test_find_by_user_id'))
##### OUT TICKET WITH RETURN #####
test_suite.addTest(TestJustificationOutTicketWithReturn('test_persist'))
test_suite.addTest(TestJustificationOutTicketWithReturn('test_find_by_id'))
test_suite.addTest(TestJustificationOutTicketWithReturn('test_find_by_user_id'))
##### OUT TICKET WITHOUT RETURN #####
test_suite.addTest(TestJustificationOutTicketWithoutReturn('test_persist'))
test_suite.addTest(TestJustificationOutTicketWithoutReturn('test_find_by_id'))
test_suite.addTest(TestJustificationOutTicketWithoutReturn('test_find_by_user_id'))
##### PATERNITY #####
test_suite.addTest(TestJustificationPaternity('test_persist'))
test_suite.addTest(TestJustificationPaternity('test_find_by_id'))
test_suite.addTest(TestJustificationPaternity('test_find_by_user_id'))
##### SCHOOL PRE EXAM #####
test_suite.addTest(TestJustificationSchoolPreExam('test_persist'))
test_suite.addTest(TestJustificationSchoolPreExam('test_find_by_id'))
test_suite.addTest(TestJustificationSchoolPreExam('test_find_by_user_id'))
##### UNIVERSITY PRE EXAM #####
test_suite.addTest(TestJustificationUniversityPreExam('test_persist'))
test_suite.addTest(TestJustificationUniversityPreExam('test_find_by_id'))
test_suite.addTest(TestJustificationUniversityPreExam('test_find_by_user_id'))
    ##### PRENATAL #####
test_suite.addTest(TestJustificationPrenatal('test_persist'))
test_suite.addTest(TestJustificationPrenatal('test_find_by_id'))
test_suite.addTest(TestJustificationPrenatal('test_find_by_user_id'))
##### RESOLUTION 638 #####
test_suite.addTest(TestJustificationResolution638('test_persist'))
test_suite.addTest(TestJustificationResolution638('test_find_by_id'))
test_suite.addTest(TestJustificationResolution638('test_find_by_user_id'))
##### SCHEDULE #####
test_suite.addTest(TestJustificationSchedule('test_persist'))
test_suite.addTest(TestJustificationSchedule('test_find_by_id'))
test_suite.addTest(TestJustificationSchedule('test_find_by_user_id'))
##### SHORT DURATION #####
test_suite.addTest(TestJustificationShortDuration('test_persist'))
test_suite.addTest(TestJustificationShortDuration('test_find_by_id'))
test_suite.addTest(TestJustificationShortDuration('test_find_by_user_id'))
##### STRIKE #####
test_suite.addTest(TestJustificationStrike('test_persist'))
test_suite.addTest(TestJustificationStrike('test_find_by_id'))
test_suite.addTest(TestJustificationStrike('test_find_by_user_id'))
##### SUMMER BREAK #####
test_suite.addTest(TestJustificationSummerBreake('test_persist'))
test_suite.addTest(TestJustificationSummerBreake('test_find_by_id'))
test_suite.addTest(TestJustificationSummerBreake('test_find_by_user_id'))
##### SUSPENSION #####
test_suite.addTest(TestJustificationSuspension('test_persist'))
test_suite.addTest(TestJustificationSuspension('test_find_by_id'))
test_suite.addTest(TestJustificationSuspension('test_find_by_user_id'))
##### TASK WITH RETURN #####
test_suite.addTest(TestJustificationTaskWithReturn('test_persist'))
test_suite.addTest(TestJustificationTaskWithReturn('test_find_by_id'))
test_suite.addTest(TestJustificationTaskWithReturn('test_find_by_user_id'))
##### TASK WITHOUT RETURN #####
test_suite.addTest(TestJustificationTaskWithoutReturn('test_persist'))
test_suite.addTest(TestJustificationTaskWithoutReturn('test_find_by_id'))
test_suite.addTest(TestJustificationTaskWithoutReturn('test_find_by_user_id'))
##### TRAINING #####
test_suite.addTest(TestJustificationTraining('test_persist'))
test_suite.addTest(TestJustificationTraining('test_find_by_id'))
test_suite.addTest(TestJustificationTraining('test_find_by_user_id'))
##### TRAVEL #####
test_suite.addTest(TestJustificationTravel('test_persist'))
test_suite.addTest(TestJustificationTravel('test_find_by_id'))
test_suite.addTest(TestJustificationTravel('test_find_by_user_id'))
    ##### WEATHER #####
test_suite.addTest(TestJustificationWeather('test_persist'))
test_suite.addTest(TestJustificationWeather('test_find_by_id'))
test_suite.addTest(TestJustificationWeather('test_find_by_user_id'))
##### WINTER BREAK #####
test_suite.addTest(TestJustificationWinterBreak('test_persist'))
test_suite.addTest(TestJustificationWinterBreak('test_find_by_id'))
test_suite.addTest(TestJustificationWinterBreak('test_find_by_user_id'))
return test_suite
mySuit=suite()
runner=unittest.TextTestRunner()
runner.run(mySuit)
|
# Copyright 2022 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import annotations
import threading
from pathlib import Path
from typing import Callable
from pants.bsp.spec.lifecycle import InitializeBuildParams
from pants.bsp.spec.notification import BSPNotification
# Wrapper type to provide BSP rules with the ability to interact with the BSP protocol driver.
#
# Note: Due to limitations in the engine's API regarding what values can be part of a query for a union rule,
# this class is stored in SessionValues. See https://github.com/pantsbuild/pants/issues/12934.
#
# Concurrency: This method can be invoked from multiple threads (for each individual request). The protocol
# driver protects against multiple threads trying to call `initialize_connection` by only allowing
# the thread processing the `build/initialize` RPC to proceed; all other requests return an error _before_
# they enter the engine (and thus would ever have a chance to access this context).
#
# Thus, while this class can mutate due to initialization, it is immutable after it has been initialized and
# is thus compatible with use in the engine.
from pants.util.dirutil import safe_mkdtemp
class BSPContext:
"""Wrapper type to provide BSP rules with the ability to interact with the BSP protocol
driver."""
def __init__(self) -> None:
"""Initialize the context with an empty client params.
This is the "connection uninitialized" state.
"""
self._lock = threading.Lock()
self._client_params: InitializeBuildParams | None = None
self._notify_client: Callable[[BSPNotification], None] | None = None
self.tempdir: Path = Path(safe_mkdtemp(prefix="bsp"))
@property
def is_connection_initialized(self):
with self._lock:
return self._client_params is not None
@property
def client_params(self) -> InitializeBuildParams:
with self._lock:
if self._client_params is None:
raise AssertionError(
"Attempt to access BSP context on an uninitialized connection."
)
return self._client_params
def initialize_connection(
self, client_params: InitializeBuildParams, notify_client: Callable[[BSPNotification], None]
) -> None:
with self._lock:
if self._client_params is not None:
raise AssertionError(
"Attempted to set new BSP client parameters on an already-initialized connection."
)
self._client_params = client_params
self._notify_client = notify_client
def notify_client(self, notification: BSPNotification) -> None:
if not self.is_connection_initialized:
return
assert self._notify_client is not None
self._notify_client(notification)
def __hash__(self):
return hash(self._client_params)
def __eq__(self, other):
        if not isinstance(other, BSPContext):
return NotImplemented
return self._client_params == other._client_params
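# Illustrative call sequence only (values are placeholders; the real driver
# builds InitializeBuildParams from the client's `build/initialize` request):
#
#   context = BSPContext()
#   assert not context.is_connection_initialized
#   context.initialize_connection(client_params, notify_fn)  # done once by the driver
#   context.notify_client(notification)  # silently dropped until initialized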
|
my_name = 'Damiano'
your_name = input('Enter your name: ')
print(f'Hello {your_name}!')
age = input('Enter your age: ') # Enter 3
print(f'You have lived for {age * 12} months.') # Prints You have lived for 333333333333 months.
age = input('Enter your age: ') # Enter 3
age_num = int(age)
print(f'You have lived for {age_num * 12} months.') # Prints You have lived for 36 months.
age = int(input('Enter your age: ')) # Enter 3
print(f'You have lived for {age * 12} months.') # Prints You have lived for 36 months.
age = int(input('Enter your age: ')) # Enter 3
months = age * 12
print(f'You have lived for {months} months.') # Prints You have lived for 36 months.
|
"""
This file contains your PayPal test account credentials. If you are just
getting started, you'll want to copy api_details_blank.py to api_details.py,
and substitute the placeholders below with your PayPal test account details.
"""
from paypal import PayPalConfig
# Enter your test account's API details here. You'll need the 3-token
# credentials, not the certificate stuff.
CONFIG = PayPalConfig(API_USERNAME="xxx_xxx_apix.xxx.com",
API_PASSWORD="xxxxxxxxxx",
API_SIGNATURE="xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx",
DEBUG_LEVEL=0)
# The following values may be found by visiting https://developer.paypal.com/,
# clicking on the 'Applications' -> 'Sandbox accounts' link in the sandbox,
# and looking at the accounts listed there.
# You'll need a business and a personal account created to run these tests.
# The email address of your personal test account. This is typically the
# customer for these tests.
EMAIL_PERSONAL = 'custX_xxxxxxxxxx_per@xxxxxxxx.com'
# If you view the details of your personal account, you'll see credit card
# details. Enter the credit card number from there.
VISA_ACCOUNT_NO = 'xxxxxxxxxxxxxxxx'
# And the expiration date in the form of MMYYYY. Note that there are no slashes,
# and single-digit month numbers have a leading 0 (IE: 03 for march).
VISA_EXPIRATION = 'mmyyyy'
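# Hedged usage sketch: the tests are assumed to import this module and hand
# CONFIG to the interface object, roughly like so (PayPalInterface is part of
# the `paypal` package; adjust if your installed version differs):
#
#   from paypal import PayPalInterface
#   from api_details import CONFIG
#   interface = PayPalInterface(config=CONFIG)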
|
from FK import *
import serial
import math
import time
PI = math.pi
RADIUS = 80
def usage():
print "Usage : input 9 pose parameters."
ser = serial.Serial(
port='/dev/cu.usbmodem1421',
baudrate=9600,
parity=serial.PARITY_NONE,
stopbits=serial.STOPBITS_ONE,
bytesize=serial.EIGHTBITS
)
ser.isOpen()
print 'Enter your commands below.\r\nInsert "exit" to leave the application.'
cur_th = [0,-50,-50,0,-50,0]
target_th = []
tmp_cmd = []
target_cmd = []
while 1 :
# get keyboard input
cmd = raw_input(">> ")
if cmd == 'exit':
ser.close()
exit()
else:
#print "Wrote : %s" % (cmd)
if cmd == 'r' or cmd == 'c':
ser.write(cmd)
continue
pose = map(float, cmd.split())
if len(pose) != 9:
usage()
continue
target_th.append(pose)
for i in range (1,9):
xp, yp, zp = [pose[0], round(pose[1]+RADIUS*math.sin(7.5*i*PI/180)*math.sin(7.5*i*PI/180)), round(pose[2]-RADIUS*math.sin(7.5*i*PI/180)*math.cos(7.5*i*PI/180))]
#target_th.append([xp, yp, zp, xp+10, yp, zp, xp, yp, zp])#yp-10*(math.sin(7.5*i*PI/180)), zp-10*(1-math.cos(7.5*i*PI/180))])
target_th.append([xp, yp, zp, xp+10, yp, zp, xp, yp+10*(math.sin(7.5*i*PI/180)), zp-10*(math.cos(7.5*i*PI/180))])
for i in range(len(target_th)):
if i == 0:
time_span = 1
else:
time_span = 0.2
ik_result = IK(cur_th, target_th[i], time_span)
cur_th = list(ik_result)
for j in range(6):
tmp_cmd.append(str("%.2f"%(cur_th[j])))
target_cmd.append(tmp_cmd)
tmp_cmd = []
cmd_input = "%i" % len(target_cmd)
for i in range(len(target_cmd)):
cmd_input += " ".join(target_cmd[i])
#cmd_input += " 1"
cmd_input += " "
print target_th[i],target_cmd[i]
#print "cmd %i " % i, target_th[i], target_cmd[i]
target_cmd = []
target_th = []
tmp_cmd = []
print cmd_input
ser.write(cmd_input)
time.sleep(0.5)
#ik_result = IK(cur_th, pose, 1)
#cur_th = list(ik_result)
#cmd_2 = ["","","","","",""]
#for i in range(len(cmd_2)):
# cmd_2[i] = str(int(cur_th[i]))
#
#print "cmd input_1", " ".join(cmd_2)
#print "cmd input_2", " ".join(cmd_1)
#ser.write(" ".join(cmd_2)+" "+" ".join(cmd_1)+" ")
#print cur_th
#ser.write(' '.join(cur_th) + '\n')
#print ' '.join(cur_th) + '\n'
out = 'result'
#while ser.inWaiting() > 0:
# out += ser.read(1)
if out != '':
print "<< " + out
|
import tensorflow as tf
sess = tf.Session()
hello = tf.constant('Hello, TensorFlow!')
print sess.run(hello)
a = tf.constant(10)
b = tf.constant(32)
print sess.run(a + b)
a = tf.placeholder("float")
b = tf.placeholder("float")
y = tf.mul(a, b)
print sess.run(y, feed_dict={a: 3, b: 3})
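# Note: this snippet targets the pre-1.0 TensorFlow API under Python 2
# (print statements, tf.mul). On TF >= 1.0 use tf.multiply; on TF 2.x,
# tf.Session and placeholders were removed in favor of eager execution.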
|
#Transfer weights from model trained with normal LSTM unit to unrolled LSTM units
import sys
sys.path.append('utils/')
from init import *
import pdb
sys.path.insert(0, pycaffe_path)
sys.path.insert(0, 'utils/python_layers/')
import caffe
caffe.set_mode_gpu()
caffe.set_device(0)
import argparse
import numpy as np
def transfer_net_weights(orig_model, orig_model_weights, new_model):
new_model_weights_save = 'snapshots/' + orig_model_weights.split('/')[-1].split('.')[0] + 'indLSTM.caffemodel'
orig_net = caffe.Net(orig_model, orig_model_weights, caffe.TRAIN)
new_net = caffe.Net(new_model, caffe.TRAIN)
for layer in list(set(orig_net.params.keys()) & set(new_net.params.keys())):
for ix in range(len(new_net.params[layer])):
new_net.params[layer][ix].data[...] = orig_net.params[layer][ix].data
embed_weights = orig_net.params['embed'][0].data
lstm1_W_xc = orig_net.params['lstm1'][0].data
lstm1_b_c = orig_net.params['lstm1'][1].data
lstm1_W_hc = orig_net.params['lstm1'][2].data
lstm2_W_xc = orig_net.params['lstm2'][0].data
lstm2_b_c = orig_net.params['lstm2'][1].data
lstm2_W_x_static = orig_net.params['lstm2'][2].data
lstm2_W_hc = orig_net.params['lstm2'][3].data
predict_w = orig_net.params['predict'][0].data
predict_b = orig_net.params['predict'][1].data
new_net.params['embed_0'][0].data[...] = embed_weights
new_net.params['x_static_transform'][0].data[...] = lstm2_W_x_static
new_net.params['lstm1_0_x_transform'][0].data[...] = lstm1_W_xc
new_net.params['lstm1_0_x_transform'][1].data[...] = lstm1_b_c
new_net.params['lstm1_0_h_transform'][0].data[...] = lstm1_W_hc
new_net.params['lstm2_0_x_transform'][0].data[...] = lstm2_W_xc
new_net.params['lstm2_0_x_transform'][1].data[...] = lstm2_b_c
new_net.params['lstm2_0_h_transform'][0].data[...] = lstm2_W_hc
new_net.params['predict_0'][0].data[...] = predict_w
new_net.params['predict_0'][1].data[...] = predict_b
for layer in new_net.params.keys():
for ix in range(len(new_net.params[layer])):
print layer, ix, np.max(new_net.params[layer][ix].data)
new_net.save(new_model_weights_save)
print "New model saved to %s." %new_model_weights_save
return new_model_weights_save
def transfer_combine_weights(model, classify_model, caption_weights, classifier_weights):
net = caffe.Net(model, caption_weights, caffe.TRAIN)
classify_net = caffe.Net(classify_model, classifier_weights, caffe.TRAIN)
for param in classify_net.params.keys():
param_new = '%s_classification' %param
for i in range(len(classify_net.params[param])):
net.params[param_new][i].data[...] = classify_net.params[param][i].data
orig_snap_tag = caption_weights.split('/')[-1].split('.caffemodel')[0]
classify_snap_tag = classifier_weights.split('/')[-1].split('.caffemodel')[0]
new_net_save = 'snapshots/%s_%s.caffemodel' %(orig_snap_tag, classify_snap_tag)
net.save(new_net_save)
for layer in net.params.keys():
for ix in range(len(net.params[layer])):
print layer, ix, np.max(net.params[layer][ix].data)
print "Saved caffemodel to %s." %new_net_save
return new_net_save
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("--orig_model",type=str, default='prototxt/lrcn_freeze.prototxt')
parser.add_argument("--orig_model_weights",type=str, default='snapshots/birds_from_scratch_fgSplit_freezeConv_iter_20000.caffemodel')
parser.add_argument("--new_model",type=str, default='prototxt/lrcn_unroll_lstm_train.prototxt')
parser.add_argument("--combine_model", type=str, default='prototxt/lrcn_reinforce_lstm_classification_sentenceLoss_wbT_train.prototxt')
#parser.add_argument("--classify_model", type=str, default='eccv_prototxts/caption_classifier_embedDrop_75_lstmDrop_90_embedHidden_1000_lstmHidden_1000_train.prototxt')
parser.add_argument("--classify_model", type=str, default='prototxt/caption_classifier_embedDrop_75_lstmDrop_75_embedHidden_1000_lstmHidden_1000_train.prototxt')
parser.add_argument("--caption_weights", type=str, default='snapshots/birds_from_scratch_fgSplit_freezeConv_iter_20000indLSTM.caffemodel')
#parser.add_argument("--classify_weights", type=str, default='eccv_snapshots/caption_classifier_embedDrop_75_lstmDrop_75_embedHidden_1000_lstmHidden_1000_iter_6000.caffemodel')
parser.add_argument("--classify_weights", type=str, default='snapshots/caption_classifier_embedDrop_75_lstmDrop_75_embedHidden_1000_lstmHidden_1000_iter_6000.caffemodel')
args = parser.parse_args()
new_model_weights = transfer_net_weights(args.orig_model, args.orig_model_weights, args.new_model)
combine_weights = transfer_combine_weights(args.new_model, args.classify_model, new_model_weights, args.classify_weights)
|
from django.db import models
from django.forms import ModelForm
class Results(models.Model):
petal_length = models.FloatField()
petal_width = models.FloatField()
sepal_length = models.FloatField()
sepal_width = models.FloatField()
prediction = models.CharField(max_length=100)
'''
class PredictForm(ModelForm):
class Meta:
        model = Results
exclude = ['prediction']
'''
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
# Author: Andy Garcia
# Date: 1/28/2017
import sys, os
from PyQt5.QtWidgets import (QWidget, QGridLayout,
QPushButton, QApplication, QCheckBox, QSlider)
from PyQt5 import (QtGui,QtCore)
import serial
#sys.path.append(os.path.dirname(__file__) + "../XboxController/")
from XboxController import XboxController
def contains(main_list,items):
try:
if type(items) is int:
items = [items]
if type(items) is QtCore.Qt.Key:
items = [items]
return all(x in main_list for x in items)
except:
print ("\nException")
return False
class MainWindow(QWidget):
def __init__(self):
super().__init__()
self.ser = None
self.keys = []
self.initUI()
self.xbox_connected = False
self.bluetooth_connected = False
self.motor_powers = [50,50,50] #note that 0 is full reverse, 50 is stop, and 100 is full forward
self.left_motor_power = 50 #note that 0 is full reverse, 50 is stop, and 100 is full forward
self.right_motor_power = 50 #note that 0 is full reverse, 50 is stop, and 100 is full forward
self.z_motor_power = 50 #note that 0 is full reverse, 50 is stop, and 100 is full forward
self.sendSliderTimer = QtCore.QTimer()
self.sendSliderTimer.setInterval(1)
def initUI(self):
grid = QGridLayout()
self.setLayout(grid)
delay = 500
interval = 500
stop_button = QPushButton(text = "Stop")
stop_button.setAutoRepeat(1)
stop_button.setAutoRepeatDelay(delay)
stop_button.pressed.connect(self.stop)
forward_button = QPushButton(text = "Forward")
forward_button.setAutoRepeat(1)
forward_button.setAutoRepeatDelay(delay)
forward_button.setAutoRepeatInterval(interval)
forward_button.pressed.connect(self.forward)
reverse_button = QPushButton(text = "Reverse")
reverse_button.setAutoRepeat(1)
reverse_button.setAutoRepeatDelay(delay)
reverse_button.setAutoRepeatInterval(interval)
reverse_button.pressed.connect(self.reverse)
right_button = QPushButton(text = "Right")
right_button.setAutoRepeat(1)
right_button.setAutoRepeatDelay(delay)
right_button.setAutoRepeatInterval(interval)
right_button.pressed.connect(self.right)
left_button = QPushButton(text = "Left")
left_button.setAutoRepeat(1)
left_button.setAutoRepeatDelay(delay)
left_button.setAutoRepeatInterval(interval)
left_button.pressed.connect(self.left)
sl_button = QPushButton(text = "Swing Left")
sl_button.setAutoRepeat(1)
sl_button.setAutoRepeatDelay(delay)
sl_button.setAutoRepeatInterval(interval)
sl_button.pressed.connect(self.swing_left)
sr_button = QPushButton(text = "Swing Right")
sr_button.setAutoRepeat(1)
sr_button.setAutoRepeatDelay(delay)
sr_button.setAutoRepeatInterval(interval)
sr_button.pressed.connect(self.swing_right)
bt_button = QCheckBox(text = "Connect to Drone")
bt_button.pressed.connect(lambda: self.bt_handle(bt_button))
xbox_button = QCheckBox(text = "Use Xbox Controller")
xbox_button.setChecked(False)
xbox_button.stateChanged.connect(lambda:self.setXboxSend(xbox_button))
send_button = QCheckBox(text = "Send slider values")
send_button.setChecked(False)
send_button.stateChanged.connect(lambda:self.setSendSlider(send_button))
self.sliders = []
for x in range (3):
self.sliders.append(QSlider(QtCore.Qt.Vertical))
self.sliders[x].setMinimum(0)
self.sliders[x].setMaximum(100)
self.sliders[x].setValue(50)
            # bind x now (default arg) to avoid the late-binding closure bug
            self.sliders[x].valueChanged.connect(lambda _value, m=x: self.setSliderValues(m))
self.sliders[x].setInvertedAppearance(True)
grid.addWidget(forward_button,1,3)
grid.addWidget(stop_button,2,3)
grid.addWidget(right_button,2,4)
grid.addWidget(left_button,2,2)
grid.addWidget(reverse_button,3,3)
grid.addWidget(sl_button,1,2)
grid.addWidget(sr_button,1,4)
grid.addWidget(bt_button,1,1)
grid.addWidget(xbox_button,2,1)
grid.addWidget(send_button,3,1)
grid.addWidget(self.sliders[0],1,5,2,4)
grid.addWidget(self.sliders[1],1,7,2,4)
grid.addWidget(self.sliders[2],1,9,2,4)
self.move(300, 150)
self.setWindowTitle('AquaDrone Controller')
self.show()
    def setSendSlider(self, button):
if button.isChecked():
pass
def setSliderValues(self,motor):
self.motor_powers[motor] = self.sliders[motor].value()
def defaultStop(self,button):
if (button.isChecked()):
pass
def printMotorPower(self):
if (self.left_motor_power < 50 and self.right_motor_power < 50):
print ("Reverse at {0}%".format(self.left_motor_power*2))
elif (self.left_motor_power >50 and self.right_motor_power > 50):
print ("Forward at {0}%".format(self.left_motor_power*2))
else:
print ("Stop")
def bt_handle(self,button,port = '/dev/cu.HC-05-DevB'):
if button.isChecked() == False:
try:
self.ser = serial.Serial(port)
self.bluetooth_connected = True
print ("Connected on " + self.ser.name)
except ValueError:
self.ser = None
print ("Could not connect to " + port + ": Value Error")
except serial.SerialException:
self.ser = None
print ("Could not connect to " + port + ": Device not found")
except:
self.ser = None
print ("Could not connect to " + port + ":Unknown error")
else:
try:
if (self.ser.is_open):
print ("Closing " + port.name)
port.close()
self.bluetooth_connected = False
else:
print (port.name + " is not open")
except Exception as e:
print ("Invalid Port")
print (e)
def bt_close(self,port):
try:
if (port.is_open):
print ("Closing " + port.name)
port.close()
self.bluetooth_connected = False
else:
print (port.name + " is not open")
except:
print ("Invalid Port")
def swing_right(self):
try:
print("Swing Right")
self.sliders[0].setValue(100)
self.sliders[1].setValue(50)
self.ser.write(bytes([100,50,0,126]))
except:
print ("Could not send swing right command")
def swing_left(self):
try:
print("Swing Left")
self.sliders[0].setValue(50)
self.sliders[1].setValue(100)
self.ser.write(bytes([50,100,0,126]))
except:
print ("Could not send swing left command")
def stop(self):
try:
print ("Stop")
self.sliders[0].setValue(50)
self.sliders[1].setValue(50)
#self.ser.write(bytes([50,50,0,126]))
self.ser.write(bytes([100]))
self.ser.flush()
except:
print ("Could not send stop command")
def forward(self):
try:
print ("Forward")
self.sliders[0].setValue(100)
self.sliders[1].setValue(100)
self.ser.write(bytes([100,100,0,126]))
except:
print ("Could not send forward command")
def reverse(self):
try:
print ("Reverse")
self.sliders[0].setValue(0)
self.sliders[1].setValue(0)
self.ser.write(bytes([0,0,0,126]))
except:
print ("Could not send reverse command")
def right(self):
try:
print ("Right")
self.sliders[0].setValue(100)
self.sliders[1].setValue(0)
self.ser.write(bytes([100,0,0,126]))
except:
print ("Could not send right command")
def left(self):
try:
print ("Left")
self.sliders[0].setValue(0)
self.sliders[1].setValue(100)
self.ser.write(bytes([0,100,0,126]))
except:
print ("Could not send left command")
def keyPressEvent(self,event):
if type(event) == QtGui.QKeyEvent:
if (not contains(self.keys,event.key())):
self.keys.append(event.key())
self.multikey()
event.accept()
def keyReleaseEvent(self,event):
if type(event) == QtGui.QKeyEvent:
try:
self.keys.remove(event.key())
except:
pass
def setXboxSend(self,button):
if button.isChecked() == True:
try:
#generic call back
def controlCallBack(xboxControlId, value):
if (xboxControlId == 1):
print ("Yval: " + str(value))
self.left_y = value
#print ("Y axis: {0}".format(value))
#setup xbox controller, set out the deadzone and scale, also invert the Y Axis (for some reason in Pygame negative is up - wierd!
self.xboxCont = XboxController(controlCallBack, deadzone = 10, scale = 50, invertYAxis = True)
self.xboxCont.start()
print ("Xbox 360 Controller Connected")
except Exception as e:
print (e)
button.setChecked(False)
else:
try:
self.xboxCont.stop()
self.xboxCont = None
print ("Xbox 360 Controller Disconnected")
except:
pass
def multikey(self):
if (contains(self.keys,[QtCore.Qt.Key_W,QtCore.Qt.Key_A])):
self.swing_left()
elif (contains(self.keys,[QtCore.Qt.Key_W,QtCore.Qt.Key_D])):
self.swing_right()
elif (contains(self.keys,QtCore.Qt.Key_W)):
self.forward()
elif (contains(self.keys,QtCore.Qt.Key_S)):
self.reverse()
elif (contains(self.keys,QtCore.Qt.Key_A)):
self.left()
elif (contains(self.keys,QtCore.Qt.Key_D)):
self.right()
elif (contains(self.keys,QtCore.Qt.Key_Space)):
try:
send = []
#send.append(int(self.xboxCont.left_y))
send.append(int(self.xboxCont.LTHUMBY))
send.append(int(self.xboxCont.LTHUMBY))
send.append(int(self.xboxCont.LTHUMBY))
send.append(126)
self.ser.write(bytes(send))
print ("Sent {0}".format(send))
except Exception as e:
print ("Could not send xbox command")
print (type(e))
print(e.args)
print (e)
else:
print ("Unknown Key: " + str(self.keys))
def closeEvent(self,event):
try:
self.xboxCont.stop()
except:
pass
event.accept()
if __name__ == '__main__':
app = QApplication(sys.argv)
win = MainWindow()
sys.exit(app.exec_())
|
import os.path
import regions_classifier as rc
from svm_tools import get_dataset_prediction_rank
def usage_regions_classifier():
alphabet = "ACGT"
radius = 10
count = 10
dataset = rc.Dataset(os.path.abspath(r"..\..\data\germline\human\VJL_combinations.fasta"),
os.path.abspath(r"..\..\data\nomenclature\human\VJL_combinations.kabat"),
alphabet, count, False)
classifier = rc.learn_svm_classifier(dataset, radius)
# dataset = rc.Dataset(os.path.abspath(r"..\..\data\germline\human\VJL_combinations.fasta"),
# None, alphabet, count, False)
result = rc.predict_dataset(classifier, dataset, radius, alphabet)
print '-' * 40, "dataset prediction"
for line in result:
print line
seq = dataset.data[0].seq
print '-' * 40, "sequence prediction"
print rc.predict_sequence(classifier, seq, radius, alphabet)
check_result = get_dataset_prediction_rank(dataset, result)
print '-' * 40, "raw check result"
print check_result
print '-' * 40, "check result with sequence id"
print zip([data.id for data in dataset.data], check_result)
def usage_type_classifier():
alphabet = "ACGT"
radius = 10
count = 10
if __name__ == "__main__":
# usage_regions_classifier()
usage_type_classifier()
|
from bing_image_downloader import downloader
downloader.download('apple fruit', limit=3, output_dir='dataset/train')
# downloader.download('fresh guava', limit=350, output_dir='dataset/test')
|
print('Task 5')
line = input('Enter a string: ')
words = line.split(' ')
amount = len(words)
print('Number of words in the string: ', amount)
#python task5.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2019/9/27 11:38
# @Author : Jason
# @Site :
# @File : decision_tree.py
# @Software: PyCharm
import pandas as pd
from sklearn.tree import DecisionTreeClassifier as DTC
from sklearn.tree import export_graphviz
from sklearn.externals.six import StringIO
filename = 'data/sales_data.xls'
data = pd.read_excel(filename,index_col=u'序号')
data[data == u'好'] = 1
data[data == u'是'] = 1
data[data == u'高'] = 1
data[data != 1] = -1
x = data.iloc[:,:3].astype(int)
y = data.iloc[:,3].astype(int)
dtc = DTC(criterion='entropy')
dtc.fit(x, y)  # train the model
with open("tree.dot",'w') as f:
f = export_graphviz(dtc,feature_names=x.columns,out_file=f)
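# The exported tree.dot can be rendered with Graphviz, e.g.:
#   dot -Tpdf tree.dot -o tree.pdf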
|
import unittest
import depfinder.finder as finder
class FinderTest(unittest.TestCase):
def test_find_deps(self):
deps = finder.find_deps('depfinder')
ground_truth = ['coverage', 'chardet', 'idna', 'urllib3',
'requests', 'docopt', 'coveralls']
result = [True for package in ground_truth if package in deps]
        self.assertEqual(len(result), len(ground_truth))
def test_generate_requirements(self):
deps = ['chardet', 'coverage', 'coveralls', 'depfinder','docopt',
'idna', 'requests', 'urllib3']
ground_truth = ['chardet==', 'coverage==', 'coveralls==',
'depfinder==', 'docopt==', 'idna==', 'requests==',
'urllib3==']
reqs = finder.generate_requirements(deps)
        result = [True for package in ground_truth if package in reqs]
        self.assertEqual(len(result), len(ground_truth))
|
#!/usr/bin/env python
# coding: utf-8
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from decimal import Decimal
import itertools
import logging
import unittest
import urllib
import environment
import keyspace_util
import utils
from protocols_flavor import protocols_flavor
from vtdb import dbexceptions
from vtdb import vtgate_cursor
from vtdb import vtgate_client
shard_0_master = None
shard_1_master = None
lookup_master = None
keyspace_env = None
create_vt_user = '''create table vt_user (
id bigint,
name varchar(64),
primary key (id)
) Engine=InnoDB'''
create_vt_user2 = '''create table vt_user2 (
id bigint,
name varchar(64),
primary key (id)
) Engine=InnoDB'''
create_vt_user_extra = '''create table vt_user_extra (
user_id bigint,
email varchar(64),
primary key (user_id)
) Engine=InnoDB'''
create_vt_user_extra2 = '''create table vt_user_extra2 (
user_id bigint,
lastname varchar(64),
address varchar(64),
primary key (user_id)
) Engine=InnoDB'''
create_vt_multicolvin = '''create table vt_multicolvin (
kid bigint,
cola varchar(64),
colb varchar(64),
colc varchar(64),
primary key (kid)
) Engine=InnoDB'''
create_vt_aggr = '''create table vt_aggr (
id bigint,
name varchar(64),
primary key (id)
) Engine=InnoDB'''
create_vt_music = '''create table vt_music (
user_id bigint,
id bigint,
song varchar(64),
primary key (user_id, id)
) Engine=InnoDB'''
create_vt_music_extra = '''create table vt_music_extra (
music_id bigint,
user_id bigint,
artist varchar(64),
primary key (music_id)
) Engine=InnoDB'''
create_upsert = '''create table upsert (
pk bigint,
owned bigint,
user_id bigint,
col bigint,
primary key (pk)
) Engine=InnoDB'''
create_join_user = '''create table join_user (
id bigint,
name varchar(64),
primary key (id)
) Engine=InnoDB'''
create_join_user_extra = '''create table join_user_extra (
user_id bigint,
email varchar(64),
primary key (user_id)
) Engine=InnoDB'''
create_join_name_info = '''create table join_name_info (
name varchar(128),
info varchar(128),
primary key (name)
) Engine=InnoDB'''
create_twopc_user = '''create table twopc_user (
user_id bigint,
val varchar(128),
primary key (user_id)
) Engine=InnoDB'''
create_vt_user_seq = '''create table vt_user_seq (
id int,
next_id bigint,
cache bigint,
primary key(id)
) comment 'vitess_sequence' Engine=InnoDB'''
init_vt_user_seq = 'insert into vt_user_seq values(0, 1, 2)'
create_vt_music_seq = '''create table vt_music_seq (
id int,
next_id bigint,
cache bigint,
primary key(id)
) comment 'vitess_sequence' Engine=InnoDB'''
init_vt_music_seq = 'insert into vt_music_seq values(0, 1, 2)'
create_vt_main_seq = '''create table vt_main_seq (
id int,
next_id bigint,
cache bigint,
primary key(id)
) comment 'vitess_sequence' Engine=InnoDB'''
init_vt_main_seq = 'insert into vt_main_seq values(0, 1, 2)'
create_name_user2_map = '''create table name_user2_map (
name varchar(64),
user2_id bigint,
primary key (name, user2_id)
) Engine=InnoDB'''
create_lastname_user_extra2_map = '''create table lastname_user_extra2_map (
lastname varchar(64),
user_id bigint,
primary key (lastname, user_id)
) Engine=InnoDB'''
create_cola_map = '''create table cola_map (
cola varchar(64),
kid binary(8),
primary key (cola, kid)
) Engine=InnoDB'''
create_colb_colc_map = '''create table colb_colc_map (
colb varchar(64),
colc varchar(64),
kid binary(8),
primary key (colb, colc, kid)
) Engine=InnoDB'''
create_address_user_extra2_map = '''create table address_user_extra2_map (
address varchar(64),
user_id bigint,
primary key (address)
) Engine=InnoDB'''
create_music_user_map = '''create table music_user_map (
music_id bigint,
user_id bigint,
primary key (music_id)
) Engine=InnoDB'''
create_upsert_primary = '''create table upsert_primary (
id bigint,
ksnum_id bigint,
primary key (id)
) Engine=InnoDB'''
create_upsert_owned = '''create table upsert_owned (
owned bigint,
ksnum_id bigint,
primary key (owned)
) Engine=InnoDB'''
create_main = '''create table main (
id bigint,
val varchar(128),
primary key(id)
) Engine=InnoDB'''
create_twopc_lookup = '''create table twopc_lookup (
id bigint,
val varchar(128),
primary key (id)
) Engine=InnoDB'''
vschema = {
'user': '''{
"sharded": true,
"vindexes": {
"hash_index": {
"type": "hash"
},
"unicode_hash": {
"type": "unicode_loose_md5"
},
"name_user2_map": {
"type": "lookup_hash",
"params": {
"table": "name_user2_map",
"from": "name",
"to": "user2_id"
},
"owner": "vt_user2"
},
"lastname_user_extra2_map": {
"type": "lookup_hash",
"params": {
"table": "lastname_user_extra2_map",
"from": "lastname",
"to": "user_id"
},
"owner": "vt_user_extra2"
},
"cola_map": {
"type": "lookup",
"params": {
"table": "cola_map",
"from": "cola",
"to": "kid"
},
"owner": "vt_multicolvin"
},
"colb_colc_map": {
"type": "lookup",
"params": {
"table": "colb_colc_map",
"from": "colb,colc",
"to": "kid"
},
"owner": "vt_multicolvin"
},
"address_user_extra2_map": {
"type": "lookup_hash_unique",
"params": {
"table": "address_user_extra2_map",
"from": "address",
"to": "user_id"
},
"owner": "vt_user_extra2"
},
"music_user_map": {
"type": "lookup_hash_unique",
"params": {
"table": "music_user_map",
"from": "music_id",
"to": "user_id"
},
"owner": "vt_music"
},
"upsert_primary": {
"type": "lookup_hash_unique",
"params": {
"table": "upsert_primary",
"from": "id",
"to": "ksnum_id"
}
},
"upsert_owned": {
"type": "lookup_hash_unique",
"params": {
"table": "upsert_owned",
"from": "owned",
"to": "ksnum_id"
},
"owner": "upsert"
}
},
"tables": {
"vt_user": {
"column_vindexes": [
{
"column": "id",
"name": "hash_index"
}
],
"auto_increment": {
"column": "id",
"sequence": "vt_user_seq"
}
},
"vt_user2": {
"column_vindexes": [
{
"column": "id",
"name": "hash_index"
},
{
"column": "name",
"name": "name_user2_map"
}
]
},
"vt_user_extra": {
"column_vindexes": [
{
"column": "user_id",
"name": "hash_index"
}
]
},
"vt_user_extra2": {
"column_vindexes": [
{
"column": "user_id",
"name": "hash_index"
},
{
"column": "lastname",
"name": "lastname_user_extra2_map"
},
{
"column": "address",
"name": "address_user_extra2_map"
}
]
},
"vt_multicolvin": {
"column_vindexes": [
{
"column": "kid",
"name": "hash_index"
},
{
"column": "cola",
"name": "cola_map"
},
{
"columns": ["colb", "colc"],
"name": "colb_colc_map"
}
]
},
"vt_aggr": {
"column_vindexes": [
{
"column": "id",
"name": "hash_index"
}
],
"columns": [
{
"name": "name",
"type": "VARCHAR"
}
]
},
"vt_music": {
"column_vindexes": [
{
"column": "user_id",
"name": "hash_index"
},
{
"column": "id",
"name": "music_user_map"
}
],
"auto_increment": {
"column": "id",
"sequence": "vt_music_seq"
}
},
"vt_music_extra": {
"column_vindexes": [
{
"column": "music_id",
"name": "music_user_map"
},
{
"column": "user_id",
"name": "hash_index"
}
]
},
"upsert": {
"column_vindexes": [
{
"column": "pk",
"name": "upsert_primary"
},
{
"column": "owned",
"name": "upsert_owned"
},
{
"column": "user_id",
"name": "hash_index"
}
]
},
"join_user": {
"column_vindexes": [
{
"column": "id",
"name": "hash_index"
}
]
},
"join_user_extra": {
"column_vindexes": [
{
"column": "user_id",
"name": "hash_index"
}
]
},
"join_name_info": {
"column_vindexes": [
{
"column": "name",
"name": "unicode_hash"
}
]
},
"twopc_user": {
"column_vindexes": [
{
"column": "user_id",
"name": "hash_index"
}
]
}
}
}''',
'lookup': '''{
"sharded": false,
"tables": {
"vt_user_seq": {
"type": "sequence"
},
"vt_music_seq": {
"type": "sequence"
},
"vt_main_seq": {
"type": "sequence"
},
"music_user_map": {},
"cola_map": {},
"colb_colc_map": {},
"name_user2_map": {},
"lastname_user_extra2_map": {},
"address_user_extra2_map": {},
"upsert_primary": {},
"upsert_owned": {},
"main": {
"auto_increment": {
"column": "id",
"sequence": "vt_main_seq"
}
},
"twopc_lookup": {}
}
}''',
}
def setUpModule():
global keyspace_env
global shard_0_master
global shard_1_master
global lookup_master
logging.debug('in setUpModule')
try:
environment.topo_server().setup()
logging.debug('Setting up tablets')
keyspace_env = keyspace_util.TestEnv()
keyspace_env.launch(
'user',
shards=['-80', '80-'],
ddls=[
create_vt_user,
create_vt_user2,
create_vt_user_extra,
create_vt_user_extra2,
create_vt_multicolvin,
create_vt_aggr,
create_vt_music,
create_vt_music_extra,
create_upsert,
create_join_user,
create_join_user_extra,
create_join_name_info,
create_twopc_user,
],
rdonly_count=1, # to test SplitQuery
twopc_coordinator_address='localhost:15028', # enables 2pc
)
keyspace_env.launch(
'lookup',
ddls=[
create_vt_user_seq,
create_vt_music_seq,
create_vt_main_seq,
create_music_user_map,
create_name_user2_map,
create_lastname_user_extra2_map,
create_address_user_extra2_map,
create_cola_map,
create_colb_colc_map,
create_upsert_primary,
create_upsert_owned,
create_main,
create_twopc_lookup,
],
twopc_coordinator_address='localhost:15028', # enables 2pc
)
shard_0_master = keyspace_env.tablet_map['user.-80.master']
shard_1_master = keyspace_env.tablet_map['user.80-.master']
lookup_master = keyspace_env.tablet_map['lookup.0.master']
utils.apply_vschema(vschema)
utils.VtGate().start(
tablets=[shard_0_master, shard_1_master, lookup_master],
extra_args=['-transaction_mode', 'TWOPC'])
utils.vtgate.wait_for_endpoints('user.-80.master', 1)
utils.vtgate.wait_for_endpoints('user.80-.master', 1)
utils.vtgate.wait_for_endpoints('lookup.0.master', 1)
except:
tearDownModule()
raise
def tearDownModule():
logging.debug('in tearDownModule')
utils.required_teardown()
if utils.options.skip_teardown:
return
logging.debug('Tearing down the servers and setup')
if keyspace_env:
keyspace_env.teardown()
environment.topo_server().teardown()
utils.kill_sub_processes()
utils.remove_tmp_files()
def get_connection(timeout=10.0):
protocol, endpoint = utils.vtgate.rpc_endpoint(python=True)
try:
return vtgate_client.connect(protocol, endpoint, timeout)
except Exception:
logging.exception('Connection to vtgate (timeout=%s) failed.', timeout)
raise
class TestVTGateFunctions(unittest.TestCase):
decimal_type = 18
int_type = 265
string_type = 6165
varbinary_type = 10262
def setUp(self):
self.master_tablet = shard_1_master
def execute_on_master(self, vtgate_conn, sql, bind_vars):
return vtgate_conn._execute(
sql, bind_vars, tablet_type='master', keyspace_name=None)
def test_health(self):
f = urllib.urlopen('http://localhost:%d/debug/health' % utils.vtgate.port)
response = f.read()
f.close()
self.assertEqual(response, 'ok')
def test_srv_vschema(self):
"""Makes sure the SrvVSchema object is properly built."""
v = utils.run_vtctl_json(['GetSrvVSchema', 'test_nj'])
self.assertEqual(len(v['keyspaces']), 2, 'wrong vschema: %s' % str(v))
self.assertIn('user', v['keyspaces'])
self.assertIn('lookup', v['keyspaces'])
# Now deletes it.
utils.run_vtctl(['DeleteSrvVSchema', 'test_nj'])
_, stderr = utils.run_vtctl(['GetSrvVSchema', 'test_nj'],
expect_fail=True)
self.assertIn('node doesn\'t exist', stderr)
# And rebuilds it.
utils.run_vtctl(['RebuildVSchemaGraph', '-cells=test_nj'])
v = utils.run_vtctl_json(['GetSrvVSchema', 'test_nj'])
self.assertEqual(len(v['keyspaces']), 2, 'wrong vschema: %s' % str(v))
self.assertIn('user', v['keyspaces'])
self.assertIn('lookup', v['keyspaces'])
# Wait for vtgate to re-read it.
timeout = 10
while True:
vschema_json = utils.vtgate.get_vschema()
if 'lookup' in vschema_json:
break
timeout = utils.wait_step('vtgate re-read vschema', timeout)
def test_user(self):
count = 4
vtgate_conn = get_connection()
cursor = vtgate_conn.cursor(
tablet_type='master', keyspace=None, writable=True)
# Initialize the sequence.
# TODO(sougou): Use DDL when ready.
cursor.begin()
cursor.execute(init_vt_user_seq, {})
cursor.commit()
# Test insert
for x in xrange(count):
i = x+1
cursor.begin()
cursor.execute(
'insert into vt_user (name) values (:name)',
{'name': 'test %s' % i})
self.assertEqual(
(cursor.fetchall(), cursor.rowcount, cursor.lastrowid,
cursor.description),
([], 1L, i, []))
cursor.commit()
# Test select equal
for x in xrange(count):
i = x+1
cursor.execute('select id, name from vt_user where id = :id', {'id': i})
self.assertEqual(
(cursor.fetchall(), cursor.rowcount, cursor.lastrowid,
cursor.description),
([(i, 'test %s' % i)], 1L, 0,
[('id', self.int_type), ('name', self.string_type)]))
# Test case sensitivity
cursor.execute('select Id, Name from vt_user where iD = :id', {'id': 1})
self.assertEqual(
(cursor.fetchall(), cursor.rowcount, cursor.lastrowid,
cursor.description),
([(1, 'test 1')], 1L, 0,
[('Id', self.int_type), ('Name', self.string_type)]))
# test directive timeout
try:
cursor.execute('SELECT /*vt+ QUERY_TIMEOUT_MS=10 */ SLEEP(1)', {})
self.fail('Execute went through')
except dbexceptions.DatabaseError as e:
s = str(e)
self.assertIn(protocols_flavor().rpc_timeout_message(), s)
# test directive timeout longer than the query time
cursor.execute('SELECT /*vt+ QUERY_TIMEOUT_MS=2000 */ SLEEP(1)', {})
self.assertEqual(
(cursor.fetchall(), cursor.rowcount, cursor.lastrowid,
cursor.description),
([(0,)], 1L, 0,
[(u'SLEEP(1)', self.int_type)]))
# test shard errors as warnings directive
cursor.execute('SELECT /*vt+ SCATTER_ERRORS_AS_WARNINGS */ bad from vt_user', {})
print vtgate_conn.get_warnings()
warnings = vtgate_conn.get_warnings()
self.assertEqual(len(warnings), 2)
for warning in warnings:
self.assertEqual(warning.code, 1054)
self.assertIn('errno 1054', warning.message)
self.assertIn('Unknown column', warning.message)
self.assertEqual(
(cursor.fetchall(), cursor.rowcount, cursor.lastrowid,
cursor.description),
([], 0L, 0, []))
# test shard errors as warnings directive with timeout
cursor.execute('SELECT /*vt+ SCATTER_ERRORS_AS_WARNINGS QUERY_TIMEOUT_MS=10 */ SLEEP(1)', {})
print vtgate_conn.get_warnings()
warnings = vtgate_conn.get_warnings()
self.assertEqual(len(warnings), 1)
for warning in warnings:
self.assertEqual(warning.code, 1317)
self.assertIn('context deadline exceeded', warning.message)
self.assertEqual(
(cursor.fetchall(), cursor.rowcount, cursor.lastrowid,
cursor.description),
([], 0L, 0, []))
# Test insert with no auto-inc
vtgate_conn.begin()
result = self.execute_on_master(
vtgate_conn,
'insert into vt_user (id, name) values (:id, :name)',
{'id': 6, 'name': 'test 6'})
self.assertEqual(result, ([], 1L, 0L, []))
vtgate_conn.commit()
# Verify values in db
result = shard_0_master.mquery('vt_user', 'select id, name from vt_user')
self.assertEqual(result, ((1L, 'test 1'), (2L, 'test 2'), (3L, 'test 3')))
result = shard_1_master.mquery('vt_user', 'select id, name from vt_user')
self.assertEqual(result, ((4L, 'test 4'), (6L, 'test 6')))
# Test MultiValueInsert with no auto-inc
vtgate_conn.begin()
result = self.execute_on_master(
vtgate_conn,
'insert into vt_user (id, name) values (:id0, :name0), (:id1, :name1)',
{'id0': 5, 'name0': 'test 5', 'id1': 7, 'name1': 'test 7'})
self.assertEqual(result, ([], 2L, 0L, []))
vtgate_conn.commit()
# Verify values in db
result = shard_0_master.mquery('vt_user', 'select id, name from vt_user')
self.assertEqual(result, ((1L, 'test 1'), (2L, 'test 2'), (3L, 'test 3'),
(5L, 'test 5')))
result = shard_1_master.mquery('vt_user', 'select id, name from vt_user')
self.assertEqual(result, ((4L, 'test 4'), (6L, 'test 6'), (7L, 'test 7')))
# Test IN clause
result = self.execute_on_master(
vtgate_conn,
'select id, name from vt_user where id in (:a, :b)', {'a': 1, 'b': 4})
result[0].sort()
self.assertEqual(
result,
([(1L, 'test 1'), (4L, 'test 4')], 2L, 0,
[('id', self.int_type), ('name', self.string_type)]))
result = self.execute_on_master(
vtgate_conn,
'select id, name from vt_user where id in (:a, :b)', {'a': 1, 'b': 2})
result[0].sort()
self.assertEqual(
result,
([(1L, 'test 1'), (2L, 'test 2')], 2L, 0,
[('id', self.int_type), ('name', self.string_type)]))
# Test scatter
result = vtgate_conn._execute(
'select id, name from vt_user',
{}, tablet_type='master', keyspace_name=None)
result[0].sort()
self.assertEqual(
result,
([(1L, 'test 1'), (2L, 'test 2'), (3L, 'test 3'), (4L, 'test 4'),
(5L, 'test 5'), (6L, 'test 6'), (7L, 'test 7')], 7L, 0,
[('id', self.int_type), ('name', self.string_type)]))
# Test stream over scatter
stream_cursor_1 = vtgate_conn.cursor(
tablet_type='master', keyspace=None,
cursorclass=vtgate_cursor.StreamVTGateCursor)
stream_cursor_1.execute('select id, name from vt_user', {})
stream_cursor_2 = vtgate_conn.cursor(
tablet_type='master', keyspace=None,
cursorclass=vtgate_cursor.StreamVTGateCursor)
stream_cursor_2.execute('select id, name from vt_user', {})
self.assertEqual(stream_cursor_1.description,
[('id', self.int_type), ('name', self.string_type)])
self.assertEqual(stream_cursor_2.description,
[('id', self.int_type), ('name', self.string_type)])
rows_1 = []
rows_2 = []
for row_1, row_2 in itertools.izip(stream_cursor_1, stream_cursor_2):
rows_1.append(row_1)
rows_2.append(row_2)
self.assertEqual(
sorted(rows_1),
[(1L, 'test 1'), (2L, 'test 2'), (3L, 'test 3'), (4L, 'test 4'),
(5L, 'test 5'), (6L, 'test 6'), (7L, 'test 7')])
self.assertEqual(
sorted(rows_2),
[(1L, 'test 1'), (2L, 'test 2'), (3L, 'test 3'), (4L, 'test 4'),
(5L, 'test 5'), (6L, 'test 6'), (7L, 'test 7')])
# Test updates
vtgate_conn.begin()
result = self.execute_on_master(
vtgate_conn,
'update vt_user set name = :name where id = :id',
{'id': 1, 'name': 'test one'})
self.assertEqual(result, ([], 1L, 0L, []))
result = self.execute_on_master(
vtgate_conn,
'update vt_user set name = :name where id = :id',
{'id': 4, 'name': 'test four'})
self.assertEqual(result, ([], 1L, 0L, []))
vtgate_conn.commit()
result = shard_0_master.mquery('vt_user', 'select id, name from vt_user')
self.assertEqual(
result, ((1L, 'test one'), (2L, 'test 2'), (3L, 'test 3'),
(5L, 'test 5')))
result = shard_1_master.mquery('vt_user', 'select id, name from vt_user')
self.assertEqual(
result, ((4L, 'test four'), (6L, 'test 6'), (7L, 'test 7')))
# Test deletes
vtgate_conn.begin()
result = self.execute_on_master(
vtgate_conn,
'delete from vt_user where id = :id',
{'id': 1})
self.assertEqual(result, ([], 1L, 0L, []))
result = self.execute_on_master(
vtgate_conn,
'delete from vt_user where id = :id',
{'id': 4})
self.assertEqual(result, ([], 1L, 0L, []))
vtgate_conn.commit()
result = shard_0_master.mquery('vt_user', 'select id, name from vt_user')
self.assertEqual(result, ((2L, 'test 2'), (3L, 'test 3'), (5L, 'test 5')))
result = shard_1_master.mquery('vt_user', 'select id, name from vt_user')
self.assertEqual(result, ((6L, 'test 6'), (7L, 'test 7')))
# test passing in the keyspace in the cursor
lcursor = vtgate_conn.cursor(
tablet_type='master', keyspace='lookup', writable=True)
with self.assertRaisesRegexp(
dbexceptions.DatabaseError, '.*table vt_user not found in schema.*'):
lcursor.execute('select id, name from vt_user', {})
def test_user2(self):
# user2 is for testing non-unique vindexes
vtgate_conn = get_connection()
vtgate_conn.begin()
result = self.execute_on_master(
vtgate_conn,
'insert into vt_user2 (id, name) values (:id, :name)',
{'id': 1, 'name': 'name1'})
self.assertEqual(result, ([], 1L, 0L, []))
result = self.execute_on_master(
vtgate_conn,
'insert into vt_user2 (id, name) values (:id, :name)',
{'id': 7, 'name': 'name1'})
self.assertEqual(result, ([], 1L, 0L, []))
result = self.execute_on_master(
vtgate_conn,
'insert into vt_user2 (id, name) values (:id0, :name0),(:id1, :name1)',
{'id0': 2, 'name0': 'name2', 'id1': 3, 'name1': 'name2'})
self.assertEqual(result, ([], 2L, 0L, []))
vtgate_conn.commit()
result = shard_0_master.mquery('vt_user', 'select id, name from vt_user2')
self.assertEqual(result, ((1L, 'name1'), (2L, 'name2'), (3L, 'name2')))
result = shard_1_master.mquery('vt_user', 'select id, name from vt_user2')
self.assertEqual(result, ((7L, 'name1'),))
result = lookup_master.mquery(
'vt_lookup', 'select name, user2_id from name_user2_map')
self.assertEqual(result, (('name1', 1L), ('name1', 7L), ('name2', 2L),
('name2', 3L)))
# Test select by id
result = self.execute_on_master(
vtgate_conn,
'select id, name from vt_user2 where id = :id', {'id': 1})
self.assertEqual(
result, ([(1, 'name1')], 1L, 0,
[('id', self.int_type), ('name', self.string_type)]))
# Test select by lookup
result = self.execute_on_master(
vtgate_conn,
'select id, name from vt_user2 where name = :name', {'name': 'name1'})
result[0].sort()
self.assertEqual(
result,
([(1, 'name1'), (7, 'name1')], 2L, 0,
[('id', self.int_type), ('name', self.string_type)]))
# Test IN clause using non-unique vindex
result = self.execute_on_master(
vtgate_conn,
"select id, name from vt_user2 where name in ('name1', 'name2')", {})
result[0].sort()
self.assertEqual(
result,
([(1, 'name1'), (2, 'name2'), (3, 'name2'), (7, 'name1')], 4L, 0,
[('id', self.int_type), ('name', self.string_type)]))
result = self.execute_on_master(
vtgate_conn,
"select id, name from vt_user2 where name in ('name1')", {})
result[0].sort()
self.assertEqual(
result,
([(1, 'name1'), (7, 'name1')], 2L, 0,
[('id', self.int_type), ('name', self.string_type)]))
# Test delete
vtgate_conn.begin()
result = self.execute_on_master(
vtgate_conn,
'delete from vt_user2 where id = :id',
{'id': 1})
self.assertEqual(result, ([], 1L, 0L, []))
result = self.execute_on_master(
vtgate_conn,
'delete from vt_user2 where id = :id',
{'id': 2})
self.assertEqual(result, ([], 1L, 0L, []))
vtgate_conn.commit()
result = shard_0_master.mquery('vt_user', 'select id, name from vt_user2')
self.assertEqual(result, ((3L, 'name2'),))
result = shard_1_master.mquery('vt_user', 'select id, name from vt_user2')
self.assertEqual(result, ((7L, 'name1'),))
result = lookup_master.mquery(
'vt_lookup', 'select name, user2_id from name_user2_map')
self.assertEqual(result, (('name1', 7L), ('name2', 3L)))
vtgate_conn.begin()
self.execute_on_master(
vtgate_conn,
'delete from vt_user2 where id = :id',
{'id': 7})
vtgate_conn.commit()
vtgate_conn.begin()
self.execute_on_master(
vtgate_conn,
'delete from vt_user2 where id = :id',
{'id': 3})
vtgate_conn.commit()
# Test scatter delete
vtgate_conn.begin()
result = self.execute_on_master(
vtgate_conn,
'insert into vt_user (id, name) values (:id0, :name0),(:id1, :name1)',
{'id0': 22, 'name0': 'name2', 'id1': 33, 'name1': 'name2'})
self.assertEqual(result, ([], 2L, 0L, []))
result = self.execute_on_master(
vtgate_conn,
'delete from vt_user where id > :id',
{'id': 20})
self.assertEqual(result, ([], 2L, 0L, []))
vtgate_conn.commit()
# Test scatter update
vtgate_conn.begin()
result = self.execute_on_master(
vtgate_conn,
'insert into vt_user (id, name) values (:id0, :name0),(:id1, :name1)',
{'id0': 22, 'name0': 'name2', 'id1': 33, 'name1': 'name2'})
self.assertEqual(result, ([], 2L, 0L, []))
result = self.execute_on_master(
vtgate_conn,
'update vt_user set name=:name where id > :id',
{'id': 20, 'name': 'jose'})
self.assertEqual(result, ([], 2L, 0L, []))
vtgate_conn.commit()
def test_user_truncate(self):
vtgate_conn = get_connection()
vtgate_conn.begin()
result = self.execute_on_master(
vtgate_conn,
'insert into vt_user2 (id, name) values (:id, :name)',
{'id': 1, 'name': 'name1'})
self.assertEqual(result, ([], 1L, 0L, []))
result = self.execute_on_master(
vtgate_conn,
'insert into vt_user2 (id, name) values (:id, :name)',
{'id': 7, 'name': 'name1'})
self.assertEqual(result, ([], 1L, 0L, []))
result = self.execute_on_master(
vtgate_conn,
'insert into vt_user2 (id, name) values (:id0, :name0),(:id1, :name1)',
{'id0': 2, 'name0': 'name2', 'id1': 3, 'name1': 'name2'})
self.assertEqual(result, ([], 2L, 0L, []))
vtgate_conn.commit()
result = shard_0_master.mquery('vt_user', 'select id, name from vt_user2')
self.assertEqual(result, ((1L, 'name1'), (2L, 'name2'), (3L, 'name2')))
result = shard_1_master.mquery('vt_user', 'select id, name from vt_user2')
self.assertEqual(result, ((7L, 'name1'),))
result = lookup_master.mquery(
'vt_lookup', 'select name, user2_id from name_user2_map')
self.assertEqual(result, (('name1', 1L), ('name1', 7L), ('name2', 2L),
('name2', 3L)))
vtgate_conn.begin()
result = vtgate_conn._execute(
'truncate vt_user2',
{},
tablet_type='master',
keyspace_name='user'
)
vtgate_conn.commit()
lookup_master.mquery('vt_lookup', 'truncate name_user2_map')
self.assertEqual(result, ([], 0L, 0L, []))
# Test select by id
result = self.execute_on_master(
vtgate_conn,
'select id, name from vt_user2 where id = :id', {'id': 1})
self.assertEqual(
result, ([], 0L, 0, [('id', self.int_type),
('name', self.string_type)]))
def test_user_extra(self):
# user_extra is for testing unowned functional vindex
count = 4
vtgate_conn = get_connection()
for x in xrange(count):
i = x+1
vtgate_conn.begin()
result = self.execute_on_master(
vtgate_conn,
'insert into vt_user_extra (user_id, email) '
'values (:user_id, :email)',
{'user_id': i, 'email': 'test %s' % i})
self.assertEqual(result, ([], 1L, 0L, []))
vtgate_conn.commit()
for x in xrange(count):
i = x+1
result = self.execute_on_master(
vtgate_conn,
'select user_id, email from vt_user_extra where user_id = :user_id',
{'user_id': i})
self.assertEqual(
result,
([(i, 'test %s' % i)], 1L, 0,
[('user_id', self.int_type), ('email', self.string_type)]))
result = shard_0_master.mquery(
'vt_user', 'select user_id, email from vt_user_extra')
self.assertEqual(result, ((1L, 'test 1'), (2L, 'test 2'), (3L, 'test 3')))
result = shard_1_master.mquery(
'vt_user', 'select user_id, email from vt_user_extra')
self.assertEqual(result, ((4L, 'test 4'),))
vtgate_conn.begin()
result = self.execute_on_master(
vtgate_conn,
'update vt_user_extra set email = :email where user_id = :user_id',
{'user_id': 1, 'email': 'test one'})
self.assertEqual(result, ([], 1L, 0L, []))
result = self.execute_on_master(
vtgate_conn,
'update vt_user_extra set email = :email where user_id = :user_id',
{'user_id': 4, 'email': 'test four'})
self.assertEqual(result, ([], 1L, 0L, []))
vtgate_conn.commit()
result = shard_0_master.mquery(
'vt_user', 'select user_id, email from vt_user_extra')
self.assertEqual(result, ((1L, 'test one'), (2L, 'test 2'), (3L, 'test 3')))
result = shard_1_master.mquery(
'vt_user', 'select user_id, email from vt_user_extra')
self.assertEqual(result, ((4L, 'test four'),))
vtgate_conn.begin()
result = self.execute_on_master(
vtgate_conn,
'delete from vt_user_extra where user_id = :user_id',
{'user_id': 1})
self.assertEqual(result, ([], 1L, 0L, []))
result = self.execute_on_master(
vtgate_conn,
'delete from vt_user_extra where user_id = :user_id',
{'user_id': 4})
self.assertEqual(result, ([], 1L, 0L, []))
vtgate_conn.commit()
result = shard_0_master.mquery(
'vt_user', 'select user_id, email from vt_user_extra')
self.assertEqual(result, ((2L, 'test 2'), (3L, 'test 3')))
result = shard_1_master.mquery(
'vt_user', 'select user_id, email from vt_user_extra')
self.assertEqual(result, ())
vtgate_conn.begin()
self.execute_on_master(
vtgate_conn,
'delete from vt_user_extra where user_id = :user_id',
{'user_id': 2})
self.execute_on_master(
vtgate_conn,
'delete from vt_user_extra where user_id = :user_id',
{'user_id': 3})
vtgate_conn.commit()
def test_user_scatter_limit(self):
vtgate_conn = get_connection()
# Works when there is no data
result = self.execute_on_master(
vtgate_conn,
'select id from vt_user2 order by id limit :limit offset :offset ', {'limit': 4, 'offset': 1})
self.assertEqual(
result, ([], 0L, 0, [('id', self.int_type)]))
vtgate_conn.begin()
result = self.execute_on_master(
vtgate_conn,
'insert into vt_user2 (id, name) values (:id0, :name0),(:id1, :name1),(:id2, :name2), (:id3, :name3), (:id4, :name4)',
{
'id0': 1, 'name0': 'name0',
'id1': 2, 'name1': 'name1',
'id2': 3, 'name2': 'name2',
'id3': 4, 'name3': 'name3',
'id4': 5, 'name4': 'name4',
}
)
self.assertEqual(result, ([], 5L, 0L, []))
vtgate_conn.commit()
# Assert that rows are in multiple shards
result = shard_0_master.mquery('vt_user', 'select id, name from vt_user2')
self.assertEqual(result, ((1L, 'name0'), (2L, 'name1'), (3L, 'name2'), (5L, 'name4')))
result = shard_1_master.mquery('vt_user', 'select id, name from vt_user2')
self.assertEqual(result, ((4L, 'name3'),))
# Works when limit is set
result = self.execute_on_master(
vtgate_conn,
'select id from vt_user2 order by id limit :limit', {'limit': 2 })
self.assertEqual(
result,
([(1,),(2,),], 2L, 0,
[('id', self.int_type)]))
# Fetching with offset works
count = 4
for x in xrange(count):
i = x+1
result = self.execute_on_master(
vtgate_conn,
'select id from vt_user2 order by id limit :limit offset :offset ', {'limit': 1, 'offset': x})
self.assertEqual(
result,
([(i,)], 1L, 0,
[('id', self.int_type)]))
# Works when limit is greater than values in the table
result = self.execute_on_master(
vtgate_conn,
'select id from vt_user2 order by id limit :limit offset :offset ', {'limit': 100, 'offset': 1})
self.assertEqual(
result,
([(2,),(3,),(4,),(5,)], 4L, 0,
[('id', self.int_type)]))
# Works without bind vars
result = self.execute_on_master(
vtgate_conn,
'select id from vt_user2 order by id limit 1 offset 1', {})
self.assertEqual(
result,
([(2,)], 1L, 0,
[('id', self.int_type)]))
vtgate_conn.begin()
result = vtgate_conn._execute(
'truncate vt_user2',
{},
tablet_type='master',
keyspace_name='user'
)
vtgate_conn.commit()
lookup_master.mquery('vt_lookup', 'truncate name_user2_map')
def test_user_extra2(self):
# user_extra2 is for testing updates to secondary vindexes
vtgate_conn = get_connection()
vtgate_conn.begin()
result = self.execute_on_master(
vtgate_conn,
'insert into vt_user_extra2 (user_id, lastname, address) '
'values (:user_id, :lastname, :address)',
{'user_id': 5, 'lastname': 'nieves', 'address': 'invernalia'})
self.assertEqual(result, ([], 1L, 0L, []))
vtgate_conn.commit()
# Updating both vindexes
vtgate_conn.begin()
result = self.execute_on_master(
vtgate_conn,
'update vt_user_extra2 set lastname = :lastname,'
' address = :address where user_id = :user_id',
{'user_id': 5, 'lastname': 'buendia', 'address': 'macondo'})
self.assertEqual(result, ([], 1L, 0L, []))
vtgate_conn.commit()
result = self.execute_on_master(
vtgate_conn,
'select lastname, address from vt_user_extra2 where user_id = 5', {})
self.assertEqual(
result,
([('buendia', 'macondo')], 1, 0,
[('lastname', self.string_type),
('address', self.string_type)]))
result = lookup_master.mquery(
'vt_lookup', 'select lastname, user_id from lastname_user_extra2_map')
self.assertEqual(
result,
(('buendia', 5L),))
result = lookup_master.mquery(
'vt_lookup', 'select address, user_id from address_user_extra2_map')
self.assertEqual(
result,
(('macondo', 5L),))
# Updating only one vindex
vtgate_conn.begin()
result = self.execute_on_master(
vtgate_conn,
'update vt_user_extra2 set address = :address where user_id = :user_id',
{'user_id': 5, 'address': 'yoknapatawpha'})
self.assertEqual(result, ([], 1L, 0L, []))
vtgate_conn.commit()
result = self.execute_on_master(
vtgate_conn,
'select lastname, address from vt_user_extra2 where user_id = 5', {})
self.assertEqual(
result,
([('buendia', 'yoknapatawpha')], 1, 0,
[('lastname', self.string_type),
('address', self.string_type)]))
result = lookup_master.mquery(
'vt_lookup', 'select address, user_id from address_user_extra2_map')
self.assertEqual(
result,
(('yoknapatawpha', 5L),))
result = lookup_master.mquery(
'vt_lookup', 'select lastname, user_id from lastname_user_extra2_map')
self.assertEqual(
result,
(('buendia', 5L),))
# It works when you update to same value on unique index
vtgate_conn.begin()
result = self.execute_on_master(
vtgate_conn,
'update vt_user_extra2 set address = :address where user_id = :user_id',
{'user_id': 5, 'address': 'yoknapatawpha'})
self.assertEqual(result, ([], 0L, 0L, []))
vtgate_conn.commit()
result = self.execute_on_master(
vtgate_conn,
'select lastname, address from vt_user_extra2 where user_id = 5', {})
self.assertEqual(
result,
([('buendia', 'yoknapatawpha')], 1, 0,
[('lastname', self.string_type),
('address', self.string_type)]))
result = lookup_master.mquery(
'vt_lookup', 'select lastname, user_id from lastname_user_extra2_map')
self.assertEqual(
result,
(('buendia', 5L),))
result = lookup_master.mquery(
'vt_lookup', 'select address, user_id from address_user_extra2_map')
self.assertEqual(
result,
(('yoknapatawpha', 5L),))
# you can find the record by either vindex
result = self.execute_on_master(
vtgate_conn,
'select lastname, address from vt_user_extra2'
' where lastname = "buendia"', {})
self.assertEqual(
result,
([('buendia', 'yoknapatawpha')], 1, 0,
[('lastname', self.string_type),
('address', self.string_type)]))
result = self.execute_on_master(
vtgate_conn,
'select lastname, address from vt_user_extra2'
' where address = "yoknapatawpha"', {})
self.assertEqual(
result,
([('buendia', 'yoknapatawpha')], 1, 0,
[('lastname', self.string_type),
('address', self.string_type)]))
def test_multicolvin(self):
# multicolvin tests a table with a multi column vindex
vtgate_conn = get_connection()
vtgate_conn.begin()
result = self.execute_on_master(
vtgate_conn,
'insert into vt_multicolvin (cola, colb, colc, kid) '
'values (:cola, :colb, :colc, :kid)',
{'kid': 5, 'cola': 'cola_value', 'colb': 'colb_value',
'colc': 'colc_value'})
self.assertEqual(result, ([], 1L, 0L, []))
vtgate_conn.commit()
# Updating both vindexes
vtgate_conn.begin()
result = self.execute_on_master(
vtgate_conn,
'update vt_multicolvin set cola = :cola, colb = :colb, colc = :colc'
' where kid = :kid',
{'kid': 5, 'cola': 'cola_newvalue', 'colb': 'colb_newvalue',
'colc': 'colc_newvalue'})
self.assertEqual(result, ([], 1L, 0L, []))
vtgate_conn.commit()
result = self.execute_on_master(
vtgate_conn,
'select cola, colb, colc from vt_multicolvin where kid = 5', {})
self.assertEqual(
result,
([('cola_newvalue', 'colb_newvalue', 'colc_newvalue')], 1, 0,
[('cola', self.string_type),
('colb', self.string_type),
('colc', self.string_type)]))
result = lookup_master.mquery(
'vt_lookup', 'select cola from cola_map')
self.assertEqual(
result,
(('cola_newvalue',),))
result = lookup_master.mquery(
'vt_lookup', 'select colb, colc from colb_colc_map')
self.assertEqual(
result,
(('colb_newvalue', 'colc_newvalue'),))
# Updating only one vindex
vtgate_conn.begin()
result = self.execute_on_master(
vtgate_conn,
'update vt_multicolvin set colb = :colb, colc = :colc where kid = :kid',
{'kid': 5, 'colb': 'colb_newvalue2', 'colc': 'colc_newvalue2'})
self.assertEqual(result, ([], 1L, 0L, []))
vtgate_conn.commit()
result = self.execute_on_master(
vtgate_conn, 'select colb, colc from vt_multicolvin where kid = 5', {})
self.assertEqual(
result,
([('colb_newvalue2', 'colc_newvalue2')], 1, 0,
[('colb', self.string_type),
('colc', self.string_type)]))
result = lookup_master.mquery(
'vt_lookup', 'select colb, colc from colb_colc_map')
self.assertEqual(
result,
(('colb_newvalue2', 'colc_newvalue2'),))
# Works when inserting multiple rows
vtgate_conn = get_connection()
vtgate_conn.begin()
result = self.execute_on_master(
vtgate_conn,
'insert into vt_multicolvin (cola, colb, colc, kid) '
'values (:cola0, :colb0, :colc0, :kid0),'
' (:cola1, :colb1, :colc1, :kid1)',
{'kid0': 6, 'cola0': 'cola0_value', 'colb0': 'colb0_value',
'colc0': 'colc0_value',
'kid1': 7, 'cola1': 'cola1_value', 'colb1': 'colb1_value',
'colc1': 'colc1_value'
})
self.assertEqual(result, ([], 2L, 0L, []))
vtgate_conn.commit()
def test_aggr(self):
# test_aggr tests text column aggregation
vtgate_conn = get_connection()
vtgate_conn.begin()
# insert upper and lower-case mixed rows in jumbled order
result = self.execute_on_master(
vtgate_conn,
'insert into vt_aggr (id, name) values '
'(10, \'A\'), '
'(9, \'a\'), '
'(8, \'b\'), '
'(7, \'B\'), '
'(6, \'d\'), '
'(5, \'c\'), '
'(4, \'C\'), '
'(3, \'d\'), '
'(2, \'e\'), '
'(1, \'E\')',
{})
vtgate_conn.commit()
result = self.execute_on_master(
vtgate_conn, 'select sum(id), name from vt_aggr group by name', {})
    values = [v1 for v1, v2 in result[0]]
    self.assertEqual(
        values,
[(Decimal('19')),
(Decimal('15')),
(Decimal('9')),
(Decimal('9')),
(Decimal('3'))])
def test_music(self):
# music is for testing owned lookup index
vtgate_conn = get_connection()
# Initialize the sequence.
# TODO(sougou): Use DDL when ready.
vtgate_conn.begin()
self.execute_on_master(vtgate_conn, init_vt_music_seq, {})
vtgate_conn.commit()
count = 4
for x in xrange(count):
i = x+1
vtgate_conn.begin()
result = self.execute_on_master(
vtgate_conn,
'insert into vt_music (user_id, song) values (:user_id, :song)',
{'user_id': i, 'song': 'test %s' % i})
self.assertEqual(result, ([], 1L, i, []))
vtgate_conn.commit()
for x in xrange(count):
i = x+1
result = self.execute_on_master(
vtgate_conn,
'select user_id, id, song from vt_music where id = :id', {'id': i})
self.assertEqual(
result,
([(i, i, 'test %s' % i)], 1, 0,
[('user_id', self.int_type),
('id', self.int_type),
('song', self.string_type)]))
vtgate_conn.begin()
result = self.execute_on_master(
vtgate_conn,
'insert into vt_music (user_id, id, song) '
'values (:user_id0, :id0, :song0), (:user_id1, :id1, :song1)',
{'user_id0': 5, 'id0': 6, 'song0': 'test 6', 'user_id1': 7, 'id1': 7,
'song1': 'test 7'})
self.assertEqual(result, ([], 2L, 0L, []))
vtgate_conn.commit()
result = shard_0_master.mquery(
'vt_user', 'select user_id, id, song from vt_music')
self.assertEqual(
result,
((1L, 1L, 'test 1'), (2L, 2L, 'test 2'), (3L, 3L, 'test 3'),
(5L, 6L, 'test 6')))
result = shard_1_master.mquery(
'vt_user', 'select user_id, id, song from vt_music')
self.assertEqual(
result, ((4L, 4L, 'test 4'), (7L, 7L, 'test 7')))
result = lookup_master.mquery(
'vt_lookup', 'select music_id, user_id from music_user_map')
self.assertEqual(
result,
((1L, 1L), (2L, 2L), (3L, 3L), (4L, 4L), (6L, 5L), (7L, 7L)))
vtgate_conn.begin()
result = self.execute_on_master(
vtgate_conn,
'update vt_music set song = :song where id = :id',
{'id': 6, 'song': 'test six'})
self.assertEqual(result, ([], 1L, 0L, []))
result = self.execute_on_master(
vtgate_conn,
'update vt_music set song = :song where id = :id',
{'id': 4, 'song': 'test four'})
self.assertEqual(result, ([], 1L, 0L, []))
vtgate_conn.commit()
result = shard_0_master.mquery(
'vt_user', 'select user_id, id, song from vt_music')
self.assertEqual(
result, ((1L, 1L, 'test 1'), (2L, 2L, 'test 2'), (3L, 3L, 'test 3'),
(5L, 6L, 'test six')))
result = shard_1_master.mquery(
'vt_user', 'select user_id, id, song from vt_music')
self.assertEqual(
result, ((4L, 4L, 'test four'), (7L, 7L, 'test 7')))
vtgate_conn.begin()
result = self.execute_on_master(
vtgate_conn,
'delete from vt_music where id = :id',
{'id': 3})
self.assertEqual(result, ([], 1L, 0L, []))
result = self.execute_on_master(
vtgate_conn,
'delete from vt_music where user_id = :user_id',
{'user_id': 4})
self.assertEqual(result, ([], 1L, 0L, []))
vtgate_conn.commit()
result = shard_0_master.mquery(
'vt_user', 'select user_id, id, song from vt_music')
self.assertEqual(
result, ((1L, 1L, 'test 1'), (2L, 2L, 'test 2'), (5L, 6L, 'test six')))
result = shard_1_master.mquery(
'vt_user', 'select user_id, id, song from vt_music')
self.assertEqual(result, ((7L, 7L, 'test 7'),))
result = lookup_master.mquery(
'vt_lookup', 'select music_id, user_id from music_user_map')
self.assertEqual(result, ((1L, 1L), (2L, 2L), (6L, 5L), (7L, 7L)))
def test_music_extra(self):
    # music_extra is for testing unowned lookup index
vtgate_conn = get_connection()
vtgate_conn.begin()
result = self.execute_on_master(
vtgate_conn,
'insert into vt_music_extra (music_id, user_id, artist) '
'values (:music_id, :user_id, :artist)',
{'music_id': 1, 'user_id': 1, 'artist': 'test 1'})
self.assertEqual(result, ([], 1L, 0L, []))
result = self.execute_on_master(
vtgate_conn,
'insert into vt_music_extra (music_id, artist) '
'values (:music_id0, :artist0), (:music_id1, :artist1)',
{'music_id0': 6, 'artist0': 'test 6', 'music_id1': 7,
'artist1': 'test 7'})
self.assertEqual(result, ([], 2L, 0L, []))
vtgate_conn.commit()
result = self.execute_on_master(
vtgate_conn,
'select music_id, user_id, artist '
'from vt_music_extra where music_id = :music_id',
{'music_id': 6})
self.assertEqual(
result, ([(6L, 5L, 'test 6')], 1, 0,
[('music_id', self.int_type),
('user_id', self.int_type),
('artist', self.string_type)]))
result = shard_0_master.mquery(
'vt_user', 'select music_id, user_id, artist from vt_music_extra')
self.assertEqual(result, ((1L, 1L, 'test 1'), (6L, 5L, 'test 6')))
result = shard_1_master.mquery(
'vt_user', 'select music_id, user_id, artist from vt_music_extra')
self.assertEqual(result, ((7L, 7L, 'test 7'),))
vtgate_conn.begin()
result = self.execute_on_master(
vtgate_conn,
'update vt_music_extra set artist = :artist '
'where music_id = :music_id',
{'music_id': 6, 'artist': 'test six'})
self.assertEqual(result, ([], 1L, 0L, []))
result = self.execute_on_master(
vtgate_conn,
'update vt_music_extra set artist = :artist '
'where music_id = :music_id',
{'music_id': 7, 'artist': 'test seven'})
self.assertEqual(result, ([], 1L, 0L, []))
vtgate_conn.commit()
result = shard_0_master.mquery(
'vt_user', 'select music_id, user_id, artist from vt_music_extra')
self.assertEqual(result, ((1L, 1L, 'test 1'), (6L, 5L, 'test six')))
result = shard_1_master.mquery(
'vt_user', 'select music_id, user_id, artist from vt_music_extra')
self.assertEqual(result, ((7L, 7L, 'test seven'),))
vtgate_conn.begin()
result = self.execute_on_master(
vtgate_conn,
'delete from vt_music_extra where music_id = :music_id',
{'music_id': 6})
self.assertEqual(result, ([], 1L, 0L, []))
result = self.execute_on_master(
vtgate_conn,
'delete from vt_music_extra where music_id = :music_id',
{'music_id': 7})
self.assertEqual(result, ([], 1L, 0L, []))
vtgate_conn.commit()
result = shard_0_master.mquery(
'vt_user', 'select music_id, user_id, artist from vt_music_extra')
self.assertEqual(result, ((1L, 1L, 'test 1'),))
result = shard_1_master.mquery(
'vt_user', 'select music_id, user_id, artist from vt_music_extra')
self.assertEqual(result, ())
def test_main_seq(self):
vtgate_conn = get_connection()
# Initialize the sequence.
# TODO(sougou): Use DDL when ready.
vtgate_conn.begin()
self.execute_on_master(vtgate_conn, init_vt_main_seq, {})
vtgate_conn.commit()
count = 4
for x in xrange(count):
i = x+1
vtgate_conn.begin()
result = self.execute_on_master(
vtgate_conn,
'insert into main (val) values (:val)',
{'val': 'test %s' % i})
self.assertEqual(result, ([], 1L, i, []))
vtgate_conn.commit()
result = self.execute_on_master(
vtgate_conn, 'select id, val from main where id = 4', {})
self.assertEqual(
result,
([(4, 'test 4')], 1, 0,
[('id', self.int_type),
('val', self.string_type)]))
# Now test direct calls to sequence.
result = self.execute_on_master(
vtgate_conn, 'select next 1 values from vt_main_seq', {})
self.assertEqual(
result,
([(5,)], 1, 0,
[('nextval', self.int_type)]))
def test_upsert(self):
vtgate_conn = get_connection()
# Create lookup entries for primary vindex:
# No entry for 2. upsert_primary is not owned.
    # So we need to pre-create the entries that the
    # subsequent inserts will use to compute the
    # keyspace id.
vtgate_conn.begin()
self.execute_on_master(
vtgate_conn,
'insert into upsert_primary(id, ksnum_id) values'
'(1, 1), (3, 3), (4, 4), (5, 5), (6, 6)',
{})
vtgate_conn.commit()
# Create rows on the main table.
vtgate_conn.begin()
self.execute_on_master(
vtgate_conn,
'insert into upsert(pk, owned, user_id, col) values'
'(1, 1, 1, 0), (3, 3, 3, 0), (4, 4, 4, 0), (5, 5, 5, 0), (6, 6, 6, 0)',
{})
vtgate_conn.commit()
# Now upsert: 1, 5 and 6 should succeed.
vtgate_conn.begin()
self.execute_on_master(
vtgate_conn,
'insert into upsert(pk, owned, user_id, col) values'
'(1, 1, 1, 1), (2, 2, 2, 2), (3, 1, 1, 3), (4, 4, 1, 4), '
'(5, 5, 5, 5), (6, 6, 6, 6) '
'on duplicate key update col = values(col)',
{})
vtgate_conn.commit()
result = self.execute_on_master(
vtgate_conn,
'select pk, owned, user_id, col from upsert order by pk',
{})
self.assertEqual(
result[0],
[(1, 1, 1, 1), (3, 3, 3, 0), (4, 4, 4, 0),
(5, 5, 5, 5), (6, 6, 6, 6)])
# insert ignore
vtgate_conn.begin()
self.execute_on_master(
vtgate_conn,
'insert into upsert_primary(id, ksnum_id) values(7, 7)',
{})
vtgate_conn.commit()
# 1 will be sent but will not change existing row.
# 2 will not be sent because there is no keyspace id for it.
# 7 will be sent and will create a row.
vtgate_conn.begin()
self.execute_on_master(
vtgate_conn,
'insert ignore into upsert(pk, owned, user_id, col) values'
'(1, 1, 1, 2), (2, 2, 2, 2), (7, 7, 7, 7)',
{})
vtgate_conn.commit()
result = self.execute_on_master(
vtgate_conn,
'select pk, owned, user_id, col from upsert order by pk',
{})
self.assertEqual(
result[0],
[(1, 1, 1, 1), (3, 3, 3, 0), (4, 4, 4, 0),
(5, 5, 5, 5), (6, 6, 6, 6), (7, 7, 7, 7)])
def test_joins_subqueries(self):
vtgate_conn = get_connection()
vtgate_conn.begin()
self.execute_on_master(
vtgate_conn,
'insert into join_user (id, name) values (:id, :name)',
{'id': 1, 'name': 'name1'})
self.execute_on_master(
vtgate_conn,
'insert into join_user_extra (user_id, email) '
'values (:user_id, :email)',
{'user_id': 1, 'email': 'email1'})
self.execute_on_master(
vtgate_conn,
'insert into join_user_extra (user_id, email) '
'values (:user_id, :email)',
{'user_id': 2, 'email': 'email2'})
self.execute_on_master(
vtgate_conn,
'insert into join_name_info (name, info) '
'values (:name, :info)',
{'name': 'name1', 'info': 'name test'})
vtgate_conn.commit()
result = self.execute_on_master(
vtgate_conn,
'select u.id, u.name, e.user_id, e.email '
'from join_user u join join_user_extra e where e.user_id = u.id',
{})
self.assertEqual(
result,
([(1L, 'name1', 1L, 'email1')],
1,
0,
[('id', self.int_type),
('name', self.string_type),
('user_id', self.int_type),
('email', self.string_type)]))
result = self.execute_on_master(
vtgate_conn,
'select u.id, u.name, e.user_id, e.email '
'from join_user u join join_user_extra e where e.user_id = u.id+1',
{})
self.assertEqual(
result,
([(1L, 'name1', 2L, 'email2')],
1,
0,
[('id', self.int_type),
('name', self.string_type),
('user_id', self.int_type),
('email', self.string_type)]))
result = self.execute_on_master(
vtgate_conn,
'select u.id, u.name, e.user_id, e.email '
'from join_user u left join join_user_extra e on e.user_id = u.id+1',
{})
self.assertEqual(
result,
([(1L, 'name1', 2L, 'email2')],
1,
0,
[('id', self.int_type),
('name', self.string_type),
('user_id', self.int_type),
('email', self.string_type)]))
result = self.execute_on_master(
vtgate_conn,
'select u.id, u.name, e.user_id, e.email '
'from join_user u left join join_user_extra e on e.user_id = u.id+2',
{})
self.assertEqual(
result,
([(1L, 'name1', None, None)],
1,
0,
[('id', self.int_type),
('name', self.string_type),
('user_id', self.int_type),
('email', self.string_type)]))
result = self.execute_on_master(
vtgate_conn,
'select u.id, u.name, e.user_id, e.email '
'from join_user u join join_user_extra e on e.user_id = u.id+2 '
'where u.id = 2',
{})
self.assertEqual(
result,
([],
0,
0,
[('id', self.int_type),
('name', self.string_type),
('user_id', self.int_type),
('email', self.string_type)]))
result = self.execute_on_master(
vtgate_conn,
'select u.id, u.name, n.info '
'from join_user u join join_name_info n on u.name = n.name '
'where u.id = 1',
{})
self.assertEqual(
result,
([(1L, 'name1', 'name test')],
1,
0,
[('id', self.int_type),
('name', self.string_type),
('info', self.string_type)]))
# test a cross-shard subquery
result = self.execute_on_master(
vtgate_conn,
'select id, name from join_user '
'where id in (select user_id from join_user_extra)',
{})
self.assertEqual(
result,
([(1L, 'name1')],
1,
0,
[('id', self.int_type),
('name', self.string_type)]))
vtgate_conn.begin()
self.execute_on_master(
vtgate_conn,
'delete from join_user where id = :id',
{'id': 1})
self.execute_on_master(
vtgate_conn,
'delete from join_user_extra where user_id = :user_id',
{'user_id': 1})
self.execute_on_master(
vtgate_conn,
'delete from join_user_extra where user_id = :user_id',
{'user_id': 2})
vtgate_conn.commit()
def test_insert_value_required(self):
vtgate_conn = get_connection()
try:
vtgate_conn.begin()
with self.assertRaisesRegexp(dbexceptions.DatabaseError,
'.*could not map NULL to a keyspace id.*'):
self.execute_on_master(
vtgate_conn,
'insert into vt_user_extra (email) values (:email)',
{'email': 'test 10'})
finally:
vtgate_conn.rollback()
def test_vindex_func(self):
vtgate_conn = get_connection()
result = self.execute_on_master(
vtgate_conn,
'select id, keyspace_id from hash_index where id = :id',
{'id': 1})
self.assertEqual(
result,
([('1', '\x16k@\xb4J\xbaK\xd6')],
1,
0,
[('id', self.varbinary_type),
('keyspace_id', self.varbinary_type)]))
def test_analyze_table(self):
vtgate_conn = get_connection()
self.execute_on_master(
vtgate_conn,
'use user',
{})
result = self.execute_on_master(
vtgate_conn,
'analyze table vt_user',
{})
self.assertEqual(
result[0],
[('vt_user.vt_user', 'analyze', 'status', 'OK')])
def test_transaction_modes(self):
vtgate_conn = get_connection()
cursor = vtgate_conn.cursor(
tablet_type='master', keyspace=None, writable=True, single_db=True)
cursor.begin()
cursor.execute(
'insert into twopc_user (user_id, val) values(1, \'val\')', {})
with self.assertRaisesRegexp(
dbexceptions.DatabaseError, '.*multi-db transaction attempted.*'):
cursor.execute(
'insert into twopc_lookup (id, val) values(1, \'val\')', {})
cursor = vtgate_conn.cursor(
tablet_type='master', keyspace=None, writable=True, twopc=True)
cursor.begin()
cursor.execute(
'insert into twopc_user (user_id, val) values(1, \'val\')', {})
cursor.execute(
'insert into twopc_lookup (id, val) values(1, \'val\')', {})
cursor.commit()
cursor.execute('select user_id, val from twopc_user where user_id = 1', {})
self.assertEqual(cursor.fetchall(), [(1, 'val')])
cursor.execute('select id, val from twopc_lookup where id = 1', {})
self.assertEqual(cursor.fetchall(), [(1, 'val')])
cursor.begin()
cursor.execute('delete from twopc_user where user_id = 1', {})
cursor.execute('delete from twopc_lookup where id = 1', {})
cursor.commit()
cursor.execute('select user_id, val from twopc_user where user_id = 1', {})
self.assertEqual(cursor.fetchall(), [])
cursor.execute('select id, val from twopc_lookup where id = 1', {})
self.assertEqual(cursor.fetchall(), [])
def test_vtclient(self):
"""This test uses vtclient to send and receive various queries.
"""
# specify a good default keyspace for the connection here.
utils.vtgate.vtclient(
'insert into vt_user_extra(user_id, email) values (:v1, :v2)',
keyspace='user',
bindvars=[10, 'test 10'])
out, _ = utils.vtgate.vtclient(
'select user_id, email from vt_user_extra where user_id = :v1',
bindvars=[10], json_output=True)
self.assertEqual(out, {
u'fields': [u'user_id', u'email'],
u'rows': [[u'10', u'test 10']],
})
utils.vtgate.vtclient(
'update vt_user_extra set email=:v2 where user_id = :v1',
bindvars=[10, 'test 1000'])
out, _ = utils.vtgate.vtclient(
'select user_id, email from vt_user_extra where user_id = :v1',
bindvars=[10], streaming=True, json_output=True)
self.assertEqual(out, {
u'fields': [u'user_id', u'email'],
u'rows': [[u'10', u'test 1000']],
})
utils.vtgate.vtclient(
'delete from vt_user_extra where user_id = :v1', bindvars=[10])
out, _ = utils.vtgate.vtclient(
'select user_id, email from vt_user_extra where user_id = :v1',
bindvars=[10], json_output=True)
self.assertEqual(out, {
u'fields': [u'user_id', u'email'],
u'rows': None,
})
# check that specifying an invalid keyspace is propagated and triggers an
# error
_, err = utils.vtgate.vtclient(
'insert into vt_user_extra(user_id, email) values (:v1, :v2)',
keyspace='invalid',
bindvars=[10, 'test 10'],
raise_on_error=False)
self.assertIn('keyspace invalid not found in vschema', err)
def test_vtctl_vtgate_execute(self):
"""This test uses 'vtctl VtGateExecute' to send and receive various queries.
"""
utils.vtgate.execute(
'insert into vt_user_extra(user_id, email) values (:user_id, :email)',
bindvars={'user_id': 11, 'email': 'test 11'})
qr = utils.vtgate.execute(
'select user_id, email from vt_user_extra where user_id = :user_id',
bindvars={'user_id': 11})
logging.debug('Original row: %s', str(qr))
self.assertEqual(qr['fields'][0]['name'], 'user_id')
self.assertEqual(len(qr['rows']), 1)
v = qr['rows'][0][1]
self.assertEqual(v, 'test 11')
    # Test that excluding field names (included_fields:TYPE_ONLY) works.
qr = utils.vtgate.execute(
'select user_id, email from vt_user_extra where user_id = :user_id',
bindvars={'user_id': 11}, execute_options='included_fields:TYPE_ONLY ')
logging.debug('Original row: %s', str(qr))
self.assertNotIn('name', qr['fields'][0])
self.assertEqual(len(qr['rows']), 1)
v = qr['rows'][0][1]
self.assertEqual(v, 'test 11')
utils.vtgate.execute(
'update vt_user_extra set email=:email where user_id = :user_id',
bindvars={'user_id': 11, 'email': 'test 1100'})
qr = utils.vtgate.execute(
'select user_id, email from vt_user_extra where user_id = :user_id',
bindvars={'user_id': 11})
logging.debug('Modified row: %s', str(qr))
self.assertEqual(len(qr['rows']), 1)
v = qr['rows'][0][1]
self.assertEqual(v, 'test 1100')
utils.vtgate.execute(
'delete from vt_user_extra where user_id = :user_id',
bindvars={'user_id': 11})
qr = utils.vtgate.execute(
'select user_id, email from vt_user_extra where user_id = :user_id',
bindvars={'user_id': 11})
self.assertEqual(len(qr['rows'] or []), 0)
def test_split_query(self):
"""This test uses 'vtctl VtGateSplitQuery' to validate the Map-Reduce APIs.
We want to return KeyRange queries.
"""
sql = 'select id, name from vt_user'
s = utils.vtgate.split_query(sql, 'user', 2)
self.assertEqual(len(s), 2)
first_half_queries = 0
second_half_queries = 0
for q in s:
self.assertEqual(q['query']['sql'], sql)
self.assertIn('key_range_part', q)
self.assertEqual(len(q['key_range_part']['key_ranges']), 1)
kr = q['key_range_part']['key_ranges'][0]
eighty_in_base64 = 'gA=='
is_first_half = 'start' not in kr and kr['end'] == eighty_in_base64
is_second_half = 'end' not in kr and kr['start'] == eighty_in_base64
self.assertTrue(is_first_half or is_second_half,
'invalid keyrange %s' % str(kr))
if is_first_half:
first_half_queries += 1
else:
second_half_queries += 1
self.assertEqual(first_half_queries, 1, 'invalid split %s' % str(s))
self.assertEqual(second_half_queries, 1, 'invalid split %s' % str(s))
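    # Illustrative note (not part of the original test): the '-80'/'80-' shard
    # boundary is the single keyspace-id byte 0x80, and base64.b64encode('\x80')
    # yields 'gA==', which is exactly the eighty_in_base64 value checked above.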
def test_vschema_vars(self):
"""Tests the variables exported by vtgate.
This test needs to run as the last test, as it depends on what happened
previously.
"""
v = utils.vtgate.get_vars()
self.assertIn('VtgateVSchemaCounts', v)
self.assertIn('Reload', v['VtgateVSchemaCounts'])
self.assertGreater(v['VtgateVSchemaCounts']['Reload'], 0)
self.assertNotIn('WatchError', v['VtgateVSchemaCounts'], 0)
self.assertNotIn('Parsing', v['VtgateVSchemaCounts'])
if __name__ == '__main__':
utils.main()
|
#encoding:utf-8
import pymysql
import requests
# Open the database connection
db = pymysql.connect("rm-2ze6g25687k3mjso64o.mysql.rds.aliyuncs.com",
"shaozi2016", "XLyNF4I0TQA9YXZf", "db_test")
# Create a cursor object using the cursor() method
cursor = db.cursor()
# Execute the SQL query using the execute() method
query_sql='select content from sz_message where mobile="17713162100" order by created_at desc limit 1'
cursor.execute(query_sql)
# Fetch a single row using the fetchone() method.
data = cursor.fetchone()
print(data[0],type(data[0]))
# print(data)
# print(type(data))
# Close the database connection
db.close()
class Get_code():
    # Hold the most recent SMS verification code read from the database above.
    code = data[0]
# if __name__ == '__main__':
# print(Get_code,type(Get_code))
def login():
url='https://api.shaoziketang.com/wap/v2.8/data/login'
data={"mobile":"15312362160","rand":Get_code.code}
res=requests.post(url,data)
print(res.json())
if __name__ == '__main__':
login()
|
import csv
import sys
from urllib import request, error
import shutil
import json
import os
import ssl
def import_species_list(species):
SPECIES_LIST = {}
fields = ["num","name"]
with open(species) as csvfile:
f = csv.DictReader(csvfile, fields)
for row in f:
SPECIES_LIST[int(row["num"])] = row["name"].replace('"','')
return SPECIES_LIST
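# Illustrative usage sketch (not part of the original script and never called):
# given a hypothetical CSV 'example_species.csv' whose rows look like
#   1,"American Robin"
#   2,"Blue Jay"
# import_species_list returns {1: 'American Robin', 2: 'Blue Jay'}.
def _species_list_example():
    species = import_species_list('example_species.csv')  # hypothetical file name
    return species[1]  # -> 'American Robin' for the sample rows above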
def main():
# Error checking - argv input
# if len(sys.argv) != 2:
# return
SPECIES_LIST = import_species_list(sys.argv[1])
    for idx in range(1, len(SPECIES_LIST) + 1):
print(SPECIES_LIST[idx])
try:
            download([SPECIES_LIST[idx]], idx)  # metadata() expects a list of query terms
except KeyError:
print(f"No listings for {SPECIES_LIST[idx]}.")
# adapted from https://github.com/ntivirikin/xeno-canto-py.git by Nazariy Tivirikin
# Disable certificate verification
ssl._create_default_https_context = ssl._create_unverified_context
def metadata(filt):
page = 1
page_num = 1
filt_path = list()
filt_url = list()
print("Retrieving metadata...")
# Scrubbing input for file name and url
for f in filt:
filt_url.append(f.replace(' ', '%20'))
filt_path.append((f.replace(' ', '')).replace(':', '_').replace("\"",""))
path = 'dataset/metadata/' + ''.join(filt_path)
# Overwrite metadata query folder
if os.path.exists(path):
shutil.rmtree(path)
os.makedirs(path)
# Save all pages of the JSON response
while page < page_num + 1:
url = 'https://www.xeno-canto.org/api/2/recordings?query={0}&page={1}'.format('%20'.join(filt_url), page)
try:
r = request.urlopen(url)
except error.HTTPError as e:
print('An error has occurred: ' + str(e))
exit()
print("Downloading metadate page " + str(page) + "...")
data = json.loads(r.read().decode('UTF-8'))
filename = path + '/page' + str(page) + '.json'
with open(filename, 'w') as saved:
json.dump(data, saved)
page_num = data['numPages']
page += 1
# Return the path to the folder containing downloaded metadata
return path
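# Illustrative sketch (never called anywhere): how metadata() above turns a query
# into a URL fragment and a folder name. The query terms shown are hypothetical.
def _metadata_naming_example():
    filt = ['Parus major', 'q:A']
    filt_url = [f.replace(' ', '%20') for f in filt]           # ['Parus%20major', 'q:A']
    filt_path = [f.replace(' ', '').replace(':', '_').replace('"', '') for f in filt]
    query = '%20'.join(filt_url)                               # 'Parus%20major%20q:A'
    folder = 'dataset/metadata/' + ''.join(filt_path)          # 'dataset/metadata/Parusmajorq_A'
    return query, folder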
# Retrieves metadata and audio recordings
# adapted from https://github.com/ntivirikin/xeno-canto-py.git by Nazariy Tivirikin
def download(filt, species_number):
page = 1
page_num = 1
print("Downloading all recordings for query...")
# Retrieve metadata to parse for download links
path = metadata(filt)
with open(path + '/page' + str(page) + ".json", 'r') as jsonfile:
data = jsonfile.read()
data = json.loads(data)
page_num = data['numPages']
print("Found " + str(data['numRecordings']) + " recordings for given query, downloading...")
    while page < page_num + 1:
        # Re-read the metadata for the current page so every page is processed,
        # not just page 1.
        with open(path + '/page' + str(page) + '.json', 'r') as jsonfile:
            data = json.loads(jsonfile.read())
        # Pull species name, track ID, and download link for naming and retrieval.
        for i in range(len(data['recordings'])):
url = 'http:' + data['recordings'][i]['file']
name = (data['recordings'][i]['en']).replace(' ', '')
track_id = data['recordings'][i]['id']
# altered from original code - data saved in numbered directory in the
# 'data' directory. the number is generated from the existing bird species
# loaded and mapped into the BIRDS.csv (and loaded into the global variable BIRDS).
            audio_path = 'data/' + str(species_number) + '/'
audio_file = track_id + '.mp3'
if not os.path.exists(audio_path):
os.makedirs(audio_path)
# If the file exists in the directory, we will skip it
elif os.path.exists(audio_path + audio_file):
continue
print("Downloading " + track_id + ".mp3...")
request.urlretrieve(url, audio_path + audio_file)
page += 1
if __name__=="__main__":
main()
|
# Generated by Django 3.0.4 on 2020-04-19 12:05
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('apps', '0008_auto_20200419_1502'),
]
operations = [
migrations.AlterField(
model_name='question',
name='answer',
field=models.TextField(max_length=1000, unique=True, verbose_name='Ответ'),
),
migrations.AlterField(
model_name='question',
name='question',
field=models.TextField(max_length=500, unique=True, verbose_name='Вопрос'),
),
]
|
"""Package level common values"""
import os
DEBUG = False
BASE_URL = 'http://localhost:5000/api/' if DEBUG else 'http://auacm.com/api/'
try:
session = open(
os.path.join(os.path.expanduser('~'), '.auacm_session.txt'),
'r').readline().strip()
except IOError:
session = ''
# pylint: disable=anomalous-backslash-in-string
logo = """
/$$$$$$ /$$ /$$ /$$$$$$ /$$$$$$ /$$ /$$
/$$__ $$| $$ | $$ /$$__ $$ /$$__ $$| $$$ /$$$
| $$ \ $$| $$ | $$| $$ \ $$| $$ \__/| $$$$ /$$$$
| $$$$$$$$| $$ | $$| $$$$$$$$| $$ | $$ $$/$$ $$
| $$__ $$| $$ | $$| $$__ $$| $$ | $$ $$$| $$
| $$ | $$| $$ | $$| $$ | $$| $$ $$| $$\ $ | $$
| $$ | $$| $$$$$$/| $$ | $$| $$$$$$/| $$ \/ | $$
|__/ |__/ \______/ |__/ |__/ \______/ |__/ |__/
"""
|
# -*- coding: utf-8 -*-
"""
Created on Sat Jun 22 00:22:04 2019
@author: HP
"""
T=int(input())
while(T):
n,m=[int(x) for x in input().split()]
A=[]
for i in range(n):
B=[int(x) for x in input().split()]
A.append(B)
DP=[[0 for i in range(m)] for j in range(n)]
for i in range(m):
if i<m-1:
DP[0][i]=DP[0][i-1]+A[1][i]+A[0][i+1]
else:
DP[0][i]=DP[0][i-1]+A[1][i]
for i in range(n):
if i<n-1:
DP[i][0]=DP[i-1][0]+A[i+1][0]+A[i][1]
else:
DP[i][0]=DP[i-1][0]+A[i][1]
for i in range(1,n):
for j in range(1,m):
if i<n-1 and j<m-1:
DP[i][j]=min(DP[i-1][j],DP[i][j-1])+A[i+1][j]+A[i][j+1]
elif i<n-1:
DP[i][j]=min(DP[i-1][j],DP[i][j-1])+A[i+1][j]
elif j<m-1:
DP[i][j]=min(DP[i-1][j],DP[i][j-1])+A[i][j+1]
else:
DP[i][j]=min(DP[i-1][j],DP[i][j-1])
print(DP)
T=T-1
|
#-*- coding: utf-8 -*-
import pandas as pd
import matplotlib.pyplot as plt
catering_sale = '../data/catering_sale.xls'
data = pd.read_excel(catering_sale, index_col=u'日期')
plt.rcParams['font.sans-serif'] = ['SimHei'] # render Chinese labels correctly
plt.rcParams['axes.unicode_minus'] = False # render the minus sign correctly
plt.figure()
# box_line = plt.boxplot(return_type='dict')
box_line = data.boxplot(return_type='dict')
x = box_line['fliers'][0].get_xdata()
y = box_line['fliers'][0].get_ydata()
y.sort()
#annotate
for i in range(len(x)):
if i > 0:
plt.annotate(y[i], xy=(x[i],y[i]), xytext=(x[i]+0.05-0.8/(y[i]-y[i-1]), y[i]))
else:
plt.annotate(y[i], xy=(x[i],y[i]), xytext=(x[i]+0.08,y[i]))
plt.show()
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Apr 5 00:53:28 2020
@author: lifecell
"""
print(7+8)
7+8
|
from django.conf import settings
from django.http import HttpResponse
from django.utils import timezone
from django.views import View
from osmcal import views
from pytz import timezone as tzp
from timezonefinder import TimezoneFinder
from . import serializers
from .decorators import ALLOWED_HEADERS, cors_any, language_from_header
JSON_CONTENT_TYPE = (
"application/json; charset=" + settings.DEFAULT_CHARSET
) # This shall be utf-8, otherwise we're not friends anymore.
tf = TimezoneFinder()
class CORSOptionsMixin(object):
def options(self, request, *args, **kwargs):
r = HttpResponse()
r["Access-Control-Allow-Headers"] = ", ".join(ALLOWED_HEADERS)
r["Access-Control-Allow-Origin"] = "*"
return r
class EventList(CORSOptionsMixin, views.EventListView):
def get_serializer(self):
return serializers.EventsSerializer
@cors_any
@language_from_header
def get(self, request, *args, **kwargs):
es = self.get_serializer()(self.get_queryset(request.GET), context={"request": request})
return HttpResponse(es.json, content_type=JSON_CONTENT_TYPE)
class PastEventList(EventList):
RESULT_LIMIT = 20
def filter_queryset(self, qs, **kwargs):
return qs.filter(start__lte=timezone.now()).order_by("-local_start")
def get_queryset(self, *args, **kwargs):
return super().get_queryset(*args, **kwargs)[: self.RESULT_LIMIT]
class Timezone(View):
def get(self, request, *args, **kwargs):
lat = float(request.GET["lat"])
lon = float(request.GET["lon"])
tz = tf.timezone_at(lng=lon, lat=lat)
if tz is None:
return HttpResponse("", status=400)
return HttpResponse(tzp(tz))
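# Minimal usage sketch (not wired into any URLconf; purely illustrative):
# TimezoneFinder maps a coordinate pair to an IANA timezone name, e.g. the
# coordinates below (central Berlin) resolve to 'Europe/Berlin'.
def _timezone_lookup_example():
    return tf.timezone_at(lng=13.41, lat=52.52)  # -> 'Europe/Berlin'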
|
#!/usr/bin/env python3
'''
python_package_template setup config
'''
import os
from setuptools import setup
here = os.path.dirname(__file__)
about = {}
with open(os.path.join(here, 'python_package_template', '__about__.py')) as fobj:
exec(fobj.read(), about)
setup(
name='python_package_template',
version=about['__version__'],
packages = [
'python_package_template',
],
install_requires = [
'numpy',
'requests',
],
include_package_data = True,
author = about['__author__'],
author_email = about['__author_email__'],
maintainer = about['__author__'],
maintainer_email = about['__author_email__'],
description = about['__description__'],
url = about['__url__']
)
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os
import getopt
import json
import logging
from analyse_immo.factory import Factory
from analyse_immo.database import Database
from analyse_immo.rendement import Rendement
from analyse_immo.rapports.rapport import generate_rapport
__NAME = 'Analyse Immo'
__VERSION = '2.0.0'
__location__ = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__)))
__INPUT_FILEPATH = os.path.join(__location__, 'data', 'input_2020.json')
__OUTPUT_FILEPATH = os.path.join(__location__, 'analyse_immo.log')
def main(argv):
configure_logger()
logging.info('{} {}\n'.format(__NAME, __VERSION))
inputfile = parse_args(argv)
# If path to input file is provided, create output log file in the same folder
if inputfile:
location = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(inputfile)))
filepath = os.path.join(location, 'analyse_immo.log')
add_logger_file_handler(filepath)
input_data = load_file(inputfile)
achat_data = input_data['achat']
defaut_data = input_data['defaut']
lots_data = input_data['lots']
credit_data = input_data['credit']
impot_data = input_data['impot']
database = Database()
defaut = Factory.make_defaut(defaut_data)
bien_immo = Factory.make_bien_immo(achat_data, lots_data, defaut)
credit = Factory.make_credit(credit_data, bien_immo)
rendement = Rendement(bien_immo, credit)
# Impot
annee_achat = achat_data['annee']
credit_duree = credit_data['duree_annee']
# IRPP + 2044
irpp_2044_list = list()
for i_annee in range(credit_duree):
annee_revenu = annee_achat + i_annee
irpp = Factory.make_irpp(database, impot_data, annee_revenu)
irpp.annexe_2044 = Factory.make_annexe_2044(database, bien_immo, credit, i_annee + 1)
irpp_2044_list.append(irpp)
# IRPP + Micro foncier
irpp_micro_foncier_list = list()
for i_annee in range(credit_duree):
annee_revenu = annee_achat + i_annee
irpp = Factory.make_irpp(database, impot_data, annee_revenu)
irpp.micro_foncier = Factory.make_micro_foncier(database, bien_immo)
irpp_micro_foncier_list.append(irpp)
# Rapport
generate_rapport(bien_immo, credit, annee_achat, irpp_2044_list, irpp_micro_foncier_list, rendement)
def parse_args(argv):
inputfile = None
try:
opts, _ = getopt.getopt(argv, 'i:h', [])
except getopt.GetoptError:
print_help()
quit()
for opt, arg in opts:
if opt == '-h':
print_help()
quit()
elif opt in ('-i'):
inputfile = arg
return inputfile
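# Illustrative sketch (not called by main): parse_args only recognises -i and -h,
# so a command line such as ['-i', 'data/input_2020.json'] yields that path.
def _parse_args_example():
    return parse_args(['-i', 'data/input_2020.json'])  # -> 'data/input_2020.json'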
def print_help():
print('-i input file')
print('-h help')
def configure_logger():
'''
write to console, simple message with log level info
write to file, formatted message with log level debug
'''
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
consoleHandler = logging.StreamHandler()
consoleHandler.setLevel(logging.INFO)
consoleFormatter = logging.Formatter('%(message)s')
consoleHandler.setFormatter(consoleFormatter)
logger.addHandler(consoleHandler)
fileHandler = logging.FileHandler(__OUTPUT_FILEPATH, mode='w')
fileHandler.setLevel(logging.DEBUG)
fileFormatter = logging.Formatter('%(message)s')
fileHandler.setFormatter(fileFormatter)
logger.addHandler(fileHandler)
def add_logger_file_handler(filepath):
import logging.handlers
logger = logging.getLogger()
fileHandler = logging.handlers.TimedRotatingFileHandler(filepath, when='S')
fileHandler.setLevel(logging.DEBUG)
fileFormatter = logging.Formatter('%(message)s')
fileHandler.setFormatter(fileFormatter)
logger.addHandler(fileHandler)
def load_file(inputfile):
if not inputfile:
inputfile = __INPUT_FILEPATH
with open(inputfile, 'r') as file:
user_input = json.load(file)
return user_input
|
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
train = pd.read_csv("kospi200test_r.csv")
end_prices = train['종가']
# build sliding windows of x_len inputs followed by y_len targets
x_len = 40
y_len = 10
sequence_length = x_len + y_len
result = []
for index in range(len(end_prices) - sequence_length + 1):
idk = []
idk[:] = end_prices[index: index + sequence_length]
result.append(idk)
#normalize data
def normalize_windows(data):
normalized_data = []
for window in data:
normalized_window = [((float(p) / float(window[0])) - 1) for p in window]
normalized_data.append(normalized_window)
return np.array(normalized_data)
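# Worked example (illustrative only, never executed): normalize_windows rescales each
# window relative to its first price, so [100, 110, 90] becomes approximately [0.0, 0.1, -0.1].
def _normalize_windows_example():
    return normalize_windows([[100.0, 110.0, 90.0]])  # -> array([[0.0, 0.1, -0.1]]) approx.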
# norm_result = normalize_windows(result)
norm_result = np.array(result)
# split train and test data
row = int(round(norm_result.shape[0] * 0.9))
train = norm_result[:row, :]
np.random.shuffle(train)
x_train = train[:, : -y_len]
# x_train = np.reshape(x_train, (x_train.shape[0], x_train.shape[1], 1))
y_train = train[:, -y_len: ]
x_test = norm_result[row:, :-y_len]
# x_test = np.reshape(x_test, (x_test.shape[0], x_test.shape[1], 1))
y_test = norm_result[row:, -y_len: ]
print(x_test.shape)
print(x_train.shape)
print(y_train.shape)
print(x_test.shape)
print(y_test.shape)
print(x_test[0])
print(y_test[0])
from keras.models import Sequential
from keras.layers import Dense, LSTM, Dropout, Activation, Conv1D, Embedding, MaxPooling1D
model = Sequential()
model.add(Embedding(5000, 100))
model.add(Dropout(0.5))
model.add(Conv1D(64, 5, padding='valid', activation='relu', strides=1))
model.add(MaxPooling1D(pool_size=4))
model.add(LSTM(55))
model.add(Dense(10))
model.add(Activation('linear'))
model.compile(loss='mse', optimizer='rmsprop')
model.summary()
model.fit(x_train, y_train, validation_data=(x_test, y_test), batch_size=int(x_len/5), epochs=20)
model.save("model2.h5")
pred = model.predict(x_test)
fig = plt.figure(facecolor='white')
ax = fig.add_subplot(111)
ax.plot(y_test, label='True')
ax.plot(pred, label='Prediction')
ax.legend()
plt.show()
# result = []
# result[:] = end_prices[-seq_len:]
# result = np.array(result)
# x_test = result.reshape(1, -1, 1)
# print('Starting de-normalization')
# un_norm = result[0]
# pred_today = (pred[-1]+1) * un_norm
# print("last 5 days:\n ", x_test)
# print("prediction: ", pred_today)
|
import json
from celery import shared_task
from django.core.serializers.json import DjangoJSONEncoder
from django.db.models import Count
from responses.models import NewsOrgType, Response, Tool, ToolTask
from responses.utils.aws import defaults, get_bucket
@shared_task(acks_late=True)
def add_survey_response(response):
news_org_type = NewsOrgType.objects.get_or_create(
name=response['newsOrgType']
)[0]
tools_used = [Tool.objects.get_or_create(
name=tool
)[0] for tool in response["toolsUsed"]]
most_important_tool = Tool.objects.get_or_create(
name=response['mostImportantTool']
)[0]
tasks_used = [ToolTask.objects.get_or_create(
name=task
)[0] for task in response["tasksUsed"]]
obj = Response.objects.create(
job_title=response['jobTitle'],
job_duties=response['jobDuties'],
news_org_name=response['newsOrgName'],
news_org_type=news_org_type,
news_org_age=response['newsOrgAge'],
most_important_tool=most_important_tool,
tool_satisfaction=response['toolSatisfaction'],
tool_recommendation=response['toolRecommendation'],
tool_recommendation_why_not=response[
'toolRecommendationWhyNot'
],
stopped_using=response['stoppedUsing'],
why_stopped_using=response['whyStoppedUsing'],
org_struggle=response['orgStruggles'],
org_struggle_other=response['orgStrugglesOther'],
org_comparison=response['orgComparison'],
org_communication=response['orgCommunication'],
org_sustainability=response['orgSustainability'],
talk_more=True if response['talkMore'] == 'Yes' else False,
email=response['email']
)
obj.tools_used.set(tools_used)
obj.tasks_used.set(tasks_used)
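# Hedged usage sketch: add_survey_response is a Celery shared task, so a caller
# could enqueue it with .delay(). The keys below match the lookups inside the task
# body; the literal values are invented for illustration only.
# add_survey_response.delay({
#     'jobTitle': 'Reporter', 'jobDuties': '...', 'newsOrgName': 'Example News',
#     'newsOrgType': 'Nonprofit', 'newsOrgAge': '5', 'toolsUsed': ['Spreadsheets'],
#     'mostImportantTool': 'Spreadsheets', 'tasksUsed': ['Audience'],
#     'toolSatisfaction': 4, 'toolRecommendation': 'Yes',
#     'toolRecommendationWhyNot': '', 'stoppedUsing': 'No', 'whyStoppedUsing': '',
#     'orgStruggles': 'Funding', 'orgStrugglesOther': '', 'orgComparison': 'Similar',
#     'orgCommunication': 'Weekly', 'orgSustainability': 'Stable',
#     'talkMore': 'Yes', 'email': 'reporter@example.com',
# })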
@shared_task(acks_late=True)
def publish_survey_data():
responses = Response.objects.all()
tools = Tool.objects.all()
tasks = ToolTask.objects.all()
time_series = [
response.date_submitted.isoformat() for response in responses
]
news_org_type_counts = list(
responses.values('news_org_type__name').annotate(
num=Count('news_org_type__name')
)
)
news_org_name_counts = list(responses.values('news_org_name').annotate(
num=Count('news_org_name')
))
org_struggle_counts = list(responses.values('org_struggle').annotate(
num=Count('org_struggle')
))
org_comparison_counts = list(responses.values('org_comparison').annotate(
num=Count('org_comparison')
))
org_communication_counts = list(responses.values('org_communication').annotate(
num=Count('org_communication')
))
org_sustainability_counts = list(responses.values('org_sustainability').annotate(
num=Count('org_sustainability')
))
number_of_tools = tools.count()
tool_counts = list(tools.values('name').annotate(num=Count('response')))
task_counts = list(tasks.values('name').annotate(num=Count('response')))
data = {
"time_series": time_series,
"org_struggle_counts": org_struggle_counts,
"org_comparison_counts": org_comparison_counts,
"org_communication_counts": org_communication_counts,
"org_sustainability_counts": org_sustainability_counts,
"news_org_type_counts": news_org_type_counts,
"news_org_name_counts": news_org_name_counts,
"number_of_tools": number_of_tools,
"tool_counts": tool_counts,
"task_counts": task_counts
}
# with open('data.json', 'w') as f:
# json.dump(data, f, cls=DjangoJSONEncoder)
key = "responses/data.json"
bucket = get_bucket()
bucket.put_object(
Key=key,
ACL=defaults.ACL,
Body=json.dumps(data, cls=DjangoJSONEncoder),
CacheControl=defaults.CACHE_HEADER,
ContentType="application/json",
)
|
#!/usr/bin/env python
# encoding: utf-8
from datetime import datetime
TIMESTAMP = datetime.today().strftime('%Y%m%d-%H%M%S')
|
import abc
from typing import List
from app.domainmodel.user import User
from app.domainmodel.movie import Movie
from app.domainmodel.actor import Actor
from app.domainmodel.genre import Genre
from app.domainmodel.review import Review
from app.domainmodel.director import Director
repository_instance = None
class RepositoryException(Exception):
def __init__(self, message=None):
pass
class AbstractRepository(abc.ABC):
@abc.abstractmethod
def add_user(self, user: User):
"""" Adds a User to the repository. """
raise NotImplementedError
@abc.abstractmethod
def get_user(self, username) -> User:
""" Returns the User named username from the repository.
If there is no User with the given username, this method returns None.
"""
raise NotImplementedError
@abc.abstractmethod
def add_movie(self, movie: Movie):
""" Adds an Movie to the repository. """
raise NotImplementedError
@abc.abstractmethod
def get_movie(self, id: int) -> Movie:
""" Returns Movie with id from the repository.
If there is no Movie with the given id, this method returns None.
"""
raise NotImplementedError
@abc.abstractmethod
def get_movies_by_id(self, id_list):
""" Returns a list of Movies, whose ids match those in id_list, from the repository.
If there are no matches, this method returns an empty list.
"""
raise NotImplementedError
@abc.abstractmethod
def get_movies_id_all(self):
""" Returns a list of ids representing all Movies.
If there are no Movies, this method returns an empty list.
"""
raise NotImplementedError
@abc.abstractmethod
def get_number_of_movies(self):
""" Returns the number of Movies in the repository. """
raise NotImplementedError
@abc.abstractmethod
def get_movies_for_genre(self, genre_name: str):
""" Returns a list of ids representing Movies that are tagged by genre_name.
If there are no Movies that are tagged by genre_name, this method returns an empty list.
"""
raise NotImplementedError
@abc.abstractmethod
def get_movies_for_director(self, director_name: str):
""" Returns a list of ids representing Movies that are tagged by director name.
If there are no Movies that are tagged by director name, this method returns an empty list.
"""
raise NotImplementedError
@abc.abstractmethod
def get_movies_for_actor(self, actor_name: str):
""" Returns a list of ids representing Movies that are tagged by actor name.
If there are no Movies that are tagged by actor name, this method returns an empty list.
"""
raise NotImplementedError
@abc.abstractmethod
def add_genre(self, genre: Genre):
""" Adds a Genre to the repository. """
raise NotImplementedError
@abc.abstractmethod
def get_genres(self) -> List[Genre]:
""" Returns the Genres stored in the repository. """
raise NotImplementedError
@abc.abstractmethod
def add_actor(self, actor: Actor):
""" Adds a Actor to the repository. """
raise NotImplementedError
@abc.abstractmethod
def get_actors(self) -> List[Actor]:
""" Returns the Actors stored in the repository. """
raise NotImplementedError
@abc.abstractmethod
def add_director(self, director: Director):
""" Adds a Director to the repository. """
raise NotImplementedError
@abc.abstractmethod
def get_directors(self) -> List[Director]:
""" Returns the Director stored in the repository. """
raise NotImplementedError
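# Hedged illustration: a sketch of how a concrete repository might satisfy a couple
# of the abstract methods above. The attribute names User.username and Movie.id are
# assumptions about the domain model, and the remaining abstract methods are
# omitted, so this sketch cannot be instantiated as-is.
class _MemoryRepositorySketch(AbstractRepository):
    def __init__(self):
        self._users = []
        self._movies = {}
    def add_user(self, user: User):
        self._users.append(user)
    def get_user(self, username) -> User:
        return next((u for u in self._users if u.username == username), None)
    def add_movie(self, movie: Movie):
        self._movies[movie.id] = movie
    def get_movie(self, id: int) -> Movie:
        return self._movies.get(id)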
|
import math
import random
import time
import matplotlib.pyplot as plt
from pyrep import PyRep
from pyrep.objects.shape import Shape
class RRT:
'''
Define a node class that represents a node in our RRT
'''
class Node:
def __init__(self, x, y, theta):
self.x = x
self.y = y
self.theta = theta
self.parent = None
self.path = []
def distance_to(self, node):
return math.sqrt((self.x - node.x)**2 + (self.y - node.y)**2)
rrt_tree = []
def __init__(self, pr, shape, start_pose, target_pose, limits_x, limits_y):
random.seed()
self.pr = pr
self.shape = shape
self.start_pose = start_pose
self.target_pose = target_pose
self.rrt_tree.append(self.Node(start_pose[0], start_pose[1], start_pose[2]))
self.min_x, self.max_x = limits_x
self.min_y, self.max_y = limits_y
self.max_iterations = 2000
self.edge_length = 0.05
self.path_resolution = 0.01
# TODO: dont forget to move the shape back to start pose
def solve(self):
goal = self.Node(self.target_pose[0], self.target_pose[1], self.target_pose[2])
for i in range(self.max_iterations):
q_rand = self.sample_free()
q_near = self.getNearestNeighbour(q_rand)
q_new = self.steer(q_near, q_rand)
if i % 20 == 0:
self.draw_graph(q_rand)
if q_new != None and self.check_collision(q_new.x, q_new.y, q_new.theta) == False:
self.rrt_tree.append(q_new)
if q_new.distance_to(goal) <= self.edge_length:
final_node = self.steer(q_new, goal)
if final_node != None and self.check_collision(final_node.x, final_node.y, final_node.theta) == False:
self.rrt_tree.append(final_node)
return self.get_path()
print("ERROR: NO PATH FOUND")
return None
def check_collision(self, x, y, theta):
# Move the shape so that we can test collision
self.shape.set_position([x, y, self.shape.get_position()[2]])
self.shape.set_orientation([0, 0, theta])
return self.shape.check_collision()
# Goal region biasing
def sample_free(self):
i = random.random()
# 10% of the time put the goal as the random node
if i >= 0.9:
return self.Node(self.target_pose[0], self.target_pose[1], self.target_pose[2])
while True:
rand_x, rand_y, rand_theta = self.sample()
if(self.check_collision(rand_x, rand_y, rand_theta) == False):
return self.Node(rand_x, rand_y, rand_theta)
def sample(self):
rand_x = random.uniform(self.min_x, self.max_x)
rand_y = random.uniform(self.min_y, self.max_y)
rand_theta = random.uniform(math.radians(-180), math.radians(180))
return (rand_x, rand_y, rand_theta)
# Adapted from https://github.com/AtsushiSakai/PythonRobotics/blob/master/PathPlanning/RRT/rrt.py
def steer(self, from_node, to_node):
distance, required_angle = self.calculate_motion(from_node, to_node)
new_node = self.Node(from_node.x, from_node.y, from_node.theta)
new_node.path = [(new_node.x, new_node.y, new_node.theta)]
extend_length = min(distance, self.edge_length)
n_expand = math.floor(extend_length / self.path_resolution)
angle_to_rotate_by = self.normalizePi(required_angle - new_node.theta)
# new_node.theta = required_angle
if n_expand != 0:
angle_step = angle_to_rotate_by / n_expand
for _ in range(n_expand):
new_node.x += self.path_resolution * math.cos(required_angle)
new_node.y += self.path_resolution * math.sin(required_angle)
new_node.theta += angle_step
if self.check_collision(new_node.x, new_node.y, new_node.theta):
return None
new_node.path.append((new_node.x, new_node.y, new_node.theta))
distance, _ = self.calculate_motion(new_node, to_node)
if distance <= self.path_resolution:
new_node.path.append((to_node.x, to_node.y, self.normalizePi(required_angle)))
new_node.parent = from_node
return new_node
# Returns the node in the rrt_tree that has the minimum distance to query_node
def getNearestNeighbour(self, query_node):
return min(self.rrt_tree, key=lambda x: x.distance_to(query_node))
def calculate_motion(self, from_node, to_node):
dx = to_node.x - from_node.x
dy = to_node.y - from_node.y
distance = math.sqrt(dx ** 2 + dy ** 2)
theta = math.atan2(dy, dx)
return distance, theta
# Keep any angle we move through between -pi and pi
def normalizePi(self, val):
while val < -math.pi:
val += 2*math.pi
while val > math.pi:
val -= 2*math.pi
return val
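# Illustrative check: normalizePi wraps any angle into [-pi, pi], e.g.
# self.normalizePi(3 * math.pi) -> pi and self.normalizePi(-4.0) -> -4.0 + 2*pi (~2.283).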
def get_path(self):
cur_node = self.rrt_tree[-1]
path = [(cur_node.x, cur_node.y, cur_node.theta)]
for i in cur_node.path[::-1]:
path.append(i)
while cur_node.parent != None:
cur_node = cur_node.parent
path.append((cur_node.x, cur_node.y, cur_node.theta))
for i in cur_node.path[::-1]:
path.append(i)
return path[::-1]
def draw_graph(self, rnd=None):
plt.clf()
if rnd is not None:
plt.plot(rnd.x, rnd.y, "^k")
for node in self.rrt_tree:
if node.parent:
x = [i[0] for i in node.path]
y = [i[1] for i in node.path]
plt.plot(x, y, "-g")
plt.plot(node.x, node.y, "o")
plt.plot(self.start_pose[0], self.start_pose[1], "xr")
plt.plot(self.target_pose[0], self.target_pose[1], "xr")
plt.axis("equal")
# plt.axis([-2, 15, -2, 15])
plt.grid(True)
plt.pause(0.01)
|
class Book:
title = ""
author = ""
code = 0
def __init__(self, title, author, code):
self.title = title
self.author = author
self.code = code
def print(self):
print(self.title)
print(self.author)
print(self.code)
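# Hedged usage sketch with invented field values:
if __name__ == "__main__":
    book = Book("Example Title", "Example Author", 42)
    book.print()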
|
from django.db import models
from django.db.models.fields import TextField
from .base_modles import Timestamp
from .card_models import CardInfo
class CardComment(Timestamp):
for_card = models.ForeignKey(CardInfo,on_delete=models.CASCADE)
name = models.CharField(max_length=515,null=True,blank=True)
body = models.TextField()
def __str__(self):
return self.name or ""
|
#!/usr/bin/env /data/mta/Script/Python3.8/envs/ska3-shiny/bin/python
#################################################################################
# #
# plot_data.py: plot all science run interruption related data #
# #
# author: t. isobe (tisobe@cfa.harvard.edu) #
# #
# last update: Mar 09, 2021 #
# #
#################################################################################
import math
import re
import sys
import os
import string
import numpy as np
import Chandra.Time
#
#--- pylab plotting routine related modules
#
import matplotlib as mpl
if __name__ == '__main__':
mpl.use('Agg')
from pylab import *
import matplotlib.pyplot as plt
import matplotlib.font_manager as font_manager
import matplotlib.lines as lines
#
#--- reading directory list
#
path = '/data/mta/Script/Interrupt/house_keeping/dir_list'
with open(path, 'r') as f:
data = [line.strip() for line in f.readlines()]
for ent in data:
atemp = re.split(':', ent)
var = atemp[1].strip()
line = atemp[0].strip()
exec("%s = %s" %(var, line))
#
#--- append a path to a private folder to the python path
#
sys.path.append(bin_dir)
#
#--- Science Run Interrupt related shared functions
#
import interrupt_suppl_functions as itrf
#
#--- Ephin/HRC plotting routines
#
import plot_ephin as ephin
#
#---- GOES plotting routines
#
import plot_goes as goes
#
#---- ACE plotting routines
#
import plot_ace_rad as ace
#
#--- common mta helper functions (provides read_data_file); module name assumed
#
import mta_common_functions as mcf
#---------------------------------------------------------------------------------------------
#--- plot_data: plot all data related to the science run interruption (EPHIN/HRC/GOES/ACE)----
#---------------------------------------------------------------------------------------------
def plot_data(ifile):
"""
plot all data related to the science run interruption (NOAA/EPHIN/GOES)
input: file --- input file name. if it is not given, the script will ask
output: <plot_dir>/*.png --- ace data plot
<goes_dir>/*.png --- goes data plot
"""
if ifile == '':
ifile = input('Please put the interrupt timing list: ')
data = mcf.read_data_file(ifile)
for ent in data:
atemp = re.split('\s+|\t+', ent)
event = atemp[0]
start = atemp[1]
stop = atemp[2]
#
#--- plot Ephin/HRC data (after 2014, only hrc is plotted)
#
ephin.plot_ephin_main(event, start, stop)
#
#---- plot GOES data
#
goes.plot_goes_main(event, start, stop)
#
#---- plot ACE data
#
ace.start_ace_plot(event, start, stop)
#---------------------------------------------------------------------------------------------
if __name__ == '__main__':
if len(sys.argv) == 2:
ifile = sys.argv[1]
else:
ifile = ''
plot_data(ifile)
|
#!/usr/bin/python3
## Subscribe to camera images and publish the estimated
## line position to the "position" topic
import rospy
from sensor_msgs.msg import Image
from geometry_msgs.msg import Pose
# Dependencies for estimation
import numpy as np
from scipy.signal import find_peaks, butter, filtfilt
class PosEstimator():
def __init__(self):
self.topic_name_camera = rospy.get_param("topic_name_image", "camera/image")
self.topic_name_pos = rospy.get_param("topic_name_position", "position")
self.frame_name = rospy.get_param("frame_name", "camera")
# Parameters for estimation
self.scan_line = rospy.get_param("~scan_line", 170)
self.peak_thres = rospy.get_param("~peak_threshold", 170)
self.track_width = rospy.get_param("~track_width", 600)
self.camera_center = rospy.get_param("~camera_center", 320)
self.butter_b, self.butter_a = butter(3, 0.1)
def start(self):
self.sub_camera = rospy.Subscriber(self.topic_name_camera, Image, self.camera_callback)
self.pub_pos = rospy.Publisher(self.topic_name_pos, Pose, queue_size=10)
rospy.spin()
def camera_callback(self, img_msg):
width = img_msg.width
height = img_msg.height
I = np.frombuffer(img_msg.data, dtype=np.uint8).reshape((height, width))
rospy.loginfo("Image with shape " + str(I.shape) + " received.")
line_pos = self.camera_center - self.pos_estimate(I)
rospy.loginfo("Estimated line_pos = " + str(line_pos))
pos_msg = Pose()
pos_msg.position.x = line_pos
self.pub_pos.publish(pos_msg)
def pos_estimate(self, I):
# Select a horizontal line in the middle of the image
L = I[self.scan_line, :]
# Smooth the transitions so we can detect the peaks
Lf = filtfilt(self.butter_b, self.butter_a, L)
# Find peaks that are higher than the configured threshold
peaks, p_val = find_peaks(Lf, height=self.peak_thres)
line_pos = self.camera_center
line_left = None
line_right = None
peaks_left = peaks[peaks < self.camera_center]
peaks_right = peaks[peaks > self.camera_center]
# Peaks on the left
if peaks_left.size:
line_left = peaks_left.max()
# Peaks on the right
if peaks_right.size:
line_right = peaks_right.min()
# Evaluate the line position
if line_left is not None and line_right is not None:
    line_pos = (line_left + line_right) // 2
    self.track_width = line_right - line_left
elif line_left is not None:
    line_pos = line_left + int(self.track_width / 2)
elif line_right is not None:
    line_pos = line_right - int(self.track_width / 2)
else:
rospy.loginfo("no line")
return line_pos
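# Hedged illustration (standalone, not used by the node): the core idea of
# pos_estimate() on a synthetic scan line, with the same butter/filtfilt smoothing
# and find_peaks thresholding. All numbers here are invented for demonstration.
def _demo_scan_line():
    b, a = butter(3, 0.1)
    line = np.zeros(640)
    line[100:130] = 255   # left line marking
    line[500:530] = 255   # right line marking
    smoothed = filtfilt(b, a, line)
    peaks, _ = find_peaks(smoothed, height=170)
    return peaks          # one peak expected near each bright band (~115 and ~515)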
if __name__ == "__main__":
rospy.init_node("pos_estimation")
estimator = PosEstimator()
try:
estimator.start()
except rospy.ROSInterruptException:
pass
|
import sys
import random
import time
import config
import py3buddy
import globals
colorlist = {"NOCOLOUR": py3buddy.NOCOLOUR,
"RED": py3buddy.RED,
"BLUE": py3buddy.BLUE,
"GREEN": py3buddy.GREEN,
"CYAN": py3buddy.CYAN,
"YELLOW": py3buddy.YELLOW,
"PURPLE":py3buddy.PURPLE,
"WHITE": py3buddy.WHITE}
def init_buddy():
buddy_config = {'productid': int(config.productid), 'reset_position': config.reset_position}
# initialize an iBuddy and check if a device was found and is accessible
ibuddy = py3buddy.iBuddy(buddy_config)
globals.ibuddy = ibuddy
if ibuddy.dev is None:
print("No iBuddy found, or iBuddy not accessible", file=sys.stderr)
sys.exit(1)
globals.ibuddy.wiggle(globals.wingle)
globals.ibuddy.wings(globals.wing)
print(bool(globals.heart))
globals.ibuddy.toggleheart(eval(globals.heart))
globals.ibuddy.setcolour(get_color(globals.color))
globals.ibuddy.sendcommand()
print(get_status())
def get_color(color):
for i in colorlist:
#print (i)
#print(colorlist[i])
if i == color:
return colorlist[i]
def get_status():
#print("status: %s %s %s %s" %(globals.wingle,globals.wing,globals.heart,globals.color))
#ws_client.notify_state()
return "status: %s %s %s %s" %(globals.wingle,globals.wing,globals.heart,globals.color)
def get_status_json():
return
def panic(paniccount):
# a demo version to show some of the capabilities of the iBuddy
# first reset the iBuddy
globals.ibuddy.reset()
for i in range(0, paniccount):
# set the wings to high
globals.ibuddy.wings('high')
# turn on the heart LED
globals.ibuddy.toggleheart(True)
# pick a random colour for the head LED
globals.ibuddy.setcolour(random.choice(py3buddy.allcolours))
# wiggle randomly
globals.ibuddy.wiggle(random.choice(['right', 'left', 'middle', 'middlereset']))
# create the message, then send it, and sleep for 0.1 seconds
globals.ibuddy.sendcommand()
time.sleep(0.1)
# set the wings to low
globals.ibuddy.wings('low')
# turn off the heart LED
globals.ibuddy.toggleheart(False)
# pick a random colour for the head LED
globals.ibuddy.setcolour(random.choice(py3buddy.allcolours))
# random wiggle
globals.ibuddy.wiggle(random.choice(['right', 'left', 'middle', 'middlereset']))
globals.ibuddy.sendcommand()
time.sleep(0.1)
# extra reset as sometimes the device doesn't respond
globals.ibuddy.reset()
globals.ibuddy.reset()
def colourloop(loopcount):
globals.ibuddy.reset()
for i in range(0, loopcount):
for c in py3buddy.allcolours:
globals.ibuddy.setcolour(c)
globals.ibuddy.sendcommand()
time.sleep(1)
globals.ibuddy.reset()
globals.ibuddy.reset()
def flaploop(loopcount):
#globals.ibuddy.reset()
for i in range(0, loopcount):
# set the wings to high
globals.ibuddy.wings('high')
globals.ibuddy.sendcommand()
time.sleep(0.1)
globals.ibuddy.wings('low')
globals.ibuddy.sendcommand()
time.sleep(0.1)
globals.ibuddy.wings('low')
globals.ibuddy.sendcommand()
|
# -*- coding:utf-8 -*-
# inheritance
class FirstClass(object):
pass
if __name__ == '__main__':
f = FirstClass()
|
from ._title import Title
from plotly.graph_objs.parcoords.line.colorbar import title
from ._tickformatstop import Tickformatstop
from ._tickfont import Tickfont
|
"""Exceptions, error handlers, and high level validators."""
class TinyFlowException(Exception):
"""Base exception for ``tinyflow``."""
class NotAnOperation(TinyFlowException):
"""Raise when an object should be an instance of
``tinyflow.ops.Operation()`` but isn't.
"""
class NotACoroOperation(NotAnOperation):
"""Like ``NotAnOperation()`` but for ``tinyflow.coro.ops``."""
class NotACoroTarget(NotACoroOperation):
"""Like ``NotACoroOperation()`` but for coroutine targets."""
class TooManyTargets(TinyFlowException):
"""Raised when a ``tinyflow.coro.CoroPipeline()`` receives too many
``tinyflow.coro.ops.CoroTarget()``'s.
"""
class NoPipeline(TinyFlowException):
"""Raised when an operation has not been attached to a pipeline, but
requests its parent pipeline.
"""
class NoPool(TinyFlowException):
"""Raised when a thread or process pool is requested but was not passed
to ``tinyflow.Pipeline()``.
"""
|
# Generated by Django 2.2.4 on 2019-10-06 08:03
from django.db import migrations, models
import django.db.models.deletion
import smart_selects.db_fields
class Migration(migrations.Migration):
dependencies = [
('announcements', '0002_auto_20191005_1658'),
]
operations = [
migrations.AlterField(
model_name='announcement',
name='area',
field=smart_selects.db_fields.ChainedForeignKey(chained_field='community', chained_model_field='community', on_delete=django.db.models.deletion.CASCADE, to='residents.Area'),
),
migrations.AlterField(
model_name='announcement',
name='publish_datetime',
field=models.DateTimeField(verbose_name='Publish Date/Time'),
),
]
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Aug 28 11:18:32 2019
@author: se14
"""
# train the FP reduction network for fold k (user inputted variable)
import os
import sys
import numpy as np
import torch
from torch.utils.data import Dataset, DataLoader
import matplotlib.pyplot as plt
import time
import torch.nn as nn
import torch.optim as optim
from matplotlib import cm
import pandas as pd
import scipy.sparse
import scipy.ndimage
import os
import warnings
import sklearn.metrics
import SimpleITK as sitk
import math
warnings.filterwarnings('ignore', '.*output shape of zoom.*')
torch.backends.cudnn.deterministic = False#True
torch.backends.cudnn.enabled = True#False
torch.manual_seed(0)
np.random.seed(0)
#os.environ["CUDA_LAUNCH_BLOCKING"] = "1"
try:
get_ipython().run_line_magic('matplotlib', 'qt')
except:
pass
device = torch.device("cuda:1" if torch.cuda.is_available() else "cpu")
#device = torch.device("cpu")
dType = torch.float32
# get the current fold from the command line input.
# for fold k, we use the kth subset as the test, and train on the remaining data
fold_k = int(sys.argv[1])
print(f'Training fold {fold_k}')
#%% paths
cand_path = '/media/se14/DATA_LACIE/LUNA16/candidates/'
out_path = f'results_fold_{fold_k}/'
if (not os.path.exists(out_path)) & (out_path != ""):
os.makedirs(out_path)
train_subset_folders = [f'subset{i}/' for i in [x for x in range(10) if x!=fold_k]]
train_subset_folders = [cand_path + train_subset_folders[i] for i in range(len(train_subset_folders))]
test_subset_folders = [f'subset{i}/' for i in [x for x in range(10) if x==fold_k]]
test_subset_folders = [cand_path + test_subset_folders[i] for i in range(len(test_subset_folders))]
# set the validation subset
val_subset_folders = [train_subset_folders[fold_k-1]]
# and then remove this from the training subsets
train_subset_folders.remove(val_subset_folders[0])
#%% network architecture for FP reduction
# Cheng et al LUNA16 paper
def conv3dBasic(ni, nf, ks, stride,padding = 0):
return nn.Sequential(
nn.Conv3d(ni, nf, kernel_size = (ks, ks, ks), bias = True, stride = stride, padding = padding),
nn.ReLU(inplace=True),
nn.BatchNorm3d(nf))
class discriminatorNet(nn.Module):
def __init__(self):
super().__init__()
# first block convolutions
self.block1_L1 = conv3dBasic(1, 24, 3, 2, 1)
self.block1_L2 = conv3dBasic(24, 32, 3, 1, 1)
self.block1_R = conv3dBasic(1, 32, 1, 2, 0)
# second block convolutions
self.block2_L1 = conv3dBasic(32, 48, 3, 1, 1)
self.block2_L2 = conv3dBasic(48, 48, 3, 1, 1)
self.block2_R = conv3dBasic(32, 48, 1, 1, 0)
# 3rd block
self.block3_1 = conv3dBasic(48, 48, 3, 1, 1)
self.block3_2 = conv3dBasic(48, 48, 3, 1, 1)
# 4th block
self.block4_L1 = conv3dBasic(48, 64, 3, 2, 1)
self.block4_L2 = conv3dBasic(64, 64, 3, 1, 1)
self.block4_R = conv3dBasic(48, 64, 1, 2, 0)
# 5th block
self.block5_L1 = conv3dBasic(64, 96, 3, 1, 1)
self.block5_L2 = conv3dBasic(96, 96, 3, 1, 1)
self.block5_R = conv3dBasic(64, 96, 1, 1, 0)
# 6th block
self.block6_L1 = conv3dBasic(96, 96, 3, 1, 1)
self.block6_L2 = conv3dBasic(96, 96, 3, 1, 1)
self.block6_R = conv3dBasic(96, 96, 1, 1, 0)
# 7th block
self.block7_1 = conv3dBasic(96, 96, 3, 1, 1)
self.block7_2 = conv3dBasic(96, 96, 3, 1, 1)
# 8th block
self.block8_L1 = conv3dBasic(96, 128, 3, 2, 1)
self.block8_L2 = conv3dBasic(128, 128, 3, 1, 1)
self.block8_R = conv3dBasic(96, 128, 1, 2, 0)
# 9th block
self.block9_L1 = conv3dBasic(128, 128, 3, 1, 1)
self.block9_L2 = conv3dBasic(128, 128, 3, 1, 1)
self.block9_R = conv3dBasic(128, 128, 1, 1, 0)
# 10th block
self.block10_1 = conv3dBasic(128, 128, 3, 1, 1)
self.block10_2 = conv3dBasic(128, 128, 3, 1, 1)
self.testFC = nn.Sequential(nn.Linear(5*5*5*128,128),nn.ReLU(inplace=True))
# 11th block - have a global average pool to give a 128 vector, then we
# fully connect this to a 2-element softmax
# self.block11_2 = nn.Linear(128, 2)
self.block11_2 = nn.Linear(128, 1) # for single-value output
# experimental dropout layers
self.dropout1 = nn.Dropout3d(p=0.5)
self.dropout2 = nn.Dropout3d(p=0.6)
self.dropout3 = nn.Dropout3d(p=0.5)
def forward(self, x):
# 1st block
xL = self.block1_L1(x)
xL = self.block1_L2(xL)
xR = self.block1_R(x)
x = xL + xR
# 2nd block
xL = self.block2_L1(x)
xL = self.block2_L2(xL)
xR = self.block2_R(x)
x = xL + xR
# 3rd block
x1 = self.block3_1(x)
x1 = self.block3_2(x1)
x = x + x1
# 4th block
xL = self.block4_L1(x)
xL = self.block4_L2(xL)
xR = self.block4_R(x)
x = xL + xR
# 5th block
xL = self.block5_L1(x)
xL = self.block5_L2(xL)
xR = self.block5_R(x)
x = xL + xR
# # experimental dropout---------
# x = self.dropout1(x)
# #-----------------------------
# 6th block
xL = self.block6_L1(x)
xL = self.block6_L2(xL)
xR = self.block6_R(x)
x = xL + xR
# 7th block
x1 = self.block7_1(x)
x1 = self.block7_2(x1)
x = x + x1
# 8th block
xL = self.block8_L1(x)
xL = self.block8_L2(xL)
xR = self.block8_R(x)
x = xL + xR
# 9th block
xL = self.block9_L1(x)
xL = self.block9_L2(xL)
xR = self.block9_R(x)
x = xL + xR
# 10th block
x1 = self.block10_1(x)
x1 = self.block10_2(x1)
x = x + x1
# # experimental dropout---------
# x = self.dropout2(x)
# #-----------------------------
# 11th block
x = x.view(x.size(0),x.size(1),-1)
x = torch.mean(x, dim=2) #GlobalAveragePool (average in each channel)
# experimental FC layer
# x = self.dropout3(x)
# x = x.view(x.size(0),-1)
# x = self.testFC(x)
x = self.block11_2(x)
# we can include these functions in the loss function to save computations
# but here we do not
# x = F.softmax(x,dim=1)
x = torch.sigmoid(x).view(-1) # for single value output
return x
# initialization function, first checks the module type,
# then applies the desired changes to the weights
def init_net(m):
if (type(m) == nn.Linear) or (type(m) == nn.modules.conv.Conv3d):
nn.init.kaiming_uniform_(m.weight)
if hasattr(m, 'bias'):
try:
nn.init.constant_(m.bias,0.0)
except:
pass
model = discriminatorNet()
model = model.to(dtype=dType)
model = model.apply(init_net).to(device)
#image = torch.zeros((1,1,40,40,40)).to(dtype=dType).to(device)
#out = model(image)
#print(out)
#%% dataset object to read in all candidates from our training data
def eulerAnglesToRotationMatrix(theta):
thetaRad = np.zeros_like(theta)
for ii in range(len(theta)):
thetaRad[ii] = (theta[ii] / 180.) * np.pi
R_x = np.array([[1, 0, 0],
                [0, math.cos(thetaRad[0]), -math.sin(thetaRad[0])],
                [0, math.sin(thetaRad[0]), math.cos(thetaRad[0])]])
R_y = np.array([[math.cos(thetaRad[1]), 0, math.sin(thetaRad[1])],
                [0, 1, 0],
                [-math.sin(thetaRad[1]), 0, math.cos(thetaRad[1])]])
R_z = np.array([[math.cos(thetaRad[2]), -math.sin(thetaRad[2]), 0],
                [math.sin(thetaRad[2]), math.cos(thetaRad[2]), 0],
                [0, 0, 1]])
R = np.dot(R_z, np.dot( R_y, R_x ))
return R
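# Illustrative check: with the degree-to-radian conversion above,
# eulerAnglesToRotationMatrix((0, 0, 90)) is approximately
# [[0, -1, 0], [1, 0, 0], [0, 0, 1]], i.e. a 90-degree rotation about the z axis.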
class lidcCandidateLoader(Dataset):
def __init__(self,data_folders,augmentFlag,balanceFlag,n=None):
# data_folders are the locations of the data that we want to use
# e.g. '/media/se14/DATA/LUNA16/candidates/subset9/'
cand_df = pd.DataFrame(columns=['seriesuid','coordX','coordY','coordZ','class','diameter_mm','filename'])
for fldr in data_folders:
csvfiles = f'cand_df_{fldr[-2]}.csv'
# csvfiles = [f for f in os.listdir(fldr) if os.path.isfile(os.path.join(fldr, f)) if '.csv' in f][0]
cand_df = cand_df.append(pd.read_csv(fldr + csvfiles),ignore_index=True,sort=False)
# if we are balancing the data, then we need to do that here by oversampling
# the positives
if balanceFlag==True:
true_df = cand_df.loc[cand_df['class']==1]
false_df = cand_df.loc[cand_df['class']==0]
numRepeats = int(np.ceil(len(false_df) / len(true_df)))
true_df_aug = pd.concat([true_df]*numRepeats)[0:len(false_df)]
cand_df = true_df_aug.append(false_df,ignore_index=False,sort=False).reset_index(drop=True)
# only set augmentation for training, not validation or testing
if augmentFlag == True:
self.augmentFlag = True
else:
self.augmentFlag = False
# shuffle repeatably
cand_df = cand_df.sample(frac=1,replace=False,random_state=fold_k)
# pull out n examples only if possible
try:
cand_df = cand_df.iloc[0:n]
except:
pass
self.cand_df = cand_df
def __len__(self):
return len(self.cand_df)
# return 2
def __getitem__(self,idx):
currFileName = self.cand_df.iloc[idx]['filename']
currLabel = self.cand_df.iloc[idx]['class']
currPatch = np.fromfile(currFileName,dtype='int16').astype('float32')
currPatch = currPatch.reshape((80,80,80))
# some intensity transforms/normalisations
currPatch[np.where(currPatch<-1000)]= -1000
currPatch[np.where(currPatch>400)] = 400
currPatch = (currPatch + 1000)/1400
# augment if augmentFlag is True
if self.augmentFlag == True:
# random flippings
flipX = np.random.rand() > 0.5
flipY = np.random.rand() > 0.5
if flipX:
currPatch = np.flip(currPatch,axis=1).copy()
if flipY:
currPatch = np.flip(currPatch,axis=2).copy()
# random offset
offSet = 0.3*np.random.rand() - 0.15
currPatch += offSet
# random Gaussian blur
randSigma = 0#np.random.rand() # between 0 and 1mm smoothing (standard deviation, not FWHM!)
currPatch = scipy.ndimage.gaussian_filter(currPatch,randSigma)
# random rotation and scaling
scaleFact = 0.5*np.random.rand() + 0.75
rotFactX = 60.*np.random.rand() - 30
rotFactY = 60.*np.random.rand() - 30
rotFactZ = 60.*np.random.rand() - 30
image_center = tuple(np.array(currPatch.shape) / 2 - 0.5)
rotMat = eulerAnglesToRotationMatrix((rotFactX,rotFactY,rotFactZ))
scaleMat = np.eye(3,3)
scaleMat[0,0] *= scaleFact
scaleMat[1,1] *= scaleFact
scaleMat[2,2] *= scaleFact
affMat = np.dot(rotMat,scaleMat)
affine = sitk.AffineTransform(3)
affine.SetMatrix(affMat.ravel())
affine.SetCenter(image_center)
img = sitk.GetImageFromArray(currPatch)
refImg = img
imgNew = sitk.Resample(img, refImg, affine,sitk.sitkLinear,0)
currPatch = sitk.GetArrayFromImage(imgNew).copy()
# crop out a 40 x 40 x 40 shifted region
# random translation of up to 5 mm in each direction
transFact = np.round(10*np.random.rand(3)).astype('int16') - 5
elif self.augmentFlag == False:
transFact = np.array([0,0,0])
currPatch = currPatch[20+transFact[0]:60+transFact[0],
20+transFact[1]:60+transFact[1],
20+transFact[2]:60+transFact[2]]
# output results
currPatch = torch.from_numpy(currPatch[None,:,:,:])
currLabel = torch.from_numpy(np.array(currLabel)).to(dtype=dType)
sample = {'image': currPatch, 'labels': currLabel, 'candIdx' : idx} # return these values
return sample
#%% set up dataloader
batch_size = 256
trainData = lidcCandidateLoader(train_subset_folders,augmentFlag=True,balanceFlag=True)
train_dataloader = DataLoader(trainData, batch_size = batch_size,shuffle = True,num_workers = 2,pin_memory=True)
valData = lidcCandidateLoader(val_subset_folders,augmentFlag=False,balanceFlag=False)
val_dataloader = DataLoader(valData, batch_size = batch_size,shuffle = False,num_workers = 2,pin_memory=True)
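# Hedged sanity check: pulling one batch confirms the tensor shapes produced by
# lidcCandidateLoader; left commented out so the script does not consume an extra
# batch when run.
# sample_batch = next(iter(train_dataloader))
# print(sample_batch['image'].shape)   # expected: (batch_size, 1, 40, 40, 40)
# print(sample_batch['labels'].shape)  # expected: (batch_size,)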
#%% set up training
criterion = torch.nn.BCELoss()
LR = 5e-5
optimizer = optim.Adam(model.parameters(),lr = LR)
ctr = 0
num_epochs = 10
epoch_list = np.array(list(range(num_epochs)))
bestValLoss = 1e6
bestValLossNetFileName = f'bestDiscriminator_model.pt'#_BS{batch_size}_samples{len(trainData)}_epochs{num_epochs}_LR{LR}.pt'
allTrainLoss = np.zeros((num_epochs,1))
allValLoss = np.zeros((num_epochs,1))
optimizer.zero_grad()
currModelFilename = f'current_model.pt'
#%% alternative learning rate finder
findLR = False
if findLR == True:
print('LR finder')
allLRs = np.logspace(-7,-1,100)
LRfinderLoss = np.zeros_like(allLRs).astype('float32')
data = next(iter(train_dataloader))
# get the inputs
inputs, labels = data['image'],data['labels']
inputs = inputs.to(device)
labels = labels.to(device)
model2 = discriminatorNet()
model2 = model2.to(dtype=dType)
model2 = model2.apply(init_net).to(device)
for ii, lr in enumerate(allLRs):
optimizer2 = optim.Adam(model2.parameters(),lr = allLRs[ii])
# forward + backward + optimize (every numAccum iterations)
outputs = model2(inputs) # forward pass
loss = criterion(outputs, labels) # calculate loss
print(f'Batch loss = {loss.item()}')
loss.backward() # backprop the loss to each weight to get gradients
optimizer2.step() # take a step in this direction according to our optimiser
optimizer2.zero_grad()
LRfinderLoss[ii] = loss.item()
plt.semilogx(allLRs,LRfinderLoss)
#%% main loop
start = time.time()
# try to load our previous state, if possible
# find the epoch we were up to
if os.path.exists(f'{out_path}lastCompletedEpoch.txt'):
lastEpoch = np.loadtxt(f'{out_path}lastCompletedEpoch.txt').astype('int16').item()
epoch_list = epoch_list[epoch_list>lastEpoch]
print('Found previous progress, amended epoch list')
# load the current model, if it exists
modelToUse = out_path + currModelFilename
if os.path.exists(modelToUse):
model = discriminatorNet()
model.load_state_dict(torch.load(modelToUse))
model = model.to(device)
print('Loaded previous model')
# set the torch random state to what it last was
if os.path.exists(f'{out_path}randomState.txt'):
random_state = torch.from_numpy(np.loadtxt(f'{out_path}randomState.txt').astype('uint8'))
torch.set_rng_state(random_state)
print('Loaded torch random state')
# load the previous training losses
if os.path.exists(out_path + '/allValLoss.txt') and os.path.exists(out_path + '/allTrainLoss.txt'):
allValLoss = np.loadtxt(out_path + '/allValLoss.txt')
allTrainLoss = np.loadtxt(out_path + '/allTrainLoss.txt')
print('Loaded previous loss history')
print(f'model.training = {model.training}')
#%%
for epoch in epoch_list:
print(f'Epoch = {epoch}')
running_loss = 0.0
print('Training')
for i, data in enumerate(train_dataloader, 0):
print(f'{i} of {len(train_dataloader)}')
# get the inputs
inputs, labels = data['image'],data['labels']
inputs = inputs.to(device)
labels = labels.to(device)
# forward + backward + optimize (every numAccum iterations)
outputs = model(inputs) # forward pass
loss = criterion(outputs, labels) # calculate loss
print(f'Batch loss = {loss.item()}')
loss.backward() # backprop the loss to each weight to get gradients
optimizer.step() # take a step in this direction according to our optimiser
optimizer.zero_grad()
running_loss += loss.item() # item() gives the value in a tensor
allTrainLoss[epoch] = running_loss/len(train_dataloader)
print('Validate')
with torch.no_grad():
model = model.eval()
valLoss = 0.0
for i, data in enumerate(val_dataloader,0):
print(f'{i} of {len(val_dataloader)}')
loss = 0.
# get the inputs
inputs, labels, valIdx = data['image'],data['labels'],data['candIdx']
inputs = inputs.to(device)
labels = labels.to(device)
# calculate loss
outputs = model(inputs) # forward pass
loss = criterion(outputs, labels).cpu().detach().numpy() # calculate loss
print(f'Validation loss = {loss.item()}')
valLoss += loss
allValLoss[epoch] = valLoss/len(val_dataloader)
np.savetxt(out_path + '/allValLoss.txt',allValLoss)
np.savetxt(out_path + '/allTrainLoss.txt',allTrainLoss)
if allValLoss[epoch] < bestValLoss:
print(f'Best seen validation performance ({bestValLoss} -> {allValLoss[epoch]}), saving...')
torch.save(model.state_dict(),out_path + bestValLossNetFileName)
np.savetxt(out_path + '/bestEpochNum.txt',np.array([epoch]))
bestValLoss = allValLoss[epoch]
# checkpointing at the end of every epoch
torch.save(model.state_dict(),out_path + currModelFilename)
np.savetxt(f'{out_path}lastCompletedEpoch.txt',np.asarray([epoch]))
np.savetxt(f'{out_path}randomState.txt',torch.get_rng_state().numpy())
model = model.train()
print(f'Epoch = {epoch} finished')
print('Finished Training')
end = time.time()
print(f'Training took {end-start} seconds')
|
a = 1
print(a)
a = 3
print(a)
print(a)
# File: indent_demo.py
# Author: Kaiching Chang
# Date: July, 2014
|
__author__ = 'Justin'
import googlemaps
from googlemaps import convert
from datetime import datetime
from datetime import timedelta
gmaps = googlemaps.Client(key='AIzaSyAVf9cLmfR52ST0VZcFsf-L-HynMTCzZEM')
# Geocoding an address
# geocode_result = gmaps.geocode('1600 Amphitheatre Parkway, Mountain View, CA')
# print(geocode_result)
# Look up an address with reverse geocoding
# reverse_geocode_result = gmaps.reverse_geocode((40.714224, -73.961452))
# print(reverse_geocode_result)
# Request directions via public transit
# now = datetime.now()
# directions_result = gmaps.directions("Sydney Town Hall",
# "Parramatta, NSW",
# mode="transit",
# departure_time=now)
# Distance Matrix / Directions
origin = '29.584769, -98.617857'
destination = '29.599569, -98.385984'
origin = '30.630637, -96.333554'
waypoints = ['via:30.629712, -96.325264']
destination = '30.617907, -96.322968'
print(origin)
print(destination)
now = datetime.now()
later = datetime(now.year, now.month, now.day, 17, 0) + timedelta(days=1)
'''------------------------------------------------------------------------------'''
directions_result = gmaps.directions(origin,
destination,
mode="driving",
departure_time=now,
traffic_model = 'best_guess',
alternatives = True)
print(directions_result)
encodedpolyline = directions_result[0]['overview_polyline']['points']
print(encodedpolyline)
print(type(encodedpolyline))
polyline = convert.decode_polyline(encodedpolyline)
print(polyline)
traveltime = directions_result[0]['legs'][0]['duration']['value']
fulltime = directions_result[0]['legs'][0]['duration_in_traffic']['value']
traffictime = fulltime-traveltime
print('TravelTime:',traveltime)
print('FullTime:',fulltime)
print('Traffictime:',traffictime)
'''------------------------------------------------------------------------------'''
# directions_result = gmaps.directions(origin,
# destination,
# mode="driving", waypoints = waypoints,
# departure_time=later, traffic_model = 'best_guess')
#
# print(directions_result)
#
# traveltime = 0
# legs = directions_result[0]['legs']
# for leg in legs:
# seconds = leg['duration']['value']
# traveltime = traveltime + seconds
#
# print(traveltime)
# print(traveltime/60)
'''------------------------------------------------------------------------------'''
# distance_matrix_result = gmaps.distance_matrix(origin,
# destination,
# mode="driving",
# departure_time=now,
# traffic_model ='best_guess')
# print(distance_matrix_result)
'''------------------------------------------------------------------------------'''
distance_matrix_result2 = gmaps.distance_matrix(origin,
destination,
mode="driving",
departure_time=later,
traffic_model ='best_guess')
print('Best Guess:',distance_matrix_result2)
#
# distance_matrix_result2 = gmaps.distance_matrix(origin,
# destination,
# mode="driving",
# departure_time=later,
# traffic_model ='optimistic')
# print('Optimistic:',distance_matrix_result2)
#
# distance_matrix_result2 = gmaps.distance_matrix(origin,
# destination,
# mode="driving",
# departure_time=later,
# traffic_model ='pessimistic')
# print('Pessimistic',distance_matrix_result2)
'''------------------------------------------------------------------------------'''
print(now)
print(later)
|
#!/usr/bin/env python
class arraylist:
def __init__(self):
self.maxlength = 10000
self.elements = [None]*self.maxlength
self.last = 0
def first(self):
return 0
def end(self):
return self.last
def retrieve(self,p):
if p > self.last or p < 0:
print "Position does not exist"
else:
return self.elements[p]
def locate(self,x):
for q in range(self.last):
if self.elements[q] == x:
return q
def next_cell(self,p):
if p >= self.last or p < 0:
return None
else:
return p + 1
def previous(self,p):
if p > self.last or p <= 0:
return None
else:
return p - 1
def insert(self,x,p):
if self.last >= self.maxlength:
print "List is full"
elif p > self.last or p < 0:
print "Position does not exist"
elif p == self.last:
self.elements[p] = x
self.last = self.last + 1
else:
self.elements[p+1:self.last+1] = self.elements[p:self.last]
self.elements[p] = x
self.last = self.last + 1
def delete(self,p):
    if 0 <= p < self.last:
        # shift the elements after position p down by one
        for q in range(p, self.last - 1):
            self.elements[q] = self.elements[q + 1]
        self.last = self.last - 1
def makenull(self):
self.__init__()
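# Hedged usage sketch exercising the operations above (values are invented):
if __name__ == "__main__":
    lst = arraylist()
    lst.insert("a", 0)
    lst.insert("b", 1)
    lst.insert("c", 1)         # shifts "b" to position 2
    print(lst.retrieve(1))     # -> c
    lst.delete(1)
    print(lst.retrieve(1))     # -> b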
|
import numpy as np
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras.applications import inception_v3
def preprocess_image(image_path):
# Util function to open, resize and format pictures
# into appropriate arrays.
img = keras.preprocessing.image.load_img(image_path)
img = keras.preprocessing.image.img_to_array(img)
img = np.expand_dims(img, axis=0)
img = inception_v3.preprocess_input(img)
return img
def deprocess_image(x):
# Util function to convert a NumPy array into a valid image.
x = x.reshape((x.shape[1], x.shape[2], 3))
# Undo inception v3 preprocessing
x /= 2.0
x += 0.5
x *= 255.0
# Convert to uint8 and clip to the valid range [0, 255]
x = np.clip(x, 0, 255).astype("uint8")
return x
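# Hedged usage sketch: "example.jpg" is a placeholder path, not a file that ships
# with this code.
# img = preprocess_image("example.jpg")    # shape (1, H, W, 3), values in [-1, 1]
# restored = deprocess_image(img.copy())   # uint8 array with values in [0, 255]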
|
#python create_cmpd_image.py /home/chrism/metab_id/data/frag/metlin/C00642_neg_40mv.frag /home/chrism/metab_id/misc/unique_mass_lst_C00642_children_only.txt 1 0
from numpy import *
import sys
from matplotlib import pyplot as plt
CPD_MASSES = sys.argv[1]
MASS_LIST = sys.argv[2]
use_metlin = float(sys.argv[3])
mass_or_id = int(sys.argv[4])
if use_metlin == 0 :
print('Using non-metlin')
if mass_or_id != 0 :
masses = genfromtxt(MASS_LIST, dtype='<S50')
cpd_masses = genfromtxt(CPD_MASSES, dtype='<S50')
else :
masses = genfromtxt(MASS_LIST)
cpd_masses = genfromtxt(CPD_MASSES)
masses = sort(masses)
if len(cpd_masses.shape) == 0 :
cpd_masses = array([cpd_masses])
img = zeros((masses.shape[0],1))
for i in range(0, cpd_masses.shape[0]) :
if use_metlin == 0 :
idx = argmax(masses == cpd_masses[i])
#print metlin[i]
else :
idx = nonzero(logical_and(cpd_masses[i]+use_metlin >= masses, cpd_masses[i]-use_metlin <= masses))[0]
img[idx] = 1
if mass_or_id != 0 :
img_rs = reshape(img, (26,47))
else :
img_rs = reshape(img, (15,11))
#print(img_rs)
plt.imshow(img_rs, interpolation='none')
plt.show()
|
import sys
import requests
import html5lib
from bs4 import BeautifulSoup
from tqdm import tqdm
import time
if sys.version_info[0] < 3:
    reload(sys)
    sys.setdefaultencoding('utf-8')
def text_save(filename, data):
file = open(filename, 'w')
for i in range(len(data)):
s = str(data[i]).replace('[', '').replace(']', '')
s = s.replace("u'", '').replace(',', '') + '\n'
s = s.replace("'", '')
s = s.replace("\u2018", '"').replace("\u2019", '"').replace("\u201c", '"').replace("\u201d", '"').replace("\u2026", '...').replace("\u2013", '-').replace("\xa0", ':')
file.write(s)
file.close()
print ("Saved")
def Scraping():
headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/64.0.3282.119 Safari/537.36'
}
res = requests.get('https://wuxiaworld.com/novel/battle-through-the-heavens', headers=headers)
res.encoding = res.apparent_encoding
html = res.text
soup = BeautifulSoup(html, "html.parser")
title = soup.find('div', class_="novel-body").find('h2')
author = soup.find('div', style="flex: 1").find('dd')
intro = soup.find('div', class_="novel-bottom p-15").find('div', class_='fr-view').find('p')
print('Novel:', title.text, '\n Author:', author.text, '\n Intro:', intro.text)
chapter_url = requests.get('https://wuxiaworld.com/novel/battle-through-the-heavens#chapters', headers=headers)
chapter_url.encoding = chapter_url.apparent_encoding
xml = chapter_url.text
soup1 = BeautifulSoup(xml, "html.parser")
lists = soup1.find_all('li', class_='chapter-item')
list_all = []
list_m = []
pbar = tqdm(total=len(lists), desc='Scraping')
for list_n in lists:
a = list_n.find('a')
#name = a.text
chapter = 'https://www.wuxiaworld.com'+a['href']
req = requests.get(chapter, headers=headers)
req.encoding = req.apparent_encoding
req = req.text
s = BeautifulSoup(req, "html.parser")
find_content = s.find('div', id='chapter-content')
content = find_content.find_all('p')
for artical in content:
list_all.append([artical.text])
pbar.update(1)
pbar.close()
list_m[0:3] = [title.text, author.text, intro.text]
list_m += list_all
text_save('BattleThroughTheHeavens.txt', list_m)
if __name__ == '__main__':
Scraping()
|
import ast
import time
from datetime import datetime,timedelta,date
from decimal import Decimal, ROUND_HALF_UP
from django.http import JsonResponse
from django.shortcuts import render
from django.contrib.auth.models import User
from rest_framework.views import APIView
from rest_framework import generics
from rest_framework.permissions import IsAuthenticated
from django.db.models import Q
from registration.models import Profile
from garmin.models import GarminFitFiles,\
UserGarminDataDaily,\
UserGarminDataActivity,\
UserGarminDataManuallyUpdated
from fitbit.models import UserFitbitDataActivities
from quicklook.calculations.garmin_calculation import get_filtered_activity_stats
from quicklook.calculations.converter.fitbit_to_garmin_converter import get_epoch_offset_from_timestamp
from user_input.utils.daily_activity import get_daily_activities_in_base_format
from user_input.views.garmin_views import _get_activities
from fitparse import FitFile
import fitbit
import quicklook
from hrr.models import Hrr,AAdashboard, AA
import pprint
from hrr import aa_ranges, fitbit_aa
from hrr.calculation_helper import fitfile_parse
from hrr.views import aa_create_instance
def get_garmin_activities(user,start_date_timestamp,end_date_timestamp):
'''
Get Garmin activities from the Garmin models
'''
try:
garmin_data_activities = UserGarminDataActivity.objects.filter(
user=user,start_time_in_seconds__range=[start_date_timestamp,end_date_timestamp])
garmin_list = []
garmin_dic = {}
if garmin_data_activities:
garmin_activity_files = [pr.data for pr in garmin_data_activities]
for i,k in enumerate(garmin_activity_files):
act_files=ast.literal_eval(garmin_activity_files[i])
act_id=act_files['summaryId']
garmin_dic[act_id]=act_files
garmin_list.append(act_files)
except (ValueError, SyntaxError):
garmin_list = []
garmin_dic = {}
return garmin_list,garmin_dic
def get_fitbit_activities(user,start_date):
'''
Get Fitbit activities from the Fitbit models
'''
try:
activity_files_qs=UserFitbitDataActivities.objects.filter(user= user,date_of_activities__icontains=str(start_date)[:10])
fitbit_list = []
fitbit_dic = {}
if activity_files_qs:
fitbit_activity_files = [pr.activities_data for pr in activity_files_qs]
for i,k in enumerate(fitbit_activity_files):
act_files=ast.literal_eval(fitbit_activity_files[i])
act_id=act_files['activities'][0].get('logId',None)
fitbit_dic[act_id]=act_files
fitbit_list.append(act_files)
except (ValueError, SyntaxError):
fitbit_list = []
fitbit_dic = {}
return fitbit_list,fitbit_dic
def get_garmin_manully_activities(user,start_date_timestamp,end_date_timestamp):
'''
Get Garmin manually edited activities from the Garmin models
'''
try:
manually_updated_activities = UserGarminDataManuallyUpdated.objects.filter(
user=user,start_time_in_seconds__range=[start_date_timestamp,end_date_timestamp])
manually_edited_dic = {}
manually_edited_list = []
if manually_updated_activities:
manual_activity_files = [activity.data for activity in manually_updated_activities]
for i,k in enumerate(manual_activity_files):
manual_files=ast.literal_eval(manual_activity_files[i])
manual_act_id=manual_files['summaryId']
manually_edited_dic[manual_act_id]=manual_files
manually_edited_list.append(manual_files)
except (ValueError, SyntaxError):
manually_edited_dic = {}
manually_edited_list = []
return manually_edited_dic,manually_edited_list
def get_usernput_activities(user,start_date):
'''
Get activities from user input models
'''
activities_dic = get_daily_activities_in_base_format(user,start_date)
if activities_dic:
return activities_dic
else:
return {}
def get_fitfiles(user,start_date,start,end,start_date_timestamp=None,end_date_timestamp=None):
'''
Get the fit files recorded on the given day, or fall back to fit files created within a 3-day window
'''
activity_files_qs=UserGarminDataActivity.objects.filter(
user=user,start_time_in_seconds__range=[start_date_timestamp,end_date_timestamp])
fitfiles_obj = GarminFitFiles.objects.filter(user=user,fit_file_belong_date=start_date)
if not fitfiles_obj or len(activity_files_qs) != len(fitfiles_obj):
fitfiles_obj=GarminFitFiles.objects.filter(user=user,created_at__range=[start,end])
return fitfiles_obj
def get_fitfiles_fitbit(user,start_date):
'''
Get the fit files recorded on the given day, or fall back to fit files by creation date
'''
activity_files_qs = UserFitbitDataActivities.objects.filter(user=user,date_of_activities__icontains=str(start_date)[:10])
fitfiles_obj = GarminFitFiles.objects.filter(user=user,fit_file_belong_date=start_date)
if not fitfiles_obj or len(activity_files_qs) != len(fitfiles_obj):
fitfiles_obj=GarminFitFiles.objects.filter(user=user,created_at__icontains=str(start_date)[:10])
return fitfiles_obj
def generate_aa_new_table(heartrate,time_difference,current_user_aa_ranges):
'''This function will generate the new table for the AA dashboard
Args: heartrate(int)
time_difference(int)
current_user_aa_ranges(dict)
Return: dict with updated current user aa ranges
'''
new_format = {}
for ranges,values in current_user_aa_ranges.items():
from_hr = int(ranges.split('-')[0])
to_hr = int(ranges.split('-')[1])+1
if heartrate in range(from_hr,to_hr):
values['duration'] = time_difference + values.get('duration',0)
new_data = {from_hr:values}
new_format.update(new_data)
for key,value in new_format.items():
is_duration = value.get('duration')
if not is_duration:
value['duration'] = 0
return new_format
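# Hedged illustration: with toy ranges such as {'90-109': {}, '110-129': {}} (the
# real ranges come from aa_ranges.all_age_aa_ranges()), a heartrate of 95 and a
# time_difference of 30 adds 30 seconds of duration to the 90-109 bucket, and any
# bucket without an accumulated duration is reported with duration 0.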
def aa_dashboard_ranges(user,start_date):
'''
This function calculates the A/A third chart data
'''
heart_rate_zone_low_end = ""
heart_rate_zone_high_end = ""
time_in_zone_for_last_7_days = ""
prcnt_total_duration_in_zone = ""
# start_date = datetime.strptime(start_date, "%Y-%m-%d").date()
start_date_str = start_date.strftime('%Y-%m-%d')
start_date_timestamp = start_date
start_date_timestamp = start_date_timestamp.timetuple()
start_date_timestamp = time.mktime(start_date_timestamp)
end_date_timestamp = start_date_timestamp + 86400
activity_files_qs=UserGarminDataActivity.objects.filter(user= user,start_time_in_seconds__range=[start_date_timestamp,end_date_timestamp])
if activity_files_qs:
activity_files = [pr.data for pr in activity_files_qs]
one_activity_file_dict = ast.literal_eval(activity_files[0])
offset = one_activity_file_dict['startTimeOffsetInSeconds']
garmin_list,garmin_dic = get_garmin_activities(
user,start_date_timestamp,end_date_timestamp)
manually_edited_dic,manually_edited_list = get_garmin_manully_activities(
user,start_date_timestamp,end_date_timestamp)
activities_dic = get_usernput_activities(
user,start_date)
user_age = user.profile.age()
filtered_activities_files = get_filtered_activity_stats(activities_json=garmin_list,
user_age=user_age,
manually_updated_json=manually_edited_dic,
userinput_activities=activities_dic,
user=user,calendar_date=start_date)
filtered_activities_only = get_filtered_activity_stats(activities_json=garmin_list,
user_age=user_age,
manually_updated_json=manually_edited_dic,
user=user,calendar_date=start_date)
activities = []
hrr_summary_id = []
workout_summary_id = []
id_act = 0
workout_data = []
for i,k in enumerate(filtered_activities_files):
if filtered_activities_files[i]['activityType'] == 'HEART_RATE_RECOVERY':
id_act = int(filtered_activities_files[i]['summaryId'])
activities.append(filtered_activities_files[i])
hrr_summary_id.append(filtered_activities_files[i]['summaryId'])
else:
if filtered_activities_files[i]["duplicate"] == False:
workout_data.append(filtered_activities_files[i])
workout_summary_id.append(filtered_activities_files[i]['summaryId'])
workout = []
hrr = []
start = start_date
end = start_date + timedelta(days=3)
fitfiles_obj = get_fitfiles(user,start_date,start,end,start_date_timestamp,end_date_timestamp)
print(fitfiles_obj,"GARMINFILESSSSS")
if activities_dic and fitfiles_obj:
for tmp in fitfiles_obj:
meta = tmp.meta_data_fitfile
meta = ast.literal_eval(meta)
print(meta,"GARMINFILESSSSS")
data_id = meta['activityIds'][0]
if str(data_id) in workout_summary_id and str(data_id):
workout.append(tmp)
elif str(data_id) in hrr_summary_id :
hrr.append(tmp)
elif fitfiles_obj:
for tmp in fitfiles_obj:
meta = tmp.meta_data_fitfile
meta = ast.literal_eval(meta)
data_id = meta['activityIds'][0]
if str(data_id) in workout_summary_id:
workout.append(tmp)
elif str(data_id) in hrr_summary_id:
hrr.append(tmp)
aa_ranges_all_users = aa_ranges.all_age_aa_ranges()
# print(aa_ranges_all_users,"user age")
current_user_aa_ranges = aa_ranges_all_users.get(str(user_age))
if workout:
workout_data = fitfile_parse(workout,offset,start_date_str)
workout_final_heartrate,workout_final_timestamp,workout_timestamp = workout_data
for heartrate,time_difference in zip(workout_final_heartrate,workout_final_timestamp):
aa_dashboard_table = generate_aa_new_table(
heartrate,time_difference,current_user_aa_ranges)
else:
aa_dashboard_table = {}
return aa_dashboard_table
def aa_dashboard_ranges_fitbit(user,start_date):
'''
This function calculates the A/A third chart data
'''
heart_rate_zone_low_end = ""
heart_rate_zone_high_end = ""
time_in_zone_for_last_7_days = ""
prcnt_total_duration_in_zone = ""
# start_date = datetime.strptime(start_date, "%Y-%m-%d").date()
start_date_str = start_date.strftime('%Y-%m-%d')
start_date_timestamp = start_date
start_date_timestamp = start_date_timestamp.timetuple()
start_date_timestamp = time.mktime(start_date_timestamp)
end_date_timestamp = start_date_timestamp + 86400
activity_files_qs=UserFitbitDataActivities.objects.filter(user= user,date_of_activities__icontains=str(start_date)[:10])
if activity_files_qs:
activity_files = [pr.activities_data for pr in activity_files_qs]
one_activity_file_dict = ast.literal_eval(activity_files[0])
activity_data = one_activity_file_dict['activities']
offset = get_epoch_offset_from_timestamp(activity_data[0].get('startTime', None))
fitbit_list,fitbit_dic = get_fitbit_activities(
user,start_date)
manually_edited_dic = {}
activities_dic = get_usernput_activities(
user,start_date)
if fitbit_list:
fitbit_list = [quicklook.calculations.converter.fitbit_to_garmin_converter.fitbit_to_garmin_activities(act) for act in activity_data]
user_age = user.profile.age()
filtered_activities_files = get_filtered_activity_stats(activities_json=fitbit_list,
user_age=user_age,
manually_updated_json=manually_edited_dic,
userinput_activities=activities_dic,
user=user,calendar_date=start_date)
filtered_activities_only = get_filtered_activity_stats(activities_json=fitbit_list,
user_age=user_age,
manually_updated_json=manually_edited_dic,
user=user,calendar_date=start_date)
activities = []
hrr_summary_id = []
workout_summary_id = []
id_act = 0
workout_data = []
for i,k in enumerate(filtered_activities_files):
#print(filtered_activities_files[1]['activities'][0].get('activityName'))
if filtered_activities_files[i].get('activityType') == 'HEART_RATE_RECOVERY':
id_act = int(filtered_activities_files[i].get('summaryId'))
activities.append(filtered_activities_files[i])
hrr_summary_id.append(filtered_activities_files[i].get('summaryId'))
else:
if filtered_activities_files[i]["duplicate"] == False:
workout_data.append(filtered_activities_files[i])
workout_summary_id.append(filtered_activities_files[i].get('summaryId'))
workout = []
hrr = []
start = start_date
end = start_date + timedelta(days=3)
fitfiles_obj = get_fitfiles_fitbit(user,start_date)
print(fitfiles_obj,"5555555555555555555")
print(activities_dic,"666666666666666666")
if activities_dic and fitfiles_obj:
for tmp in fitfiles_obj:
print(tmp,"33333333333333333333333")
meta = tmp.meta_data_fitfile
meta = ast.literal_eval(meta)
data_id = meta['activityIds'][0]
if str(data_id) in workout_summary_id:
workout.append(tmp)
elif str(data_id) in hrr_summary_id :
hrr.append(tmp)
elif fitfiles_obj:
for tmp in fitfiles_obj:
print(tmp,"33333333333333333333333")
meta = tmp.meta_data_fitfile
meta = ast.literal_eval(meta)
data_id = meta['activityIds'][0]
if str(data_id) in workout_summary_id:
workout.append(tmp)
elif str(data_id) in hrr_summary_id:
hrr.append(tmp)
aa_ranges_all_users = aa_ranges.all_age_aa_ranges()
# print(aa_ranges_all_users,"user age")
current_user_aa_ranges = aa_ranges_all_users.get(str(user_age))
if workout:
workout_data = fitfile_parse(workout,offset,start_date_str)
workout_final_heartrate,workout_final_timestamp,workout_timestamp = workout_data
for heartrate,time_difference in zip(workout_final_heartrate,workout_final_timestamp):
aa_dashboard_table = generate_aa_new_table(
heartrate,time_difference,current_user_aa_ranges)
else:
aa_dashboard_table = {}
return aa_dashboard_table
def aadashboard_update_instance(user,start_date,data):
AAdashboard.objects.filter(user=user,created_at=start_date).update(data=data)
def aadashboard_create_instance(user, data, start_date):
created_at = start_date
AAdashboard.objects.create(user = user,created_at = created_at,data=data)
def store_garmin_aa_dashboard(user,from_date,to_date):
print("AA dashboard calculations got started",user.username)
from_date_obj = datetime.strptime(from_date, "%Y-%m-%d").date()
to_date_obj = datetime.strptime(to_date, "%Y-%m-%d").date()
current_date = to_date_obj
aa_dashboard = AAdashboard.objects.filter(user=user,created_at=from_date_obj)
while (current_date >= from_date_obj):
device_type = quicklook.calculations.calculation_driver.which_device(user)
if device_type == "garmin":
data = aa_dashboard_ranges(user,current_date)
elif device_type == "fitbit":
data = aa_dashboard_ranges_fitbit(user,current_date)
if data:
print("AA dashboard calculations creating")
try:
user_aa = AAdashboard.objects.get(user=user, created_at=current_date)
aadashboard_update_instance(user,current_date,data)
except AAdashboard.DoesNotExist:
aadashboard_create_instance(user, data, current_date)
else:
print("NO AA dashboard")
current_date -= timedelta(days=1)
print("AA ddashboard calculations got finished")
# def store_fitbit_aa1(user,from_date,to_date):
# from_date_obj = datetime.strptime(from_date, "%Y-%m-%d").date()
# to_date_obj = datetime.strptime(to_date, "%Y-%m-%d").date()
# current_date = to_date_obj
# while (current_date >= from_date_obj):
# activities_dict = get_usernput_activities(user,current_date)
# data = fitbit_aa.fitbit_aa_chart_one_new(user,current_date,user_input_activities=activities_dict)
# if data.get('total_time'):
# print("Fitbit AA1 calculations creating")
# try:
# user_aa = AA.objects.get(user=user, created_at=current_date)
# aa_update_instance(user_aa, data)
# except AA.DoesNotExist:
# aa_create_instance(user, data, current_date)
# else:
# print("NO Fitbit AA1")
# current_date -= timedelta(days=1)
def store_aadashboard_calculations(user,from_date,to_date):
'''
Take a user, a start date and an end date, run the AA dashboard
calculations for every date in that range and store the results
in the database.
Args: user (user object)
      from_date (start date)
      to_date (end date)
Return: None
'''
# device_type = quicklook.calculations.calculation_driver.which_device(user)
# print(device_type,"device typeFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFff")
# if device_type == "garmin":
store_garmin_aa_dashboard(user,from_date,to_date)
# elif device_type == "fitbit":
# print("Fitbit AA chat1 data calculation got started")
# store_fitbit_aa1(user,from_date,to_date)
# print("Fitbit AA chat1 data calculation finished")
return None
|
from django.shortcuts import render
from inventory.models import Item  # used to query the database
from django.http import Http404 #to return 404 page when needed
# Create your views here.
def index(request):
items = Item.objects.exclude(amount=0)
return render(request, 'inventory/index.html', {'items': items,})
# render() creates the HTTP response and wires the view to its template
# the context key is the variable name used in the template; the value comes from the queryset
def item_detail(request, id):
try:
item = Item.objects.get(id=id)  # look for the item matching that id
except Item.DoesNotExist:
raise Http404('This item does not exist')
return render(request, 'inventory/item_detail.html', {'item':item,})
# detail views take the request plus any URL parameters they need (here, the item id)
|
import json
import requests
class Media(object):
    @staticmethod
    def upload(accessToken, filePath, mediaType):
        # Upload a local media file to the WeChat media endpoint and return its media_id
        postUrl = "https://api.weixin.qq.com/cgi-bin/media/upload?access_token=%s&type=%s" % (accessToken, mediaType)
        with open(filePath, "rb") as openFile:
            files = {'media': openFile}
            urlResp = requests.post(postUrl, files=files)
        return json.loads(urlResp.text)['media_id']
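# Illustrative usage sketch (hypothetical token and path; a real access token
# comes from the WeChat credential API):
if __name__ == "__main__":
    media_id = Media.upload("ACCESS_TOKEN_HERE", "/tmp/cover.jpg", "image")
    print("uploaded media_id:", media_id)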
|
# Compatibility layer between Python 2 and Python 3
from __future__ import print_function
import sqlite3
from matplotlib import pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
from scipy import stats
from sklearn import metrics
from sklearn.metrics import classification_report
from sklearn import preprocessing
import keras
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten, Reshape, GlobalAveragePooling1D
from keras.layers import Conv2D, MaxPooling2D, Conv1D, MaxPooling1D
from keras.utils import np_utils
def feature_normalize(dataset):
mu = np.mean(dataset, axis=0)
sigma = np.std(dataset, axis=0)
return (dataset - mu)/sigma
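# Quick sanity check for feature_normalize (illustrative only): z-scoring a
# column should leave it with zero mean and unit standard deviation, e.g.
#   demo = feature_normalize(np.array([1.0, 2.0, 3.0]))
#   print(demo.mean(), demo.std())   # -> approximately 0.0 and 1.0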
def show_confusion_matrix(validations, predictions):
matrix = metrics.confusion_matrix(validations, predictions)
plt.figure(figsize=(10, 8))
sns.heatmap(matrix,
cmap="coolwarm",
linecolor='white',
linewidths=1,
xticklabels=activity_map.values(),
yticklabels=activity_map.values(),
annot=True,
fmt="d")
plt.title("Confusion Matrix")
plt.ylabel("True Label")
plt.xlabel("Predicted Label")
plt.show()
def show_basic_dataframe_info(dataframe, preview_rows=20):
"""
This function shows basic information for the given dataframe
Args:
dataframe: A Pandas DataFrame expected to contain data
preview_rows: An integer value of how many rows to preview
Returns:
Nothing
"""
# Shape and how many rows and columns
print("Number of columns in the dataframe: %i" % (dataframe.shape[1]))
print("Number of rows in the dataframe: %i\n" % (dataframe.shape[0]))
print("First 20 rows of the dataframe:\n")
# Show first 20 rows
print(dataframe.head(preview_rows))
print("\nDescription of dataframe:\n")
# Describe dataset like mean, min, max, etc.
print(dataframe.describe())
def read_data(file_path):
"""
This function reads the accelerometer data from a file
Args:
file_path: URL pointing to the CSV file
Returns:
A pandas dataframe
"""
column_names = ['user-id',
'activity',
'timestamp',
'x-axis',
'y-axis',
'z-axis']
df = pd.read_csv(file_path,
header=None,
names=column_names)
# Last column has a ";" character which must be removed ...
df['z-axis'].replace(regex=True,
inplace=True,
to_replace=r';',
value=r'')
# ... and then this column must be transformed to float explicitly
df['z-axis'] = df['z-axis'].apply(convert_to_float)
# This is very important otherwise the model will not fit and loss
# will show up as NAN
df.dropna(axis=0, how='any', inplace=True)
return df
def read_data_pamap(file_path, activity_list, column_list, limit=1000000000):
"""
This function reads the accelerometer data from a db
Args:
file_path: URL pointing to the DB file
Returns:
A pandas dataframe
"""
conn = sqlite3.connect(file_path)
# column_list_str = ", ".join(['"' + x +'"' for x in column_list])
training_records_list=[]
testing_records_list=[]
for activity_id in activity_list:
query = '''select %s from main where "activity_id"=%d and "sub_id" = 1 order by "index" limit %d'''%(column_list, activity_id, limit)
print(query)
df = pd.read_sql(query, conn)
mark_80 = int(len(df) * .8)
training_records = df.iloc[:mark_80, :]
testing_records = df.iloc[mark_80:, :]
training_records_list.append(training_records)
testing_records_list.append(testing_records)
training_return_df = pd.concat(training_records_list)
testing_return_df = pd.concat(testing_records_list)
# This is very important otherwise the model will not fit and loss
# will show up as NAN
training_return_df.dropna(axis=0, how='any', inplace=True)
testing_return_df.dropna(axis=0, how='any', inplace=True)
return training_return_df, testing_return_df
def convert_to_float(x):
    try:
        return float(x)
    except (ValueError, TypeError):
        return np.nan
def plot_axis(ax, x, y, title):
ax.plot(x, y)
ax.set_title(title)
ax.xaxis.set_visible(False)
ax.set_ylim([min(y) - np.std(y), max(y) + np.std(y)])
ax.set_xlim([min(x), max(x)])
ax.grid(True)
def plot_activity(activity, data):
fig, (ax0, ax1, ax2) = plt.subplots(nrows=3,
figsize=(15, 10),
sharex=True)
plot_axis(ax0, data['timestamp'], data['x-axis'], 'x-axis')
plot_axis(ax1, data['timestamp'], data['y-axis'], 'y-axis')
plot_axis(ax2, data['timestamp'], data['z-axis'], 'z-axis')
plt.subplots_adjust(hspace=0.2)
fig.suptitle(activity)
plt.subplots_adjust(top=0.90)
plt.show()
def create_segments_and_labels(df, m_feature_list, time_steps, step, label_name):
"""
This function receives a dataframe and returns the reshaped segments
of x,y,z acceleration as well as the corresponding labels
Args:
df: Dataframe in the expected format
time_steps: Integer value of the length of a segment that is created
Returns:
reshaped_segments
labels:
"""
# number of sensor channels used as features (3 accel + 3 gyro + 3 mag)
N_FEATURES = 9
# Number of steps to advance in each iteration (for me, it should always
# be equal to the time_steps in order to have no overlap between segments)
# step = time_steps
segments = []
labels = []
for i in range(0, len(df) - time_steps, step):
segment = []
for feature in m_feature_list:
segment.append(df[feature].values[i: i + time_steps])
# xs = df['x-axis'].values[i: i + time_steps]
# ys = df['y-axis'].values[i: i + time_steps]
# zs = df['z-axis'].values[i: i + time_steps]
# Retrieve the most often used label in this segment
label = stats.mode(df[label_name][i: i + time_steps])[0][0]
segments.append(segment)
labels.append(label)
# Bring the segments into a better shape
reshaped_segments = np.asarray(segments, dtype= np.float32).reshape(-1, time_steps, N_FEATURES)
labels = np.asarray(labels)
return reshaped_segments, labels
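# Shape illustration for create_segments_and_labels (hypothetical toy data; the
# function hard-codes N_FEATURES = 9, so the toy frame uses 9 feature columns):
#   toy_cols = ["f%d" % i for i in range(9)]
#   toy = pd.DataFrame(np.random.randn(20, 9), columns=toy_cols)
#   toy["label"] = 0
#   segs, labs = create_segments_and_labels(toy, toy_cols, 5, 5, "label")
#   print(segs.shape, labs.shape)   # -> (3, 5, 9) (3,)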
# ------- THE PROGRAM TO LOAD DATA AND TRAIN THE MODEL -------
# Set some standard parameters upfront
pd.options.display.float_format = '{:.1f}'.format
sns.set() # Default seaborn look and feel
plt.style.use('ggplot')
print('keras version ', keras.__version__)
# LABELS = ["Downstairs",
# "Jogging",
# "Sitting",
# "Standing",
# "Upstairs",
# "Walking"]
activity_map = {
# 0: 'transient',
1:'lying',
2:'sitting',
3:'standing',
4:'walking',
5:'running',
6:'cycling',
7:'Nordic walking',
9:'watching TV',
# 10:'computer work',
11:'car driving',
12:'ascending stairs',
13:'descending stairs',
# 16:'vacuum cleaning',
17:'ironing',
18:'folding laundry',
19:'house cleaning',
# 20:'playing soccer',
24:'rope jumping'
}
LABELS = activity_map.keys()
# The number of steps within one time segment
TIME_PERIODS = 200#80
# The steps to take from one segment to the next; if this value is equal to
# TIME_PERIODS, then there is no overlap between the segments
STEP_DISTANCE = 100#40
print("\n--- Load, inspect and transform data ---\n")
# Load data set containing all the data from csv
db_path = '/Users/kasun/projects/activity_capsule/data/pmap2.db'
feature_list_hand = ["acc_16_01_hand",
"acc_16_02_hand",
"acc_16_03_hand",
"gyr_01_hand",
"gyr_02_hand",
"gyr_03_hand",
"mag_01_hand",
"mag_02_hand",
"mag_03_hand",
]
feature_list_chest = ["acc_16_01_chest",
"acc_16_02_chest",
"acc_16_03_chest",
"gyr_01_chest",
"gyr_02_chest",
"gyr_03_chest",
"mag_01_chest",
"mag_02_chest",
"mag_03_chest",
]
feature_list_train = feature_list_hand
feature_list_chest = feature_list_hand
df_hand_train, df_hand_test = read_data_pamap(db_path, activity_map.keys(),"sub_id, activity_id," +", ".join(feature_list_hand), 10899)
df_chest_train, df_chest_test = read_data_pamap(db_path,activity_map.keys(),"sub_id, activity_id,"+", ".join(feature_list_chest), 10899)
df_train = df_chest_train
df_test = df_chest_test
# df_test = df_test2
# df_train.rename(inplace=True, columns={"sub_id": "user-id",
# "activity_id": "activity",
# "acc_16_01_hand": "x-axis",
# "acc_16_02_hand": "y-axis",
# "acc_16_03_hand": "z-axis"})
# df_test.rename(inplace=True, columns={"sub_id": "user-id",
# "activity_id": "activity",
# "acc_16_01_hand": "x-axis",
# "acc_16_02_hand": "y-axis",
# "acc_16_03_hand": "z-axis"})
# ankle_df = read_data_pamap(db_path,[4,5,12],"sub_id, activity_id, acc_16_01_ankle, acc_16_02_ankle, acc_16_03_ankle", 85000)
# chest_query = '''select %s from main where "activity_id" in (4,5,12)'''%("sub_id, activity_id, acc_16_01_chest, acc_16_02_chest, acc_16_03_chest")
# print(chest_query)
# chest_df = pd.read_sql(chest_query, PMAP_DB)
# # This is very important otherwise the model will not fit and loss
# # will show up as NAN
# chest_df.dropna(axis=0, how='any', inplace=True)
# chest_df.rename(inplace=True, columns={"sub_id":"user-id",
# "activity_id":"activity",
# "acc_16_01_chest":"x-axis",
# "acc_16_02_chest":"y-axis",
# "acc_16_03_chest":"z-axis"})
# ankle_query = '''select %s from main where "activity_id" in (4,5,12)'''%("sub_id, activity_id, acc_16_01_ankle, acc_16_02_ankle, acc_16_03_ankle")
# print(ankle_query)
# ankle_df = pd.read_sql(ankle_query, PMAP_DB)
# # This is very important otherwise the model will not fit and loss
# # will show up as NAN
# ankle_df.dropna(axis=0, how='any', inplace=True)
# ankle_df.rename(inplace=True, columns={"sub_id":"user-id",
# "activity_id":"activity",
# "acc_16_01_ankle":"x-axis",
# "acc_16_02_ankle":"y-axis",
# "acc_16_03_ankle":"z-axis"})
# Describe the data
# show_basic_dataframe_info(df, 20)
# df['activity'].value_counts().plot(kind='bar',
# title='Training Examples by Activity Type')
# plt.show()
# df['user-id'].value_counts().plot(kind='bar',
# title='Training Examples by User')
# plt.show()
# plot the accl signals
# for activity in np.unique(df["activity"]):
# subset = df[df["activity"] == activity][:180]
# plot_activity(activity, subset)
# Define column name of the label vector
LABEL = "ActivityEncoded"
# Transform the labels from String to Integer via LabelEncoder
le = preprocessing.LabelEncoder()
# Add a new column to the existing DataFrame with the encoded values
df_train[LABEL] = le.fit_transform(df_train["activity_id"].values.ravel())
df_test[LABEL] = le.fit_transform(df_test["activity_id"].values.ravel())
# chest_df[LABEL] = le.fit_transform(chest_df["activity"].values.ravel())
# ankle_df[LABEL] = le.fit_transform(ankle_df["activity"].values.ravel())
# %%
# current_df = wrist_df
print("\n--- Reshape the data into segments ---\n")
# Differentiate between test set and training set
#
# df_train['activity_id'].value_counts().plot(kind='bar',
# title='Training Examples by Activity Type')
# plt.show()
# df_train['user-id'].value_counts().plot(kind='bar',
# title='Training Examples by User')
# plt.show()
#
# df_test['activity_id'].value_counts().plot(kind='bar',
# title='testing Examples by Activity Type')
# plt.show()
#
# df_test['user-id'].value_counts().plot(kind='bar',
# title='testing Examples by User')
# plt.show()
# df_train = watch_df[watch_df['user-id'] > 1610] #1611 - 1651
# df_test = phone_df[phone_df['user-id'] <= 1610] #1600 - 1610
#
# df_train = pd.concat([phone_df[phone_df['user-id'] > 1610], watch_df[watch_df['user-id'] > 1610]]) #1611 - 1651
# df_test = pd.concat([phone_df[phone_df['user-id'] <= 1610], watch_df[watch_df['user-id'] <= 1610]]) #1600 - 1610
# Normalize features for training data set
for feature in feature_list_hand:
df_train[feature] = feature_normalize(df_train[feature])
for feature in feature_list_hand:
df_test[feature] = feature_normalize(df_test[feature])
# Round in order to comply to NSNumber from iOS
# df_train = df_train.round({'x-axis': 6, 'y-axis': 6, 'z-axis': 6})
# Reshape the training data into segments
# so that they can be processed by the network
x_train, y_train = create_segments_and_labels(df_train,
feature_list_hand,
TIME_PERIODS,
STEP_DISTANCE,
LABEL)
# %%
# Inspect x data
print('x_train shape: ', x_train.shape)
# Displays (20869, 40, 3)
print(x_train.shape[0], 'training samples')
# Displays 20869 train samples
# Inspect y data
print('y_train shape: ', y_train.shape)
# Displays (20869,)
# Set input & output dimensions
num_time_periods, num_sensors = x_train.shape[1], x_train.shape[2]
num_classes = le.classes_.size
print(list(le.classes_))
# Set input_shape / reshape for Keras
# Remark: acceleration data is concatenated in one array in order to feed
# it properly into coreml later, the preferred matrix of shape [80,3]
# cannot be read in with the current version of coreml (see also reshape
# layer as the first layer in the keras model)
input_shape = (num_time_periods*num_sensors)
x_train = x_train.reshape(x_train.shape[0], input_shape)
print('x_train shape:', x_train.shape)
# x_train shape: (20869, 120)
print('input_shape:', input_shape)
# input_shape: (120)
# Convert type for Keras otherwise Keras cannot process the data
x_train = x_train.astype("float32")
y_train = y_train.astype("float32")
# %%
# One-hot encoding of y_train labels (only execute once!)
y_train = np_utils.to_categorical(y_train, num_classes)
print('New y_train shape: ', y_train.shape)
# (4173, 6)
# %%
print("\n--- Create neural network model ---\n")
# 1D CNN neural network
model_m = Sequential()
model_m.add(Reshape((TIME_PERIODS, num_sensors), input_shape=(input_shape,)))
model_m.add(Conv1D(100, 10, activation='relu', input_shape=(TIME_PERIODS, num_sensors)))
model_m.add(Conv1D(100, 10, activation='relu'))
model_m.add(MaxPooling1D(4))
model_m.add(Conv1D(160, 10, activation='relu'))
model_m.add(Conv1D(160, 10, activation='relu'))
model_m.add(GlobalAveragePooling1D())
model_m.add(Dropout(0.8))
model_m.add(Dense(len(activity_map.keys()), activation='softmax'))
print(model_m.summary())
model_m.summary()
# Accuracy on training data: 99%
# Accuracy on test data: 91%
# %%
print("\n--- Fit the model ---\n")
# The EarlyStopping callback monitors training accuracy: if it fails to
# improve for one epoch (patience=1), training stops early
callbacks_list = [
keras.callbacks.ModelCheckpoint(
filepath='best_model.{epoch:02d}-{val_loss:.2f}.h5',
monitor='val_loss', save_best_only=True),
keras.callbacks.EarlyStopping(monitor='accuracy', patience=1)
]
model_m.compile(loss='categorical_crossentropy',
optimizer='adam', metrics=['accuracy'])
# Hyper-parameters
BATCH_SIZE = 100
EPOCHS = 20
# Enable validation to use ModelCheckpoint and EarlyStopping callbacks.
history = model_m.fit(x_train,
y_train,
batch_size=BATCH_SIZE,
epochs=EPOCHS,
# callbacks=callbacks_list,
# validation_split=0.2,
verbose=1)
# %%
print("\n--- Learning curve of model training ---\n")
# summarize history for accuracy and loss
# plt.figure(figsize=(6, 4))
# plt.plot(history.history['accuracy'], "g--", label="Accuracy of training data")
# plt.plot(history.history['val_accuracy'], "g", label="Accuracy of validation data")
# plt.plot(history.history['loss'], "r--", label="Loss of training data")
# plt.plot(history.history['val_loss'], "r", label="Loss of validation data")
# plt.title('Model Accuracy and Loss')
# plt.ylabel('Accuracy and Loss')
# plt.xlabel('Training Epoch')
# plt.ylim(0)
# plt.legend()
# plt.show()
#%%
print("\n--- Check against test data ---\n")
# Normalize features for testing data set
# df_test['x-axis'] = feature_normalize(df_test['x-axis'])
# df_test['y-axis'] = feature_normalize(df_test['y-axis'])
# df_test['z-axis'] = feature_normalize(df_test['z-axis'])
#
# df_test = df_test.round({'x-axis': 6, 'y-axis': 6, 'z-axis': 6})
x_test, y_test = create_segments_and_labels(df_test,
feature_list_hand,
TIME_PERIODS,
STEP_DISTANCE,
LABEL)
# Set input_shape / reshape for Keras
x_test = x_test.reshape(x_test.shape[0], input_shape)
x_test = x_test.astype("float32")
y_test = y_test.astype("float32")
y_test = np_utils.to_categorical(y_test, num_classes)
score = model_m.evaluate(x_test, y_test, verbose=1)
print(model_m.metrics_names)
print(score)
print("\nAccuracy on test data: %0.2f" % score[1])
print("\nLoss on test data: %0.2f" % score[0])
# %%
print("\n--- Confusion matrix for test data ---\n")
y_pred_test = model_m.predict(x_test)
# Take the class with the highest probability from the test predictions
max_y_pred_test = np.argmax(y_pred_test, axis=1)
max_y_test = np.argmax(y_test, axis=1)
show_confusion_matrix(max_y_test, max_y_pred_test)
# %%
print("\n--- Classification report for test data ---\n")
print(classification_report(max_y_test, max_y_pred_test, target_names=activity_map.values()))
|
# -*- coding: utf-8 -*-
import pymysql
from .items import SinaNewsItem
import traceback
import logging
from Sina_News.middlewares import UrlFilterAndAdd, URLRedisFilter
import os
from twisted.enterprise import adbapi
logger = logging.getLogger(__name__)
checkFile = "isRunning.txt"
class SinaNewsPipeline(object):
commit_sql_str = '''insert into news1(title,date_time,content,url,author,source) values ("{title}","{date_time}","{content}","{url}","{author}","{source}");'''
# insert_url_sql = '''insert into total_url(url) values ("{url}");'''
# query_url_str = '''select * from total_url where url="{url}";'''
def __init__(self, pool):
self.dupefilter = UrlFilterAndAdd()
self.dbpool = pool
@classmethod
def from_crawler(cls, crawler):
return cls(crawler.settings)
@classmethod
def from_settings(cls, settings):
dbparms = dict(
host=settings.get("MYSQL_HOST"),
port=settings.get("MYSQL_PORT"),
db=settings.get("MYSQL_DBNAME"),
user=settings.get("MYSQL_USER"),
passwd=settings.get("MYSQL_PASSWD"),
charset='utf8mb4',
cursorclass=pymysql.cursors.DictCursor,
use_unicode=True
)
dbpool = adbapi.ConnectionPool("pymysql", **dbparms)
return cls(dbpool)
def process_item(self, item, spider):
self.dupefilter.add_url(item['url'])
query = self.dbpool.runInteraction(self.do_insert, item)
query.addErrback(self.handle_error, item, spider)
def handle_error(self, failure, item, spider):
# 处理异步插入的异常
print(failure)
def do_insert(self, cursor, item):
# 执行具体的插入
# 根据不同的item 构建不同的sql语句并插入到mysql中
sqltext = self.commit_sql_str.format(
title=pymysql.escape_string(item["title"]),
date_time=item["date_time"],
content=pymysql.escape_string(item["content"]),
url=item["url"],
author=item["author"],
source=item["source"]
)
cursor.execute(sqltext)
def open_spider(self, spider):
f = open(checkFile, "w")
f.close()
def close_spider(self, spider):
isFileExsit = os.path.isfile(checkFile)
if isFileExsit:
os.remove(checkFile)
|
class Solution:
# @param A : list of list of integers
# @param B : integer
# @return an integer
def searchMatrix(self, A, B):
rows = len(A)
cols = len(A[0])
start = 0
end = rows * cols - 1
while start <= end:
    mid = (start + end) // 2
    # map the flat index back to (row, column) using the column count
    num = A[mid // cols][mid % cols]
    if num == B:
        return 1
    elif num < B:
        start = mid + 1
    else:
        end = mid - 1
return 0
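# Minimal usage sketch (not part of the original solution): the matrix is
# assumed to be sorted in row-major order, which is what the flattened
# binary search relies on.
if __name__ == "__main__":
    matrix = [[1, 3, 5], [7, 9, 11], [13, 15, 17]]
    print(Solution().searchMatrix(matrix, 9))   # expected 1 (found)
    print(Solution().searchMatrix(matrix, 4))   # expected 0 (not found)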
|
#student I.D: 15315901
"""A class for Gaussian integers
A Gaussing integer is a complex number of the form a+bi where a and b are integers"""
"""write the code for the class Gaussian which models Gaussian integers. Your code should have:
1. A constructor so that we can create objects of the class Gaussian as follows:
z = Gaussian(4, -2)
The arguments to the constructor should be converted to ints using the int function.
2. A string representation using the __str__ method.
3. Methods __eq__ and __ne__ for equality testing.
4. Methods for doing arithmetic on Gaussian integers, in particular The following methods should be implemented:
5. __add__ and __radd__ for addition.
6. __sub__ and __rsub__ for subtraction.
7. __mul__ and __rmul__ for multiplication.
8. For division, we cannot expect the quotient of two Gaussian integers to be a new Gaussian integer, so we will have to implement the methods __floordiv__ (for ‘integer’ division) and __mod__ for remainder.
For each of the arithmetic operations, one argument is allowed to be an int. The ‘integer’ division is defined in the following way:
if z = a + bi and w = c + di, then we can form the quotient in the complex numbers:
z/w = (a + bi) / (c + di) = ((a + bi)(c − di)) /(c^2 + d^2) = (ac + bd)/(c^2 + d^2) + ((ad − bc)/ (c^2 + d^2)) i
Now we let the ‘integer’ quotient of z and w be q = m+ni, where m is the closest
integer to (ac+bd)/(c^2 +d^2) and n is the closest integer to
(ad−bc)/(c^2+d^2)
Then we can define the remainder by as r = z − q*w."""
class Gaussian:
"""A class for modeling gaussian integers"""
def __init__(self, a, b):
"""initialises the two values for the gaussian integer"""
self.a=int(a)
self.b=int(b)
def __str__(self):
    """represent the Gaussian integer as a string"""
    if self.b >= 0:
        return "{}+{}i".format(self.a, self.b)
    return "{}{}i".format(self.a, self.b)
def __eq__(self, other):
"""for testing if two gaussian integers are the same"""
return self.a == other.a and self.b == other.b
def __add__(self, other):
    """(a+bi) + (c+di) = (a+c) + (b+d)i"""
    # an int/float c corresponds to the Gaussian integer c + 0i
    if type(other) == int or type(other) == float:
        other = Gaussian(other, 0)
    l = self.a + other.a
    r = self.b + other.b
    return Gaussian(l, r)
def __radd__(self, other):
if type(other) == int or type(other) == float:
    other = Gaussian(other, 0)
return other.__add__(self)
def __sub__(self, other):
    """(a+bi) - (c+di) = (a-c) + (b-d)i"""
    if type(other) == int or type(other) == float:
        other = Gaussian(other, 0)
l=self.a - other.a
r=self.b - other.b
return Gaussian(l,r)
def __rsub__(self, other):
if type(other) == int or type(other) == float:
    other = Gaussian(other, 0)
return other.__sub__(self)
def __mul__(self, other):
    """(a+bi) * (c+di) = (ac - bd) + (ad + bc)i"""
    if type(other) == int or type(other) == float:
        other = Gaussian(other, 0)
l=((self.a * other.a)-(self.b*other.b))
r=((self.a*other.b)+(self.b*other.a))
return Gaussian(l,r)
def __rmul__(self, other):
if type(other) == int or type(other) == float:
    other = Gaussian(other, 0)
return other.__mul__(self)
def __floordiv__(self, other):
    """
    'Integer' division: if z = a + bi and w = c + di, then
    z/w = (ac + bd)/(c^2 + d^2) + ((ad - bc)/(c^2 + d^2))i
    and the quotient q = m + ni takes m and n as the closest integers
    to those two fractions.
    """
    if type(other) == int or type(other) == float:
        other = Gaussian(other, 0)
    denom = (other.a ** 2) + (other.b ** 2)
    l = ((self.a * other.a) + (self.b * other.b)) / denom
    r = ((self.a * other.b) - (self.b * other.a)) / denom
    # round to the closest integers rather than truncating
    m = int(round(l))
    n = int(round(r))
    return Gaussian(m, n)
def __mod__(self, other):
    """remainder of division: r = z - q*w where q = z // w"""
    if type(other) == int or type(other) == float:
        other = Gaussian(other, 0)
    return self - ((self // other) * other)
def test_equality():
assert Gaussian.__eq__(Gaussian(1,1),Gaussian(3,5))==False
def test_addition():
assert Gaussian.__add__(Gaussian(1,1),Gaussian(3,5)) == Gaussian(4,6)
def test_subtraction():
assert Gaussian.__sub__(Gaussian(1,1),Gaussian(3,5)) == Gaussian(-2,-4)
def test_multiplication():
assert Gaussian.__mul__(Gaussian(1,1),Gaussian(3,5)) == Gaussian(-2,8)
def test_floordivision():
assert Gaussian.__floordiv__(Gaussian(1,1),Gaussian(3,5)) == Gaussian(0,0)
def test_mod():
assert Gaussian.__mod__(Gaussian(1,1),Gaussian(3,5)) == Gaussian(1,1)
first = Gaussian(1,1)
second = Gaussian(3,5)
print(first,",", second)
print("{} == {} = {}".format(first, second, first==second))
print("{} + {} = {}".format(first,second,first+second))
print("{} - {} = {}".format(first,second, first-second))
print("{} * {} = {}".format(first, second, first*second))
print("{} // {} = {}".format(first, second, first//second))
print("{} % {} = {}".format(first, second, first%second))
test_equality()
test_addition()
test_subtraction()
test_multiplication()
test_floordivision()
test_mod()
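# Worked example of the 'integer' division defined above (illustrative only):
# for z = 7+5i and w = 3+2i, the real part of z/w is (7*3 + 5*2)/13 ≈ 2.38 and
# the imaginary part is (7*2 - 5*3)/13 ≈ -0.08, so the quotient rounds to
# q = 2+0i and the remainder is r = z - q*w = 1+1i.
z = Gaussian(7, 5)
w = Gaussian(3, 2)
print("{} // {} = {}".format(z, w, z // w))
print("{} % {} = {}".format(z, w, z % w))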
|
from datetime import datetime
from pydantic import BaseModel, EmailStr
class UserBase(BaseModel):
username: str
avatar: str = None
# email: EmailStr
class UserCreate(UserBase):
nickname: str
password: str
class UserActivated(UserBase):
id: int
is_active: bool
email: EmailStr
class User(UserBase):
id: int
nickname: str
class Config:
orm_mode = True
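# Minimal usage sketch (illustrative, not part of the application): pydantic
# validates and coerces the payload; Config.orm_mode also allows User.from_orm(obj).
if __name__ == "__main__":
    payload = {"username": "alice", "nickname": "Alice", "password": "secret"}
    new_user = UserCreate(**payload)
    print(new_user.dict(exclude={"password"}))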
|
import re
import sys
import xbmcaddon
from resources.lib import scraper, xbmc_handler
### get addon info
__addon__ = xbmcaddon.Addon()
__addonid__ = __addon__.getAddonInfo('id')
__addonidint__ = int(sys.argv[1])
def main(params):
# See if page number is set, or set it to 1
try:
page_no = int(params['page_no'])
except (KeyError, ValueError):
page_no = 1
contents = scraper.open_page('http://comment.rsablogs.org.uk/videos/page/'+str(page_no))
video_list = scraper.scrape_site(contents)
for video in video_list:
xbmc_handler.add_video_link(video['title'], video['url'])
xbmc_handler.add_next_page(page_no + 1)
xbmc_handler.end_directory()
if __name__ == '__main__':
params = xbmc_handler.get_params()
main(params)
|
import tensorflow as tf
import tensorflow.keras as keras
model = keras.applications.vgg16.VGG16(include_top=False, weights="imagenet", input_shape=(224, 224, 3))
for layer in model.layers:
layer.trainable = False
x = model.output
x = keras.layers.Flatten()(x)
x = keras.layers.Dense(30, activation="relu")(x)
x = keras.layers.Dense(30, activation="relu")(x)
x = keras.layers.Dense(30, activation="relu")(x)
x = keras.layers.Dense(1, activation="sigmoid")(x)
model = keras.models.Model(inputs=model.input, outputs=x)
model.summary()
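# Possible continuation (hypothetical train_images/train_labels arrays; the single
# sigmoid output implies a binary classification head):
# model.compile(optimizer="adam", loss="binary_crossentropy", metrics=["accuracy"])
# model.fit(train_images, train_labels, epochs=5, batch_size=32, validation_split=0.1)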
|
from mod_base import *
class Part(Command):
"""Part list of channels."""
def run(self, win, user, data, caller=None):
args = Args(data)
if not args: args = [win.GetName()]
self.bot.PartChannels(args)
module = {
"class": Part,
"type": MOD_COMMAND,
"level": 2,
"zone":IRC_ZONE_CHANNEL,
"aliases":["p"]
}
|
import pymysql
import csv
import codecs
config = {'user': 'root',
'password':'',
'port': 3306,
'host': '127.0.0.1',
'db': 'book',
'charset': 'utf8'}
def get_conn():
conn = pymysql.connect(user=config['user'],
password=config['password'],
port=config['port'],
host=config['host'],
db=config['db'],
charset=config['charset'])
return conn
def insert(cur, sql, args):
cur.execute(sql, args)
def truncate(cur, sql, args):
cur.execute(sql, args)
def read_csv_to_mysql(filename):
with codecs.open(filename=filename, mode='r', encoding='utf-8') as f:
reader = csv.reader(f)
head = next(reader)
conn = get_conn()
cur = conn.cursor()
sql = 'truncate table booktuijian'
truncate(cur,sql=sql,args=None)
sql = 'insert into booktuijian values(%s,%s,%s)'
for item in reader:
if item[1] is None or item[1] == '':  # item[1] is the unique key and must not be null
continue
args = tuple(item)
print(args)
insert(cur, sql=sql, args=args)
conn.commit()
cur.close()
conn.close()
if __name__ == '__main__':
read_csv_to_mysql("datasets\\new\\booktuijian.csv")
|
# Generated by Django 2.2.13 on 2020-07-10 06:03
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('shop', '0035_auto_20200710_1132'),
]
operations = [
migrations.DeleteModel(
name='ContactForm',
),
migrations.DeleteModel(
name='ContactMessage',
),
]
|
# math2: maximum minimum and factor
# zio800
from math import *
# maximum and minimum
def maxmin(lst):
nummax = lst[0]
nummin = lst[0]
for item in lst:
if item < nummin:
nummin = item
if item > nummax:
nummax = item
print('max = ', nummax, ', min = ', nummin)
# multiples of number in range
def factor(fac, first, last):
multiple = []
for item in range(first, last):
if item % fac == 0:
multiple.append(item)
print('number of multiples = ', len(multiple))
print('multiples: ', multiple)
print('sum of multiples = ', sum(multiple))
# calculate
factor(9, 1, 100)
lst1 = [1, 2, 3, 4, 5]
maxmin(lst1)
|
from django.db import models
class myform(models.Model):
data=models.CharField(max_length=20000)
title=models.CharField(max_length=100)
slabel=models.CharField(max_length=100)
class values(models.Model):
lform=models.ForeignKey(myform, on_delete=models.CASCADE)
fieldId=models.CharField(max_length=10)
label=models.CharField(max_length=100)
value=models.CharField(max_length=100)
|
import datetime
from typing import List
import dateutil.tz
from flask import Blueprint, render_template, request, redirect, url_for, abort, flash
from . import db, model
from .model import Survey
import flask_login
import json
bp = Blueprint("main", __name__)
@bp.route("/")
@flask_login.login_required
def index():
current_user = flask_login.current_user
# loading the surveys created by the current user
surveys = model.Survey.query.filter_by(user=current_user).order_by(model.Survey.timestamp.desc()).all()
return render_template("main/index.html", surveys=surveys)
@bp.route("/change_state", methods=["POST"])
@flask_login.login_required
def change_state():
survey = model.Survey.query.filter_by(id=request.form.get("survey_id")).first()
print(request.form.get("state"))
if request.form.get("state") == "new":
survey.state = model.SurveyState.new
elif request.form.get("state") == "online":
survey.state = model.SurveyState.online
elif request.form.get("state")=="closed":
survey.state = model.SurveyState.closed
db.session.commit()
current_user = flask_login.current_user
# loading the surveys created by the current user
surveys = model.Survey.query.filter_by(user=current_user).order_by(model.Survey.timestamp.desc()).all()
return render_template("main/index.html", surveys=surveys)
@bp.route("/remove_survey/<int:survey_id>")
@flask_login.login_required
def remove_survey(survey_id):
# removing the selected survey
model.Survey.query.filter_by(id=survey_id).delete()
db.session.commit()
current_user = flask_login.current_user
# loading the surveys created by the current user
surveys = model.Survey.query.filter_by(user=current_user).order_by(
model.Survey.timestamp.asc()).all()
return render_template("main/index.html", surveys=surveys)
@bp.route("/add_survey")
@flask_login.login_required
def add_survey():
return render_template("main/add_survey.html")
@bp.route("/edit_survey/<int:survey_id>")
@flask_login.login_required
def edit_survey(survey_id):
current_user = flask_login.current_user
survey = model.Survey.query.filter_by(id=survey_id).first()
if not survey:
abort(404, "Suervey id {} doesn't exist.".format(survey_id))
if current_user.id != survey.user_id:
abort(404, "Survey %s is not editable through this user, try to login with another account."%(survey.title))
if survey.state != model.SurveyState.new:
abort(404, "Survey %s cannot be edited right now."%(survey.title))
return render_template("main/edit_survey.html", survey=survey)
@bp.route("/add_survey", methods=["POST"])
@bp.route("/edit_survey", methods=["POST"])
@flask_login.login_required
def post_survey():
# requesting data from the forms
dic = request.values
survey_id = request.form.get('survey_id')
if survey_id:
# if it is an edited survey, remove that survey
model.Survey.query.filter_by(id=survey_id).delete()
# getting the survey data from the forms
text_title = dic["title"]
text_description = dic["description"]
# getting the questions data from the forms
questions = [i[1] for i in dic.items() if "statement" in i[0]]
question_types = [i[1] for i in dic.items() if "type" in i[0]]
options = []
# getting the options data from the forms
for q in range(1, len(questions)+1):
options += [[i[1] for i in dic.items() if "option"+str(q) in i[0]]]
# creating the survey instance
survey = model.Survey(
user=flask_login.current_user,
title=text_title,
description=text_description,
state=model.SurveyState.new,
timestamp=datetime.datetime.now(dateutil.tz.tzlocal())
)
db.session.add(survey)
for i in range(len(questions)):
# creating the question instances
text_statement = questions[i]
question_type = question_types[i]
question = model.Question(
survey=survey,
statement=text_statement,
type=question_type,
position=i
)
db.session.add(question)
if question_type == "3" or question_type == "4":
for j, text_option in enumerate(options[i]):
# creating the option instances
option = model.Option(
question=question,
statement=text_option,
position=j
)
db.session.add(option)
# uploading to the database and redirecting
db.session.commit()
return redirect(url_for("main.index"))
@bp.route("/answer/<int:survey_id>")
def answer(survey_id):
survey = model.Survey.query.filter_by(id=survey_id).first()
if not survey:
abort(404, "Survey id {} doesn't exist.".format(survey_id))
if survey.state != model.SurveyState.online:
abort(404, "The survey %s cannot be answered, it is currently closed."%(survey.title))
return render_template("main/answer.html", survey=survey)
@bp.route("/answer/<int:survey_id>", methods=["POST"])
def post_answer(survey_id):
# creating the survey instance
survey = model.Survey.query.filter_by(id=survey_id).first()
survey_answer = model.Sanswer(
survey=survey,
timestamp=datetime.datetime.now(dateutil.tz.tzlocal())
)
db.session.add(survey_answer)
# storing the requested data from the forms
for i, question in enumerate(survey.questions):
# creating the answers instances
if question.type=="1":
question_answer = model.Qanswer(
sanswer=survey_answer,
question=question,
text=request.form.get(str(i))
)
db.session.add(question_answer)
elif question.type == "2":
if request.form.get(str(i))=="":
question_answer = model.Qanswer(
sanswer=survey_answer,
question=question,
)
else:
question_answer = model.Qanswer(
sanswer=survey_answer,
question=question,
integer=request.form.get(str(i))
)
db.session.add(question_answer)
else:
options_inds = request.form.getlist(str(i))
for option_ind in options_inds:
option = model.Option.query.filter_by(
question=question, position=int(option_ind)).first()
question_answer = model.Qanswer(
sanswer=survey_answer,
question=question,
option=option,
)
db.session.add(question_answer)
# uploading to the database and redirecting
db.session.commit()
return render_template("main/congratulations.html", survey=survey)
@bp.route("/view_answers/<int:survey_id>")
@flask_login.login_required
def view_answers(survey_id):
# rendering the view answers page
survey = model.Survey.query.filter_by(id=survey_id).first()
current_user = flask_login.current_user
if not survey:
abort(404, "Post id {} doesn't exist.".format(survey_id))
if current_user.id != survey.user_id:
abort(404, "The answers of this survey are private, try with a diferent account")
if survey.state == model.SurveyState.new:
abort(404, "The answers of survey {} are not accesible".format(survey.title))
# creating arrays to store the answers and the types of questions
answers = []
question_types = []
question_statements = []
# storing the answers depending of the type each question
for question in survey.questions:
question_answers = []
question_types += [question.type]
question_statements += [question.statement]
if question.type == "1":
for answer in question.q_answers:
question_answers += [answer.text]
answers += [question_answers]
if question.type == "2":
for answer in question.q_answers:
question_answers += [[answer.integer]]
answers += [question_answers]
if question.type == "3" or question.type == "4":
for answer in question.q_answers:
option = model.Option.query.filter_by(
id=answer.question_option_id).first()
option_text = option.statement
question_answers += [option_text]
options_labels = []
[options_labels.append(
i) for i in question_answers if i not in options_labels]
options_count = [question_answers.count(i) for i in options_labels]
answers += [[[i, j] for i, j in zip(options_labels, options_count)]]
return render_template("main/view_answers.html", survey=survey, answers=json.dumps(answers),
qtypes=json.dumps(question_types), qstatements=json.dumps(question_statements))
# @bp.route("/user/<int:user_id>")
# @flask_login.login_required
# def user(user_id):
# user = model.User.query.filter_by(id=user_id).first()
# messages = model.Survey.query.filter_by(user=user).order_by(model.Survey.timestamp.desc()).all()
# if not user:
# abort(404, "User id {} doesn't exist.".format(user_id))
# return render_template("main/profile.html", posts=messages)
|
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import statsmodels.api as sm
import statsmodels.formula.api as smf
from linearmodels import RandomEffects
from statsmodels.regression.rolling import RollingOLS
import seaborn as sns
from scipy import stats
from statsmodels.iolib.summary2 import summary_col
from stargazer.stargazer import Stargazer
os.chdir("/Users/harrynnh/workspace/misc/rescorptrans_ERC/")
erc_ibes_df = pd.read_feather("data/generated/erc_ibes.feather")
comp_df = pd.read_feather("data/generated/comp_clean.feather")
# Rolling regression CAR ~ UE
def rolling_reg(df):
formula = "cumret ~ earn_sur"
mod = RollingOLS.from_formula(formula, data=df, window=12)
rres = mod.fit()
df["roll_coef"] = rres.params.earn_sur
return df
roll_res = erc_ibes_df.groupby("gvkey").apply(rolling_reg).dropna()
rolling_erc = roll_res.roll_coef.mean()
# Cross sectional ERC with a single regression
def cs_reg(df):
formula = "cumret ~ earn_sur"
mod = smf.ols(formula, data=df)
res = mod.fit()
return res
pool_df = erc_ibes_df.loc[
erc_ibes_df["year"] > 2017, ["gvkey", "year", "cumret", "earn_sur"]
]
pool_reg = cs_reg(pool_df)
print(pool_reg.summary())
cs_erc = pool_reg.params.earn_sur
# Barplot for 2 ERCs
# print(plt.style.available)
# plt.style.use('seaborn')
plt.xkcd()
plt.bar(["Rolling ERC", "Pooled ERC"], [rolling_erc, cs_erc])
plt.xlabel("Methods")
plt.ylabel("ERC")
plt.title("Rolling firm specific ERC and pooled ERC")
plt.tight_layout()
plt.savefig("output/erc_barplot.eps", format="eps")
plt.show()
# What can I do with the firm specific ERCs?
firm_erc = (
roll_res.loc[roll_res["year"] > 2017, ["gvkey", "roll_coef"]]
.groupby("gvkey")
.agg(np.mean)
)
plt.xkcd()
fig_hist = sns.histplot(data=firm_erc, x="roll_coef", kde=True, bins=35)
fig_hist.set(xlabel="ERC", ylabel="Count", title="Firm-specific ERC distribution")
plt.tight_layout()
plt.savefig("output/erc_coef_dist.eps", format="eps")
plt.show()
time_erc = (
roll_res.loc[roll_res["year"] > 2017, ["year", "roll_coef"]]
.groupby("year")
.agg(np.mean)
)
plt.bar(["2018", "2019"], time_erc["roll_coef"])
plt.show()
# Plot pooled regression
# sns.set_theme(color_codes=True)
plt.xkcd()
fig_reg = sns.regplot(x="earn_sur", y="cumret", data=erc_ibes_df)
fig_reg.set(
xlabel="Earnings surprise",
ylabel="3-day cumulative return",
title="Earnings return relation",
)
plt.tight_layout()
plt.savefig("output/erc_regplot.eps", format="eps")
plt.show()
# Determinants of ERCs
erc_ibes_df = erc_ibes_df.assign(yq=erc_ibes_df["date"].dt.to_period("Q")).drop(
"date", axis=1
)
erc_comp_df = pd.merge(
comp_df,
erc_ibes_df.loc[erc_ibes_df["year"] > 2017],
how="left",
on=["gvkey", "yq"],
).dropna()
det_erc_res = smf.ols(
"cumret ~ earn_sur + ln_ta + earn_sur*ln_ta + mtb + earn_sur*mtb", data=erc_comp_df
).fit()
erc_det_res = summary_col(
[pool_reg, det_erc_res],
stars=True,
float_format="%0.2f",
info_dict={"R-squared": lambda x: "{:.2f}".format(x.rsquared)},
).as_latex()
# f = open("output/erc_table.tex", "w")
# f.write(erc_det_res)
# f.close()
|
import requests
url = "http://google.com"
headers = {"User-Agent" : "Mozilla/5.0 (Windows NT 10.0; Win64; x64)\
AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.114 Safari/537.36 Edg/89.0.774.68"}
res = requests.get(url, headers = headers)
res.raise_for_status()
print(len(res.text))
with open("myGoogle.html", "w", encoding="utf8") as f:
f.write(res.text)
|
import random
import physics as phy
from math import pow, sqrt
import graphics
class Moon:
def __init__(self, h, s):
self.height = h
self.size = s
self.angle = random.randint(0,399)
self.position = 0, 0
self.last_move = None
self.update_pos()
self.tower = None
self.radius = 2 * s + 4
self.mass = pow(self.radius, 2)
self.period = 6.28 * pow(h, 1.5)/sqrt(phy.GRAVITY_CONSTANT * phy.PLANET_MASS)
self.image = graphics.Circle(graphics.Point(self.position[0] + 400, self.position[1] + 400), self.radius)
self.image.setFill("Grey")
def update_pos(self):
old_position = self.position
self.position = self.height * phy.approx_cos(self.angle), self.height * phy.approx_cos(self.angle + 300)
self.last_move = phy.vec_sum(self.position, phy.vec_scale(old_position, -1))
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.2 on 2017-09-07 13:51
from __future__ import unicode_literals
import django.contrib.postgres.fields.jsonb
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('garmin', '0006_auto_20170907_0934'),
]
operations = [
migrations.AlterField(
model_name='usergarmindataactivity',
name='data',
field=django.contrib.postgres.fields.jsonb.JSONField(),
),
migrations.AlterField(
model_name='usergarmindatabodycomposition',
name='data',
field=django.contrib.postgres.fields.jsonb.JSONField(),
),
migrations.AlterField(
model_name='usergarmindatadaily',
name='data',
field=django.contrib.postgres.fields.jsonb.JSONField(),
),
migrations.AlterField(
model_name='usergarmindataepoch',
name='data',
field=django.contrib.postgres.fields.jsonb.JSONField(),
),
migrations.AlterField(
model_name='usergarmindatamanuallyupdated',
name='data',
field=django.contrib.postgres.fields.jsonb.JSONField(),
),
migrations.AlterField(
model_name='usergarmindatasleep',
name='data',
field=django.contrib.postgres.fields.jsonb.JSONField(),
),
]
|
from vtk_rw import read_vtk, write_vtk
import numpy as np
compartment = 'upper'
hemis = ['lh', 'rh']
vtk_file = '/scr/ilz3/myelinconnect/new_groupavg/profiles/%s/%s/%s_lowres_new_avgsurf_groupdata.vtk'
avg_npy_file = '/scr/ilz3/myelinconnect/new_groupavg/profiles/%s/%s_group_avg_profiles_%s.npy'
#avg_vtk_file = '/scr/ilz3/myelinconnect/new_groupavg/profiles/%s/%s_group_avg_profiles_%s.vtk'
both_hemi_npy_file = '/scr/ilz3/myelinconnect/new_groupavg/profiles/%s/both_group_avg_profiles_%s.npy'
both_hemis = []
for hemi in hemis:
avg_list = []
group_list = []
for pro in range(11):
v, f, d = read_vtk(vtk_file%(compartment, str(pro), hemi))
avg_list.append(np.mean(d, axis=1))
avg_array = np.asarray(avg_list).T
both_hemis.append(avg_array)
np.save(avg_npy_file%(compartment, hemi, compartment), avg_array)
#write_vtk(avg_vtk_file%(compartment, hemi, compartment), v,f, data=avg_array)
#print 'ready'
both = np.concatenate(both_hemis)
np.save(both_hemi_npy_file%(compartment, compartment), both)
|
from django.http import HttpResponse
import os
import sys
import dotenv
from linebot import LineBotApi, WebhookParser
from linebot.models import TextSendMessage
dotenv.load_dotenv()
channel_access_token = os.getenv('LINE_CHANNEL_ACCESS_TOKEN', None)
if channel_access_token is None:
print('Specify LINE_CHANNEL_ACCESS_TOKEN as environment variable.')
sys.exit(1)
line_bot_api = LineBotApi(channel_access_token)
def send_reply_message(reply_token, text):
line_bot_api.reply_message(reply_token, TextSendMessage(text = text))
return HttpResponse('OK')
def send_push_message(user_id, text):
line_bot_api.push_message(user_id, TextSendMessage(text = text))
return HttpResponse('OK')
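# Illustrative usage (hypothetical user id; a real reply token comes from a webhook event):
#   send_push_message("Uxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx", "Hello from the bot!")
#   send_reply_message(event.reply_token, "Thanks for your message!")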
|
# -*- coding: utf-8 -*-
import csv
import scrapy
import os
class AppradiofmSpider(scrapy.Spider):
name = 'appradiofm'
allowed_domains = ['appradiofm.com']
start_urls = ['http://appradiofm.com/by-country/']
def parse(self, response):
datas = response.xpath('.//*[@class="col s12 m6 l4 margin-all"]/a').extract()
for data in datas:
sel = scrapy.Selector(text=data)
link = sel.xpath('.//a/@href').extract_first()
country = sel.xpath('.//*[@class="title titlebylist"]/text()').extract_first()
yield scrapy.Request(response.urljoin(link),callback=self.getcountry,meta={
'country':country
})
nextlink = response.xpath('.//a[contains(.,"Next")]/@href').extract_first()
if nextlink:
yield scrapy.Request(nextlink.replace('\\\\','//'),callback=self.parse)
def getcountry(self,response):
links = response.xpath('.//*[@class="col s12 m6 l4 margin-all"]/a/@href').extract()
for link in links:
yield scrapy.Request(link,callback=self.getdata,meta={
'country':response.meta.get('country')
})
nextlink = response.xpath('.//a[contains(.,"Next")]/@href').extract_first()
if nextlink:
yield scrapy.Request(nextlink.replace('\\\\','//'),callback=self.getcountry,meta={
'country':response.meta.get('country')
})
def getdata(self,response):
title = response.xpath('.//h3[@class="player-heading-new"]/text()').extract_first()
genres = response.xpath('.//th[contains(.,"Genres")]/following-sibling::td/text()').extract_first()
language = response.xpath('.//th[contains(.,"Language")]/following-sibling::td/text()').extract_first()
location = response.xpath('.//th[contains(.,"Location")]/following-sibling::td/text()').extract_first()
website = response.xpath('.//th[contains(.,"Website")]/following-sibling::td/a/text()').extract_first()
callsign = response.xpath('.//th[contains(.,"CallSign")]/following-sibling::td/text()').extract_first()
frequency = response.xpath('.//th[contains(.,"Frequency")]/following-sibling::td/text()').extract_first()
type = response.xpath('.//th[contains(.,"Type")]/following-sibling::td/text()').extract_first()
if "appradiofm.csv" not in os.listdir(os.getcwd()):
with open("appradiofm.csv","a") as f:
writer = csv.writer(f)
writer.writerow(['country','title','genres','language','location','website','callsign','frequency','type'])
with open("appradiofm.csv","a") as f:
writer = csv.writer(f)
writer.writerow([response.meta.get('country'),title,genres,language,location,website,callsign,frequency,type])
print([response.meta.get('country'),title,genres,language,location,website,callsign,frequency,type])
|
class DataAugmentation(object):
""" Data Augmentation.
Base class for applying common real-time data augmentation.
This class is meant to be used as an argument of `input_data`. When training
a model, the defined augmentation methods will be applied at training
time only. Note that DataPreprocessing is similar to DataAugmentation,
but applies at both training time and testing time.
Arguments:
None
Parameters:
methods: `list of function`. The augmentation methods to apply.
args: A `list` of arguments list to use for these methods.
"""
def __init__(self):
self.methods = []
self.args = []
def apply(self, batch):
for i, m in enumerate(self.methods):
if self.args[i]:
batch = m(batch, *self.args[i])
else:
batch = m(batch)
return batch
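# Minimal sketch of a concrete augmentation (assumed usage, mirroring how tflearn's
# own augmentation classes register methods): subclasses append a method and its
# argument list, and apply() then runs them over every training batch.
import random

class SimpleFlipAugmentation(DataAugmentation):
    def __init__(self, probability=0.5):
        super(SimpleFlipAugmentation, self).__init__()
        self.methods.append(self._random_flip)
        self.args.append([probability])

    def _random_flip(self, batch, probability):
        # reverse each sample along its first axis with the given probability
        return [sample[::-1] if random.random() < probability else sample
                for sample in batch]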
|
from OpenGL.GL import *
from OpenGL.GLU import *
from OpenGL.GLUT import *
import numpy as np
from math import *
theta = 0
alpha = 0
def init():
glClearColor(0.051, 0.051, 0.051, 0)
glClear(GL_COLOR_BUFFER_BIT)
glMatrixMode(GL_PROJECTION)
gluPerspective(90, 1, 1, 3050)
gluLookAt(50, 10, 100, 0, 0, 0, 0, 1, 0)
# glEnable(GL_DEPTH_TEST)
zx=0
def SolarSystem():
glClear(GL_COLOR_BUFFER_BIT )
global zx
global theta
global alpha
glLoadIdentity()
glColor(1,1,1)
glTranslate(0, 0, 0)
glRotate(zx,0,0,1)
glTranslate(0,0,0)
glutWireSphere(4, 10, 10)
glLoadIdentity()
glColor(0,0,0)
glTranslate(0, 0, 0)
glRotate(zx,0,0,1)
glTranslate(0,0,0)
glutSolidSphere(4, 30, 30) # the meteor
glLoadIdentity()
glRotate(zx,0,1,0)
glTranslate(50,0,0)
glColor(1, 1, 0)
glutSolidSphere(4, 30, 30) # Sun
glLoadIdentity()
glRotate(zx,0,1,0)
glTranslate(50,0,0)
glColor(0.7, 0.5, 0.5)
glRotate(-theta, 0, 1, 0)
glTranslate(5.5, 0, 0)
glRotate(theta * 1.5, 0, 1, 0)
glutSolidSphere(0.5, 10, 10) # Mercury
glLoadIdentity()
glColor(1,1,1)
glRotate(zx, 0, 1, 0)
glTranslate(50, 0, 0)
glRotate(90,1,0,0)
glutSolidTorus(0.01, 7.5, 50, 50)
glLoadIdentity()
glRotate(zx,0,1,0)
glTranslate(50,0,0)
glColor(1, 0.7, 0)
glRotate(-theta / 1.2, 0, 1, 0)
glTranslate(7.5, 0, 0)
glRotate(theta * 1.5, 0, 1, 0)
glutSolidSphere(0.75, 20, 20) # Venus
glLoadIdentity()
glColor(1,1,1)
glRotate(zx, 0, 1, 0)
glTranslate(50, 0, 0)
glRotate(90,1,0,0)
glutSolidTorus(0.01, 11, 50, 50)
glLoadIdentity()
glRotate(zx,0,1,0)
glTranslate(50,0,0)
glRotate(-theta / 1.5, 0, 1, 0)
glTranslate(11, 0, 0)
glColor(0, 0.5, 1)
glScale(2.5,0.5,2.5)
glutSolidSphere(1,20,20) # Earth
#glRotate(theta, 0, 1, 0)
#glutSolidSphere(1, 30, 30) # Earth
glLoadIdentity()
glRotate(zx,0,1,0)
glTranslate(50,0,0)
glRotate(-theta / 1.5, 0, 1, 0)
glTranslate(11, 0, 0)
glColor(0.8, 0.8, 0.8)
glRotate(-theta, 0, 1, 1)
glRotate(20, 0, 0, 1)
glTranslate(1.5, 0, 0)
glRotate(theta * 4, 0, 1, 0)
glutSolidSphere(0.25, 15, 15) # Moon
glLoadIdentity()
glColor(1,1,1)
glRotate(zx, 0, 1, 0)
glTranslate(50, 0, 0)
glRotate(90,1,0,0)
glutSolidTorus(0.01, 15, 50, 50)
glLoadIdentity()
glRotate(zx,0,1,0)
glTranslate(50,0,0)
glColor(1, 0.2, 0)
glRotate(-theta / 1.7, 0, 1, 0)
glTranslate(15, 0, 0)
glRotate(theta * 1.5, 0, 1, 0)
glutSolidSphere(0.6, 10, 10) # Mars
glLoadIdentity()
glColor(1,1,1)
glRotate(zx, 0, 1, 0)
glTranslate(50, 0, 0)
glRotate(90,1,0,0)
glutSolidTorus(0.01, 20, 50, 50)
glLoadIdentity()
glRotate(zx,0,1,0)
glTranslate(50,0,0)
glColor(1, 0.1, 0)
glRotate(-theta / 2, 0, 1, 0)
glTranslate(20, 0, 0)
glRotate(theta * 1.5, 0, 1, 0)
glutSolidSphere(2.7, 30, 25) # Jupiter
glLoadIdentity()
glColor(1,1,1)
glRotate(zx, 0, 1, 0)
glTranslate(50, 0, 0)
glRotate(90,1,0,0)
glutSolidTorus(0.01, 26, 50, 50)
glLoadIdentity()
glRotate(zx,0,1,0)
glTranslate(50,0,0)
glColor(0.8, 0, 0)
glRotate(-theta / 2.5, 0, 1, 0)
glTranslate(26, 0, 0)
glRotate(theta * 1.8, 0, 1, 0)
glutSolidSphere(2.5, 30, 25) # Saturn
glLoadIdentity()
glRotate(zx,0,1,0)
glTranslate(50,0,0)
glColor(0.5,0.5,0.5)
glRotate(-theta/2.5,0,1,0)
glTranslate(26,0,0)
glRotate(-theta,0,1,0)
glRotate(30, 0, 1, 0)
glRotate(90, 1, 0, 0)
glScale(1, 1, 0.01)
glutSolidTorus(0.4,3.2,20,20) #saturn belt
glLoadIdentity()
glColor(1,1,1)
glRotate(zx, 0, 1, 0)
glTranslate(50, 0, 0)
glRotate(90,1,0,0)
glutSolidTorus(0.01, 35, 50, 50)
glLoadIdentity()
glRotate(zx,0,1,0)
glTranslate(50,0,0)
# glScale(0.2,0.2,0.2)
glColor(0.0, 0.5, 0.7)
glRotate(-theta / 3, 0, 1, 0)
glTranslate(35, 0, 0)
glRotate(theta * 1.8, 0, 1, 0)
glutSolidSphere(1.2, 30, 25)
glLoadIdentity()
glColor(1,1,1)
glRotate(zx, 0, 1, 0)
glTranslate(50, 0, 0)
glRotate(90,1,0,0)
glutSolidTorus(0.01, 31, 50, 50)
glLoadIdentity()
# glScale(0.2,0.2,0.2)
glColor(0.1, 0.1, 0.7)
glRotate(-theta / 2.8, 0, 1, 0)
glTranslate(31, 0, 0)
glRotate(theta * 1.8, 0, 1, 0)
glutSolidSphere(1.5, 30, 25)
theta = theta + 0.1
alpha = alpha + 0.01
zx=zx+0.01
def draw():
SolarSystem()
glutSwapBuffers()
glutInit()
glutInitDisplayMode(GLUT_RGBA | GLUT_DOUBLE)
glutInitWindowSize(700, 700)
glutCreateWindow(b"my Solar... mine")
init()
glutDisplayFunc(draw)
glutIdleFunc(draw)
glutMainLoop()
|
import numpy as np
import sys
import os
import graphlab
from preprocess import *
from kmeans import *
from visualization import *
# load wikipedia data
wiki = graphlab.SFrame('../../data/people_wiki.gl/')
# preprocess TF IDF structure
wiki['tf_idf'] = graphlab.text_analytics.tf_idf(wiki['text'])
# transform into a sparse matrix
tf_idf, map_index_to_word = sframe_to_scipy(wiki, 'tf_idf')
# then normalize
tf_idf = normalize(tf_idf)
# test kmeans with 3 cluster centers
k = 3
heterogeneity = []
initial_centroids = get_initial_centroids(tf_idf, k, seed=0)
centroids, cluster_assignment = kmeans(tf_idf, k, initial_centroids, maxiter=400, record_heterogeneity=heterogeneity, verbose=True)
# plot heterogeneity of 3 clusters on wiki data
plot_heterogeneity(heterogeneity, k)
# print each cluster size
np.bincount(cluster_assignment)
# test kmeans with 10 cluster centers and different seeds
k = 10
heterogeneity = {}
import time
start = time.time()
for seed in [0, 20000, 40000, 60000, 80000, 100000, 120000]:
initial_centroids = get_initial_centroids(tf_idf, k, seed)
centroids, cluster_assignment = kmeans(tf_idf, k, initial_centroids, maxiter=400,
record_heterogeneity=None, verbose=False)
print('Max number: ', np.max(np.bincount(cluster_assignment)))
# To save time, compute heterogeneity only once in the end
heterogeneity[seed] = compute_heterogeneity(tf_idf, k, centroids, cluster_assignment)
print('seed={0:06d}, heterogeneity={1:.5f}'.format(seed, heterogeneity[seed]))
sys.stdout.flush()
end = time.time()
print(end-start)
# load preprocessed kmeans results
filename = '../../data/kmeans-arrays.npz'
heterogeneity_values = []
k_list = [2, 10, 25, 50, 100]
# plot K vs heterogeneity value
if os.path.exists(filename):
arrays = np.load(filename)
centroids = {}
cluster_assignment = {}
for k in k_list:
print(k)
sys.stdout.flush()
centroids[k] = lambda k=k: arrays['centroids_{0:d}'.format(k)]
cluster_assignment[k] = lambda k=k: arrays['cluster_assignment_{0:d}'.format(k)]
score = compute_heterogeneity(tf_idf, k, centroids[k](), cluster_assignment[k]())
heterogeneity_values.append(score)
plot_k_vs_heterogeneity(k_list, heterogeneity_values)
else:
print('File not found. Skipping.')
# visualize document clusters from kmeans result with 10 clusters
k = 10
visualize_document_clusters(wiki, tf_idf, centroids[k](), cluster_assignment[k](), k, map_index_to_word)
# print each cluster size from keamns-10
np.bincount(cluster_assignment[10]())
# visualize document clusters from kmeans result with 100 clusters
k=100
visualize_document_clusters(wiki, tf_idf, centroids[k](), cluster_assignment[k](), k, map_index_to_word, display_content=False)
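# ---------------------------------------------------------------------------
# Hedged sketch (not part of the original helpers): compute_heterogeneity is
# imported from the local kmeans module, which is not shown here. The usual
# definition of k-means heterogeneity is the sum of squared distances from each
# row to its assigned centroid; the function below assumes that meaning and that
# scikit-learn is available. It is an illustration only, not the actual helper.
def compute_heterogeneity_sketch(data, k, centroids, cluster_assignment):
    from sklearn.metrics import pairwise_distances  # assumed dependency
    total = 0.0
    for i in range(k):
        # rows currently assigned to cluster i
        members = data[cluster_assignment == i, :]
        if members.shape[0] > 0:
            # squared Euclidean distance of each member to its centroid
            dists = pairwise_distances(members, [centroids[i]], metric='euclidean')
            total += (dists ** 2).sum()
    return total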
|
import time, pytest
import sys,os
sys.path.insert(1,os.path.abspath(os.path.join(os.path.dirname( __file__ ),'..','..','lib')))
from clsCommon import Common
import clsTestService
from localSettings import *
import localSettings
from utilityTestFunc import *
import enums
class Test:
#================================================================================================================================
# @Author: Michal Zomper
# Test Name : Channel page - Shared Repository
# Test description:
# create channel and set it as 'Shared Repository':
    # Verify that: Membership is by invitation only.
# Members can publish content from this channel to any other channel according to their entitlements.
#================================================================================================================================
testNum = "747"
supported_platforms = clsTestService.updatePlatforms(testNum)
status = "Pass"
timeout_accured = "False"
driver = None
common = None
# Test variables
entryName1 = None
entryName2 = None
description = "Description"
tags = "Tags,"
userName1 = "Automation_User_1"
userPass1 = "Kaltura1!"
filePath = localSettings.LOCAL_SETTINGS_MEDIA_PATH + r'\images\qrcode_middle_4.png'
channelName1 = "Shared Repository channel"
channelName2 = "Open Channel"
#run test as different instances on all the supported platforms
@pytest.fixture(scope='module',params=supported_platforms)
def driverFix(self,request):
return request.param
def test_01(self,driverFix,env):
#write to log we started the test
logStartTest(self,driverFix)
try:
########################### TEST SETUP ###########################
#capture test start time
self.startTime = time.time()
#initialize all the basic vars and start playing
self,self.driver = clsTestService.initializeAndLoginAsUser(self, driverFix)
self.common = Common(self.driver)
self.entryName1 = clsTestService.addGuidToString("Channel page - Shared Repository 1", self.testNum)
self.entryName2 = clsTestService.addGuidToString("Channel page - Shared Repository 2", self.testNum)
self.channelName1 = clsTestService.addGuidToString(self.channelName1, self.testNum)
self.channelName2 = clsTestService.addGuidToString(self.channelName2, self.testNum)
##################### TEST STEPS - MAIN FLOW #####################
writeToLog("INFO","Step 1: Going to create new channel")
if self.common.channel.createChannel(self.channelName1, self.description, self.tags, enums.ChannelPrivacyType.SHAREDREPOSITORY, False, True, True) == False:
self.status = "Fail"
writeToLog("INFO","Step 1: FAILED create new channel: " + self.channelName1)
return
writeToLog("INFO","Step 2: Going to upload entry")
if self.common.upload.uploadEntry(self.filePath, self.entryName1, self.description, self.tags) == None:
self.status = "Fail"
writeToLog("INFO","Step 2: FAILED to upload entry")
return
writeToLog("INFO","Step 3: Going to add entry to channel: " + self.channelName1)
if self.common.channel.addContentToChannel(self.channelName1, self.entryName1, False, publishFrom=enums.Location.MY_CHANNELS_PAGE) == False:
self.status = "Fail"
writeToLog("INFO","Step 2: FAILED to add entry '" + self.entryName1 + "' to channel '" + self.channelName1 + "'")
return
sleep(3)
writeToLog("INFO","Step 4: Going to logout from main user")
if self.common.login.logOutOfKMS() == False:
self.status = "Fail"
writeToLog("INFO","Step 4: FAILED to logout from main user")
return
writeToLog("INFO","Step 5: Going to login with user " + self.userName1)
if self.common.login.loginToKMS(self.userName1, self.userPass1) == False:
self.status = "Fail"
writeToLog("INFO","Step 5: FAILED to login with " + self.userName1)
return
writeToLog("INFO","Step 6: Going to upload entry")
if self.common.upload.uploadEntry(self.filePath, self.entryName2, self.description, self.tags) == None:
self.status = "Fail"
writeToLog("INFO","Step 6: FAILED to upload entry")
return
writeToLog("INFO","Step 7: Going to add entry to channel: " + self.channelName1)
if self.common.channel.addContentToChannel(self.channelName1, self.entryName2, False, publishFrom=enums.Location.CHANNELS_PAGE) == True:
self.status = "Fail"
writeToLog("INFO","Step 7: FAILED, user can add content to 'shared repository channel' although the user isn't a member in the channel")
return
writeToLog("INFO","Step 7 failed as expected: user '" + self.userName1 + "' isn't a member in channel '" + self.channelName1 + "' so he isn't able to add content")
writeToLog("INFO","Step 8: Going to create new channel")
if self.common.channel.createChannel(self.channelName2, self.description, self.tags, enums.ChannelPrivacyType.OPEN, False, True, True) == False:
self.status = "Fail"
writeToLog("INFO","Step 8: FAILED create new channel: " + self.channelName2)
return
sleep(3)
writeToLog("INFO","Step 9: Going to logout from user: " + self.userName1)
if self.common.login.logOutOfKMS() == False:
self.status = "Fail"
writeToLog("INFO","Step 9: FAILED to logout from user: " + self.userName1)
return
writeToLog("INFO","Step 10: Going to login with main user")
if self.common.loginAsUser() == False:
self.status = "Fail"
writeToLog("INFO","Step 10: FAILED to login with main user")
return
writeToLog("INFO","Step 11: Going to add user '" + self.userName1 +"' as member to channel '" + self.channelName1 + "'")
if self.common.channel.addMembersToChannel(self.channelName1, self.userName1, permission=enums.ChannelMemberPermission.CONTRIBUTOR) == False:
self.status = "Fail"
writeToLog("INFO","Step 11: FAILED to add user '" + self.userName1 + "' as contributor to channel '" + self.channelName1 + "'")
return
sleep(3)
writeToLog("INFO","Step 12: Going to logout from main user")
if self.common.login.logOutOfKMS() == False:
self.status = "Fail"
writeToLog("INFO","Step 12: FAILED to logout from main user")
return
writeToLog("INFO","Step 13: Going to login with user " + self.userName1)
if self.common.login.loginToKMS(self.userName1, self.userPass1) == False:
self.status = "Fail"
writeToLog("INFO","Step 13: FAILED to login with " + self.userName1)
return
writeToLog("INFO","Step 14: Going to add entry to channel: " + self.channelName1)
if self.common.channel.addContentToChannel(self.channelName1, self.entryName2, False, publishFrom=enums.Location.CHANNELS_PAGE) == False:
self.status = "Fail"
writeToLog("INFO","Step 14: FAILED to add entry to channel '" + self.channelName1 + "'")
return
writeToLog("INFO","Step 15: Going to add entry to channel: " + self.channelName2 + " from shared repository channel: " + self.channelName1)
if self.common.channel.addContentToChannel(self.channelName2, self.entryName1, False, publishFrom=enums.Location.CHANNELS_PAGE, channelType=enums.ChannelPrivacyType.SHAREDREPOSITORY, sharedReposiytyChannel=self.channelName1) == False:
self.status = "Fail"
writeToLog("INFO","Step 15: FAILED to add entry from shared repository channel to channel '" + self.channelName2 + "'")
return
##################################################################
writeToLog("INFO","TEST PASSED: 'Channel page - Shared Repository' was done successfully")
# if an exception happened we need to handle it and fail the test
except Exception as inst:
self.status = clsTestService.handleException(self,inst,self.startTime)
########################### TEST TEARDOWN ###########################
def teardown_method(self,method):
try:
self.common.handleTestFail(self.status)
writeToLog("INFO","**************** Starting: teardown_method ****************")
self.common.channel.deleteChannel(self.channelName2)
self.common.myMedia.deleteSingleEntryFromMyMedia(self.entryName2)
sleep(2)
self.common.login.logOutOfKMS()
self.common.loginAsUser()
self.common.channel.deleteChannel(self.channelName1)
self.common.myMedia.deleteSingleEntryFromMyMedia(self.entryName1)
writeToLog("INFO","**************** Ended: teardown_method *******************")
except:
pass
clsTestService.basicTearDown(self)
#write to log we finished the test
logFinishedTest(self,self.startTime)
assert (self.status == "Pass")
pytest.main('test_' + testNum + '.py --tb=line')
|
# -*- coding: utf-8 -*-
"""
Created on Wed Jan 02 13:19:51 2013
@author: mark
"""
# General imports
import math
import numpy as np
from datetime import datetime
from datetime import timedelta
import pandas as pd
def td_to_mins(x):
"""
Converts a timedelta object to minutes
"""
    return x.days * 24.0 * 60 + x.seconds / 60.0 + x.microseconds / 60000000.0  # 60,000,000 microseconds per minute
vtd_to_mins = np.vectorize(td_to_mins) # Make usable with list like things
#def wkdy_or_wkend(dt=None):
# if dt == None : dt = datetime.now()
# if datetime.weekday(dt)
def binofday(dt, binsizemins):
"""
Computes bin of day based on bin size for a datetime.
Parameters
----------
    dt : datetime.datetime object; if None, the current time is used.
    binsizemins : Size of bin in minutes.
Returns
-------
0 to (n-1) where n is number of bins per day.
Examples
--------
dt = datetime(2013,1,7,1,45)
bin = binofday(dt,30)
# bin = 3
"""
    if dt is None: dt = datetime.now()
# YES, I know that type checking is NOT Pythonic!!!
# However, the hell that is numpy.datetime64 data types has
# caused me to give up and do it anyway.
if not isinstance(dt,datetime):
dt = pd.Timestamp(dt)
mins = (dt.hour * 60) + dt.minute
bin = math.trunc(mins/binsizemins)
return bin
vbinofday = np.vectorize(binofday)
def binofweek(dt, binsizemins):
"""
    Computes bin of week based on bin size for a datetime.
Based on .weekday() convention of 0=Monday.
Parameters
----------
    dt : datetime.datetime object; if None, the current time is used.
    binsizemins : Size of bin in minutes.
Returns
-------
0 to (n-1) where n is number of bins per week.
Examples
--------
dt = datetime(2013,1,7,1,45)
bin = binofweek(dt,30)
    # bin = 3
"""
    if dt is None: dt = datetime.now()
# YES, I know that type checking is NOT Pythonic!!!
# However, the hell that is numpy.datetime64 data types has
# caused me to give up and do it anyway.
if not isinstance(dt,datetime):
dt = pd.Timestamp(dt)
mins = (dt.weekday() * 1440) + (dt.hour * 60) + dt.minute
bin = math.trunc(mins/binsizemins)
return bin
vbinofweek = np.vectorize(binofweek)
def rounddownTime(dt, roundMinsTo):
"""
Find floor of a datetime object to specified number of minutes.
dt : datetime.datetime object
roundMinsTo : Closest number of minutes to round to.
"""
roundSecsTo = roundMinsTo*60
seconds = (dt - dt.min).seconds
floorTime = seconds // roundSecsTo * roundSecsTo
return dt + timedelta(0,floorTime-seconds,-dt.microsecond)
vrounddownTime = np.vectorize(rounddownTime)
def roundupTime(dt, roundMinsTo):
"""
Find ceiling of a datetime object to specified number of minutes
dt : datetime.datetime object
roundMinsTo : Closest number of minutes to round to.
"""
roundSecsTo = roundMinsTo*60.0
seconds = (dt - dt.min).seconds
ceilingTime = math.ceil(seconds / roundSecsTo) * roundSecsTo
return dt + timedelta(0,ceilingTime-seconds,-dt.microsecond)
vroundupTime = np.vectorize(roundupTime)
def isgt2bins(indtbin, outdtbin, binsize_mins):
return (outdtbin - indtbin) > timedelta(minutes=binsize_mins)
def occ_frac(stoprecrange, binsize_mins, rectype='inner'):
"""
Computes fractional occupancy in inbin and outbin.
Parameters
----------
stoprecrange: list consisting of [intime, outtime]
binsize_mins: bin size in minutes
    rectype: One of 'inner', 'outer', 'left', 'right'. See
stoprec_analysis_rltnshp() doc for details.
Returns
-------
[inbin frac, outbin frac] where each is a real number in [0.0,1.0]
"""
intime = stoprecrange[0]
outtime = stoprecrange[1]
indtbin = rounddownTime(intime,binsize_mins)
outdtbin = rounddownTime(outtime,binsize_mins)
# inbin occupancy
rightedge = min(indtbin + timedelta(minutes=binsize_mins),outtime)
inbin_occ_secs = (rightedge - intime).seconds
inbin_occ_frac = inbin_occ_secs/(binsize_mins*60.0)
# outbin occupancy
if indtbin == outdtbin:
outbin_occ_frac = 0.0 # Use inbin_occ_frac
else:
leftedge = max(outdtbin, intime)
outbin_occ_secs = (outtime - leftedge).seconds
outbin_occ_frac = outbin_occ_secs/(binsize_mins*60.0)
assert inbin_occ_frac <= 1.0 and inbin_occ_frac >= 0.0, \
"bad inbin_occ_frac={:.3f} in={} out={}".format(inbin_occ_frac,
intime,outtime)
assert outbin_occ_frac <= 1.0 and outbin_occ_frac >= 0.0, \
"bad outbin_occ_frac={:.3f} in={} out={}".format(outbin_occ_frac,
intime,outtime)
return [inbin_occ_frac, outbin_occ_frac]
def stoprec_analysis_rltnshp(stoprecrange, analysisrange):
"""
Determines relationship type of stop record to analysis date range.
Parameters
----------
stoprecrange: list consisting of [rec_in, rec_out]
analysisrange: list consisting of [a_start, a_end]
Returns
-------
    Returns a string, either 'inner', 'left', 'right', 'outer',
'backwards', 'none' depending
on the relationship between the stop record being analyzed and the
analysis date range.
    Type 'inner':

            |-------------------------|
          a_start                    a_end
               |--------------|
             rec_in         rec_out

    Type 'left':

            |-------------------------|
          a_start                    a_end
       |--------------|
     rec_in         rec_out

    Type 'right':

            |-------------------------|
          a_start                    a_end
                              |--------------|
                            rec_in         rec_out

    Type 'outer':

            |-------------------------|
          a_start                    a_end
       |-------------------------------------|
     rec_in                               rec_out

    Type 'backwards':
        The exit time is BEFORE the entry time. This is a BAD record.

    Type 'none':
        Ranges do not overlap.
"""
rec_in = stoprecrange[0]
rec_out = stoprecrange[1]
a_start = analysisrange[0]
a_end = analysisrange[1]
if (rec_in > rec_out):
return 'backwards'
elif (a_start <= rec_in < a_end) and (a_start <= rec_out < a_end):
return 'inner'
elif (a_start <= rec_in < a_end) and (rec_out >= a_end):
return 'right'
elif (rec_in < a_start) and (a_start <= rec_out < a_end):
return 'left'
elif (rec_in < a_start) and (rec_out >= a_end):
return 'outer'
else:
return 'none'
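# ---------------------------------------------------------------------------
# Illustrative usage (a minimal sketch, not part of the original module).
# A stop record arriving at 01:45 and leaving at 02:20 with 30-minute bins
# spans two bins: 01:30-02:00 (15 of 30 minutes occupied) and 02:00-02:30
# (20 of 30 minutes occupied), and falls entirely inside the analysis range.
if __name__ == '__main__':
    rec_in = datetime(2013, 1, 7, 1, 45)
    rec_out = datetime(2013, 1, 7, 2, 20)
    a_start = datetime(2013, 1, 1)
    a_end = datetime(2013, 2, 1)
    print(binofday(rec_in, 30))                                            # 3
    print(occ_frac([rec_in, rec_out], 30))                                 # [0.5, 0.666...]
    print(stoprec_analysis_rltnshp([rec_in, rec_out], [a_start, a_end]))   # 'inner'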
|
#!/usr/bin/python
#
# Copyright 2014 Google.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# This tool runs all the MPEG test files and rates with a specific
# codec and configuration.
#
import argparse
import sys
import mpeg_settings
import encoder
import pick_codec
def ExecuteConfig(codec_name, config_string=None, config_id=None):
codec = pick_codec.PickCodec(codec_name)
context = encoder.Context(codec, cache_class=encoder.EncodingDiskCache)
if config_string is not None and config_id is not None:
raise encoder.Error('Cannot have both an ID and a configuration string')
if config_string is not None:
my_encoder = encoder.Encoder(
context,
encoder.OptionValueSet(codec.option_set,
config_string,
formatter=codec.option_formatter))
else:
my_encoder = encoder.Encoder(context, filename=config_id)
executed_count = 0
not_executed_count = 0
for rate, filename in mpeg_settings.MpegFiles().AllFilesAndRates():
videofile = encoder.Videofile(filename)
encoding = my_encoder.Encoding(rate, videofile)
encoding.Recover()
if not encoding.Result():
executed_count += 1
encoding.Execute().Store()
else:
not_executed_count += 1
  print('Executed %d did not execute %d' % (executed_count, not_executed_count))
def main():
parser = argparse.ArgumentParser('Runs a specific configuration')
parser.add_argument('--score', action='store_true', default=False)
parser.add_argument('--criterion', default='psnr')
parser.add_argument('--codec')
parser.add_argument('--config_id', help='ID for the parameter set.')
parser.add_argument('configuration', nargs='?', default=None,
help='Parameters to use. '
                      'Remember to quote the string and put '
'"--" in the command line before it if needed.')
args = parser.parse_args()
ExecuteConfig(args.codec, config_id=args.config_id,
config_string=args.configuration)
return 0
if __name__ == '__main__':
sys.exit(main())
|
#!/usr/bin/env python
# encoding: utf-8
'''
Find MST for facilities problem.
'''
import glob
import json
import itertools
from operator import attrgetter
import os
import random
import sys
import math
import networkx as nx
import numpy
import random as Random
#Returns a dict of shortest-path distances between every pair of nodes
def floydWarshall(graph):
return nx.floyd_warshall(graph, weight='weight')
#Returns a graph of Kruskal's MST
def kruskal(graph):
return (nx.minimum_spanning_tree(graph))
def draw(graph, name):
# plt.show()
#elarge=[(u,v) for (u,v,d) in graph.edges(data=True)
# if > 3]
#esmall=[(u,v) for (u,v,d) in graph.edges(data=True)
# if 5 <= 3]
import matplotlib.pyplot as plt
pos=nx.spring_layout(graph) # positions for all nodes
nx.draw_networkx_nodes(graph, pos, node_size=700)
nx.draw_networkx_edges(graph, pos, width=6, label=True)
nx.draw_networkx_edges(graph, pos,
width=6, alpha=0.5, edge_color='b',style='dashed',
label=True)
nx.draw_networkx_edge_labels(graph, pos, edge_labels={
(src, dst): "%.4f" %d['weight'] for src, dst, d in
graph.edges(data=True)
})
# labels
nx.draw_networkx_labels(graph, pos, font_size=20,font_family='sans-serif')
plt.savefig("%s.png" % name)
def output_graph(filename, results):
with open(filename, "w") as json_file:
json.dump([r.__dict__ for r in results], json_file, sort_keys=True, indent=4)
def add_edge_to_tree(tree, graph):
# TODO: Move to Kruskal function?
pass
def generate_complete_weighted_graph(size, costs):
complete_graph = nx.complete_graph(size)
weighted_complete_graph = nx.Graph()
count = 0
for (u,v) in complete_graph.edges():
weight_prefix = (costs[u] + costs[v])/2
weighted_complete_graph.add_edge(u,v, weight=weight_prefix)
count += 1
return weighted_complete_graph
#Finds subsets of S with exactly m elements
def findsubsets(S,m):
return set(itertools.combinations(S, m))
class Edge:
def __init__(self, x, y):
self.x = x
self.y = y
def __eq__(self,other):
return ((self.x == other.x) and (self.y == other.y)) or ((self.x == other.y) and (self.y == other.x))
def __str__(self):
return "%s - %s (%d%s)" % \
(self.x, self.y, self.weight,
", bw=%d" % self.bandwidth if self.bandwidth else "")
def calculate_target_function(graph, w):
all_nodes = nx.nodes(graph)
pair_set = findsubsets(all_nodes,2)
summation = 0
for pair in pair_set:
i = pair[0]
j = pair[1]
path_weight = 0
try:
path = nx.shortest_path(graph, source=i, target=j)
except nx.NetworkXNoPath:
#Set to an arbitrary large number
path_weight = 10000
path = [0]
        # use a separate loop index so the pair index i is not clobbered while walking the path
        for step_index in range(0, len(path) - 1):
            step_src = path[step_index]
            step_dst = path[step_index + 1]
            path_weight += graph[step_src][step_dst]['weight']
        summation += path_weight * w[i][j]
total_cost = 0
#find the total cost of the edges
for edges in graph.edges():
total_cost += graph[edges[0]][edges[1]]['weight']
return summation + total_cost
#Generates a cost distribution
def generate_costs(p):
p_length = len(p)
costs = []
    average_prefixes = float(sum(p)) / len(p)
    #Cost is based on imbalance of traffic
for i in range(0,p_length):
costs.append((math.log(p[i],10) + 1 ) / (average_prefixes))
return costs
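# Illustrative example (not from the original code): for p = [1, 10, 100] the
# average prefix count is 37.0, so the costs are (log10(p_i) + 1) / 37.0, i.e.
# roughly [0.027, 0.054, 0.081]; nodes that advertise more prefixes contribute
# proportionally more to the edge weights built in generate_complete_weighted_graph().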
#Main body of algorithm located here
def main():
prefixes_advertised = [1, 1603, 9, 5, 1, 28, 1, 1, 4234, 17, 9, 1, 81, 288, 1607, 2, 1, 13, 139, 90, 78, 164, 35]
p_length = len(prefixes_advertised)
total_prefixes = sum(prefixes_advertised)
#Calculation of w_(i,j)
w = [[0 for x in range(p_length)] for x in range(p_length)]
for i in range(0,p_length):
for j in range(0,p_length):
if(i == j):
w[i][j] = 0
else:
                w[i][j] = float(prefixes_advertised[i]) / (total_prefixes - prefixes_advertised[j])  # float() avoids truncation to 0 under Python 2 integer division
#Generate some complete graph with arbitrary weights
costs = generate_costs(prefixes_advertised)
complete_graph = generate_complete_weighted_graph(p_length, costs)
#Saving the Minimum Spanning tree
current_MST = kruskal(complete_graph)
last_MST = current_MST.copy()
#TODO: Reduce number of shortest path calculations
#complete_graph_shortest_path = [[0 for x in range(p_length)] for x in range(p_length)]
while True:
###########################################################################################################
#Part 2: Add edges as necessary
all_nodes = nx.nodes(complete_graph)
pair_set = findsubsets(all_nodes,2)
local_summation = 0
best_e_union_P = current_MST.copy()
overall_minimum = 9999999
#Iterate through powerset of size 2
for pair in pair_set:
e_union_P = current_MST.copy()
src = pair[0]
dst = pair[1]
#Find the shortest path on the complete graph
complete_paths = nx.shortest_path(complete_graph, source=src, target=dst)
complete_path_weight = 0
for i in range(0,len(complete_paths) - 1):
step_src = complete_paths[i]
step_dst = complete_paths[i+1]
if(e_union_P.has_edge(step_src, step_dst) is False):
e_union_P.add_edge(step_src, step_dst, weight=complete_graph[step_src][step_dst]['weight'])
difference = calculate_target_function(e_union_P,w) - calculate_target_function(current_MST,w)
if difference < overall_minimum:
overall_minimum = difference
best_e_union_P = e_union_P.copy()
if(calculate_target_function(best_e_union_P,w) - calculate_target_function(current_MST,w) < 0):
current_MST = best_e_union_P.copy()
###########################################################################################################
#Part 3: Remove edges as necessary
part_3_minimum = 999999999
edge_to_remove = [0,0]
original_MST_target = calculate_target_function(current_MST, w)
for edges in current_MST.edges():
src = edges[0]
dst = edges[1]
current_MST.remove_edge(src, dst)
difference = calculate_target_function(current_MST, w) - original_MST_target
current_MST.add_edge(src, dst, weight=complete_graph[src][dst]['weight'])
if(difference < part_3_minimum):
part_3_minimum = difference
edge_to_remove = [src, dst]
remove_edge_src = edge_to_remove[0]
remove_edge_dst = edge_to_remove[1]
        #Remove the edge determined to best satisfy the equation
        current_MST.remove_edge(remove_edge_src, remove_edge_dst)
        #If the condition is unsatisfied, the edge needs to be added back.
if calculate_target_function(current_MST, w) - original_MST_target >= 0:
current_MST.add_edge(remove_edge_src, remove_edge_dst, weight=complete_graph[remove_edge_src][remove_edge_dst]['weight'])
############################################################################################################
if(current_MST==last_MST):
break
else:
last_MST = current_MST
    print('The target function is: ' + str(calculate_target_function(current_MST,w)))
    print('done')
    draw(current_MST, 'modified_complete')
    print('The target function is: ' + str(calculate_target_function(current_MST,w)))
if __name__ == "__main__":
sys.exit(main())
|
#
# n1 = 255
# n2 = 1000
#
# print(hex(n1), hex(n2))
import math
print(math.sqrt(100))
def quadratic(a, b, c):
if b ** 2 - 4 * a * c >= 0:
slt1 = -b + math.sqrt(b ** 2 - 4 * a * c)
slt1 = slt1/(2*a)
slt2 = -b - math.sqrt(b ** 2 - 4 * a * c)
slt2 = slt2 / (2 * a)
return slt1, slt2
else:
print('no solution for the equation')
return
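# For reference, the two roots above follow the standard quadratic formula
#   x = (-b +/- sqrt(b^2 - 4ac)) / (2a)
# e.g. quadratic(1, -3, 2) returns (2.0, 1.0), since x^2 - 3x + 2 = (x - 1)(x - 2).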
# print(quadratic(2, 6, 3))
print('quadratic(2, 3, 1) =', quadratic(2, 3, 1))
print('quadratic(1, 3, -4) =', quadratic(1, 3, -4))
if quadratic(2, 3, 1) != (-0.5, -1.0):
    print('test failed')
elif quadratic(1, 3, -4) != (1.0, -4.0):
    print('test failed')
else:
    print('test passed')
|
print("1st loop:")
for name in "John", "Sam", "Luis":
print("Hello " + name + "\n")
print("\n2nd loop with defined range:")
for i in range(10, 30):
print(i)
print("\n3rd loop with only given a sequence")
for i in range(10):
print(i)
print("\n4th loop")
total = 0
for i in 5,6,11,13:
print(i)
total = total + i
print(total)
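# the running total adds 5 + 6 + 11 + 13, so it ends up at 35 after the loop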
# this is a comment
print("\n5th loop")
print("I dont care about the i value in the loop, i just need to repeat x times an action")
for _ in range(10):
print("Hello")
|
# -*- coding: utf-8 -*-
"""
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
import math
import random
from qgis.PyQt.QtCore import QCoreApplication, QVariant
from qgis.core import (QgsProcessing,
QgsFeature,
QgsFeatureSink,
QgsField,
QgsFields,
QgsProcessingException,
QgsProcessingAlgorithm,
QgsProcessingParameterFeatureSource,
QgsProcessingParameterFeatureSink,
QgsSpatialIndex)
from qgis import processing
class DistrictAssignmentProcessingAlgorithm(QgsProcessingAlgorithm):
"""
This algorithm is used with US census tract information to create
non-gerrymandered congressional and legislative districts. The
algorithm uses a relatively standard K Means Clustering algorithm,
but the number of census tracts that can be in any given cluster is
constrained by the total population of those census tracts to ensure
that the population of each cluster is approximately equal.
"""
# Constants used to refer to parameters and outputs. They will be
# used when calling the algorithm from another algorithm, or when
# calling from the QGIS console.
INPUT = 'INPUT'
OUTPUT = 'OUTPUT'
def tr(self, string):
"""
Returns a translatable string with the self.tr() function.
"""
return QCoreApplication.translate('Processing', string)
def createInstance(self):
return DistrictAssignmentProcessingAlgorithm()
def name(self):
"""
Returns the algorithm name, used for identifying the algorithm. This
string should be fixed for the algorithm, and must not be localised.
The name should be unique within each provider. Names should contain
lowercase alphanumeric characters only and no spaces or other
formatting characters.
"""
return 'district_assignment_model'
def displayName(self):
"""
Returns the translated algorithm name, which should be used for any
user-visible display of the algorithm name.
"""
return self.tr('District Assignment Model')
def group(self):
"""
Returns the name of the group this algorithm belongs to. This string
should be localised.
"""
return self.tr('scripts')
def groupId(self):
"""
Returns the unique ID of the group this algorithm belongs to. This
string should be fixed for the algorithm, and must not be localised.
The group id should be unique within each provider. Group id should
contain lowercase alphanumeric characters only and no spaces or other
formatting characters.
"""
return 'scripts'
def shortHelpString(self):
"""
Returns a localised short helper string for the algorithm. This string
should provide a basic description about what the algorithm does and the
        parameters and outputs associated with it.
"""
return self.tr("Assigns Census Tracts to Districts")
def initAlgorithm(self, config=None):
"""
Here we define the inputs and output of the algorithm, along
with some other properties.
"""
# We add the input vector features source. It can have any kind of
# geometry.
self.addParameter(
QgsProcessingParameterFeatureSource(
self.INPUT,
self.tr('Input layer'),
[QgsProcessing.TypeVectorAnyGeometry]
)
)
# We add a feature sink in which to store our processed features (this
# usually takes the form of a newly created vector layer when the
# algorithm is run in QGIS).
self.addParameter(
QgsProcessingParameterFeatureSink(
self.OUTPUT,
self.tr('Output layer')
)
)
def processAlgorithm(self, parameters, context, feedback):
"""
Here is where the processing itself takes place.
"""
# Retrieve the feature source and sink. The 'dest_id' variable is used
# to uniquely identify the feature sink, and must be included in the
# dictionary returned by the processAlgorithm function.
source = self.parameterAsSource(
parameters,
self.INPUT,
context
)
# If source was not found, throw an exception to indicate that the algorithm
# encountered a fatal error. The exception text can be any string, but in this
# case we use the pre-built invalidSourceError method to return a standard
# helper text for when a source cannot be evaluated
if source is None:
raise QgsProcessingException(self.invalidSourceError(parameters, self.INPUT))
output_fields = QgsFields()
output_fields.append(QgsField('id', QVariant.String))
output_fields.append(QgsField('population', QVariant.Int))
output_fields.append(QgsField('district', QVariant.Int))
(sink, dest_id) = self.parameterAsSink(
parameters,
self.OUTPUT,
context,
output_fields,
source.wkbType(),
source.sourceCrs()
)
# Send some information to the user
feedback.pushInfo('CRS is {}'.format(source.sourceCrs().authid()))
# If sink was not created, throw an exception to indicate that the algorithm
# encountered a fatal error. The exception text can be any string, but in this
# case we use the pre-built invalidSinkError method to return a standard
# helper text for when a sink cannot be evaluated
if sink is None:
raise QgsProcessingException(self.invalidSinkError(parameters, self.OUTPUT))
# Compute the number of steps to display within the progress bar and
# get features from source
# total = 100.0 / source.featureCount() if source.featureCount() else 0
feedback.pushInfo('pre-census_tracts')
#features = [feature for feature in source.getFeatures]
#feedback.pushInfo(str(census_tracts))
features = {feature.id(): feature for feature in source.getFeatures()}
number_of_districts = 4 # NEED TO FIGURE OUT HOW TO IMPORT # OF DISTRICTS TO AUTOMATE DIFFERENT STATES
feedback.pushInfo('pre-model')
model = Model(features, number_of_districts, feedback)
#feedback.pushInfo(str(model.census_tracts))
census_tract_district_assignment = model.final_tract_assignments
#census_tract_district_assignment = {}
#for tract in model.census_tracts:
#feedback.pushInfo(str(tract))
# if tract.district is not None:
# census_tract_district_assignment[tract.tract_id] = tract.district.district_id
# else:
# census_tract_district_assignment[tract.tract_id] = 0
#census_tract_district_assignment = {tract.tract_id: tract.district.district_id for tract in model.census_tracts}
#for tract in census_tract_district_assignment:
# feedback.pushInfo(str(tract))
feedback.pushInfo('post-model')
# Create the Output Sink
for feature in source.getFeatures():
f = QgsFeature()
f.setFields(output_fields)
f.setGeometry(feature.geometry())
f['id'] = feature['GEOID10']
f['population'] = feature['POP10']
f['district'] = census_tract_district_assignment[f['id']]
sink.addFeature(f, QgsFeatureSink.FastInsert)
'''
for current, feature in enumerate(features):
# Stop the algorithm if cancel button has been clicked
if feedback.isCanceled():
break
# Add a feature in the sink
sink.addFeature(feature, QgsFeatureSink.FastInsert)
# Update the progress bar
feedback.setProgress(int(current * total))
# To run another Processing algorithm as part of this algorithm, you can use
# processing.run(...). Make sure you pass the current context and feedback
# to processing.run to ensure that all temporary layer outputs are available
# to the executed algorithm, and that the executed algorithm can send feedback
# reports to the user (and correctly handle cancellation and progress reports!)
if False:
buffered_layer = processing.run("native:buffer", {
'INPUT': dest_id,
'DISTANCE': 1.5,
'SEGMENTS': 5,
'END_CAP_STYLE': 0,
'JOIN_STYLE': 0,
'MITER_LIMIT': 2,
'DISSOLVE': False,
'OUTPUT': 'memory:'
}, context=context, feedback=feedback)['OUTPUT']
# Return the results of the algorithm. In this case our only result is
# the feature sink which contains the processed features, but some
# algorithms may return multiple feature sinks, calculated numeric
# statistics, etc. These should all be included in the returned
# dictionary, with keys matching the feature corresponding parameter
# or output names.
'''
return {self.OUTPUT: dest_id}
class Utilities:
@staticmethod
def get_centers(locations):
'''
Brief Description: Creates a list of just the centers of a group of locations
Parameters: locations: List of census tracts or districts
Returns: List of location centers, where each center is a dictionary with latitude and longitude keys
'''
return [location.center for location in locations]
@staticmethod
def locate_center(locations):
'''
Brief Description: Locates the geographic center of a group of locations.
Parameters: locations: List of census_tracts or districts
Returns: Dictionary containing the latitude and longitude of the center of locations
'''
location_centers = Utilities.get_centers(locations)
center_latitude = sum([location['latitude'] for location in location_centers]) / len(location_centers)
center_longitude = sum([location['longitude'] for location in location_centers]) / len(location_centers)
return {
'latitude': center_latitude,
'longitude': center_longitude
}
@staticmethod
def get_distance(location_1, location_2):
        '''
        Brief Description: Calculates the straight-line distance between two locations
                           (latitude/longitude are treated as planar coordinates, ignoring
                           curvature, which is fine for comparing relative distances)
        Parameters: location_1: A dictionary with latitude and longitude for one location
                    location_2: A dictionary with latitude and longitude for a second location
        Returns: Distance between the two points
        '''
        assert(isinstance(location_1, dict))
        assert(isinstance(location_2, dict))
        # math is already imported at module level
        return math.sqrt(
            (location_1['latitude'] - location_2['latitude'])**2 +
            (location_1['longitude'] - location_2['longitude'])**2
        )
@staticmethod
def assign(district, tract):
district.assign(tract)
tract.assign(district)
@staticmethod
def remove(tract):
tract.district.remove(tract)
tract.remove()
@staticmethod
def reassign(district, tract):
assert(isinstance(tract, Tract))
assert(isinstance(district, District))
Utilities.remove(tract)
Utilities.assign(district, tract)
@staticmethod
def reset(items):
# items is either districts or tracts
for item in items:
item.reset()
@staticmethod
def update_distances(item):
for distance in item.distances:
distance.update()
@staticmethod
def get_differences(census_tracts, last_assignment):
new_assignment = [tract.district for tract in census_tracts]
return sum([1 for assignment in zip(new_assignment, last_assignment) if assignment[0] != assignment[1]])
@staticmethod
def relocate_centers(districts):
for district in districts:
district.relocate_center()
class Distance:
'''
The Distance class keeps track of the distance between a tract center and a district center. It includes custom less than
function (based on the distance between tract center and distance center and a customized equality function in which two
Distance objects are equal if they have the same tract and district.
'''
def __init__(self, tract, district):
self.tract = tract
self.district = district
self.update()
def update(self):
self.distance = Utilities.get_distance(self.tract.center, self.district.center)
def __lt__(self, other):
return self.distance < other.distance
def __eq__(self, other):
return self.tract == other.tract and self.district == other.district
def __repr__(self):
return f'Tract: {self.tract.tract_id}, District: {self.district.district_id}, Distance: {self.distance}'
class District:
'''
The District class keeps track of all properties and functions relative to a given district.
'''
def __init__(self, id, state_center):
self.district_id = id
self.center = None
self.population = 0
self.state_center = state_center
self.assigned_census_tracts = []
self.neighbors = []
self.distances = []
def initialize(self, starting_tract):
self.center = starting_tract.center
Utilities.assign(self, starting_tract)
def update(self):
self.relocate_center()
self.set_border_tracts()
self.set_neighboring_districts()
self.reset_neighbors()
def relocate_center(self):
if self.assigned_census_tracts:
self.center = Utilities.locate_center(self.assigned_census_tracts)
def add_neighbors(self, tract):
for neighboring_tract in tract.neighbors:
if neighboring_tract.district != self:
neighbor = Distance(neighboring_tract, self)
if neighbor not in self.neighbors:
self.neighbors.append(neighbor)
def remove_neighbors(self, tract):
self.neighbors.append(Distance(tract, self))
tract_neighbors = [neighbor for neighbor in self.neighbors if any([neighboring_tract == tract for neighboring_tract in neighbor.tract.neighbors])]
for neighbor in tract_neighbors:
if self.does_not_border(neighbor.tract):
self.neighbors.remove(neighbor)
def does_not_border(self, tract):
return all([neighbor_tract.district != self for neighbor_tract in tract.neighbors])
def reset_neighbors(self):
self.neighbors = []
for tract in self.assigned_census_tracts:
self.add_neighbors(tract)
def sort_neighbors(self):
self.neighbors.sort()
def get_nearest_neighbor(self):
return self.neighbors.pop(0).tract
def assign(self, tract):
self.population += tract.population
self.assigned_census_tracts.append(tract)
self.add_neighbors(tract)
def remove(self, tract):
self.population -= tract.population
self.assigned_census_tracts.remove(tract)
self.remove_neighbors(tract)
def distance_to_state_center(self):
return Utilities.get_distance(self.center, self.state_center)
def set_border_tracts(self):
self.border_tracts = [tract for tract in self.assigned_census_tracts if any([neighbor.district != self for neighbor in tract.neighbors])]
def set_average_border_tract_distance(self):
if len(self.border_tracts) == 0:
self.average_border_tract_distance = 0
else:
self.average_border_tract_distance = sum([Utilities.get_distance(tract.center, self.center) for tract in self.border_tracts]) / len(self.border_tracts)
def set_neighboring_districts(self):
self.neighboring_districts = list({neighboring_district for tract in self.assigned_census_tracts for neighboring_district in tract.get_neighboring_districts()})
def __lt__(self, other):
return self.distance_to_state_center() < other.distance_to_state_center()
def __repr__(self):
return f'District {self.district_id}: Population {self.population}, Tracts: {len(self.assigned_census_tracts)}'
def __str__(self):
return f'District {self.district_id}: Population {self.population}, Tracts: {len(self.assigned_census_tracts)}'
'''
class KMeansAssignment:
def __init__(self, model_parameters, show_feedback):
self.census_tracts = model_parameters['census_tracts']
self.districts = model_parameters['districts']
self.number_of_districts = model_parameters['number_of_districts']
self.centroids = []
self.show_feedback = show_feedback
self.run_assignment()
self.reset()
self.run_trial()
self.show_feedback('K-Means Clustering Complete')
def run_assignment(self):
trials = 10
for trial in range(trials):
self.show_feedback(f'Trial {trial + 1}')
self.run_trial()
self.reset()
def run_trial(self):
max_iterations = 50
iteration_number = 1
differences = len(self.census_tracts)
while differences > 0 and iteration_number <= max_iterations:
#self.show_feedback(f'Iteration: {iteration_number}, {differences} differences')
last_assignment = [tract.district for tract in self.census_tracts]
self.assign_census_tracts()
differences = Utilities.get_differences(self.census_tracts, last_assignment)
self.update_district_centers()
iteration_number += 1
def assign_census_tracts(self):
for tract in self.census_tracts:
closest_district = tract.get_closest_district()
if closest_district != tract.district:
if tract.district:
Utilities.remove(tract)
Utilities.assign(closest_district, tract)
def update_district_centers(self):
for district in self.districts:
district.relocate_center()
def reset(self):
self.centroids.append([district.center for district in self.districts])
new_starting_tracts = random.sample(self.census_tracts, self.number_of_districts)
for district in self.districts:
tract = new_starting_tracts.pop()
district.update_center(tract.center)
Utilities.assign(district, tract)
def final_reset(self):
district_centers = self.get_final_district_centers()
for district in self.districts:
district.update_center(district_centers.pop())
self.run_trial()
def get_final_district_centers(self):
def calculate_new_centroid_center(district_center, centroid_center, multiplier):
centroid_center['longitude'] = (district_center['longitude'] + centroid_center['longitude'] * multiplier) / (multiplier + 1)
centroid_center['latitude'] = (district_center['latitude'] + centroid_center['latitude'] * multiplier) / (multiplier + 1)
return centroid_center
def get_closest_centers(centroid_centers, trial_centers):
closest_centers = []
for centroid in centroid_centers:
distances = [Utilities.get_distance(centroid, center) for center in trial_centers]
closest_index = distances.index(min(distances))
closest_centers.append(trial_centers.pop(closest_index))
return closest_centers
centroid_centers = self.centroids[0]
for index in range(1, len(self.centroids)):
closest_centers = get_closest_centers(centroid_centers, self.centroids[index])
#self.show_feedback(f'Closest centers length {len(closest_centers)}, Centroid Length {
for center_index in range(len(centroid_centers)):
centroid_centers[center_index] = calculate_new_centroid_center(centroid_centers[center_index], closest_centers[center_index], index)
return centroid_centers
class NeighborAssignment:
def __init__(self, model_parameters, show_feedback):
self.census_tracts = model_parameters['census_tracts']
self.districts = model_parameters['districts']
self.number_of_districts = model_parameters['number_of_districts']
self.target_district_population = model_parameters['target_district_population']
self.show_feedback = show_feedback
self.run_assignment()
self.show_feedback('Neighbor Assignment Complete')
def run_assignment(self):
max_iterations = 100
iteration_number = 0
differences = len(self.census_tracts)
while differences > 0 and iteration_number < max_iterations:
self.show_feedback(f'Iteration number {iteration_number}: {differences} differences')
last_assignment = [tract.district for tract in self.census_tracts]
self.reset()
self.assign_census_tracts()
differences = Utilities.get_differences(self.census_tracts, last_assignment)
iteration_number += 1
def assign_census_tracts(self):
tracts_to_be_assigned = len(self.census_tracts) - self.number_of_districts
while tracts_to_be_assigned > 0:
for district in self.districts:
if self.assign_neighbor(district):
tracts_to_be_assigned -= 1
def reset(self):
for district in self.districts:
district.relocate_center()
for tract in self.census_tracts:
tract.reset()
assigned_tracts = []
for district in self.districts:
district.reset()
new_starting_tract = district.get_closest_tract(assigned_tracts)
Utilities.assign(district, new_starting_tract)
district.get_neighbors()
def assign_neighbor(self, district):
tract = district.neighbors.nearest_available()
if tract:
Utilities.assign(district, tract)
return True
return False
'''
class InitialAssignment:
def __init__(self, model_parameters, show_feedback):
self.census_tracts = model_parameters['census_tracts']
self.districts = model_parameters['districts']
#self.number_of_districts = model_parameters['number_of_districts']
self.target_district_population = model_parameters['target_district_population']
#self.state_center = Utilities.locate_center(self.census_tracts)
self.show_feedback = show_feedback
self.run_assignment()
Utilities.relocate_centers(self.districts)
self.show_feedback('Initial Assignment Complete')
def run_assignment(self):
def get_available_tracts():
return [tract for tract in self.census_tracts if tract.district is None]
def nearest_available_neighbor(district):
district.sort_neighbors()
while len(district.neighbors) > 0:
tract = district.get_nearest_neighbor()
if tract.district is None:
return tract
return None
def get_new_starting_tract(district):
available_tracts = get_available_tracts()
return min([Distance(tract, district) for tract in available_tracts]).tract if available_tracts else None
for district in self.districts:
starting_tract = max(get_available_tracts())
district.initialize(starting_tract)
while district.population < self.target_district_population:
tract_to_be_assigned = nearest_available_neighbor(district)
if tract_to_be_assigned:
Utilities.assign(district, tract_to_be_assigned)
else:
new_starting_tract = get_new_starting_tract(district)
if new_starting_tract:
district.initialize(new_starting_tract)
else:
break
self.show_feedback(str(district))
class ConsolidateDistricts:
def __init__(self, districts, show_feedback):
self.districts = districts
self.show_feedback = show_feedback
self.consolidate_districts()
Utilities.relocate_centers(self.districts)
self.show_feedback('Consolidation Complete')
def consolidate_districts(self):
def get_outliers(district):
central_tract = min([Distance(tract, district) for tract in district.assigned_census_tracts]).tract
            visited = set()
queue = [central_tract]
while len(queue) > 0:
tract = queue.pop(0)
visited.add(tract)
for neighbor in tract.neighbors:
if neighbor.district == district and neighbor not in visited:
queue.append(neighbor)
assigned = set(district.assigned_census_tracts)
return list(assigned.difference(visited))
def get_neighboring_districts(tract, district):
return list({neighbor.district for neighbor in tract.neighbors}.difference({district}))
self.show_feedback('Starting Consolidation')
consolidated_districts = 0
self.show_feedback(str(self.districts))
for district in self.districts:
#self.show_feedback(str(district))
district.reset_neighbors()
outliers = get_outliers(district)
self.show_feedback(f'Outliers {len(outliers)}')
while len(outliers) > 0:
tract = outliers.pop(0)
neighboring_districts = get_neighboring_districts(tract, district)
if not neighboring_districts:
outliers.append(tract)
else:
new_district = neighboring_districts.pop()
Utilities.reassign(new_district, tract)
consolidated_districts += 1
self.show_feedback(f'Consolidated Districts: {consolidated_districts}')
class CompactDistricts:
def __init__(self, districts, show_feedback):
self.districts = districts
self.show_feedback = show_feedback
self.compact_districts()
Utilities.relocate_centers(self.districts)
self.show_feedback('Compact Districts Complete')
def compact_districts(self):
def get_border_tracts(border_tracts, neighboring_district):
return [tract for tract in border_tracts if any([neighbor.district == neighboring_district for neighbor in tract.neighbors])]
def get_potential_swaps(district, neighboring_district):
potential_swap_tracts = []
border_tracts = get_border_tracts(district.border_tracts, neighboring_district)
for tract in border_tracts:
district_distance = Utilities.get_distance(tract.center, district.center)
neighbor_district_distance = Utilities.get_distance(tract.center, neighboring_district.center)
swap_ratio = neighbor_district_distance / district_distance
potential_swap_tracts.append({
'tract': tract,
'swap_ratio': swap_ratio
})
potential_swap_tracts.sort(key = lambda d: d['swap_ratio'])
return potential_swap_tracts
def swap_tracts(tract, neighbor):
tract_district = tract.district
neighbor_district = neighbor.district
Utilities.reassign(neighbor_district, tract)
Utilities.reassign(tract_district, neighbor)
def set_district(district):
district.relocate_center()
district.set_border_tracts()
district.set_neighboring_districts()
def get_available_neighboring_districts(district, assigned_districts):
return [neighboring_district for neighboring_district in district.neighboring_districts if neighboring_district not in assigned_districts]
def get_swaps(district, neighboring_district):
neighboring_district.set_border_tracts()
district_potential_swaps = get_potential_swaps(district, neighboring_district)
neighboring_district_potential_swaps = get_potential_swaps(neighboring_district, district)
return zip(district_potential_swaps, neighboring_district_potential_swaps)
number_swaps = 0
assigned_districts = []
for district in self.districts:
set_district(district)
available_neighboring_districts = get_available_neighboring_districts(district, assigned_districts)
assigned_districts.append(district)
for neighboring_district in available_neighboring_districts:
swaps = get_swaps(district, neighboring_district)
for swap in swaps:
district_potential_swap = swap[0]
neighbor_potential_swap = swap[1]
if district_potential_swap['swap_ratio'] + neighbor_potential_swap['swap_ratio'] < 2:
swap_tracts(district_potential_swap['tract'], neighbor_potential_swap['tract'])
number_swaps += 1
self.show_feedback(f'Swaps: {number_swaps}')
'''
def get_border_tracts(self):
border_tracts = []
for district in self.districts:
district.set_border_tracts()
border_tracts.extend(district.border_tracts)
return border_tracts
def set_average_distances_to_border_tracts(self):
for district in self.districts:
district.set_average_border_tract_distance()
def get_border_tract_ratio(self):
pass
def compact_districts(self):
border_tracts = self.get_border_tracts()
self.set_average_distances_to_border_tracts()
for tract in border_tracts:
new_assignment = tract.get_new_assignment()
if new_assignment:
Utilities.reassign(new_assignment, tract)
def new_compact_districts(self):
def get_neighboring_districts(district, assigned_districts):
neighboring_districts = list({neighboring_district for tract in district.assigned_census_tracts for neighboring_district in tract.get_neighboring_districts()})
return [district for district in neighboring_districts if district not in assigned_districts]
def get_neighboring_tracts(district, neighboring_district):
return [neighbor.tract for neighbor in district.neighbors if neighbor.tract.district == neighboring_district]
def get_distance_ratios(district, neighboring_district, tracts):
def get_average_distance(district, tracts):
return sum([Utilities.get_distance(tract.center, district.center) for tract in tracts]) / len(tracts)
average_district_distance = get_average_distance(district, tracts)
average_neighboring_district_distance = get_average_distance(neighboring_district, tracts)
distance_ratios = []
for tract in tracts:
district_distance_ratio = Utilities.get_distance(tract.center, district.center) / average_district_distance
neighboring_district_distance_ratio = Utilities.get_distance(tract.center, neighboring_district.center) / average_neighboring_district_distance
#if district_distance_ratio < neighboring_district_distance_ratio:
distance_ratio = {
'tract': tract,
'district_distance_ratio': district_distance_ratio,
'neighboring_district_distance_ratio': neighboring_district_distance_ratio
}
distance_ratios.append(distance_ratio)
distance_ratios.sort(key = lambda d: d['district_distance_ratio'])
return distance_ratios
def swap_tracts(tract, neighbor):
tract_district = tract.district
neighbor_district = neighbor.district
Utilities.reassign(neighbor_district, tract)
Utilities.reassign(tract_district, neighbor)
assigned_districts = []
for district in self.districts:
assigned_districts.append(district)
neighboring_districts = get_neighboring_districts(district, assigned_districts)
self.show_feedback(str(neighboring_districts))
for neighboring_district in neighboring_districts:
district.reset_neighbors()
swaps = 0
neighbor_tracts = get_neighboring_tracts(district, neighboring_district)
border_tracts = get_neighboring_tracts(neighboring_district, district)
#self.show_feedback(str(district))
#self.show_feedback(str(neighboring_district))
#self.show_feedback(str(len(neighbor_tracts)))
#self.show_feedback(str(len(border_tracts)))
if neighbor_tracts and border_tracts:
neighbor_tract_distance_ratios = get_distance_ratios(district, neighboring_district, neighbor_tracts)
neighbor_tract_distance_ratios = [tract for tract in neighbor_tract_distance_ratios if tract['district_distance_ratio'] < 1]
border_tract_distance_ratios = get_distance_ratios(neighboring_district, district, border_tracts)
border_tract_distance_ratios = [tract for tract in border_tract_distance_ratios if tract['district_distance_ratio'] > 1]
border_tract_distance_ratios.reverse()
#self.show_feedback(str(len(neighbor_tract_distance_ratios)))
#self.show_feedback(str(len(border_tract_distance_ratios)))
while len(neighbor_tract_distance_ratios) > 0 and len(border_tract_distance_ratios) > 0:
neighbor_tract = neighbor_tract_distance_ratios.pop(0)
border_tract = border_tract_distance_ratios.pop(0)
if border_tract['district_distance_ratio'] > neighbor_tract['district_distance_ratio']:
swap_tracts(neighbor_tract['tract'], border_tract['tract'])
swaps += 1
self.show_feedback(f'District: {district.district_id}, Neighboring District: {neighboring_district.district_id}, Swaps: {swaps}')
'''
class PopulationAdjustment:
def __init__(self, model_parameters, show_feedback):
self.census_tracts = model_parameters['census_tracts']
self.districts = model_parameters['districts']
self.number_of_districts = model_parameters['number_of_districts']
self.target_district_population = model_parameters['target_district_population']
self.buffer = 2000
self.show_feedback = show_feedback
self.run_adjustment()
self.show_feedback('Population Adjustment Complete')
def run_adjustment(self):
assigned_districts = []
max_iterations = 100
for district in self.districts:
self.update_districts()
#self.show_feedback(str(district))
assigned_districts.append(district)
if len(assigned_districts) == self.number_of_districts:
break
iterations = 1
while district.population > self.target_district_population + self.buffer and iterations < max_iterations:
#self.show_feedback('Over')
self.adjust_overpopulation(district, assigned_districts)
iterations += 1
while district.population < self.target_district_population - self.buffer and iterations < max_iterations:
#self.show_feedback('Under')
self.adjust_underpopulation(district, assigned_districts)
iterations += 1
self.show_feedback(f'{district}: {iterations} iterations')
def update_districts(self):
for district in self.districts:
district.update()
def adjust_overpopulation(self, district, assigned_districts):
def get_neighboring_districts(district, assigned_districts):
return [neighboring_district for neighboring_district in district.neighboring_districts if neighboring_district not in assigned_districts]
def get_tract_to_be_removed(district, assigned_districts):
neighboring_districts = get_neighboring_districts(district, assigned_districts)
tracts = []
for tract in district.border_tracts:
for neighbor in tract.neighbors:
if neighbor.district in neighboring_districts:
tracts.append(Distance(tract, district))
return max(tracts, key = lambda d: d.distance).tract if tracts else None
def get_new_district(tract, assigned_districts):
available_districts = [neighbor.district for neighbor in tract.neighbors if neighbor.district not in assigned_districts]
return min(available_districts, key = lambda d: d.population)
district.update()
tract_to_be_removed = get_tract_to_be_removed(district, assigned_districts)
if tract_to_be_removed:
new_district = get_new_district(tract_to_be_removed, assigned_districts)
Utilities.reassign(new_district, tract_to_be_removed)
def adjust_underpopulation(self, district, assigned_districts):
district.sort_neighbors()
while len(district.neighbors) > 0:
tract = district.get_nearest_neighbor()
if tract.district not in assigned_districts:
Utilities.reassign(district, tract)
break
'''
for neighboring_district in neighboring_districts:
adjacent_neighbors = self.get_adjacent_neighbors(district, neighboring_district)
if adjacent_neighbors:
available_neighbors.extend(adjacent_neighbors)
if available_neighbors:
neighbor_to_be_removed = min(available_neighbors, key = lambda d: d['ratio'])['neighbor']
tract_to_be_removed = neighbor_to_be_removed.tract
new_district = neighbor_to_be_removed.district
Utilities.reassign(new_district, tract_to_be_removed)
def get_adjacent_neighbors(self, district, neighboring_district):
adjacent_neighbors = [neighbor for neighbor in neighboring_district.neighbors if neighbor.tract.district == district]
#self.show_feedback(str(adjacent_neighbors))
if not adjacent_neighbors:
return []
average_neighbor_distance = sum([neighbor.distance for neighbor in adjacent_neighbors]) / len(adjacent_neighbors)
adjacent_neighbor_ratios = []
for neighbor in adjacent_neighbors:
neighbor_ratio = {
'neighbor': neighbor,
'ratio': neighbor.distance / average_neighbor_distance
}
adjacent_neighbor_ratios.append(neighbor_ratio)
#self.show_feedback(str(adjacent_neighbor_ratios))
return adjacent_neighbor_ratios
def get_neighboring_districts(self, district, assigned_districts):
return list({neighbor.tract.district for neighbor in district.neighbors if neighbor.tract.district not in assigned_districts})
def get_tract_and_district(self, district, available_districts):
#self.show_feedback(f'{district}, {available_districts}')
available_tracts = [tract for tract in district.border_tracts if any([neighbor.district in available_districts for neighbor in tract.neighbors])]
#self.show_feedback(f'{available_tracts}')
tract_to_be_removed = None
new_district = None
maximum_distance_ratio_difference = -math.inf
for tract in available_tracts:
district_distance_ratio = Utilities.get_distance(tract.center, district.center) / district.average_border_tract_distance
neighbor_district, neighbor_distance_ratio = self.get_neighbor_distance_ratio(tract, available_districts)
distance_ratio_difference = district_distance_ratio - neighbor_distance_ratio
if distance_ratio_difference > maximum_distance_ratio_difference:
tract_to_be_removed = tract
new_district = neighbor_district
maximum_distance_ratio_difference = distance_ratio_difference
#self.show_feedback(f'{tract_to_be_removed}, {new_district}')
return tract_to_be_removed, new_district
def get_neighbor_distance_ratio(self, tract, available_districts):
neighboring_districts = [neighbor.district for neighbor in tract.neighbors if neighbor.district in available_districts]
minimum_district = None
minimum_distance_ratio = math.inf
for district in neighboring_districts:
distance_ratio = Utilities.get_distance(tract.center, district.center) / district.average_border_tract_distance
if distance_ratio < minimum_distance_ratio:
minimum_district = district
minimum_distance_ratio = distance_ratio
return minimum_district, minimum_distance_ratio
def get_new_district(self, tract, assigned_districts):
neighboring_districts = [neighbor for neighbor in tract.get_neighboring_districts() if neighbor not in assigned_districts]
new_district = None
distance_ratio = math.inf
for district in neighboring_districts:
district.set_average_border_tract_distance()
district_distance_ratio = Utilities.get_distance(tract.center, district.center) / district.average_border_tract_distance
if district_distance_ratio < distance_ratio:
new_district = district
distance_ratio = district_distance_ratio
return new_district
def get_underpopulation_available_tracts(self, district, assigned_districts):
neighbor_tracts = [neighbor.tract for neighbor in district.neighbors]
return [tract for tract in neighbor_tracts if tract.district not in assigned_districts]
def get_tract_to_add(self, district, available_tracts):
new_tract = None
minimum_combined_distance_ratio = math.inf
for tract in available_tracts:
district_distance_ratio = Utilities.get_distance(tract.center, district.center) / district.average_border_tract_distance
neighbor_distance_ratio = Utilities.get_distance(tract.center, tract.district.center) / tract.district.average_border_tract_distance
combined_distance_ratio = district_distance_ratio + neighbor_distance_ratio
if combined_distance_ratio < minimum_combined_distance_ratio:
new_tract = tract
minimum_combined_distance_ratio = combined_distance_ratio
return new_tract
def old_run_adjustment(self):
for district in self.districts:
district.relocate_center()
self.get_district_neighbors()
assigned_districts = []
for district in self.districts:
assigned_districts.append(district)
#self.show_feedback(f'District {district.district_id}: {district.population}')
if district.population > self.target_district_population + self.buffer:
self.adjust_overpopulation(district, assigned_districts)
else:
self.adjust_underpopulation(district, assigned_districts)
def get_district_neighbors(self):
for district in self.districts:
district.get_neighbors()
def old_adjust_overpopulation(self, district, assigned_districts):
#self.show_feedback(f'Overpopulation {district.district_id} {district.population}')
#for tract in district.assigned_census_tracts:
#self.show_feedback(f'District tracts: {tract}, neighbors {tract.neighbors}')
furthest_census_tracts = self.find_furthest_census_tracts(district)
#self.show_feedback(f'Furthest: {furthest_census_tracts}')
while district.population > self.target_district_population + 2000 and len(furthest_census_tracts) > 0:
#self.show_feedback(f'District {district.district_id}: {district.population} Furthest: {len(furthest_census_tracts)}')
tract_to_be_reassigned = furthest_census_tracts.pop(0)
#self.show_feedback(str(tract_to_be_reassigned))
for tract in tract_to_be_reassigned.neighbors:
#self.show_feedback(f'Tract district: {tract.district}')
if tract.district != district and tract.district not in assigned_districts:
new_district = tract.district
self.reassign_tract(tract_to_be_reassigned, new_district)
#self.show_feedback(f'{district.district_id} {district.population}')
def old_adjust_underpopulation(self, district, assigned_districts):
#self.show_feedback(f' Underpopulation {district.district_id} {district.population}')
#nearest_census_tracts = self.find_nearest_census_tracts(district, assigned_districts)
while district.population < self.target_district_population - 2000 and len(district.neighbors) > 0:
#self.feedback.pushInfo(f'District {district.district_id}: {district.population} Neighbors: {len(district.neighbors)}')
tract_to_be_reassigned = district.neighbors.nearest(assigned_districts)
#self.show_feedback(str(tract_to_be_reassigned))
if tract_to_be_reassigned:
self.reassign_tract(tract_to_be_reassigned, district)
#self.show_feedback(f'{district.district_id} {district.population}')
def find_furthest_census_tracts(self, district):
#self.feedback.pushInfo(str(district))
distances = []
for tract in district.assigned_census_tracts:
if any([neighbor.district != district for neighbor in tract.neighbors]):
distances.append(Distance(tract, district.center))
distances.sort(reverse = True)
return [distance.location for distance in distances]
def find_nearest_census_tracts(self, district, assigned_districts):
return sorted([DistrictDistance(tract, district) for tract in self.census_tracts if tract.new_assignment not in assigned_districts])
def reassign_tract(self, tract, new_district):
#self.show_feedback(f'Reassign: {new_district}')
old_district = tract.district
Utilities.remove(tract)
Utilities.assign(new_district, tract)
'''
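# Swap pairs of border tracts between adjacent districts when the swap leaves both tracts at least as close to their new district centers, and reattach isolated tracts to a neighboring district.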
class SwapTracts:
def __init__(self, model_parameters, show_feedback):
self.census_tracts = model_parameters['census_tracts']
self.number_of_districts = model_parameters['number_of_districts']
self.districts = model_parameters['districts']
self.target_district_population = model_parameters['target_district_population']
self.show_feedback = show_feedback
self.run_distance_swap()
self.run_isolated_swap()
self.show_feedback('Tract Swap Complete')
def run_isolated_swap(self):
for tract in self.census_tracts:
if tract.is_isolated():
Utilities.remove(tract)
Utilities.assign(tract.neighbors[0].district, tract)
def run_distance_swap(self):
for this_district in self.districts:
self.show_feedback(f'District: {this_district}')
outer_ring_tracts = this_district.get_outer_ring_tracts()
for tract in outer_ring_tracts:
#self.show_feedback(str(tract))
neighboring_districts = self.get_neighboring_districts(tract)
self.show_feedback(str(this_district.neighbors))
self.show_feedback(str(neighboring_districts))
available_neighbors = this_district.neighbors.get_potential_swap_tracts(neighboring_districts)
self.show_feedback(str(available_neighbors))
potential_swaps = [neighbor for neighbor in available_neighbors if self.swap_criteria_met(tract, neighbor.tract)]
#self.show_feedback(str(potential_swaps))
#self.show_feedback(f'Tract: {tract}, Available Neighbors: {available_neighbors}, Potential Swaps: {potential_swaps}')
if potential_swaps:
neighbor = sorted(potential_swaps)[0].tract
self.show_feedback(str(tract))
self.show_feedback(str(neighbor))
self.show_feedback('-------------------')
self.swap_tracts(tract, neighbor)
def get_neighboring_districts(self, tract):
return list({neighbor.district for neighbor in tract.neighbors if neighbor.district != tract.district})
def swap_criteria_met(self, tract, neighbor):
if Utilities.get_distance(tract.district.center, tract.center) < Utilities.get_distance(tract.district.center, neighbor.center):
return False
if Utilities.get_distance(neighbor.district.center, tract.center) > Utilities.get_distance(neighbor.district.center, neighbor.center):
return False
return True
def swap_tracts(self, tract, neighbor):
tract_district = tract.district
neighbor_district = neighbor.district
self.show_feedback(str(tract))
self.show_feedback(str(neighbor))
Utilities.remove(tract)
Utilities.remove(neighbor)
Utilities.assign(neighbor_district, tract)
Utilities.assign(tract_district, neighbor)
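# Top-level model: builds Tract objects from the QGIS features, sets a target population per district, then runs initial assignment followed by repeated consolidation, compaction, and population-adjustment passes.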
class Model:
def __init__(self, features, number_of_districts, feedback):
self.show_feedback = feedback.pushInfo
self.number_of_districts = number_of_districts
self.census_tracts = self.get_census_tracts(features)
self.target_district_population = self.get_target_district_population()
self.state_center = Utilities.locate_center(self.census_tracts)
self.districts = self.initialize_districts()
self.model_parameters = {
'census_tracts': self.census_tracts,
'districts': self.districts,
'number_of_districts': self.number_of_districts,
'target_district_population': self.target_district_population,
}
self.show_feedback('Model Initialized')
self.show_feedback(f'Target District Population: {self.target_district_population}')
self.final_tract_assignments = {tract.tract_id: None for tract in self.census_tracts}
#self.get_assignment()
#self.add_distances()
self.run_model()
def get_census_tracts(self, features):
def extract_tracts_from_features(features):
def get_index(features):
index = QgsSpatialIndex()
for feature in features.values():
index.insertFeature(feature)
return index
def get_location(feature):
return {
'latitude': float(feature['INTPTLAT10']),
'longitude': float(feature['INTPTLON10'])
}
def find_neighbors(feature, features, index):
neighbors = []
intersecting_ids = index.intersects(feature.geometry().boundingBox())
for id in intersecting_ids:
intersecting_feature = features[id]
if intersecting_feature != feature and not intersecting_feature.geometry().disjoint(feature.geometry()):
neighbors.append(intersecting_feature['GEOID10'])
return neighbors
index = get_index(features)
self.show_feedback('Index complete')
return [
{
'tract_id': feature['GEOID10'],
'center': get_location(feature),
'population': feature['POP10'],
'county': feature['COUNTYFP10'],
'neighbors': find_neighbors(feature, features, index)
}
for feature in features.values()
]
census_tract_data = extract_tracts_from_features(features)
self.show_feedback('Features extracted')
tract_centers = [tract['center'] for tract in census_tract_data]
state_center_latitude = sum([location['latitude'] for location in tract_centers]) / len(tract_centers)
state_center_longitude = sum([location['longitude'] for location in tract_centers]) / len(tract_centers)
state_center = {
'latitude': state_center_latitude,
'longitude': state_center_longitude
}
#for tract in census_tract_data:
#self.feedback.pushInfo(f'Tract {tract["tract_id"]}: {tract["neighbors"]}')
#state_center = Utilities.locate_center(self.census_tracts)
census_tracts = [Tract(tract, state_center, self.show_feedback) for tract in census_tract_data]
census_tract_index = {tract.tract_id: tract for tract in census_tracts}
for tract in census_tracts:
tract.update_neighbors(census_tract_index)
return census_tracts
def get_target_district_population(self):
return sum([tract.population for tract in self.census_tracts]) / self.number_of_districts
def initialize_districts(self):
return [District(index, self.state_center) for index in range(1, self.number_of_districts + 1)]
def add_distances(self):
for tract in self.census_tracts:
for district in self.districts:
distance = Distance(tract, district)
tract.add_distance(distance)
district.add_distance(distance)
def run_model(self):
InitialAssignment(self.model_parameters, self.show_feedback)
for _ in range(2):
ConsolidateDistricts(self.model_parameters['districts'], self.show_feedback)
PopulationAdjustment(self.model_parameters, self.show_feedback)
for _ in range(25):
ConsolidateDistricts(self.model_parameters['districts'], self.show_feedback)
CompactDistricts(self.model_parameters['districts'], self.show_feedback)
PopulationAdjustment(self.model_parameters, self.show_feedback)
#assignment = KMeansAssignment(self.model_parameters, self.show_feedback)
#self.show_district_feedback()
#for i in range(15):
# CompactDistricts(self.model_parameters, self.show_feedback)
# ConsolidateDistricts(self.model_parameters, self.show_feedback)
#for _ in range(2):
# ConsolidateDistricts(self.model_parameters['districts'], self.show_feedback)
# PopulationAdjustment(self.model_parameters, self.show_feedback)
#self.show_district_feedback()
#neighbor_assignment = NeighborAssignment(self.model_parameters,self.show_feedback)
#self.show_district_feedback()
#PopulationAdjustment(self.model_parameters, self.show_feedback)
#CompactDistricts(self.model_parameters, self.show_feedback)
#ConsolidateDistricts(self.model_parameters, self.show_feedback)
self.show_district_feedback()
#swap_tracts = SwapTracts(self.model_parameters, self.show_feedback)
#self.show_district_feedback()
#PopulationAdjustment(self.model_parameters, self.show_feedback)
#self.show_district_feedback()
'''
iterations = 1
for iteration in range(iterations):
neighbor_assignment = NeighborAssignment(self.model_parameters, self.show_feedback)
self.show_district_feedback()
for i in range(iterations):
swap_tracts = SwapTracts(self.model_parameters, self.show_feedback)
self.show_district_feedback()
population_adjustment = PopulationAdjustment(self.model_parameters, self.show_feedback)
self.show_district_feedback()
#swap_tracts = SwapTracts(self.model_parameters, self.show_feedback)
#self.show_district_feedback()
#population_adjustment = PopulationAdjustment(self.model_parameters, self.show_feedback)
#self.show_district_feedback()
'''
for tract in self.census_tracts:
self.final_tract_assignments[tract.tract_id] = 0 if tract.district is None else tract.district.district_id
#self.final_tract_assignments = {tract.tract_id: tract.district.district_id for tract in self.census_tracts}
def show_district_feedback(self):
for district in self.districts:
self.show_feedback(str(district))
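# One census tract: id, center point, population, neighbor links, and its current district assignment.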
class Tract:
def __init__(self, tract, state_center, show_feedback):
self.tract_id = tract['tract_id']
self.center = tract['center']
self.population = tract['population']
self.neighbors = tract['neighbors']
self.state_center = state_center
self.district = None
self.show_feedback = show_feedback
def update_neighbors(self, census_tract_index):
self.neighbors = [census_tract_index[id] for id in self.neighbors]
def assign(self, district):
self.district = district
def remove(self):
self.district = None
def get_state_distance(self):
return Utilities.get_distance(self.center, self.state_center)
def get_neighboring_districts(self):
return list({neighbor.district for neighbor in self.neighbors if neighbor.district != self.district})
def get_new_assignment(self):
new_assignment = None
distance_ratio = Utilities.get_distance(self.center, self.district.center) / self.district.average_border_tract_distance
for neighbor in self.neighbors:
if neighbor.district != self.district:
neighbor_distance_ratio = Utilities.get_distance(self.center, neighbor.district.center) / neighbor.district.average_border_tract_distance
if neighbor_distance_ratio < distance_ratio:
new_assignment = neighbor.district
distance_ratio = neighbor_distance_ratio
return new_assignment
def __lt__(self, other):
return self.get_state_distance() < other.get_state_distance()
def __repr__(self):
if self.district:
return f'{self.tract_id} => {self.district.district_id}\n'
return f'{self.tract_id} => None\n'
|
from flask.blueprints import Blueprint
import logging
from flask_login import login_required
from flask.templating import render_template
from waitlist.permissions import perm_manager
from waitlist.storage.database import CCVote, Account
from waitlist.base import db
from sqlalchemy.sql.functions import func
bp = Blueprint('settings_ccvote', __name__)
logger = logging.getLogger(__name__)
perm_manager.define_permission('ccvot_result_view')
@bp.route("/")
@login_required
@perm_manager.require('ccvot_result_view')
def index():
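# Tally FC and LM votes per account, then repeat the tallies over distinct (voter, candidate) pairs so repeat votes from the same voter count once.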
fc_results = db.session.query(Account.username, func.count('*')).join(
CCVote, Account.id == CCVote.fcvoteID
).group_by(
Account.username
).order_by(
func.count('*')
).all()
lm_results = db.session.query(Account.username, func.count('*')).join(
CCVote, Account.id == CCVote.lmvoteID
).group_by(
Account.username
).order_by(
func.count('*')
).all()
# unique votes
unique_votes_query_fc = db.session.query(CCVote.voterID.label("voterID"), CCVote.fcvoteID.label("fcvoteID")) \
.distinct().subquery()
unique_votes_query_lm = db.session.query(CCVote.voterID.label("voterID"), CCVote.lmvoteID.label("lmvoteID")) \
.distinct().subquery()
unique_fc_votes = db.session.query(Account.username, func.count('*')) \
.join(unique_votes_query_fc, unique_votes_query_fc.c.fcvoteID == Account.id
).group_by(
Account.username
).order_by(
func.count('*')
).all()
unique_lm_votes = db.session.query(Account.username, func.count('*')) \
.join(unique_votes_query_lm, unique_votes_query_lm.c.lmvoteID == Account.id
).group_by(
Account.username
).order_by(
func.count('*')
).all()
return render_template("settings/ccvotes.html", fcs=fc_results, lms=lm_results, ufcs=unqiue_fc_votes,
ulms=unqiue_lm_votes)
|
person = {
"name":"win.d",
"Play" : "pubg PC",
}
print(person)
person["status"] = "live streaming"
print(person)
person = {
"name" : "frank lampard",
"description" : "legendary Icon",
}
print(person)
person["age"] = "41"
del person["age"]
print(person)
person["status"] = "coaching chealsea"
print(person)
person = {
"name" : "raheem sterling",
"description" : "football player"
}
print(person)
del person["name"]
print(person)
|
from pathlib import Path
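# Helpers for turning FASTA sequences into n-gram "sentences" (one corpus line per frame) and for writing word2vec/fastText-style text vector files.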
def read_fasta(fasta_file):
sequences = []
with Path(fasta_file).open('r') as ff:
sequence = []
for line in ff:
if line.startswith('>'):
if sequence:
sequences.append(''.join(sequence))
sequence = []
else:
sequence.extend(line.split())
if sequence:
sequences.append(''.join(sequence))
return sequences
def get_n_grams(seq, n):
n_grams = [[] for i in range(n)]
for i in range(len(seq) - n + 1):
n_grams[i % n].append(seq[i:i + n])
return n_grams
def make_corpus(fasta_file, corpus_file, n):
with Path(corpus_file).open('w') as cf:
for sequence in read_fasta(fasta_file):
for n_grams in get_n_grams(sequence, n):
cf.write(' '.join(n_grams) + '\n')
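# Example usage of make_corpus (hypothetical file names):
#   make_corpus('proteins.fasta', 'corpus.txt', n=3)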
def possible_ngrams(alphabet, length, char_list=None, char_index=0):
if char_list is None:
char_list = ['.' for i in range(length)]
if char_index >= length:
yield ''.join(char_list)
else:
for char in alphabet:
char_list[char_index] = char
yield from possible_ngrams(alphabet,
length,
char_list,
char_index + 1)
def save_w2v_vectors_file(vectors_file, vocab, vectors):
sorted_words = list(vocab.keys())
sorted_words.sort()
with Path(vectors_file).open('w') as vf:
vf.write('{} {}\n'.format(vectors.shape[0], vectors.shape[1]))
for word in sorted_words:
vector = vectors[vocab[word].index]
vector = ' '.join(repr(val) for val in vector)
vf.write('{} {}\n'.format(word, vector))
def save_ft_vectors_file(vectors_file, vectors, min_gram, max_gram, vec_size):
alphabet = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
sorted_words = [] # sorted by alphabet order and size
for n in range(min_gram, max_gram + 1):
for ngram in possible_ngrams(alphabet, n):
if ngram in vectors:
sorted_words.append(ngram)
with Path(vectors_file).open('w') as vf:
vf.write('{} {}\n'.format(len(sorted_words), vec_size))
for word in sorted_words:
vector = vectors[word]
vector = ' '.join(repr(val) for val in vector)
vf.write('{} {}\n'.format(word, vector))
|
import os
import argparse
import csv
from sys import exit
from datetime import datetime, date
my_path = "C:/Programming/Alpha/CSV/"
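# Summarizes a job-log CSV: pulls the user, job name, and start/end times from the START JOB / END JOB rows, and counts distinct pitches, elevations, and options.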
"""
parser = argparse.ArgumentParser(description='First Test using Command Line Args')
parser.add_argument('-i','--input', help='Input file name',required=True)
parser.add_argument('-o','--output',help='Output file name', required=True)
parser.add_argument('-r','--rlim',help="Max row count for output files",required=True)
args = parser.parse_args()
## show values ##
input_file = args.input
output_file = args.output
row_limit = int(args.rlim)
"""
input_file = "testnew.csv"
userName = ""
jobName = ""
startDate = ""
startTime = ""
endDate = ""
endTime = ""
pitches = []
combos = []
options = []
elevations = []
#startDateTime
#endDateTime
def validate_file():
if not os.path.exists(os.path.join(my_path, input_file)):
exit("Cannot complete task, 'Input File' not found")
def row_data_collector(elev, option):
if (elev, option) == ('',''):
return combos
elif (elev, option) not in combos:
combos.append((elev,option))
return combos
def pitch_counter(pitch, pitches):
if pitch not in pitches:
pitches.append(pitch)
return pitches
def elevoptions(elevopt):
for row in elevopt:
if "Base" in row:
elevations.append(row[0])
elif row not in options:
options.append(row)
validate_file()
with open(os.path.join(my_path, input_file), "r") as my_file:
my_file_reader = csv.reader(my_file)
next(my_file_reader)
for row in my_file_reader:
if row[1] == "START JOB" and row[2] != "":
jobData = row[2].split("_")
jobName = jobData[0]
startDate = jobData[1]
startTime = jobData[2]
userName = jobData[3]
elif row[1] == "END JOB" and row[2] != "":
jobData = row[2].split("_")
endDate = jobData[0]
endTime = jobData[1]
else:
combos = row_data_collector(row[2], row[3])
if 'Shing' in row[1]:
pitches = pitch_counter(row[4], pitches)
elevoptions(combos)
print("User: {}".format(userName))
print("Job Name: {}".format(jobName))
print("Date Completed: {}".format(endDate))
print("Start Time: {}".format(startTime))
print("End Time: {}".format(endTime))
print("Pitches: {}".format(len(pitches)))
print("Elevations: {}".format(len(elevations)))
print("Options: {}".format(len(options)))
print(elevations)
print(options)
#"""
#duration = datetime.combine(date.today(), endTime) - datetime.combine(date.today(), startTime)
#dtStart = datetime.strptime(string_date, fmt)
#dtEnd = datetime.strptime(string_date, fmt)
#duration = datetime.combine(date.today(), endTime) - datetime.combine(date.today(), startTime)
#print(duraton)
#"""
#"""
### Save HEADINGS in Variable --> Also Checks Length of CSV
#headings, num_of_rows = validate_length(row_limit, input_file)
#validate_file(input_file)
#
## Identify number of Output files needed
#number_of_files = 0
#if num_of_rows % row_limit > 0:
# number_of_files = int(num_of_rows / row_limit) + 1
#else:
# number_of_files = int(num_of_rows / row_limit)
#
#
## Add Headings to Lists
#master_list = []
#for l in range(0, number_of_files):
# master_list.append([headings])
#
## Read CSV and Save Data
#def read_CSV():
# with open(os.path.join(my_path, input_file), "r") as my_file:
# my_file_reader = csv.reader(my_file)
# next(my_file_reader)
# count = 0
# for l in range(0, number_of_files):
# for row in my_file_reader:
# count += 1
# if count > row_limit:
# master_list[l+1].append(row)
# count = 1
# break
# else:
# master_list[l].append(row)
#
#
#read_CSV()
#
##Write to CSVs
#for file in range(0, len(master_list)):
# with open(os.path.join(my_path, "output" + str(file + 1) + ".csv"), "w") as my_file:
# my_file_writer = csv.writer(my_file, lineterminator='\n')
# my_file_writer.writerows(master_list[file])
#"""
#
|
from girder.models.setting import Setting
from girder.plugins.imagespace.settings import ImageSpaceSetting
class CmuSetting(ImageSpaceSetting):
requiredSettings = ('IMAGE_SPACE_CMU_PREFIX',
'IMAGE_SPACE_CMU_BACKGROUND_SEARCH',
'IMAGE_SPACE_CMU_FULL_IMAGE_SEARCH')
def validateImageSpaceCmuPrefix(self, doc):
return doc.rstrip('/')
def validateImageSpaceCmuBackgroundSearch(self, doc):
return doc.rstrip('/')
def validateImageSpaceCmuFullImageSearch(self, doc):
return doc.rstrip('/')
|
def decorator_function(any_function):
def wrapper_function():
print('this is awesome function')
any_function()
# return the wrapper itself; calling it here would bind the decorated name to None
return wrapper_function
@decorator_function
def func1():
print('this is function one')
func1()
|
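# Advent of Code 2020, day 16 (Ticket Translation): discard invalid nearby tickets, deduce which column each rule describes, and multiply the 'departure' fields of my ticket.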
from pandas.core.common import flatten
from collections import defaultdict
sections = open('data/16.txt').read().split('\n\n')
all_rules = [list(map(int, i.split(' ')[-4:])) for i in sections[0].replace('or ', '').replace('-', ' ').split('\n')]
rule_names = [i.split(' ')[0] for i in
sections[0].replace('departure ', 'd_').replace('arrival ', 'a_').replace('-', ' ').split('\n')]
my_ticket = list(map(int, sections[1].split('\n')[1].split(',')))
tickets = [list(map(int, i.split(','))) + [True] for i in sections[2].split('\n')[1:]]
rules = {k: [i for i in range(v[0], v[1] + 1)] + [j for j in range(v[2], v[3] + 1)] for k, v in zip(rule_names, all_rules)}
valids = sorted([i for i in range(min(flatten(all_rules)), max(flatten(all_rules)) + 1)])
valid_tickets = list()
for t in tickets:
for n in t[:-1]:
if n not in valids:
t[-1] = False
if t[-1]:
valid_tickets.append(t[:-1])
impossible = defaultdict(list)
possible = defaultdict(list)
definitely = defaultdict(int)
for i in range(len(rules)):
for t in valid_tickets:
for rule, includes in rules.items():
if t[i] not in includes:
impossible[rule] += [i]
for rule, impossibility in impossible.items():
possible[rule] = [i for i in range(len(rules)) if i not in impossibility]
while True:
for r, i in possible.items():
if len(i) == 1:
definitely[r] = i[0]
possible.pop(r)
break
else:
for ind, j in enumerate(i):
if j in definitely.values():
i.pop(ind)
break
if len(definitely) == len(rules) - 1:
break
res = 1
for k, v in definitely.items():
if str(k).startswith('d_'):
res *= my_ticket[v]
print(res)
|
from .set_convolution import SetConvLayer
from data import load_dataset
import torch
import torch.nn as nn
from sklearn.cluster import KMeans
import numpy as np
# binary classifier
class SetConvNetwork(torch.nn.Module):
def __init__(self, cfg, anchor):
super(SetConvNetwork, self).__init__()
self.cfg = cfg
self.anchor = anchor
prev_dim = cfg.MODEL.INPUT_DIM
layers = []
for dim in cfg.MODEL.DOWNSAMPLE_DIM:
layers.append(nn.Linear(prev_dim, dim))
prev_dim = dim
self.downsample = nn.Sequential(*layers)
# create a set convolution layer for each minority class
# tmp = [('class {}'.format(int(c)), SetRelativeConvolution(cfg, prev_dim, self.cfg.MODEL.SETCONV_DIM)) for c in anchors]
# self.set_conv = nn.ModuleDict(dict(tmp))
self.set_conv = SetConvLayer(cfg, prev_dim, self.cfg.MODEL.SETCONV_DIM)
self.relu = nn.ReLU(inplace=True)
self.fc = nn.Linear(self.cfg.MODEL.SETCONV_DIM, self.cfg.MODEL.EMBEDDING_DIM, bias=False)
# initialize the fc layer
nn.init.xavier_uniform_(self.fc.weight)
for layer in self.downsample:
nn.init.kaiming_uniform_(layer.weight)
nn.init.zeros_(layer.bias)
def forward(self, x):
# anchors = {}
# for c in self.anchors:
# anchors[c] = self.relu(self.downsample(self.anchors[c]))
anchor = self.relu(self.downsample(self.anchor))
x = self.downsample(x)
x = self.relu(x)
# outs = []
# for c in anchors:
# out = self.set_conv['class {}'.format(int(c))](x-anchors[c], x)
# outs.append(out)
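# Set convolution over each sample's offset from the anchor, with the raw (down-sampled) samples as the second input.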
out = self.set_conv(x-anchor, x)
# out = torch.cat(outs, dim=1)
out = self.fc(out)
out = self.relu(out)
return out
def build_model(cfg):
minority_classes = cfg.MODEL.MINORITY_CLASS
x, y = load_dataset(cfg)
# anchors = {}
# for c in minority_classes:
# data = x[y==c]
# if len(data) >= cfg.MODEL.K:
# kmeans = KMeans(n_clusters=cfg.MODEL.K)
# else:
# kmeans = KMeans(n_clusters=1)
# kmeans.fit(data)
# centers = kmeans.cluster_centers_
# centers = torch.from_numpy(centers).to(cfg.DEVICE)
# anchors[c] = centers.float()
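# Single anchor: the mean feature vector of the minority-class samples.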
data = x[y==minority_classes]
anchor = torch.from_numpy(np.mean(data, axis=0).reshape(1, -1)).float().to(cfg.DEVICE)
model = SetConvNetwork(cfg, anchor)
return model
|