text stringlengths 8 6.05M |
|---|
__all__ = ["add", "multi"]
print("add module imported") |
__author__ = 'Anton Vakhrushev'
|
"""Plot saliency maps """
import tensorflow as tf
import numpy as np
import keras.backend as K
from keras.layers import Input, Lambda, Conv2D
from keras.applications import inception_v3
from scipy.ndimage import imread, zoom
from scipy.misc import imresize
import matplotlib.pyplot as plt
def get_saliency(image, model):
    """Returns a saliency map with same shape as image. """
    # Force inference mode so dropout/noise layers are inactive.
    K.set_learning_phase(0)
    K._LEARNING_PHASE = tf.constant(0)  # NOTE(review): pokes a private Keras attr
    image = np.expand_dims(image, 0)  # add a batch dimension
    loss = K.variable(0.)
    loss += K.sum(K.square(model.output))
    # Saliency: max over channels of |d(loss)/d(input)|.
    grads = K.abs(K.gradients(loss, model.input)[0])
    saliency = K.max(grads, axis=3)
    fetch_saliency = K.function([model.input], [loss, saliency])
    outputs, saliency = fetch_saliency([image])
    K.set_learning_phase(True)  # restore training-phase flag
    return saliency
def plot_saliency(image, model):
    """Compute the saliency map for *image* and display it above the image."""
    saliency_map = get_saliency(image, model)
    plt.ion()
    fig, (saliency_ax, image_ax) = plt.subplots(2)
    saliency_ax.imshow(np.squeeze(saliency_map), cmap="viridis")
    image_ax.imshow(np.squeeze(image), cmap="gray")
    # Hide tick labels on both panels.
    for axis in (saliency_ax, image_ax):
        axis.set_xticklabels([])
        axis.set_yticklabels([])
    plt.pause(0.01)
    plt.show()
def get_gradcam(image, model, layer_name):
    """Return a Grad-CAM style heatmap for *image* from layer *layer_name*."""
    # remove dropout/noise layers
    K.set_learning_phase(0)
    K._LEARNING_PHASE = tf.constant(0)  # NOTE(review): pokes a private Keras attr
    layer = model.get_layer(layer_name)
    image = np.expand_dims(image, 0)  # add a batch dimension
    loss = K.variable(0.)
    loss += K.sum(model.output)
    # gradients of prediction wrt the conv layer of choice are used
    upstream_grads = K.gradients(loss, layer.output)[0]
    # Average gradients over the spatial dimensions -> one weight per channel.
    feature_weights = K.mean(upstream_grads, axis=[1, 2])
    # Weighted combination of feature maps, clipped to positive contributions.
    heatmap = K.relu(K.dot(layer.output, K.transpose(feature_weights)))
    fetch_heatmap = K.function([model.input], [heatmap])
    return fetch_heatmap([image])[0]
def transparent_cmap(cmap, alpha, N=255):
    """Give *cmap* a linearly increasing alpha channel.

    NOTE: modifies the colormap in place (matplotlib shares registered
    colormap instances, so pass a copy if the original must be preserved).

    :param cmap: matplotlib colormap to modify
    :param alpha: maximum opacity at the top of the ramp (0..1)
    :param N: nominal number of colour entries; the lookup table has N+4 rows
    :return: the modified colormap
    """
    mycmap = cmap
    mycmap._init()  # force creation of the lookup table (_lut)
    # Ramp alpha from fully transparent to `alpha`.  The original ignored the
    # `alpha` argument and hard-coded 0.8.
    mycmap._lut[:, -1] = np.linspace(0, alpha, N + 4)
    return mycmap
def plot_heatmap(image, model, layer_name, ax, cmap=plt.cm.jet, alpha=0.6):
    """Overlay the Grad-CAM heatmap of *layer_name* on *image* in axis *ax*."""
    heat_cmap = transparent_cmap(cmap, alpha)
    heatmap = get_gradcam(image, model, layer_name)
    image = np.squeeze(image)
    heatmap = np.squeeze(heatmap)
    # NOTE(review): scipy.misc.imresize was removed in SciPy >= 1.3 —
    # confirm the pinned SciPy version or migrate to scipy.ndimage.zoom.
    heatmap = imresize(heatmap, image.shape)
    if ax is None:
        fig, ax = plt.subplots(1, 1)
    ax.imshow(image, cmap="gray")
    ax.imshow(np.squeeze(heatmap), cmap=heat_cmap)  # heatmap already squeezed above
    ax.set_xticklabels([]); ax.set_yticklabels([])
def plot_network(images, model, labels=None):
    """Plot the Grad-CAM heatmap of every Conv2D layer for each image.

    :param images: array of images, first axis indexes images
    :param model: Keras model whose Conv2D layers are visualised
    :param labels: optional per-image labels used as row titles
    """
    layer_names = [l.name for l in model.layers if isinstance(l, Conv2D)]
    n_conv = len(layer_names)
    n_images = images.shape[0]
    fig, axlist = plt.subplots(n_images, n_conv)
    for i in range(n_images):
        for j in range(n_conv):
            plot_heatmap(images[i], model, layer_names[j], axlist[i][j])
        if labels is not None:
            axlist[i][0].set_ylabel(str(labels[i]))
    # Label the columns once, on the bottom row (the original re-set these
    # labels on every image row and carried an unused `diagnosis` list).
    for j, name in enumerate(layer_names):
        axlist[-1][j].set_xlabel(name)
    fig.show()
|
# Generated by Django 3.2.7 on 2021-09-03 16:28
from django.db import migrations
class Migration(migrations.Migration):
    """Rename the `cat` field of the Photo model to `board`."""

    # Must run after the migration that created the Photo model.
    dependencies = [
        ('main_app', '0004_photo'),
    ]

    operations = [
        migrations.RenameField(
            model_name='photo',
            old_name='cat',
            new_name='board',
        ),
    ]
|
def calc(num1, num2):
    """This function adds two numbers"""
    total = num1 + num2
    print("the total is", total)
def _read_int(prompt):
    """Prompt until the user types a valid integer, then return it."""
    while True:
        try:
            print(prompt)
            return int(input())
        except Exception as e:
            print("Enter correct value")
            print(e)


# The original validated only the first number (with an unreachable
# `continue` after `break`); the second input crashed on bad values.
x = _read_int("Enter first number\n")
y = _read_int("Enter second number\n")
print(calc.__doc__)
calc(x, y)
from django.conf.urls import url
from django.contrib import admin
from .views import (
addStockCode,
showStockInfo,
)
# URL routes for the stock views (legacy Django `url()` regex patterns).
urlpatterns = [
    url(r'^addStockCode/$', addStockCode, name='addStockCode'),
    url(r'^showStockInfo/$', showStockInfo, name='showStockInfo'),
]
|
class Solution(object):
    def countAndSay(self, n):
        """Return the n-th term of the count-and-say sequence.

        :type n: int
        :rtype: str
        """
        import itertools

        def read(digits):
            # Run-length encode: each run of equal digits becomes
            # "<count><digit>".  (The original used the Python-2 statement
            # `print s`, a SyntaxError on Python 3, and an unused `cnt`.)
            return "".join(
                str(len(list(group))) + digit
                for digit, group in itertools.groupby(str(digits))
            )

        out = "1"
        for _ in range(1, n):
            out = read(out)
        return out
|
import math


class Solution:
    def checkPerfectNumber(self, num):
        """Return True if *num* equals the sum of its proper divisors.

        :type num: int
        :rtype: bool
        """
        if num <= 1:
            return False
        root = int(math.sqrt(num))
        divisors = [1]
        i = 2
        while i <= root:
            if num % i == 0:
                divisors.append(i)
                # Don't count the square root twice when num is a perfect
                # square (the original appended num//i unconditionally).
                if i != num // i:
                    divisors.append(num // i)
            i += 1
        return sum(divisors) == num


if __name__ == '__main__':
    print(Solution().checkPerfectNumber(28))
# Read n and print the sum 1 + 2 + ... + n.
n = int(input('Enter the value of n'))
# `sum(range(...))` replaces the manual loop, which also shadowed the
# builtin `sum` with its accumulator variable.
total = sum(range(1, n + 1))
print('Sum=', total)
from ActionsAnalysis import *
import numpy as np
import time

# Sweep experiment files and epoch counts, computing bag-of-words features
# for several k values and saving them to disk.  (Converted from Python-2
# print statements; dead commented-out fallbacks removed.)
for fileName in ['new/dummy_eliiradariamoshelenaido_pu1.0_single_100_norm']:
    for epochs in ['100', '200']:
        aa = ActionsAnalysis(expDir=fileName)
        print('loading')
        aa.loadData(epochs)
        print('bag of words')
        for kValue in [100, 200, 300, 400, 500]:
            aa.computeBagOfWords(doKmeans=True, k=kValue, nActions=1)
            np.save('/home/mzanotto/renvision/experiments/P29_01_04_16/' + fileName + '/bow' + str(kValue) + '_' + epochs + '.npy', aa.bow)
            np.save('/home/mzanotto/renvision/experiments/P29_01_04_16/' + fileName + '/labels_' + epochs + '.npy', aa.stimuliOfInterestAll)
        print('break')
# -*- coding: utf-8 -*-
# Author: kelvinBen
# Github: https://github.com/kelvinBen/HistoricalArticlesToPdf |
# -*- coding: utf-8 -*-
"""This is the entry point of the program."""
def detect_language(text, languages):
    """Returns the detected language of given text.

    Each entry of *languages* is a dict with 'name' and 'common_words';
    the language whose common words occur most often (as substrings of
    *text*) wins.  Returns None when nothing matches.
    """
    best_count = 0
    best_name = None
    for candidate in languages:
        hits = sum(1 for word in candidate['common_words'] if word in text)
        if hits > best_count:
            best_count = hits
            best_name = candidate['name']
    return best_name
|
import numpy as np
from GeoToolkit.Mag import Simulator, DataIO, MathUtils, Mag
# from SimPEG import PF, Utils, Mesh, Maps
import ipywidgets as widgets
from matplotlib.patches import Rectangle
import matplotlib.pyplot as plt
from scipy.interpolate import griddata, interp1d
from scipy.interpolate import NearestNDInterpolator, LinearNDInterpolator
# from SimPEG.Utils import mkvc, ndgrid, uniqueRows
def blockModel():
    """Return the synthetic block model definition.

    Each parameter row is
    [xCenter, yCenter, zCenter, Width, Height, Depth, rotationX, rotationZ];
    the susceptibility list holds one value per block (extra entries are
    ignored when zipped against the blocks).
    """
    block_params = [
        [2000, 500, -100, 5000, 4000, 1000, 60, 0],
        [-500, 0, -100, 300, 300, 300, -30, 0],
        [400, 100, -100, 4000, 100, 1000, 55, 10],
    ]
    block_susc = [0.075, 0.1, -0.05, 0.005]
    return block_params, block_susc
def setSyntheticProblem(
    rxLocs, EarthField=[50000, 90, 0], discretize=False, topo=None
):
    """
    Set the synthetic problem with multiple blocks.
    Output the figure used in the doc

    :param rxLocs: n x 3 receiver locations (shifted in place to the origin)
    :param EarthField: inducing field [amplitude, inclination, declination]
    :param discretize: if True, also build a TensorMesh and block model
    :param topo: accepted for compatibility — the __main__ driver calls this
        function with a `topo=` keyword; currently unused here
    :return: (survey, mesh, model); mesh and model are empty lists unless
        discretize is True
    """
    if discretize:
        # Mesh discretization for plotting
        hx = [(10, 320)]
        hy = [(10, 320)]
        hz = [(10, 120)]
        x0 = np.min(rxLocs, axis=0)
        x0[2] -= 1000
        # Create a mesh
        mesh = Mesh.TensorMesh([hx, hy, hz], x0=x0)
        model = np.zeros(mesh.nC)
    else:
        mesh = []
        model = []
    # Shift everything centered at origin (mutates the caller's array)
    cntr = np.mean(rxLocs, axis=0)
    rxLocs -= np.kron(np.ones((rxLocs.shape[0], 1)), cntr)
    # Create survey
    survey = Mag.createMagSurvey(rxLocs, EarthField=EarthField)
    cntr = np.mean(rxLocs, axis=0)
    # Cycle through the parameters, create blocks for forward and
    # discretize on to the mesh
    prisms = []
    # User defined parameters for the blocks
    params, suscs = blockModel()
    # Create the synthetic blocks model and place
    # it at the center of the survey
    for param, susc in zip(params, suscs):
        prism = Simulator.definePrism()
        prism.x0, prism.y0, prism.z0 = cntr[0] + param[0], cntr[1] + param[1], rxLocs[:, 2].min() + param[2]
        prism.dx, prism.dy, prism.dz = param[3], param[4], param[5]
        prism.pdec, prism.pinc = param[6], param[7]
        prisms.append(prism)
        # Forward model data and accumulate it into the survey observations
        prob = Mag.Problem(prism=prism, survey=survey)
        prob.susc = susc
        survey._dobs = survey.dobs + prob.fields()[0]
        if discretize:
            # Discretize onto mesh
            X, Y, Z = np.meshgrid(prism.xn, prism.yn, prism.zn)
            pts = np.c_[Utils.mkvc(X), Utils.mkvc(Y), Utils.mkvc(Z)]
            xyz = MathUtils.rotate(
                pts, np.r_[prism.xc, prism.yc, prism.zc],
                prism.pinc, prism.pdec
            )
            ind = Utils.ModelBuilder.PolygonInd(mesh, xyz)
            model[ind] += susc
    return survey, mesh, model
def meshBuilder(xyz, h, padDist, meshGlobal=None,
                expFact=1.3,
                meshType='TENSOR',
                verticalAlignment='top'):
    """
    Function to quickly generate a Tensor mesh
    given a cloud of xyz points, finest core cell size
    and padding distance.
    If a meshGlobal is provided, the core cells will be centered
    on the underlaying mesh to reduce interpolation errors.
    :param numpy.ndarray xyz: n x 3 array of locations [x, y, z]
    :param numpy.ndarray h: 1 x 3 cell size for the core mesh
    :param numpy.ndarray padDist: 2 x 3 padding distances [W,E,S,N,Down,Up]
    [OPTIONAL]
    :param numpy.ndarray padCore: Number of core cells around the xyz locs
    :object SimPEG.Mesh: Base mesh used to shift the new mesh for overlap
    :param float expFact: Expension factor for padding cells [1.3]
    :param string meshType: Specify output mesh type: "TensorMesh"
    RETURNS:
    :object SimPEG.Mesh: Mesh object
    """
    assert meshType in ['TENSOR', 'TREE'], ('Revise meshType. Only ' +
                                            ' TENSOR | TREE mesh ' +
                                            'are implemented')
    # Get extent of points
    limx = np.r_[xyz[:, 0].max(), xyz[:, 0].min()]
    limy = np.r_[xyz[:, 1].max(), xyz[:, 1].min()]
    limz = np.r_[xyz[:, 2].max(), xyz[:, 2].min()]
    # Get center of the mesh
    midX = np.mean(limx)
    midY = np.mean(limy)
    midZ = np.mean(limz)
    # NOTE(review): on Python 3 these are floats (`int(...) / h` is true
    # division); downstream uses expect cell *counts* — confirm intent.
    nCx = int(limx[0]-limx[1]) / h[0]
    nCy = int(limy[0]-limy[1]) / h[1]
    nCz = int(limz[0]-limz[1]+int(np.min(np.r_[nCx, nCy])/3)) / h[2]
    if meshType == 'TENSOR':
        # Make sure the core has odd number of cells for centereing
        # on global mesh
        if meshGlobal is not None:
            nCx += 1 - int(nCx % 2)
            nCy += 1 - int(nCy % 2)
            nCz += 1 - int(nCz % 2)

        # Figure out paddings
        def expand(dx, pad):
            # Number of expanding cells (factor expFact) needed to cover `pad`.
            L = 0
            nC = 0
            while L < pad:
                nC += 1
                L = np.sum(dx * expFact**(np.asarray(range(nC))+1))
            return nC

        # Figure number of padding cells required to fill the space
        npadEast = expand(h[0], padDist[0, 0])
        npadWest = expand(h[0], padDist[0, 1])
        npadSouth = expand(h[1], padDist[1, 0])
        npadNorth = expand(h[1], padDist[1, 1])
        npadDown = expand(h[2], padDist[2, 0])
        npadUp = expand(h[2], padDist[2, 1])
        # Create discretization: padding | core | padding on each axis
        hx = [(h[0], npadWest, -expFact),
              (h[0], nCx),
              (h[0], npadEast, expFact)]
        hy = [(h[1], npadSouth, -expFact),
              (h[1], nCy), (h[1],
              npadNorth, expFact)]
        hz = [(h[2], npadDown, -expFact),
              (h[2], nCz),
              (h[2], npadUp, expFact)]
        # Create mesh
        mesh = Mesh.TensorMesh([hx, hy, hz], 'CC0')
        # Re-set the mesh at the center of input locations
        # Set origin
        if verticalAlignment == 'center':
            mesh.x0 = [midX-np.sum(mesh.hx)/2., midY-np.sum(mesh.hy)/2., midZ-np.sum(mesh.hz)/2.]
        elif verticalAlignment == 'top':
            mesh.x0 = [midX-np.sum(mesh.hx)/2., midY-np.sum(mesh.hy)/2., limz[0]-np.sum(mesh.hz)]
        else:
            # NOTE(review): `assert <exception instance>` is always truthy, so
            # this never fails — should be `raise NotImplementedError(...)`.
            assert NotImplementedError("verticalAlignment must be 'center' | 'top'")
    elif meshType == 'TREE':
        # Figure out full extent required from input
        extent = np.max(np.r_[nCx * h[0] + padDist[0, :].sum(),
                              nCy * h[1] + padDist[1, :].sum(),
                              nCz * h[2] + padDist[2, :].sum()])
        maxLevel = int(np.log2(extent/h[0]))+1
        # Number of cells at the small octree level
        # For now equal in 3D
        nCx, nCy, nCz = 2**(maxLevel), 2**(maxLevel), 2**(maxLevel)
        # nCy = 2**(int(np.log2(extent/h[1]))+1)
        # nCz = 2**(int(np.log2(extent/h[2]))+1)
        # Define the mesh and origin
        # For now cubic cells
        mesh = Mesh.TreeMesh([np.ones(nCx)*h[0],
                              np.ones(nCx)*h[1],
                              np.ones(nCx)*h[2]])
        # Set origin
        if verticalAlignment == 'center':
            mesh.x0 = np.r_[-nCx*h[0]/2.+midX, -nCy*h[1]/2.+midY, -nCz*h[2]/2.+midZ]
        elif verticalAlignment == 'top':
            mesh.x0 = np.r_[-nCx*h[0]/2.+midX, -nCy*h[1]/2.+midY, -(nCz-1)*h[2] + limz.max()]
        else:
            # NOTE(review): same no-op assert as above — should raise.
            assert NotImplementedError("verticalAlignment must be 'center' | 'top'")
    return mesh
def refineTree(mesh, xyz, finalize=False, dtype="point", nCpad=[1, 1, 1]):
    """Refine a TreeMesh around points or a surface defined by *xyz*.

    NOTE(review): `mkvc` is used below but its import (SimPEG.Utils) is
    commented out at the top of the file — this raises NameError as-is.
    """
    maxLevel = int(np.log2(mesh.hx.shape[0]))
    if dtype == "point":
        mesh.insert_cells(xyz, np.ones(xyz.shape[0])*maxLevel, finalize=False)
        # Refinement levels fanning out from each point.
        stencil = np.r_[
            np.ones(nCpad[0]),
            np.ones(nCpad[1])*2,
            np.ones(nCpad[2])*3
        ]
        # Reflect in the opposite direction
        vec = np.r_[stencil[::-1], 1, stencil]
        vecX, vecY, vecZ = np.meshgrid(vec, vec, vec)
        gridLevel = np.maximum(np.maximum(vecX,
                                          vecY), vecZ)
        gridLevel = np.kron(np.ones((xyz.shape[0], 1)), gridLevel)
        # Grid the coordinates
        vec = np.r_[-stencil[::-1], 0, stencil]
        vecX, vecY, vecZ = np.meshgrid(vec, vec, vec)
        offset = np.c_[
            mkvc(np.sign(vecX)*2**np.abs(vecX) * mesh.hx.min()),
            mkvc(np.sign(vecY)*2**np.abs(vecY) * mesh.hx.min()),
            mkvc(np.sign(vecZ)*2**np.abs(vecZ) * mesh.hx.min())
        ]
        # Replicate the point locations in each offseted grid points
        newLoc = (
            np.kron(xyz, np.ones((offset.shape[0], 1))) +
            np.kron(np.ones((xyz.shape[0], 1)), offset)
        )
        mesh.insert_cells(
            newLoc, maxLevel-mkvc(gridLevel)+1, finalize=finalize
        )
    elif dtype == 'surface':
        # Get extent of points
        limx = np.r_[xyz[:, 0].max(), xyz[:, 0].min()]
        limy = np.r_[xyz[:, 1].max(), xyz[:, 1].min()]
        # Nearest-neighbour interpolant of the surface elevation.
        F = NearestNDInterpolator(xyz[:, :2], xyz[:, 2])
        zOffset = 0
        # Cycle through the first 3 octree levels
        for ii in range(3):
            dx = mesh.hx.min()*2**ii
            # Horizontal offset
            xyOff = dx * 2
            # NOTE(review): Python-3 true division makes these floats; confirm
            # np.linspace accepts them in the pinned numpy version.
            nCx = int(limx[0]-limx[1] + 2 * xyOff) / dx
            nCy = int(limy[0]-limy[1] + 2 * xyOff) / dx
            # Create a grid at the octree level in xy
            CCx, CCy = np.meshgrid(
                np.linspace(limx[1]-xyOff, limx[0]+xyOff, nCx),
                np.linspace(limy[1]-xyOff, limy[0]+xyOff, nCy)
            )
            z = F(mkvc(CCx), mkvc(CCy))
            for level in range(int(nCpad[ii])):
                mesh.insert_cells(
                    np.c_[mkvc(CCx), mkvc(CCy), z-zOffset], np.ones_like(z)*maxLevel-ii,
                    finalize=False
                )
                zOffset += dx
        if finalize:
            mesh.finalize()
    else:
        # NOTE(review): the exception is constructed but never raised, and the
        # message says 'points' while the accepted value is "point".
        NotImplementedError("Only dtype='points' has been implemented")
    return mesh
if __name__ == '__main__':
    # Load data and topo and build default model
    assetDir = './../../docs/Notebooks/assets/TKC/'  # NOTE(review): unused — file paths below are relative
    survey = Mag.readMagneticsObservations('DIGHEM_Mag_floor10nt_25m.obs')
    topo = np.genfromtxt('TKCtopoDwnS.dat', skip_header=1)
    locs = survey.rxLoc
    # Build the problem
    # NOTE(review): setSyntheticProblem's signature has no `topo` keyword —
    # this call raises TypeError as written.
    survey, mesh, model = setSyntheticProblem(locs, topo=topo, discretize=True)
    # NOTE(review): `cntr` is undefined in this scope (it is local to
    # setSyntheticProblem) — NameError here.
    if topo is not None:
        topo -= np.kron(np.ones((topo.shape[0], 1)), cntr)
    if topo is not None:
        actv = Utils.modelutils.surface2ind_topo(mesh, topo)
    else:
        actv = np.ones(mesh.nC, dtype='bool')
    model = model[actv]
    actvMap = Maps.InjectActiveCells(mesh, actv, np.nan)
    # All ploting functions
    fig = plt.figure(figsize=(10, 6))
    axs = plt.subplot(1, 2, 1)
    indy = int(mesh.vnC[1]/2)-18
    indz = -32
    # Plot horizontal section
    im = mesh.plotSlice(
        actvMap*model, normal='Z', ax=axs,
        ind=indz, clim=[0.0, 0.1], pcolorOpts={'cmap': 'jet'}
    )
    # NOTE(review): `rxLocs` is undefined here — presumably `locs` was meant.
    a = np.r_[rxLocs[:, 0].min(), mesh.vectorCCy[indy]]
    b = np.r_[rxLocs[:, 0].max(), mesh.vectorCCy[indy]]
    plt.scatter(rxLocs[:, 0], rxLocs[:, 1], 10, c='k', marker='.')
    plt.plot(np.r_[a[0], b[0]], np.r_[a[1], b[1]], 'r--')
    axs.set_title(
        'Plan view'
    )
    axs.set_xlabel('Easting (m)')
    axs.set_ylabel('Northing (m)')
    axs.set_aspect('equal')
    axs.set_xlim(rxLocs[:, 0].min()-100, rxLocs[:, 0].max()+100)
    axs.set_ylim(rxLocs[:, 1].min()-100, rxLocs[:, 1].max()+100)
    # Plot vertical section
    axs = plt.subplot(1, 2, 2)
    indy = int(mesh.vnC[1]/2)-18
    im = mesh.plotSlice(
        actvMap*model, normal='Y', ax=axs,
        ind=indy, clim=[0.0, 0.1], pcolorOpts={'cmap': 'jet'}
    )
    cbar = plt.colorbar(im[0], orientation='horizontal')
    cbar.set_label('SI')
    # Overlay the observed profile along the a-b line
    Simulator.plotProfile2D(
        rxLocs[:, 0], rxLocs[:, 1], rxLocs[:, -1], a, b, 10, ax=axs,
        coordinate_system='xProfile', ylabel='k:'
    )
    if topo is not None:
        Simulator.plotProfile2D(
            topo[:, 0], topo[:, 1], topo[:, -1], a, b, 10, ax=axs,
            plotStr=['k-'],
            coordinate_system='xProfile', ylabel=''
        )
    axs.set_title(
        'EW Section'
    )
    axs.set_ylim(-1000, 100)
    axs.set_aspect('equal')
    axs.set_xlabel('Easting (m)')
    axs.set_ylabel('Depth (m)')
    axs.yaxis.set_label_position("right")
    fig.savefig('./images/SyntheticModel.png', bbox_inches='tight')
    plt.close()
    # Save contours to shapefile
    zSlice = (actvMap*model).reshape(mesh.vnC, order='F')[:, :, indz]
    contours = plt.contour(mesh.vectorCCx, mesh.vectorCCy, zSlice.T, [0.03, 0.075, 0.09]).allsegs[0]
    DataIO.exportShapefile(
        contours, [1]*len(contours),
        saveAs='Synthetic_Zcontours',
        directory="./assets/Synthetic")
|
# %%
import numpy as np
import csv
import time
import threading
from keras import models, backend
from keras.utils import to_categorical
test_data = []    # kept for backward compatibility; evaluate() no longer uses them
test_labels = []


def evaluate(model='123.h5', valve=15000):
    """Evaluate a saved Keras model on test_data.csv / test_label.csv.

    :param model: path of the saved Keras model file
    :param valve: minimum peak amplitude for a positive prediction to count
    Prints accuracy, sensitivity, specificity and the raw predictions.
    """
    network = models.load_model(model)
    # Use local lists: the original appended to the module-level lists, so a
    # second call would double-count the whole dataset.
    rows = []
    labels = []
    with open('test_data.csv', 'r', newline='') as csvfile:
        for row in csv.reader(csvfile):
            rows.append(row)
    with open('test_label.csv', 'r', newline='') as csvfile:
        for row in csv.reader(csvfile):
            labels.append(row[0])
    tst_data = np.expand_dims(np.array(rows, dtype='float32'), axis=2)
    tst_labels = to_categorical(labels)
    pos_acc = 0
    neg_acc = 0
    pos_count = 0
    neg_count = 0
    predictions = network.predict_classes(tst_data)
    for i in range(len(predictions)):
        if int(tst_labels[i][1]) == 1:
            # A positive counts only if predicted positive AND loud enough.
            pos_acc += 1 * (predictions[i] == 1 and np.max(tst_data[i]) > valve)
            pos_count += 1
        if int(tst_labels[i][1]) == 0:
            neg_acc += 1 * (predictions[i] == 0)
            neg_count += 1
    TP = pos_acc
    TN = neg_acc
    FP = neg_count - neg_acc
    FN = pos_count - pos_acc
    sensitivity = TP/(TP+FN)
    specificity = TN/(FP+TN)
    print('Accuracy : %.2f' % ((pos_acc+neg_acc)/len(tst_labels)*100), '%')
    print("Sensitivity: %2f" % (sensitivity*100), '%',
          '\nSpecificity: %2f' % (specificity*100), '%')
    print(predictions)


# %%
if __name__ == '__main__':
    # Guarded so importing this module no longer loads the model and files.
    evaluate('9594_9729.h5', 1000)
# %%
# %%
|
#!/usr/bin/env python3
# coding: utf-8
# pylint: disable=C0103,C0111,R0201,E1101
# Romain Vincent, GetWatuAsk
"""Routes for the app."""
import datetime as dt
import smtplib
from flask import Flask, redirect, url_for, request, render_template, session
import data_query as db
# Flask application object; templates reload automatically during development.
app = Flask(__name__, static_url_path='/static')
# NOTE(review): hard-coded secret key in source — move to an environment
# variable or config file before deployment.
app.secret_key = 'Ts2V+eDAwCK/gZLoe+KhyUjgpnBrHE3yumYuuRG59Q4='
app.config['TEMPLATES_AUTO_RELOAD'] = True
@app.route('/')
def index():
    """Redirect the bare root URL to the login page."""
    return redirect(url_for('login'), code=302)
@app.route('/login', methods=['GET', 'POST'])
def login():
    """Render the login form (GET) or authenticate the user (POST)."""
    if request.method == 'GET':
        return render_template('login.html'), 200
    # Method POST
    user_email = request.form.get('inputMail')
    password = request.form.get('inputPassword')
    if not (user_email and password):
        return "Unprocessable Entity", 422
    user = db.login(user_email)
    # NOTE(review): passwords are compared in plain text — store salted
    # hashes and use a constant-time comparison instead.
    if user is None or not (user['mail'] == user_email and
                            user['password'] == password):
        msg = "Invalid email or password"
        return render_template('403.html', msg=msg, route='login'), 403
    # Connected, redirect to index
    session['user'] = user
    return redirect(url_for('get_needs', user_id=user['user_id']), code=302)
@app.route('/logout')
def logout():
    """Drop the user from the session and return to the login page."""
    session.pop('user', None)
    return redirect(url_for('login'), code=302)
@app.route('/needs', methods=['GET', 'POST'])
def get_needs():
    """List the logged-in user's needs, filtered by the POSTed form fields."""
    user_id = session['user']['user_id']  # KeyError (500) if not logged in
    user = db.get_user_by_id(user_id)
    if user is None:
        msg = "Couldn't get user N°{}".format(user_id)
        return render_template('403.html', msg=msg, route='get_needs'), 403
    # Collect filter arguments from the (possibly empty) form.
    args = {'states': list()}
    args['states'].append(request.form.get('open'))
    args['states'].append(request.form.get('win'))
    args['states'].append(request.form.get('lost'))
    args['min_date'] = request.form.get('min_date')
    args['max_date'] = request.form.get('max_date')
    args['client_name'] = request.form.get('client_name')
    args['title'] = request.form.get('title')
    needs = db.get_needs_from_user(user_id, args)
    if needs is None:
        msg = "Couldn't get needs for user N°{}".format(user_id)
        return render_template('403.html', msg=msg, route='get_needs'), 403
    for need in needs:
        # NOTE(review): this is deadline minus creation date, not days
        # remaining from today — confirm the intended meaning.
        need['remaining'] = (need['latest_date'] - need['creation_date']).days
    return render_template('need-list.html', user=user, needs=needs, clients=db.get_clients(), total=len(needs)), 200
@app.route('/client/<client_id>/<need_id>/<qr_code_salt>')
def client_need(client_id, need_id, qr_code_salt):
    """Client-facing dashboard for one need, reached via a QR-code link."""
    ok = qr_code_salt is not None  # We should check the code
    # NOTE(review): `ok` is never used — the QR code salt is not verified.
    client = db.get_client_by_id(client_id)
    needs = db.get_needs_id(client_id)
    need = db.get_need_by_id(need_id)
    return render_template('client-dashboard.html', client=client, need=need,
                           need_ids=needs), 200
@app.route('/needs/view/<need_id>')
def view_need(need_id):
    """Read-only view of a single need."""
    need = db.get_need_by_id(need_id)
    if need is None:
        msg = "Couldn't get need N°{}".format(need_id)
        return render_template('403.html', msg=msg, route='get_needs'), 403
    return render_template('view.html', need=need), 200
@app.route('/needs/edit/<need_id>', methods=['GET', 'POST'])
def edit_need(need_id=None):
    """Show the edit form for a need (GET) or persist the changes (POST)."""
    if request.method == 'GET':
        # Reload from session or get directly from database
        if need_id is not None:
            need = db.get_need_by_id(need_id)
            session['need'] = need
        elif session['need'] is not None:
            need = session['need']
        else:
            return "unprocessable entity", 422
        if need is None:
            msg = "Couldn't get need N°" + need_id
            return render_template('403.html', msg=msg, route='edit_need'), 403
        user = session['user']
        client = db.get_client_by_id(need['client_id'])
        return render_template('edit-need.html', need=need, user=user, client=client), 200
    elif request.method == 'POST':
        # Pull the edited fields from the form and update the record.
        description = request.form.get('description')
        consultant_name = request.form.get('consultant_name')
        keys = request.form.get('keys')
        latest_date = request.form.get('dueDate')
        month = request.form.get('month')
        day = request.form.get('day')
        price_ht = request.form.get('price_ht')
        status_id = request.form.get('selectStatus')
        need = db.get_need_by_id(need_id)
        print("EDIT-NEED before edit : ", need)
        db.update_need(need_id, description, latest_date, month,
                       day, price_ht, consultant_name, status_id, keys)
        return redirect(url_for('get_needs'))
    else:
        return "Not implemented yet", 404
def camelcasify(string):
    """Join space-separated words into camelCase (first word untouched)."""
    pieces = string.split(' ')
    camel = pieces[:1] + [piece.title() for piece in pieces[1:]]
    return "".join(camel)
@app.route('/needs/new', methods=['GET', 'POST'])
def new_need():
    """Show the creation form for a need (GET) or insert it (POST)."""
    if request.method == 'GET':
        params = {'consultant_name': request.args.get('consultant_name'),
                  'user_id': session['user']['user_id'],
                  'today': dt.date.today()}
        clients = db.get_clients()
        params['clients'] = clients
        user = session['user']
        return render_template('new-need.html', params=params, user=user), 200
    # Method POST:
    new_need = request.form.to_dict()
    if 'title' in new_need:
        # Normalise the title to camelCase before storing it.
        new_need['title'] = camelcasify(new_need['title'])
    session['user']['last_insert_need_id'] = db.insert_need(new_need)
    # send_mail()
    return redirect(url_for('get_needs'), code=302)
@app.route('/needs/delete/<need_id>', methods=['GET', 'POST'])
def delete_need(need_id):
    """Delete a need; only POST actually performs the deletion."""
    if request.method == 'POST':
        db.delete_need(need_id)
        return redirect(url_for('get_needs'))
    return "Not implemented yet", 404
def send_mail():
    """Send a notification mail via Gmail when a need is created."""
    print("PASSE PAR LA")
    server = smtplib.SMTP('smtp.gmail.com', 587)
    server.starttls()
    # NOTE(review): credentials are hard-coded in source — move them to
    # configuration and rotate this password.
    server.login("workshop.epsi.btrois@gmail.com", "Azqswx21!")
    msg = "Creation d'un nouveau besoin à votre nom. Vous pouvez le retrouver dans la liste de vos besoins."
    try:
        server.sendmail("workshop.epsi.btrois@gmail.com",
                        "workshop.epsi.btrois@gmail.com", msg)
    except:  # NOTE(review): bare except hides the real SMTP error
        print("Impossible d'envoyer le mail!")
    server.quit()
if __name__ == '__main__':
    # Development server only; use a WSGI server in production.
    app.run(debug=True)
|
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2020 Kari Kujansuu
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
from collections import defaultdict
from enum import Enum, auto
import os
import traceback
# from dataclasses import dataclass
from gi.repository import Gtk, Gdk
from gramps.gen.const import GRAMPS_LOCALE as glocale
from gramps.gen.db import DbTxn
from gramps.gen.db import DbReadBase
from gramps.gen.dbstate import DbState
from gramps.gen.display.name import displayer as name_displayer
from gramps.gen.errors import WindowActiveError
from gramps.gen.lib import Name
from gramps.gen.lib import Person
from gramps.gen.user import User
from gramps.gui.dbguielement import DbGUIElement
from gramps.gui.glade import Glade
from gramps.gui.dialog import OkDialog
from gramps.gui.displaystate import DisplayState
from gramps.gui.editors import EditPerson
from gramps.gui.managedwindow import ManagedWindow
from gramps.gui.plug import tool
try:
_trans = glocale.get_addon_translator(__file__)
except ValueError:
_trans = glocale.translation
_ = _trans.gettext
# Debug switches for this tool.
do_logging = False
do_trace = True
tracefile = __file__ + ".trace.txt"

# Identifiers of widgets looked up in the Glade file.
MAIN = "main"
PLIST = "plist"
SLIST = "slist"
TREEVIEW = "treeview"
SEARCHTEXT = "searchtext"
FINDBUTTON = "find-button"
MERGEBUTTON = "merge-button"
RESETBUTTON = "reset-button"
OPENBUTTON = "open-button"
CLOSEBUTTON = "close-button"


class Nametype(Enum):
    # Which kind of name the tool is currently browsing.
    FIRSTNAME = auto()
    PATRONYME = auto()
    SURNAME = auto()


# Type aliases used in the `# type:` comments below; harmless if typing
# is unavailable.
try:
    from typing import List, Tuple, Optional, Any, Callable, Union
    Pinfo = List[Tuple[str, str, str]]  # pname,handle,grampsid
    Nameinfo = List[Tuple[Tuple[str, int], Pinfo]]  # (name,gender),[plinfo...]
except:
    pass
# @dataclass
class Row:
    """One row of the name table: a name plus the people bearing it."""

    def __init__(self, name, gender, count, rownum, deleted, plist):
        # type: (Row, str, str, int, int, bool, Pinfo) -> None
        self.name = name        # the name string for this row
        self.gender = gender    # gender display string
        self.count = count      # number of occurrences of this name
        self.rownum = rownum    # position of the row in the model
        self.deleted = deleted  # row has been removed/merged
        self.plist = plist      # (pname, handle, gramps_id) tuples

    def __repr__(self):
        # type: () -> str
        # Added for debuggability; plist omitted because it can be long.
        return "Row(name={!r}, gender={!r}, count={!r}, rownum={!r}, deleted={!r})".format(
            self.name, self.gender, self.count, self.rownum, self.deleted)
def gender_string_to_code(gender_string):
    # type: (Optional[str]) -> int
    """Translate "MALE"/"FEMALE"/"UNKNOWN" to the Gramps gender code (-1 otherwise)."""
    if gender_string == "MALE":
        code = Person.MALE
    elif gender_string == "FEMALE":
        code = Person.FEMALE
    elif gender_string == "UNKNOWN":
        code = Person.UNKNOWN
    else:
        code = -1  # unrecognised / None
    return code
def fetch_names(db):
    # type: (DbReadBase) -> Tuple[Nameinfo,Nameinfo,Nameinfo]
    """Scan every person in *db* and group first names, suffixes and surnames.

    Returns three sorted lists keyed by (name, gender); each entry carries
    the (display name, handle, gramps_id) of every matching person.
    """
    n = 0  # total number of first names seen
    firstnameset = defaultdict(list)
    suffixset = defaultdict(list)
    surnameset = defaultdict(list)
    for person_handle in db.get_person_handles():  # type: str
        person = db.get_person_from_handle(person_handle)
        names = person_names(person)  # primary + alternate names
        gender = person.get_gender()  # type: int
        pname = name_displayer.display(person)  # type: str
        # print(name_displayer.display(person))
        for index, name in enumerate(names):
            firstnames = name.get_first_name()  # type: str
            surnames = name.get_surname_list()
            if len(surnames) > 1:
                print()
                print("Monta sukunimeä:", pname)
            # Ensure a space after abbreviating dots so split() works.
            firstnames = firstnames.replace(".", ". ")
            # firstnames = firstnames.replace(":",": ")
            for firstname in firstnames.split():
                firstnameset[(firstname, gender)].append(
                    (pname, person_handle, person.gramps_id)
                )
                n += 1
            suffix = name.get_suffix()  # type: str
            if suffix:
                suffixset[(suffix, gender)].append(
                    (pname, person_handle, person.gramps_id)
                )
            for surname in surnames:
                sname = surname.get_surname()
                if sname:
                    # Surnames are grouped gender-neutrally (key -1).
                    surnameset[(sname, -1)].append(
                        (pname, person_handle, person.gramps_id)
                    )
    print(n, "names")
    firstnamelist = sorted(firstnameset.items())
    suffixlist = sorted(suffixset.items())
    surnamelist = sorted(surnameset.items())
    return firstnamelist, suffixlist, surnamelist
lastmod = 0.0
# -------------------------------------------------------------------------
#
# Tool
#
# -------------------------------------------------------------------------
class Tool(tool.Tool):
    """Gramps tool entry point: collects names and opens the merge dialog."""

    def __init__(
        self,
        dbstate,  # type: DbState
        user,  # type: User
        options_class,  # type: tool.ToolOptions
        name,  # type: str
        callback=None,  # type: Callable
    ):
        # type: (...) -> None
        self.user = user
        self.uistate = user.uistate
        self.dbstate = dbstate
        tool.Tool.__init__(self, dbstate, options_class, name)
        # Refuse to run if the plugin file changed since it was loaded.
        if not self.check_filechange():
            return
        try:
            self.run()
        except:
            traceback.print_exc()

    def check_filechange(self):
        # type: () -> bool
        """Return False (and warn) if this source file was modified on disk."""
        global lastmod
        modtime = os.stat(__file__).st_mtime
        if lastmod and lastmod < modtime:
            OkDialog("File changed", "Please reload")
            return False
        lastmod = modtime
        return True

    def run(self):
        # type: () -> None
        """Gather the name lists and open the merge dialog."""
        firstnamelist, suffixlist, surnamelist = fetch_names(self.db)
        try:
            d = NameDialog(
                self.uistate, self.dbstate, firstnamelist, suffixlist, surnamelist
            )
        except:
            traceback.print_exc()
def person_names(person):
    # type: (Person) -> List[Name]
    """Collect the primary name followed by all alternate names of *person*."""
    names = [person.get_primary_name()]
    names.extend(person.get_alternate_names())
    return names
class MyTreeView(Gtk.TreeView):
    """Plain Gtk.TreeView; columns are added later by MyListModel."""

    def __init__(self):
        # type: () -> None
        Gtk.TreeView.__init__(self)
        # renderer = Gtk.CellRendererText()
        # col = Gtk.TreeViewColumn('Text2',renderer, text=1)
        # self.append_column(col)
class MyListModel(Gtk.ListStore):
    """ListStore backing MyTreeView: (name, gender, count, rownum) rows."""

    def __init__(self, treeview, columns, event_func):
        # type: (MyTreeView, List[Tuple[str, int, int]], Callable) -> None
        Gtk.ListStore.__init__(self, str, str, int, int)  # name, gender, count, rownum
        self.event_func = event_func  # invoked on double-click (see __button_press)
        treeview.set_model(self)
        treeview.get_selection().set_mode(Gtk.SelectionMode.MULTIPLE)
        renderer = Gtk.CellRendererText()
        print(columns)
        for (title, colnum, width) in columns:
            col = Gtk.TreeViewColumn(title, renderer, text=colnum, weight_set=True)
            col.set_clickable(True)
            # col.set_sort_column_id(colnum)
            col.set_resizable(True)
            treeview.append_column(col)
        # treeview.connect('button-press-event', self.__button_press)

    def add(self, row):
        # type: (List[Union[int, str]]) -> None
        """Append *row*, setting each column value in order."""
        node = self.append()
        for col, value in enumerate(row):
            self.set_value(node, col, value)

    def __button_press(self, obj, event):
        # type: (Any, Any) -> bool
        """
        Called when a button press is executed
        """
        # Left double-click dispatches to the configured handler.
        if event.type == Gdk.EventType.DOUBLE_BUTTON_PRESS and event.button == 1:
            self.event_func(obj)
            return True
        return False
class NameDialog(ManagedWindow, DbGUIElement):
def __init__(
    self,
    uistate,  # type: DisplayState
    dbstate,  # type: DbState
    firstnamelist,  # type: List[Tuple[Tuple[str, int], List[Tuple[str, str, str]]]]
    suffixlist,  # type: List[Tuple[Tuple[str, int], List[Tuple[str, str, str]]]]
    surnamelist,  # type: List[Tuple[Tuple[str, int], List[Tuple[str, str, str]]]]
    ):
    # type: (...) -> None
    """Build the name-merge dialog and show the first-name list by default."""
    self.uistate = uistate
    self.dbstate = dbstate
    self.db = dbstate.db
    self.firstnamelist = firstnamelist
    self.suffixlist = suffixlist
    self.surnamelist = surnamelist
    self.names = firstnamelist  # currently displayed list
    self.nametype: Nametype = Nametype.FIRSTNAME
    self.rows: List[Row] = []
    self.personlist = None  # type: Optional[Personlist]
    # print(names)
    ManagedWindow.__init__(self, self.uistate, [], self.__class__, modal=False)
    # the self.top.run() below makes Gtk make it modal, so any change to
    # the previous line's "modal" would require that line to be changed
    DbGUIElement.__init__(self, dbstate.db)
    try:
        self.draw_window()
        ok = True
    except:
        traceback.print_exc()
        OkDialog("Error occurred", traceback.format_exc() + "\n\nRestart Gramps.")
        ok = False
    self.set_window(self.top, None, _("Name merge tool"))
    self.setup_configs("interface.namemerge", 300, 350)
    if ok:
        self.reset(None)
        self.show()
# see ManagedWindow.clean_up
# see ManagedWindow.clean_up
def clean_up(self):
    # type: () -> None
    """Close the person sub-window and drop all db callbacks on exit."""
    if self.personlist:
        self.personlist.close()
    self.callman.disconnect_all()
    print("done")
def draw_window(self):
# type: () -> Gtk.Window
"""Draw the dialog box."""
glade = Glade(toplevel=MAIN)
self.glade = glade
self.top = glade.toplevel
self.personlist = None
# None.x
columns = [
(_("Name"), 0, 200),
(_("Gender"), 1, 20),
(_("Count"), 2, 20),
]
# ('',-1,0)]
self.nameview = MyTreeView()
self.namemodel = MyListModel(
self.nameview, columns, event_func=self.cb_double_click
)
self.init()
find = glade.get_child_object(FINDBUTTON)
find.connect("clicked", self.find)
reset = glade.get_child_object(RESETBUTTON)
reset.connect("clicked", self.reset)
self.searchtext = glade.get_child_object(SEARCHTEXT)
slist = glade.get_child_object(SLIST) # GetkScrolledWindow
slist.add(self.nameview)
merge_button = glade.get_child_object(MERGEBUTTON)
merge_button.connect("clicked", self.merge)
# colorh = "#cc6666"
colorh = "#22dd22"
color = Gdk.RGBA()
color.parse(colorh)
color.to_string()
merge_button.override_background_color(Gtk.StateFlags.NORMAL, color)
self.nametype_firstname = glade.get_child_object("nametype_firstname")
self.nametype_firstname.connect("clicked", self.change_nametype)
self.nametype_patronyme = glade.get_child_object("nametype_patronyme")
self.nametype_patronyme.connect("clicked", self.change_nametype)
self.nametype_surname = glade.get_child_object("nametype_surname")
self.nametype_surname.connect("clicked", self.change_nametype)
self.gender_male = glade.get_child_object("gender_male")
self.gender_male.connect("clicked", self.find)
self.gender_female = glade.get_child_object("gender_female")
self.gender_female.connect("clicked", self.find)
self.gender_unknown = glade.get_child_object("gender_unknown")
self.gender_unknown.connect("clicked", self.find)
self.set_gender = glade.get_child_object("set_gender")
# self.set_gender.connect('clicked', self.find)
refresh_button = glade.get_child_object("refresh_button")
refresh_button.connect("clicked", self.refresh)
self.lbl_namecount = glade.get_child_object("lbl_namecount")
self.lbl_personcount = glade.get_child_object("lbl_personcount")
self.top.connect("key-press-event", self.keypress)
self.treeview = self.glade.get_child_object(TREEVIEW)
renderer = Gtk.CellRendererText()
columns = [("Id", 0, 50), ("Nimi", 1, 300)]
for (title, colnum, width) in columns:
col = Gtk.TreeViewColumn(title, renderer, text=colnum, weight_set=True)
col.set_clickable(True)
col.set_sort_column_id(colnum)
col.set_resizable(True)
self.treeview.append_column(col)
store = Gtk.ListStore(str, str, str)
self.treeview.set_model(store)
select = self.nameview.get_selection()
select.connect("changed", self.on_tree_selection_changed)
self.open_button = glade.get_child_object(
OPENBUTTON
) # cannot use id 'open_button'!!???
self.open_button.connect("clicked", self.__open_selected)
self.open_button.set_sensitive(False)
self.treeview.connect("button-press-event", self.__button_press)
self.treeview.connect("button-release-event", self.__button_press)
select = self.treeview.get_selection()
select.connect("changed", self.on_personlist_selection_changed)
return self.top
def keypress(self, obj, event):
# type: (Gtk.Widget, Gdk.Event) -> None
print("keypress", obj, event.keyval)
if event.keyval == Gdk.KEY_Escape:
print("esc")
self.close()
def change_nametype(self, obj):
# type: (Gtk.Widget) -> None
if self.nametype_firstname.get_active() and self.nametype != Nametype.FIRSTNAME:
self.names = self.firstnamelist
self.nametype = Nametype.FIRSTNAME
self.gender_male.set_sensitive(True)
self.gender_female.set_sensitive(True)
self.gender_unknown.set_sensitive(True)
self.set_gender.set_sensitive(True)
self.init()
self.reset(None)
if self.nametype_patronyme.get_active() and self.nametype != Nametype.PATRONYME:
self.names = self.suffixlist
self.nametype = Nametype.PATRONYME
self.gender_male.set_sensitive(True)
self.gender_female.set_sensitive(True)
self.gender_unknown.set_sensitive(True)
self.set_gender.set_sensitive(True)
self.init()
self.reset(None)
if self.nametype_surname.get_active() and self.nametype != Nametype.SURNAME:
self.names = self.surnamelist
self.nametype = Nametype.SURNAME
self.gender_male.set_sensitive(False)
self.gender_female.set_sensitive(False)
self.gender_unknown.set_sensitive(False)
self.set_gender.set_sensitive(False)
self.init()
self.reset(None)
def on_tree_selection_changed(self, selection):
# type: (Gtk.TreeSelection) -> None
self.selection = selection
(model, rows) = selection.get_selected_rows()
store = Gtk.ListStore(str, str, str)
plist2 = []
for row in rows:
ref = Gtk.TreeRowReference(model, row)
rownum = model.get_value(model.get_iter(ref.get_path()), 3)
plist = self.rows[rownum].plist
plist2.extend(plist)
for pname, handle, grampsid in sorted(plist2):
store.append([grampsid, pname, handle])
self.treeview.set_model(store)
def cb_double_click(self, treeview):
# type: (Gtk.TreeView) -> None
"""
Handle double click on treeview.
"""
(model, rows) = treeview.get_selection().get_selected_rows()
if len(rows) != 1:
return
ref = Gtk.TreeRowReference(model, rows[0])
try:
rownum = model.get_value(model.get_iter(ref.get_path()), 3)
row = self.rows[rownum]
sortedlist = sorted(row.plist)
self.personlist = Personlist(self.uistate, self.dbstate, sortedlist)
except WindowActiveError as e:
traceback.print_exc()
if self.personlist:
self.personlist.close()
sortedlist = sorted(row.plist)
self.personlist = Personlist(self.uistate, self.dbstate, sortedlist)
except:
traceback.print_exc()
def on_personlist_selection_changed(self, selection):
# type: (Gtk.TreeSelection) -> None
model, treeiter = selection.get_selected()
print(treeiter)
if treeiter is None:
self.open_button.set_sensitive(False)
else:
self.open_button.set_sensitive(True)
def __open_selected(self, obj):
# type: (Gtk.Widget) -> None
model, treeiter = self.treeview.get_selection().get_selected()
print(treeiter)
if not treeiter:
return
row = list(model[treeiter])
handle = row[2]
person = self.dbstate.db.get_person_from_handle(handle)
EditPerson(self.dbstate, self.uistate, [], person)
def __button_press(self, treeview, event):
# type: (Gtk.TreeView, Gdk.Event) -> bool
"""
Called when a button press is executed
"""
if event.type == Gdk.EventType.BUTTON_RELEASE and event.button == 1:
self.set_active_person()
return False
if event.type == Gdk.EventType.DOUBLE_BUTTON_PRESS and event.button == 1:
self.__open_selected(None)
return True
return False
def set_active_person(self):
# type: () -> None
model, treeiter = self.treeview.get_selection().get_selected()
if treeiter is None:
return
row = list(model[treeiter])
handle = row[2]
self.uistate.set_active(handle, "Person")
def gender_ok(self, gender):
# type: (str) -> bool
if self.nametype == Nametype.SURNAME:
return True
if self.gender_male.get_active() and gender == "MALE":
return True
if self.gender_female.get_active() and gender == "FEMALE":
return True
if self.gender_unknown.get_active() and gender == "UNKNOWN":
return True
return False
def refresh(self, obj):
# type: (Gtk.Widget) -> None
self.firstnamelist, self.suffixlist, self.surnamelist = fetch_names(self.db)
if self.nametype == Nametype.FIRSTNAME:
self.names = self.firstnamelist
if self.nametype == Nametype.PATRONYME:
self.names = self.suffixlist
if self.nametype == Nametype.SURNAME:
self.names = self.surnamelist
self.init()
self.find(None)
def findtext(self, text):
# type: (str) -> None
self.namemodel.clear()
self.namecount = 0
self.personcount = 0
# for [firstname,gender,count,rownum,is_deleted,plist] in self.rows:
for row in self.rows:
if row.deleted:
continue
if not self.gender_ok(row.gender):
continue
if row.name.lower().find(text) >= 0:
self.namemodel.add([row.name, row.gender, row.count, row.rownum])
self.namecount += 1
self.personcount += row.count
self.lbl_namecount.set_text(str(self.namecount))
self.lbl_personcount.set_text(str(self.personcount))
def find(self, obj):
# type: (Gtk.Widget) -> None
text = self.searchtext.get_text().lower()
self.findtext(text)
def reset(self, obj):
# type: (Optional[Gtk.Widget]) -> None
self.findtext("")
def init(self):
# type: () -> None
self.namemodel.clear()
rownum = 0
self.rows = []
for (name, gender), plist in self.names:
if gender == Person.MALE:
genderstring = "MALE"
elif gender == Person.FEMALE:
genderstring = "FEMALE"
elif gender == -1:
genderstring = ""
else:
genderstring = "UNKNOWN"
count = len(plist)
self.rows.append(Row(name, genderstring, count, rownum, False, plist))
rownum += 1
def merge(self, obj):
# type: (Any) -> None
print("merge")
(model, rows) = self.nameview.get_selection().get_selected_rows()
# (model, rows) = self.selection.get_selected_rows()
# print(rows)
names = []
maxcount = 0
maxindex = 0
for index, row in enumerate(rows):
ref = Gtk.TreeRowReference(model, row)
path = ref.get_path() # essentially a row number?
row = list(model[path])
name = row[0] # type: str
gender = row[1] # type: str
count = row[2] # type: int
if count > maxcount:
maxcount = count
maxindex = index
names.append((name, gender))
ok = self.select_primary(names, maxindex)
if not ok:
return
print(self.primary_name)
title = _("Merging names")
it1 = None
count = 0
merged_rows = []
with DbTxn(title, self.db) as self.trans:
for row in rows[::-1]:
# print()
ref = Gtk.TreeRowReference(model, row)
path = ref.get_path() # essentially a row number?
it = model.get_iter(path)
# print(path,list(model[path]),list(model[it]))
row = list(model[path])
name = row[0]
gender = row[1]
count += row[2]
rownum = row[3]
print(name)
if (name, gender) == self.primary_name:
it1 = it
remaining_row = self.rows[rownum]
continue
model.remove(it)
self.rows[rownum].deleted = True # is_deleted
merged_rows.append(self.rows[rownum])
if it1:
self.nameview.get_selection().unselect_all()
self.nameview.get_selection().select_iter(it1)
new_gender = self.merge_individuals(remaining_row, merged_rows)
if new_gender is not None:
model[it1][1] = new_gender
model[it1][2] = count
self.namecount -= len(merged_rows)
self.lbl_namecount.set_text(str(self.namecount))
else:
raise
selection = self.nameview.get_selection()
self.on_tree_selection_changed(selection) # update the person list
def merge_individuals(self, remaining_row, merged_rows):
# type: (Row,List[Row]) -> str
remaining_name = remaining_row.name
remaining_gender = remaining_row.gender
new_gender = ""
if self.nametype != Nametype.SURNAME and self.set_gender.get_active():
for row in [remaining_row] + merged_rows:
gender = row.gender
if gender != "UNKNOWN":
new_gender = gender
break
new_gender_code = gender_string_to_code(new_gender)
if new_gender is not None and remaining_gender == "UNKNOWN":
# must update the gender for the "remaining" individuals also
for pname, person_handle, grampsid in remaining_row.plist:
person = self.db.get_person_from_handle(person_handle)
self.replace_gender(person, new_gender_code)
for row in merged_rows:
for pname, person_handle, grampsid in row.plist:
person = self.db.get_person_from_handle(person_handle)
self.replace_name(person, row.name, remaining_name)
if new_gender and row.gender == "UNKNOWN":
self.replace_gender(person, new_gender_code)
remaining_row.plist.extend(row.plist)
remaining_row.count = len(remaining_row.plist)
if do_trace:
with open(tracefile, "a") as f:
ntype = "?"
if self.nametype == Nametype.FIRSTNAME:
ntype = "F"
if self.nametype == Nametype.PATRONYME:
ntype = "P"
if self.nametype == Nametype.SURNAME:
ntype = "S"
gendertype = "?"
if self.gender_male.get_active():
gendertype = "M"
if self.gender_female.get_active():
gendertype = "F"
print(ntype, gendertype, row.name, "=>", remaining_name, file=f)
return new_gender
def replace_name(self, person, old_name, new_name):
# type: (Person, str, str) -> None
names = person_names(person)
pname = name_displayer.display(person)
for name in names:
if self.nametype == Nametype.FIRSTNAME:
firstnames = name.get_first_name()
firstnames = firstnames.replace(".", ". ")
newnames = []
for firstname in firstnames.split():
if firstname == old_name:
firstname = new_name
newnames.append(firstname)
firstnames = " ".join(newnames)
name.set_first_name(firstnames)
if self.nametype == Nametype.PATRONYME:
suffix = name.get_suffix()
if suffix == old_name:
name.set_suffix(new_name)
surnames = name.get_surname_list()
for surname in surnames:
if surname.get_surname() == old_name:
surname.set_surname(new_name)
new_pname = name_displayer.display(person)
if do_logging:
print(person.gramps_id, pname, "=>", new_pname)
self.db.commit_person(person, self.trans)
def replace_gender(self, person, new_gender_code):
# type: (Person, int) -> None
person.set_gender(new_gender_code)
self.db.commit_person(person, self.trans)
def select_primary(self, names, maxindex):
# type: (List[Tuple[str,str]], int) -> bool
def cb_set_primary_name(obj, name_and_gender):
# type: (Gtk.Widget, Tuple[str,str]) -> None
self.primary_name = name_and_gender
dialog = Gtk.Dialog(
title=_("Select primary name"), parent=None, flags=Gtk.DialogFlags.MODAL
)
lbl1 = Gtk.Label(_("Select primary name"))
dialog.vbox.pack_start(lbl1, False, False, 5)
# self.primary_name = None
group = None
for index, (name, gender) in enumerate(names):
group = Gtk.RadioButton.new_with_label_from_widget(
group, name + " - " + gender
)
group.connect("toggled", cb_set_primary_name, (name, gender))
dialog.vbox.pack_start(group, False, True, 0)
# first one is the default:
if index == maxindex:
group.set_active(True)
self.primary_name = (name, gender)
dialog.add_button("Ok", Gtk.ResponseType.OK)
dialog.add_button("Cancel", Gtk.ResponseType.CANCEL)
dialog.set_default_response(Gtk.ResponseType.OK)
dialog.show_all()
result = dialog.run()
dialog.destroy()
if result == Gtk.ResponseType.OK:
return True
return False
class Personlist(ManagedWindow):
    """Modeless window listing persons (id, name) that carry a given name.

    Double-clicking (or the open button) launches the Gramps person editor;
    a single click makes the person active in the main UI.
    """

    def __init__(self, uistate, dbstate, plist):
        # type: (DisplayState, DbState, Pinfo) -> None
        self.uistate = uistate
        self.dbstate = dbstate
        self.db = dbstate.db
        self.plist = plist
        # print(plist)
        ManagedWindow.__init__(self, self.uistate, [], self.__class__, modal=False)
        # Model columns: gramps id, display name, person handle (hidden).
        store = Gtk.ListStore(str, str, str)
        for pname, handle, grampsid in self.plist:
            store.append([grampsid, pname, handle])
        self.draw_window()
        self.treeview.set_model(store)
        self.set_window(self.top, None, _("Show person"))
        self.top.show_all()

    def draw_window(self):
        # type: () -> None
        """Build the window widgets from the glade description."""
        glade = Glade(toplevel=PLIST)
        self.glade = glade
        self.top = glade.get_child_object(PLIST)
        self.treeview = glade.get_child_object(TREEVIEW)
        renderer = Gtk.CellRendererText()
        columns = [("Id", 0), ("Nimi", 1)]
        for (title, colnum) in columns:
            col = Gtk.TreeViewColumn(title, renderer, text=colnum, weight_set=True)
            # if colnum == 1:
            #     col.set_cell_data_func(renderer, datafunc)
            col.set_clickable(True)
            col.set_sort_column_id(colnum)
            col.set_resizable(True)
            self.treeview.append_column(col)
        open_button = glade.get_child_object(
            OPENBUTTON
        )  # cannot use id 'open_button'!!???
        open_button.connect("clicked", self.cb_open_selected)
        close_button = glade.get_child_object(CLOSEBUTTON)
        close_button.connect("clicked", self.close)
        self.treeview.connect("button-press-event", self.cb_button_press)
        self.treeview.connect("button-release-event", self.cb_button_press)

    def cb_open_selected(self, obj):
        # type: (Any) -> None
        """Open the person editor for the currently selected person."""
        model, treeiter = self.treeview.get_selection().get_selected()
        if treeiter is None:
            # Nothing selected; avoid indexing the model with None.
            return
        row = list(model[treeiter])
        handle = row[2]
        person = self.dbstate.db.get_person_from_handle(handle)
        EditPerson(self.dbstate, self.uistate, [], person)

    def cb_button_press(self, treeview, event):
        # type: (Any,Any) -> bool
        """
        Called when a button press is executed
        """
        if event.type == Gdk.EventType.BUTTON_RELEASE and event.button == 1:
            self.set_active_person()
            return False
        if event.type == Gdk.EventType.DOUBLE_BUTTON_PRESS and event.button == 1:
            # BUG FIX: was self.__open_selected(None); name mangling turns that
            # into Personlist._Personlist__open_selected, which does not exist
            # (the method here is cb_open_selected), so every double-click
            # raised AttributeError.
            self.cb_open_selected(None)
            return True
        return False

    def set_active_person(self):
        # type: () -> None
        """Make the selected person the active person in the Gramps UI."""
        model, treeiter = self.treeview.get_selection().get_selected()
        if treeiter is None:
            # Guard against a click that clears the selection.
            return
        row = list(model[treeiter])
        handle = row[2]
        self.uistate.set_active(handle, "Person")
# ------------------------------------------------------------------------
# Options
#
# ------------------------------------------------------------------------
class Options(tool.ToolOptions):
    """
    Define options and provides handling interface.
    """

    def __init__(self, name, person_id=None):
        # type: (str, Optional[str]) -> None
        # No tool-specific options; delegate straight to the base class.
        tool.ToolOptions.__init__(self, name, person_id)
|
import doctest
def load_tests(loader, tests, ignore):
    """unittest load_tests hook: add the kspalculator.techtree doctests."""
    doctest_suite = doctest.DocTestSuite("kspalculator.techtree")
    tests.addTests(doctest_suite)
    return tests
from etl import ETLPipeline
from etl_for_summary import ETLPipelineSummary
import pandas as pd
def run_etl_Rome_summary_without_extraction():
    """Run the Rome summary pipeline on already-downloaded data."""
    pipeline = ETLPipelineSummary("Dataset_summary\ROM\\", "")
    pipeline.run_without_extraction()
def run_etl_Rome_summary():
    """Download the Rome summary listing and run the full summary pipeline."""
    pipeline = ETLPipelineSummary(
        "Dataset_summary\ROM\\",
        "http://data.insideairbnb.com/italy/lazio/rome/2020-12-17/visualisations/listings.csv",
    )
    pipeline.run()
def run_etl_Rome_without_extraction():
    """Run the Rome detailed pipeline on already-downloaded data."""
    pipeline = ETLPipeline("Dataset_summary\ROM\\", "")
    pipeline.run_without_extraction()
def run_etl_Rome():
    """Download the full Rome listing and run the complete detailed pipeline.

    BUG FIX: this function was named run_etl_Rome_without_extraction, which
    duplicated (and silently shadowed) the function above even though this
    one performs the extraction step.  Renamed to match its behaviour.
    """
    etl = ETLPipeline(
        "Dataset\\ROM\\",
        "http://data.insideairbnb.com/italy/lazio/rome/2020-12-17/data/listings.csv.gz",
    )
    etl.run()
def run_etl_Amsterdam_summary_without_extraction():
    """Run the Amsterdam summary pipeline on already-downloaded data."""
    pipeline = ETLPipelineSummary("Dataset_summary\AMS\\", "")
    pipeline.run_without_extraction()
def run_etl_Amsterdam_summary():
    """Download the Amsterdam summary listing and run the full summary pipeline."""
    pipeline = ETLPipelineSummary(
        "Dataset_summary\AMS\\",
        "http://data.insideairbnb.com/the-netherlands/north-holland/amsterdam/2020-12-12/visualisations/listings.csv",
    )
    pipeline.run()
def run_etl_Amsterdam_without_extraction():
    """Run the Amsterdam detailed pipeline on already-downloaded data."""
    pipeline = ETLPipeline("Dataset_summary\AMS\\", "")
    pipeline.run_without_extraction()
def run_etl_Amsterdam():
    """Download the full Amsterdam listing and run the complete detailed pipeline.

    BUG FIX: this function was named run_etl_Amsterdam_without_extraction,
    which duplicated (and silently shadowed) the function above even though
    this one performs the extraction step.  Renamed to match its behaviour.
    """
    etl = ETLPipeline(
        "Dataset\\AMS\\",
        "http://data.insideairbnb.com/the-netherlands/north-holland/amsterdam/2020-12-12/data/listings.csv.gz",
    )
    etl.run()
def get_listing_urls(left_listing_id:str, right_listing_id:str, path:str, complete_listing_filename = "listing.csv"):
    """
    :param left_listing_id: id of the listing we want to find the url
    :param right_listing_id: id of the listing we want to find the url
    :param path: the path of the file containing the complete listing data
    :param complete_listing_filename: the name of the file containing the complete data for the listing (expected csv)
    :return: a pair of the urls of the listings
    :raises IndexError: if either id does not occur in the file
    """
    # on_bad_lines supersedes the error_bad_lines/warn_bad_lines pair, which
    # was deprecated in pandas 1.3 and removed in 2.0; "warn" keeps the old
    # behaviour (skip malformed rows and emit a warning).
    df = pd.read_csv(path + complete_listing_filename, on_bad_lines="warn")
    url_left = df.loc[df["id"] == left_listing_id, "listing_url"].to_numpy()[0]
    url_right = df.loc[df["id"] == right_listing_id, "listing_url"].to_numpy()[0]
    return (url_left, url_right)
if __name__ == "__main__":
    # Ad-hoc entry point: re-run only the last transform step on the
    # already-downloaded Rome summary data.
    etl = ETLPipelineSummary("Dataset_summary\ROM\\","")
    etl.run_last_step()
|
from django import forms
from django.contrib.auth.forms import UserCreationForm
from .models import User, Contact, PlayerMore, CoachMore
from django.forms import widgets
class RegistrationForm(UserCreationForm):
    """Sign-up form: username, required email, password pair and account type."""

    # BUG FIX: help_text said "vaild" — user-visible typo corrected to "valid".
    email = forms.EmailField(max_length=200, help_text='Required. Add a valid email address')

    class Meta:
        model = User
        # Field order here is the rendering order of the form.
        fields = ("email", "username", "password1", "password2", "type")
class ContactForm(forms.ModelForm):
    """ModelForm for Contact submissions (name, email, age, message)."""

    class Meta:
        model = Contact
        fields = ['name', 'email', 'age', 'message']
class CreatePlayer(forms.ModelForm):
    """ModelForm building a full PlayerMore profile for a user."""

    class Meta:
        model = PlayerMore
        fields = ['user','email', 'name', 'image', 'age', 'state', 'city', 'position', 'club', 'school', 'gradyear', 'gpa', 'height', 'weight',
                  'phone', 'summary', 'skills']
class CreateCoach(forms.ModelForm):
    """ModelForm building a CoachMore profile for a user."""

    class Meta:
        model = CoachMore
        fields = ['user', 'email', 'name', 'image','state', 'position', 'school', 'experience', 'requirements', 'phone']
        # NOTE(review): "readonly" is not a standard ModelForm Meta option and
        # is ignored by Django itself — presumably consumed by custom view or
        # template code; verify before relying on it.
        readonly = ('user', 'email')
class LoginForm(forms.Form):
    """Plain (non-model) login form: email + masked password input."""

    email = forms.EmailField()
    password = forms.CharField(widget=forms.PasswordInput())
|
# Maps a lower-cased, space-free MLB team name to its conventional
# three-letter abbreviation (Baseball-Reference style codes).
name_to_abbrev = {'arizonadiamondbacks': 'ARI', 'atlantabraves': 'ATL', 'baltimoreorioles': 'BAL', 'bostonredsox': 'BOS', 'chicagowhitesox': 'CHW', 'chicagocubs': 'CHC', 'cincinnatireds': 'CIN', 'clevelandindians': 'CLE', 'coloradorockies': 'COL', 'detroittigers': 'DET', 'houstonastros': 'HOU', 'kansascityroyals': 'KCR','losangelesangels': 'LAA', 'losangelesdodgers': 'LAD','miamimarlins': 'MIA', 'milwaukeebrewers': 'MIL', 'minnesotatwins': 'MIN', 'newyorkyankees': 'NYY', 'newyorkmets': 'NYM','oaklandathletics': 'OAK', 'philadelphiaphillies': 'PHI', 'pittsburghpirates': 'PIT','sandiegopadres': 'SDP', 'sanfranciscogiants': 'SFG','seattlemariners': 'SEA', 'st.louiscardinals': 'STL', 'tampabayrays': 'TBR', 'texasrangers': 'TEX', 'torontobluejays': 'TOR', 'washingtonnationals': 'WSN'}
class Team:
    """Holds one MLB team's identity plus slots for stats and roster data."""

    def __init__(self, name):
        self.name = name
        self.abbrev = getAbbrev(name)
        # Season stat dictionaries, filled in elsewhere.
        self.stats_2019 = {}
        self.stats_2020 = {}
        # Batting order and starting pitcher, filled in elsewhere.
        self.lineup = []
        self.pitcher = None
def getAbbrev(name):
    """Return the abbreviation for a full team name, e.g. 'Boston Red Sox' -> 'BOS'."""
    lookup_key = name.lower().replace(' ', '')
    return name_to_abbrev[lookup_key]
|
import csv
import sys
ip_dict = {}  # {ip address: email}
name = {}  # {ip address: name}
file = 'test.csv'

# data has fields ip,name,email
# populate ip_dict and name dictionaries
with open(file, 'r') as inputFile:
    data = csv.DictReader(inputFile, delimiter=',')
    # BUG FIX: csv.DictReader already consumes the header row to build its
    # fieldnames, so the original "next(data)" here ("skip header row")
    # actually discarded the FIRST DATA record.  Removed.
    # populate ip dictionary with associated alert email address.
    # populate name dictionary with name associated with each IP.
    for row in data:
        ip_dict[row['ip']] = row['email']
        name[row['ip']] = row['name']

# For each unresponsive IP piped in on stdin, say who to alert.
for unresponsiveIP in sys.stdin:
    unresponsiveIP = unresponsiveIP.strip()
    print(
        f'Please alert {name[unresponsiveIP]} that {unresponsiveIP} is not responding. Email {ip_dict[unresponsiveIP]}.')
|
# -*- coding: utf-8 -*-
# @Author: jpch89
# @Time: 18-8-24 下午5:09
class A:
    """Class whose instances are directly callable (implements __call__)."""

    def __call__(self):
        """Return a fresh bare object."""
        return object()
class B:
    """Class exposing the same capability through an ordinary method."""

    def run(self):
        """Return a fresh bare object."""
        return object()
def func():
    """Plain-function flavour of the same interface: return a fresh object."""
    return object()
def main(call_able):
    """Invoke *call_able* with no arguments, ignoring its result."""
    call_able()
# We want main() to obtain an object from whatever callable is passed in,
# without caring whether it is A.go(), B.run() or the plain function func() —
# just call it directly.
# Because calling a class constructs an instance (and A also defines
# __call__), the class/function distinction disappears:
# __call__ provides a uniform calling interface.
main(A)
|
#__init__.py
# Use this file so that callers can write:
#     from [module name] import [function name]
# e.g. in main.py: from MMmodule import call_cmd_normal
# After that, any exported function can be used directly.
#from MMlib_system import call_cmd_normal,call_cmd_pipe
|
# Generated by Django 2.1.3 on 2018-11-23 08:01
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: add a unique constraint and verbose name
    to Author.name in the blog app."""

    dependencies = [
        ('blog', '0001_initial'),
    ]

    operations = [
        migrations.AlterField(
            model_name='author',
            name='name',
            field=models.CharField(max_length=50, unique=True, verbose_name='Author Name'),
        ),
    ]
|
"""
smorest_sfs.modules.projects.resource
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
项目的资源模块
"""
from typing import Any, Dict, List
from flask.views import MethodView
from flask_sqlalchemy import BaseQuery
from loguru import logger
from flask_jwt_extended import current_user
from smorest_sfs.extensions.api.decorators import paginate
from smorest_sfs.extensions.marshal.bases import (BaseIntListSchema,
BaseMsgSchema,
GeneralParam)
from smorest_sfs.modules.auth import PERMISSIONS
from smorest_sfs.modules.auth.decorators import (doc_login_required,
permission_required)
from . import blp, models, schemas
@blp.route('/options')
class ProjectListView(MethodView):
    """Read-only endpoint returning every project, for option/select widgets."""

    @doc_login_required
    @permission_required(PERMISSIONS.ProjectQuery)
    @blp.response(schemas.ProjectListSchema)
    def get(self) -> Dict[str, List[models.Project]]:
        # pylint: disable=unused-argument
        '''
        Get all projects as option data
        '''
        query = models.Project.query
        items = query.all()
        return {'data': items}
@blp.route('')
class ProjectView(MethodView):
    """Collection endpoint: paginated listing, creation and batch deletion."""

    @doc_login_required
    @permission_required(PERMISSIONS.ProjectQuery)
    @blp.arguments(GeneralParam, location="query", as_kwargs=True)
    @blp.response(schemas.ProjectPageSchema)
    @paginate()
    def get(self, **kwargs: Dict[str, Any]) -> BaseQuery:
        # pylint: disable=unused-argument
        '''
        Get all projects — paginated
        '''
        # @paginate() consumes the returned query and pages it.
        query = models.Project.where(**kwargs)
        return query

    @doc_login_required
    @permission_required(PERMISSIONS.ProjectAdd)
    @blp.arguments(schemas.ProjectSchema)
    @blp.response(schemas.ProjectItemSchema)
    def post(self, project: models.Project) -> Dict[str, models.Project]:
        # pylint: disable=unused-argument
        '''
        Create a new project
        '''
        project.save()
        logger.info(f"{current_user.username}新增了项目{project}")
        return {'data': project}

    @doc_login_required
    @permission_required(PERMISSIONS.ProjectDelete)
    @blp.arguments(BaseIntListSchema, as_kwargs=True)
    @blp.response(BaseMsgSchema)
    def delete(self, lst: List[int]) -> None:
        # pylint: disable=unused-argument
        '''
        Batch-delete projects
        -------------------------------
        :param lst: list of project ids to delete
        '''
        models.Project.delete_by_ids(lst)
        logger.info(f"{current_user.username}删除了项目{lst}")
@blp.route('/<int:project_id>',
           parameters=[
               {'in': 'path', 'name': 'project_id', 'description': '项目id'}
           ])
class ProjectItemView(MethodView):
    """Single-item endpoint: fetch, update and delete one project by id."""

    @doc_login_required
    @permission_required(PERMISSIONS.ProjectEdit)
    @blp.arguments(schemas.ProjectSchema)
    @blp.response(schemas.ProjectItemSchema)
    def put(self, project: models.Project, project_id: int) -> Dict[str, models.Project]:
        '''
        Update a project
        '''
        project = models.Project.update_by_id(project_id,
                                              schemas.ProjectSchema,
                                              project)
        logger.info(f"{current_user.username}更新了项目{project.id}")
        return {'data': project}

    @doc_login_required
    @permission_required(PERMISSIONS.ProjectDelete)
    @blp.response(BaseMsgSchema)
    def delete(self, project_id: int) -> None:
        '''
        Delete a project
        '''
        models.Project.delete_by_id(project_id)
        logger.info(f"{current_user.username}删除了项目{project_id}")

    @doc_login_required
    @permission_required(PERMISSIONS.ProjectQuery)
    @blp.response(schemas.ProjectItemSchema)
    def get(self, project_id: int) -> Dict[str, models.Project]:
        # pylint: disable=unused-argument
        '''
        Get a single project
        '''
        project = models.Project.get_by_id(project_id)
        return {'data': project}
def main():
    """Check Penny's profile against the compatibility criteria and print
    the verdict (prints "Candidata Penny : True/False")."""
    # Penny's data
    nome = "Penny"
    sexo = "feminino"
    altura = 167
    peso = 52
    idade = 25
    cabelo = "loiro"
    escolaridade = "medio"
    # Criteria: female, 45-65 kg, 155-170 cm, 25-35 years, blonde, not a PhD.
    compativel = (
        sexo == 'feminino'
        and 45 <= peso <= 65
        and 155 <= altura <= 170
        and 25 <= idade <= 35
        and cabelo == "loiro"
        and escolaridade != "PhD"
    )
    print("Candidata", nome, ":", compativel)
# End of program
#-------------------------------------------
if __name__ == '__main__':
    main()
|
"""
Aprimore o desafio anterior, mostrando no final:
A - A soma de todos os valores pares digitados.
B - A soma dos valores da terceira coluna.
C - O maior valor da segunda linha.
"""
matriz = [[], [], []]
soma = soma3 = maior1 = 0

# Read a 3x3 matrix of integers.
for i in range(0, 3):
    for j in range(0, 3):
        valor = int(input(f'Digite o valor para [{i}][{j}]: '))
        matriz[i].append(valor)

print('=-=' * 15)
# First row: print it and accumulate the even values.
for valor in matriz[0]:
    print(f'[ {valor} ]', end=' ')
    if valor % 2 == 0:
        soma += valor
print()
# Second row: accumulate evens and track the row maximum.
# BUG FIX: the original used "if len(matriz) == 0" (always False, len is 3)
# to detect the first element, so rows of all-negative numbers wrongly
# reported 0 as the maximum.
for pos, valor in enumerate(matriz[1]):
    print(f'[ {valor} ]', end=' ')
    if valor % 2 == 0:
        soma += valor
    if pos == 0 or valor > maior1:
        maior1 = valor
print()
# Third row: print it and accumulate the even values.
for valor in matriz[2]:
    print(f'[ {valor} ]', end=' ')
    if valor % 2 == 0:
        soma += valor
print()
# BUG FIX: the task asks for the sum of the THIRD COLUMN, but the original
# accumulated (even) values of the third ROW.
soma3 = matriz[0][2] + matriz[1][2] + matriz[2][2]
print('=-=' * 15)
print(f'A soma de todos os valores pares é igual a {soma}')
print(f'A soma dos valores da 3º coluna é igual a {soma3}')
# BUG FIX: message said "coluna"; the value computed (and specified in the
# module docstring) is the maximum of the second LINE (row).
print(f'O maior valor da 2º linha é igual a {maior1}')
import pygame
import util
import physics
from display import Display
import random
import colors
import time
class Game:
def __init__(self):
    """Initialise pygame, the display, game state and all sound assets."""
    pygame.init()
    self.spawning_interval = util.spawning_interval
    # If this is true, we have just a blank screen and the ball. For playing
    # around with the physics
    self.basic = True
    self.display = Display()
    # Monotonically increasing ids used to key lasers / xwings in the state.
    self.laserID = 0
    self.xwingID = 0
    # Track info about the player in the world
    self.state = \
        {
            'vx': 0.0,  # Velocity in the x and y directions
            'vy': 0.0,
            'x': util.init_ball_x,
            'y': util.init_ball_y,
            'health': 100,
            'counter': 0,  # frame counter, drives spawning
            'score': 0,
            'done': False,  # Set to true when the player loses
            'lasers': {},  # Store as a dict of {id: (x, y, vx, vy)}
            'xwings': {}  # Store info about xwings
                          # id: orientation, x, y, velocity
        }
    # Debounce flags so one click / keypress maps to one action.
    self.can_fire = True
    self.can_play_sound = True
    self.main_theme = pygame.mixer.Sound(util.star_wars_theme)
    self.imperial_march = pygame.mixer.Sound(util.imperial_march)
    self.duel_of_the_fates = pygame.mixer.Sound(util.duel_of_the_fates)
    self.blaster_sound = pygame.mixer.Sound(util.blaster_sound)
    # Dedicated mixer channels so effects don't cut each other off.
    self.blaster_channel = pygame.mixer.Channel(1)
    self.explosion_channel = pygame.mixer.Channel(2)
    self.explosion_sound = pygame.mixer.Sound(util.explosion_sound)
    self.crash_sound = pygame.mixer.Sound(util.crash_sound)
    self.random_sounds = [pygame.mixer.Sound(util.rand2)]  # only really enjoyed the darth vader clip so far
def move_ball(self, movement_keys):
    '''
    Update the ball position
    '''
    # Apply physics for the pressed keys, then redraw from the new state.
    physics.do_movement(movement_keys, self.state)
    self.display.update(self.state)
def add_laser_bolt(self, tx, ty):
'''
Add a new laser bolt with starting position self.state['x'], self.state['y'] and
trajectory tx, ty
'''
self.state['lasers']['l' + str(self.laserID)] = (self.state['x'], self.state['y'], tx, ty)
self.display.add_laser('l' + str(self.laserID))
self.laserID += 1
def fire_laser(self, target):  # target passed in as col, row
    '''
    Get the trajectory of the new laser bolt, add it to the game state,
    update the display, and play the blaster sound.

    :param target: (col, row) screen position the player clicked.
    '''
    # Swap to the (row, col) order the physics helper expects.
    true_target = target[1], target[0]
    tx, ty = physics.get_laser_trajectory(true_target, self.state)
    self.add_laser_bolt(tx, ty)
    self.display.update(self.state)
    # CONSISTENCY FIX: reuse the channel object created in __init__ instead
    # of constructing a fresh pygame.mixer.Channel(1) wrapper per shot
    # (same underlying channel, so behaviour is unchanged).
    self.blaster_channel.play(self.blaster_sound)
def reached_end(self):
'''
Check whether the ball is within the target area
'''
if self.basic:
return False
return (self.state['x'] >= self.goal_x and self.state['x'] <= self.goal_x + self.goal_height) \
and (self.state['y'] >= self.goal_y and self.state['y'] <= self.goal_y + self.goal_width)
def xwing_initial_position(self, orientation):
    '''
    Pick a (row, col) spawn point on the screen edge given by *orientation*.

    Corner orientations ('top_left'/'top_right') first flip a coin between
    their two component edges, then recurse once with the chosen edge.
    '''
    last_row = util.screen_height - 1
    last_col = util.screen_width - 1
    if orientation == 'top':
        return 0, random.randint(0, last_col)
    if orientation == 'bottom':
        return last_row, random.randint(0, last_col)
    if orientation == 'left':
        return random.randint(0, last_row), 0
    if orientation == 'right':
        return random.randint(0, last_row), last_col
    if orientation == 'top_left':
        edge = 'top' if random.randint(0, 1) == 1 else 'left'
        return self.xwing_initial_position(edge)
    if orientation == 'top_right':
        edge = 'top' if random.randint(0, 1) == 1 else 'right'
        return self.xwing_initial_position(edge)
def omniscient_seeking(self, i):
    '''
    Steer xwing *i* toward the player, assuming the xwing always knows the
    player's position.  Returns the xwing's new (vx, vy).
    '''
    orientation, x, y, vx, vy = self.state['xwings'][i]
    # Aim from the xwing's center rather than its top-left corner.
    center_x = x + util.xwing_height // 2
    center_y = y + util.xwing_width // 2
    player_pos = (self.state['x'], self.state['y'])
    desired = util.scaled_distance(player_pos, (center_x, center_y), util.xwing_velocity)
    # Clamp the velocity correction to the maximum steering force.
    steer = util.truncate((desired[0] - vx, desired[1] - vy), util.max_steering)
    return vx + steer[0], vy + steer[1]
def spawn_xwing(self):
    '''
    Create one new xwing at a random edge position and register it with
    both the game state and the display.
    '''
    #if self.xwingID > 3:
    #    return
    xwing_id = 'x' + str(self.xwingID)  # pull the next xwing id
    # Random edge/corner (-1 keeps randint in bounds).
    orientation = util.xwing_positions[random.randint(0, len(util.xwing_positions) - 1)]
    init_x, init_y = self.xwing_initial_position(orientation)
    # New xwings start motionless; omniscient_seeking gives them a real
    # velocity on the next AI tick.
    self.state['xwings'][xwing_id] = orientation, init_x, init_y, 0, 0
    # Add the id of this xwing to the things to be blitted.
    self.display.add_xwing(xwing_id, orientation)
    self.xwingID += 1
def do_actions(self):
    '''
    Handle all user-side actions
    '''
    key_state = pygame.key.get_pressed()
    pressed_keys = {x for x in util.action_keys if key_state[x]}
    # Handle avatar movement
    movement_keys = util.get_movement_keys(pressed_keys, self.state)
    self.move_ball(movement_keys)
    # can_fire debounces the mouse: one laser per click, re-armed on release.
    if pygame.mouse.get_pressed()[0] and self.can_fire:
        # Then we have pressed some part of the mouse
        target = pygame.mouse.get_pos()
        self.fire_laser(target)
        self.can_fire = False
    if not pygame.mouse.get_pressed()[0]:
        self.can_fire = True
    # Reset to catch the next actions
    pygame.event.pump()
def update_xwing_position(self, xwing_id):
'''
Self-explanatory - update the position of the given xwing according to
its current velocity
'''
# Modify the xy coordinates of the xwing according to the current velocity
o, x, y, vx, vy = self.state['xwings'][xwing_id]
self.state['xwings'][xwing_id] = o, x + vx, y + vy, vx, vy
    def update_xwing_velocity(self, xwing_id):
        '''
        Modify the state to contain the updated xwing velocity according to our seeking
        behavior
        '''
        updated_v = self.omniscient_seeking(xwing_id)
        o, x, y, vx, vy = self.state['xwings'][xwing_id]
        # Also update orientation: map the new velocity's quadrant to one of
        # the sprite orientations.
        # NOTE(review): the quadrant -> name mapping ('left', 'bottom',
        # 'top_right', 'top_left') looks asymmetric; confirm against the
        # sprite assets that these names are the intended facings.
        if updated_v[0] < 0 and updated_v[1] > 0:
            new_o = 'left'
        elif updated_v[0] < 0 and updated_v[1] < 0:
            new_o = 'bottom'
        elif updated_v[0] > 0 and updated_v[1] < 0:
            new_o = 'top_right'
        else:  # updated_v[0] > 0 and updated_v[1] > 0
            # NOTE(review): this branch also catches zero components
            # (vx == 0 or vy == 0), not only the strictly-positive quadrant.
            new_o = 'top_left'
        # Only notify the display when the orientation actually changed
        if new_o != o:
            self.display.update_orientation(xwing_id, new_o)
        self.state['xwings'][xwing_id] = new_o, x, y, updated_v[0], updated_v[1]
def handle_spawning(self):
'''
Depending on the game state, add some number of new xwings
'''
if self.state['counter'] % self.spawning_interval == 0:
self.spawn_xwing()
self.spawning_interval = max(15, int(self.spawning_interval * util.spawn_rate))
def handle_ai(self):
'''
Do spawning, and update position and velocity for all the xwing
'''
self.handle_spawning()
# Update the movement pattern and position of each xwing
for xwing_id in self.state['xwings']:
self.update_xwing_velocity(xwing_id)
self.update_xwing_position(xwing_id)
def handle_collision(self, xwing_id):
'''
This xwing has collided with the death star, so update its status
'''
self.state['xwings'].pop(xwing_id)
self.display.boom(xwing_id)
    def check_for_destroyed_xwings(self):
        '''
        Compare the location of each laser with the xwings

        Returns the number of xwings destroyed this frame.
        '''
        # NOTE(review): finished_lasers is collected but never used — lasers
        # that hit an xwing stay in self.state['lasers']. Confirm whether
        # they are meant to be removed here or are cleaned up elsewhere.
        destroyed = []
        finished_lasers = []
        for xwing in self.state['xwings']:
            _, x, y, _, _ = self.state['xwings'][xwing]
            for laser in self.state['lasers']:
                lx, ly, _, _ = self.state['lasers'][laser]
                lx, ly = int(lx), int(ly)
                # Point-in-rectangle test of the laser against the sprite box
                if lx in range(int(x), int(x + util.xwing_height))\
                        and ly in range(int(y), int(y + util.xwing_width)):
                    # We have a collision
                    self.explosion_channel.play(self.explosion_sound)
                    destroyed.append(xwing)
                    finished_lasers.append(laser)
                    self.display.boom(xwing)
                    # One hit is enough for this xwing; move to the next
                    break
        # Remove destroyed xwings
        for d in destroyed:
            self.state['xwings'].pop(d)
        return len(destroyed)
def check_collisions(self):
'''
If any of the xwings have collided with the player,
update the health of the death star
'''
ds_coords = self.state['x'], self.state['y']
xwings_to_remove = []
num_collisions = 0
for xwing_id in self.state['xwings']:
_, x, y, _, _ = self.state['xwings'][xwing_id]
xcx = int(x + util.xwing_height // 2)
xcy = int(y + util.xwing_width // 2)
# This only checks if the center of the xwing sprite is within the bounds of the death star sprite
if xcx in range(int(ds_coords[0] - util.ball_height // 2), int(ds_coords[0] + util.ball_height // 2))\
and xcy in range(int(ds_coords[1] - util.ball_width // 2), int(ds_coords[1] + util.ball_width // 2)):
xwings_to_remove.append(xwing_id)
num_collisions += 1
for xwing in xwings_to_remove:
self.handle_collision(xwing)
return num_collisions
def update_health(self, num_collisions):
'''
Update our health statistic according to the number of collisions this past frame
'''
diff = num_collisions * util.damage_per_hit
self.state['health'] -= diff
# If health drops below 0, we've lost
if self.state['health'] <= 0:
self.state['health'] = 0
self.state['done'] = True
def update_score(self, num_destroyed):
'''
We get points for xwings destroyed with lasers
'''
points = num_destroyed * util.points_per_xwing_destroyed
self.state['score'] += points
if self.state['score'] % 300 == 0 and self.state['score'] > 0 and self.can_play_sound:
rsound = self.random_sounds[random.randint(0, len(self.random_sounds) - 1)]
pygame.mixer.Channel(3).play(rsound)
# Like a mutex, until we are guaranteed we can play on this channel again
self.can_play_sound = False
    def play_level(self, level):
        '''
        Call this for each level
        If level is the string 'basic', then don't load any tiles and just use
        the default display

        NOTE(review): ``level`` is not referenced inside this method; the
        'basic' behavior described above is presumably handled elsewhere —
        confirm.
        '''
        clock = pygame.time.Clock()
        # Main game loop: one iteration per frame until the game is done
        while not self.state['done']:
            clock.tick(60)  # cap the loop at 60 frames per second
            self.state['counter'] += 1
            self.do_actions()
            self.handle_ai()
            num_collisions = self.check_collisions()
            if num_collisions > 0:
                self.explosion_channel.play(self.explosion_sound)
            points = self.check_for_destroyed_xwings()
            # This is so that we don't get stuck in a temporary loop were the score
            # is at a checkpoint, and hasn't changed, causing the player to try playing
            # the sound bit over and over again
            if points > 0:
                self.can_play_sound = True
            self.update_health(num_collisions)
            self.update_score(points)
            # Update the display once per iteration, after all changes have been made
            self.display.update(self.state)
        print('Well done!')
        # NOTE(review): assumes 10 points per destroyed fighter — confirm
        # against util.points_per_xwing_destroyed.
        print('You hit %s rebel fighters,' % str(self.state['score'] // 10))
        print('for a score of %s' % str(self.state['score']))
        # self.display.exit_credits(self.state)
def run(self):
'''
Point of entry to the Game class
'''
done = False
# Set the song order
pygame.mixer.Channel(0).play(self.main_theme)
pygame.mixer.Channel(0).queue(self.imperial_march)
pygame.mixer.Channel(0).queue(self.main_theme)
pygame.mixer.Channel(0).queue(self.imperial_march)
pygame.mixer.Channel(0).queue(self.duel_of_the_fates)
level = 0 # Irrelevant if self.basic is True
self.play_level(level)
pygame.event.wait()
pygame.display.quit()
pygame.quit()
|
from csv import DictReader
from datetime import date
from datetime import datetime
from decimal import Decimal
from freezegun import freeze_time
from io import BytesIO
from io import StringIO
from onegov.core.orm.abstract import MoveDirection
from onegov.swissvotes.collections import SwissVoteCollection
from onegov.swissvotes.collections import TranslatablePageCollection
from onegov.swissvotes.models import SwissVote
from openpyxl import load_workbook
from pytz import utc
def test_pages(session):
    """setdefault, add and move on a TranslatablePageCollection keep the
    expected page ordering."""
    pages = TranslatablePageCollection(session)
    # setdefault: created in reversed order, yet queried in the static order
    static = ['home', 'disclaimer', 'imprint', 'data-protection']
    for id in reversed(static):
        pages.setdefault(id)
    assert [page.id for page in pages.query()] == static
    # add: new pages are appended after the static ones
    about = pages.add(
        id='about',
        title_translations={'en': "About", 'de': "Über"},
        content_translations={'en': "Placeholder", 'de': "Platzhalter"}
    )
    assert about.id == 'about'
    assert about.title_translations == {'en': "About", 'de': "Über"}
    assert about.content_translations == {
        'en': "Placeholder", 'de': "Platzhalter"
    }
    assert [page.id for page in pages.query()] == static + ['about']
    contact = pages.add(
        id='contact',
        title_translations={'en': "Contact", 'de': "Kontakt"},
        content_translations={'en': "Placeholder", 'de': "Platzhalter"}
    )
    dataset = pages.add(
        id='dataset',
        title_translations={'en': "Dataset", 'de': "Datensatz"},
        content_translations={'en': "Placeholder", 'de': "Platzhalter"}
    )
    assert [page.id for page in pages.query()] == static + [
        'about', 'contact', 'dataset'
    ]
    # move: reorder only within the non-static tail
    pages.move(dataset, about, MoveDirection.above)
    pages.move(about, contact, MoveDirection.below)
    assert [page.id for page in pages.query()] == static + [
        'dataset', 'contact', 'about'
    ]
def test_votes(swissvotes_app):
    """Adding a vote sets its attributes, stamps last_modified, and makes
    it retrievable by BFS number (string or Decimal)."""
    votes = SwissVoteCollection(swissvotes_app)
    assert votes.last_modified is None
    # Freeze the clock so last_modified is deterministic
    with freeze_time(datetime(2019, 1, 1, 10, tzinfo=utc)):
        vote = votes.add(
            id=1,
            bfs_number=Decimal('100.1'),
            date=date(1990, 6, 2),
            title_de="Vote DE",
            title_fr="Vote FR",
            short_title_de="V D",
            short_title_fr="V F",
            short_title_en="V E",
            _legal_form=1
        )
    assert vote.id == 1
    assert vote.bfs_number == Decimal('100.1')
    assert vote.date == date(1990, 6, 2)
    assert vote.title_de == "Vote DE"
    assert vote.title_fr == "Vote FR"
    assert vote.short_title_de == "V D"
    assert vote.short_title_fr == "V F"
    assert vote.short_title_en == "V E"
    # _legal_form=1 is rendered as its label
    assert vote.legal_form == "Mandatory referendum"
    assert votes.last_modified == datetime(2019, 1, 1, 10, tzinfo=utc)
    assert votes.by_bfs_number('100.1') == vote
    assert votes.by_bfs_number(Decimal('100.1')) == vote
def test_votes_default(swissvotes_app):
    """Constructor keyword arguments are stored verbatim, and default()
    resets every filter/sort attribute to None."""
    params = {
        'page': 2,
        'from_date': 3,
        'to_date': 4,
        'legal_form': 5,
        'result': 6,
        'policy_area': 7,
        'term': 8,
        'full_text': 9,
        'position_federal_council': 10,
        'position_national_council': 11,
        'position_council_of_states': 12,
        'sort_by': 13,
        'sort_order': 14,
    }
    votes = SwissVoteCollection(swissvotes_app, **params)
    # Every keyword is stored on the collection unchanged
    for name, value in params.items():
        assert getattr(votes, name) == value
    # default() clears all of them back to None
    votes = votes.default()
    for name in params:
        assert getattr(votes, name) is None
def test_votes_pagination(swissvotes_app):
    """Pagination: an empty collection has one empty page; 26 votes split
    into a page of 20 plus a page of 6 with working previous/next links."""
    votes = SwissVoteCollection(swissvotes_app)
    # Empty collection behavior
    assert votes.pages_count == 0
    assert votes.batch == ()
    assert votes.page_index == 0
    assert votes.offset == 0
    assert votes.previous is None
    assert votes.next is None
    assert votes.page_by_index(0) == votes
    # Add 26 votes — one more than a full page of 20 plus 5
    for id_ in range(26):
        votes.add(
            id=id_,
            bfs_number=Decimal(f'{id_}'),
            date=date(1990, 6, 2),
            title_de="Vote",
            title_fr="Vote",
            short_title_de="Vote",
            short_title_fr="Vote",
            short_title_en="Vote",
            _legal_form=1
        )
    votes = SwissVoteCollection(swissvotes_app)
    assert votes.query().count() == 26
    assert votes.subset_count == 26
    assert votes.pages_count == 2
    assert len(votes.batch) == 20
    assert votes.page_index == 0
    assert votes.offset == 0
    assert votes.previous is None
    assert votes.next == votes.page_by_index(1)
    assert votes.page_by_index(0) == votes
    # Second page holds the remaining 6 votes and links back
    assert votes.next.page_index == 1
    assert len(votes.next.batch) == 6
    assert votes.next.previous == votes
def test_votes_term_filter(swissvotes_app):
assert SwissVoteCollection(swissvotes_app).term_filter == []
assert SwissVoteCollection(swissvotes_app, term='').term_filter == []
assert SwissVoteCollection(swissvotes_app, term='', full_text=True)\
.term_filter == []
def compiled(**kwargs):
list_ = SwissVoteCollection(swissvotes_app, **kwargs).term_filter
return [
str(statement.compile(compile_kwargs={"literal_binds": True}))
for statement in list_
]
c_title_de = "to_tsvector('german', swissvotes.title_de)"
c_title_fr = "to_tsvector('french', swissvotes.title_fr)"
c_short_title_de = "to_tsvector('german', swissvotes.short_title_de)"
c_short_title_fr = "to_tsvector('french', swissvotes.short_title_fr)"
c_short_title_en = "to_tsvector('english', swissvotes.short_title_en)"
c_keyword = "to_tsvector('german', swissvotes.keyword)"
c_initiator = "to_tsvector('german', swissvotes.initiator)"
c_text_de = 'swissvotes."searchable_text_de_CH"'
c_text_fr = 'swissvotes."searchable_text_fr_CH"'
c_text_it = 'swissvotes."searchable_text_it_CH"'
c_text_en = 'swissvotes."searchable_text_en_US"'
assert compiled(term='987') == [
'swissvotes.bfs_number = 987',
"swissvotes.procedure_number = '987'",
f"{c_title_de} @@ to_tsquery('german', '987')",
f"{c_title_fr} @@ to_tsquery('french', '987')",
f"{c_short_title_de} @@ to_tsquery('german', '987')",
f"{c_short_title_fr} @@ to_tsquery('french', '987')",
f"{c_short_title_en} @@ to_tsquery('english', '987')",
f"{c_keyword} @@ to_tsquery('german', '987')",
]
assert compiled(term='17.060') == [
'swissvotes.bfs_number = 17.060',
"swissvotes.procedure_number = '17.060'",
f"{c_title_de} @@ to_tsquery('german', '17.060')",
f"{c_title_fr} @@ to_tsquery('french', '17.060')",
f"{c_short_title_de} @@ to_tsquery('german', '17.060')",
f"{c_short_title_fr} @@ to_tsquery('french', '17.060')",
f"{c_short_title_en} @@ to_tsquery('english', '17.060')",
f"{c_keyword} @@ to_tsquery('german', '17.060')",
]
assert compiled(term='17.12.2004') == [
f"{c_title_de} @@ to_tsquery('german', '17.12.2004')",
f"{c_title_fr} @@ to_tsquery('french', '17.12.2004')",
f"{c_short_title_de} @@ to_tsquery('german', '17.12.2004')",
f"{c_short_title_fr} @@ to_tsquery('french', '17.12.2004')",
f"{c_short_title_en} @@ to_tsquery('english', '17.12.2004')",
f"{c_keyword} @@ to_tsquery('german', '17.12.2004')",
]
assert compiled(term='1893_002') == [
"swissvotes.procedure_number = '1893_002'",
f"{c_title_de} @@ to_tsquery('german', '1893002')",
f"{c_title_fr} @@ to_tsquery('french', '1893002')",
f"{c_short_title_de} @@ to_tsquery('german', '1893002')",
f"{c_short_title_fr} @@ to_tsquery('french', '1893002')",
f"{c_short_title_en} @@ to_tsquery('english', '1893002')",
f"{c_keyword} @@ to_tsquery('german', '1893002')",
]
assert compiled(term='abc') == [
f"{c_title_de} @@ to_tsquery('german', 'abc')",
f"{c_title_fr} @@ to_tsquery('french', 'abc')",
f"{c_short_title_de} @@ to_tsquery('german', 'abc')",
f"{c_short_title_fr} @@ to_tsquery('french', 'abc')",
f"{c_short_title_en} @@ to_tsquery('english', 'abc')",
f"{c_keyword} @@ to_tsquery('german', 'abc')",
]
assert compiled(term='abc', full_text=True) == [
f"{c_title_de} @@ to_tsquery('german', 'abc')",
f"{c_title_fr} @@ to_tsquery('french', 'abc')",
f"{c_short_title_de} @@ to_tsquery('german', 'abc')",
f"{c_short_title_fr} @@ to_tsquery('french', 'abc')",
f"{c_short_title_en} @@ to_tsquery('english', 'abc')",
f"{c_keyword} @@ to_tsquery('german', 'abc')",
f"{c_initiator} @@ to_tsquery('german', 'abc')",
f"{c_text_de} @@ to_tsquery('german', 'abc')",
f"{c_text_fr} @@ to_tsquery('french', 'abc')",
f"{c_text_it} @@ to_tsquery('italian', 'abc')",
f"{c_text_en} @@ to_tsquery('english', 'abc')",
]
assert compiled(term='Müller') == [
f"{c_title_de} @@ to_tsquery('german', 'Müller')",
f"{c_title_fr} @@ to_tsquery('french', 'Müller')",
f"{c_short_title_de} @@ to_tsquery('german', 'Müller')",
f"{c_short_title_fr} @@ to_tsquery('french', 'Müller')",
f"{c_short_title_en} @@ to_tsquery('english', 'Müller')",
f"{c_keyword} @@ to_tsquery('german', 'Müller')",
]
assert compiled(term='20,20') == [
f"{c_title_de} @@ to_tsquery('german', '20,20')",
f"{c_title_fr} @@ to_tsquery('french', '20,20')",
f"{c_short_title_de} @@ to_tsquery('german', '20,20')",
f"{c_short_title_fr} @@ to_tsquery('french', '20,20')",
f"{c_short_title_en} @@ to_tsquery('english', '20,20')",
f"{c_keyword} @@ to_tsquery('german', '20,20')",
]
assert compiled(term='Neu!') == [
f"{c_title_de} @@ to_tsquery('german', 'Neu')",
f"{c_title_fr} @@ to_tsquery('french', 'Neu')",
f"{c_short_title_de} @@ to_tsquery('german', 'Neu')",
f"{c_short_title_fr} @@ to_tsquery('french', 'Neu')",
f"{c_short_title_en} @@ to_tsquery('english', 'Neu')",
f"{c_keyword} @@ to_tsquery('german', 'Neu')",
]
assert compiled(term='H P Müller') == [
f"{c_title_de} @@ to_tsquery('german', 'H <-> P <-> Müller')",
f"{c_title_fr} @@ to_tsquery('french', 'H <-> P <-> Müller')",
f"{c_short_title_de} @@ to_tsquery('german', 'H <-> P <-> Müller')",
f"{c_short_title_fr} @@ to_tsquery('french', 'H <-> P <-> Müller')",
f"{c_short_title_en} @@ to_tsquery('english', 'H <-> P <-> Müller')",
f"{c_keyword} @@ to_tsquery('german', 'H <-> P <-> Müller')",
]
assert compiled(term='x AND y') == [
f"{c_title_de} @@ to_tsquery('german', 'x <-> AND <-> y')",
f"{c_title_fr} @@ to_tsquery('french', 'x <-> AND <-> y')",
f"{c_short_title_de} @@ to_tsquery('german', 'x <-> AND <-> y')",
f"{c_short_title_fr} @@ to_tsquery('french', 'x <-> AND <-> y')",
f"{c_short_title_en} @@ to_tsquery('english', 'x <-> AND <-> y')",
f"{c_keyword} @@ to_tsquery('german', 'x <-> AND <-> y')",
]
assert compiled(term='x | y') == [
f"{c_title_de} @@ to_tsquery('german', 'x <-> y')",
f"{c_title_fr} @@ to_tsquery('french', 'x <-> y')",
f"{c_short_title_de} @@ to_tsquery('german', 'x <-> y')",
f"{c_short_title_fr} @@ to_tsquery('french', 'x <-> y')",
f"{c_short_title_en} @@ to_tsquery('english', 'x <-> y')",
f"{c_keyword} @@ to_tsquery('german', 'x <-> y')",
]
assert compiled(term='y ! y') == [
f"{c_title_de} @@ to_tsquery('german', 'y <-> y')",
f"{c_title_fr} @@ to_tsquery('french', 'y <-> y')",
f"{c_short_title_de} @@ to_tsquery('german', 'y <-> y')",
f"{c_short_title_fr} @@ to_tsquery('french', 'y <-> y')",
f"{c_short_title_en} @@ to_tsquery('english', 'y <-> y')",
f"{c_keyword} @@ to_tsquery('german', 'y <-> y')",
]
def test_votes_query(swissvotes_app):
    """Querying with each filter (dates, legal form, result, council
    positions, policy areas, term/full-text) returns the expected counts;
    the tail checks the tie-breaker positions (8/9) on the counter-proposal
    legal form."""
    votes = SwissVoteCollection(swissvotes_app)
    vote_1 = votes.add(
        id=1,
        bfs_number=Decimal('100'),
        date=date(1990, 6, 2),
        title_de="Abstimmung über diese Sache",
        title_fr="Vote sur cette question",
        short_title_de="diese Sache",
        short_title_fr="cette question",
        short_title_en="this thing",
        _legal_form=1,
        descriptor_1_level_1=Decimal('4'),
        descriptor_1_level_2=Decimal('4.2'),
        descriptor_1_level_3=Decimal('4.21'),
        descriptor_2_level_1=Decimal('10'),
        descriptor_2_level_2=Decimal('10.3'),
        descriptor_2_level_3=Decimal('10.35'),
        descriptor_3_level_1=Decimal('10'),
        descriptor_3_level_2=Decimal('10.3'),
        descriptor_3_level_3=Decimal('10.33'),
        _position_federal_council=3,
        _position_council_of_states=1,
        _position_national_council=2,
        _result=1,
    )
    votes.add(
        id=2,
        bfs_number=Decimal('200.1'),
        date=date(1990, 9, 2),
        title_de="Wir wollen diese Version die Sache",
        title_fr="Nous voulons cette version de la chose",
        short_title_de="diese Version",
        short_title_fr="cette version",
        short_title_en="that version",
        keyword="Variant A of X",
        initiator="The group that wants something",
        _legal_form=2,
        descriptor_1_level_1=Decimal('10'),
        descriptor_1_level_2=Decimal('10.3'),
        descriptor_1_level_3=Decimal('10.35'),
        descriptor_2_level_1=Decimal('1'),
        descriptor_2_level_2=Decimal('1.6'),
        descriptor_2_level_3=Decimal('1.62'),
        _position_federal_council=2,
        _position_council_of_states=2,
        _position_national_council=1,
        _result=1
    )
    votes.add(
        id=3,
        bfs_number=Decimal('200.2'),
        date=date(1990, 9, 2),
        title_de="Wir wollen nochmal etwas anderes",
        title_fr="Nous voulons encore une autre version de la chose",
        short_title_de="Nochmals etwas anderes",
        short_title_fr="encore une autre version",
        short_title_en="something else again",
        keyword="Variant B of X",
        _legal_form=2,
        descriptor_3_level_1=Decimal('8'),
        descriptor_3_level_2=Decimal('8.3'),
        _position_federal_council=1,
        _position_council_of_states=1,
        _position_national_council=1,
        _result=2
    )

    def count(**kwargs):
        # Helper: number of votes matching the given filters
        return SwissVoteCollection(swissvotes_app, **kwargs).query().count()
    assert count() == 3
    # Date range filters (inclusive on both ends)
    assert count(from_date=date(1900, 1, 1)) == 3
    assert count(from_date=date(1990, 6, 2)) == 3
    assert count(from_date=date(1990, 9, 2)) == 2
    assert count(from_date=date(1991, 3, 2)) == 0
    assert count(to_date=date(1900, 1, 1)) == 0
    assert count(to_date=date(1990, 6, 2)) == 1
    assert count(to_date=date(1990, 9, 2)) == 3
    assert count(to_date=date(1991, 3, 2)) == 3
    assert count(from_date=date(1990, 6, 2), to_date=date(1990, 6, 2)) == 1
    assert count(from_date=date(1990, 6, 2), to_date=date(1990, 9, 2)) == 3
    assert count(from_date=date(1990, 9, 2), to_date=date(1990, 6, 2)) == 0
    # Legal form (empty list means "no filter")
    assert count(legal_form=[]) == 3
    assert count(legal_form=[1]) == 1
    assert count(legal_form=[2]) == 2
    assert count(legal_form=[1, 2]) == 3
    assert count(legal_form=[3]) == 0
    # Result
    assert count(result=[]) == 3
    assert count(result=[1]) == 2
    assert count(result=[2]) == 1
    assert count(result=[1, 2]) == 3
    assert count(result=[3]) == 0
    # Council positions
    assert count(position_federal_council=[]) == 3
    assert count(position_federal_council=[1]) == 1
    assert count(position_federal_council=[2]) == 1
    assert count(position_federal_council=[1, 2]) == 2
    assert count(position_federal_council=[3]) == 1
    assert count(position_council_of_states=[]) == 3
    assert count(position_council_of_states=[1]) == 2
    assert count(position_council_of_states=[2]) == 1
    assert count(position_council_of_states=[1, 2]) == 3
    assert count(position_council_of_states=[3]) == 0
    assert count(position_national_council=[]) == 3
    assert count(position_national_council=[1]) == 2
    assert count(position_national_council=[2]) == 1
    assert count(position_national_council=[1, 2]) == 3
    assert count(position_national_council=[3]) == 0
    # Policy areas match on any descriptor at any level
    assert count(policy_area=['1']) == 1
    assert count(policy_area=['4']) == 1
    assert count(policy_area=['8']) == 1
    assert count(policy_area=['10']) == 2
    assert count(policy_area=['1', '4']) == 2
    assert count(policy_area=['8', '10']) == 3
    assert count(policy_area=['1', '8', '10']) == 3
    assert count(policy_area=['1', '4', '8', '10']) == 3
    assert count(policy_area=['4.42']) == 1
    assert count(policy_area=['4.42.421']) == 1
    assert count(policy_area=['4.42.421', '10']) == 1
    assert count(policy_area=['4.42.421', '10.103']) == 1
    assert count(policy_area=['4.42.421', '10.103.1033']) == 1
    assert count(policy_area=['4.42.421', '10.103.1035']) == 2
    # Term search over titles/keyword; full_text also hits initiator
    assert count(term='Abstimmung') == 1
    assert count(term='cette question') == 1
    assert count(term='version') == 2
    assert count(term='encore') == 1
    assert count(term='thing') == 1
    assert count(term='something') == 1
    assert count(term='riant') == 0
    assert count(term='A of X') == 1
    assert count(term='group') == 0
    assert count(term='group', full_text=True) == 1
    assert count(term='The group that wants something', full_text=True) == 1
    # test tie-breaker
    vote_1._legal_form = 5
    vote_1._position_federal_council = 8
    vote_1._position_council_of_states = 8
    vote_1._position_national_council = 8
    assert count(legal_form=[5], position_federal_council=[2]) == 1
    assert count(legal_form=[5], position_federal_council=[8]) == 1
    assert count(legal_form=[5], position_federal_council=[1]) == 0
    assert count(legal_form=[5], position_federal_council=[9]) == 0
    assert count(legal_form=[5], position_council_of_states=[2]) == 1
    assert count(legal_form=[5], position_council_of_states=[8]) == 1
    assert count(legal_form=[5], position_council_of_states=[1]) == 0
    assert count(legal_form=[5], position_council_of_states=[9]) == 0
    assert count(legal_form=[5], position_national_council=[2]) == 1
    assert count(legal_form=[5], position_national_council=[8]) == 1
    assert count(legal_form=[5], position_national_council=[1]) == 0
    assert count(legal_form=[5], position_national_council=[9]) == 0
    vote_1._position_federal_council = 9
    vote_1._position_council_of_states = 9
    vote_1._position_national_council = 9
    assert count(legal_form=[5], position_federal_council=[2]) == 0
    assert count(legal_form=[5], position_federal_council=[8]) == 0
    assert count(legal_form=[5], position_federal_council=[1]) == 1
    assert count(legal_form=[5], position_federal_council=[9]) == 1
    assert count(legal_form=[5], position_council_of_states=[2]) == 0
    assert count(legal_form=[5], position_council_of_states=[8]) == 0
    assert count(legal_form=[5], position_council_of_states=[1]) == 1
    assert count(legal_form=[5], position_council_of_states=[9]) == 1
    assert count(legal_form=[5], position_national_council=[2]) == 0
    assert count(legal_form=[5], position_national_council=[8]) == 0
    assert count(legal_form=[5], position_national_council=[1]) == 1
    assert count(legal_form=[5], position_national_council=[9]) == 1
def test_votes_query_attachments(swissvotes_app, attachments,
                                 postgres_version, campaign_material):
    """Full-text search reaches into attachment contents and campaign
    material, including prefix search (``Abst*``) and language-specific
    stemming."""
    votes = SwissVoteCollection(swissvotes_app)
    votes.add(
        id=1,
        bfs_number=Decimal('100'),
        date=date(1990, 6, 2),
        title_de="Vote on that one thing",
        title_fr="Vote on that one thing",
        short_title_de="Vote on that one thing",
        short_title_fr="Vote on that one thing",
        short_title_en="Vote on that one thing",
        _legal_form=1,
    )
    votes.add(
        id=2,
        bfs_number=Decimal('200.1'),
        date=date(1990, 9, 2),
        title_de="We want this version the thing",
        title_fr="We want this version the thing",
        short_title_de="We want this version the thing",
        short_title_fr="We want this version the thing",
        short_title_en="We want this version the thing",
        _legal_form=2,
    )
    vote = votes.add(
        id=3,
        bfs_number=Decimal('200.2'),
        date=date(1990, 9, 2),
        title_de="We want that version of the thing",
        title_fr="We want that version of the thing",
        short_title_de="We want that version of the thing",
        short_title_fr="We want that version of the thing",
        short_title_en="We want that version of the thing",
        _legal_form=2,
    )
    # Only the third vote carries attachments and campaign material
    for name, attachment in attachments.items():
        setattr(vote, name, attachment)
    vote.campaign_material_metadata = {
        'campaign_material_other-essay': {'language': ['de']},
        'campaign_material_other-leaflet': {'language': ['it']},
    }
    vote.files.append(campaign_material['campaign_material_other-essay.pdf'])
    vote.files.append(campaign_material['campaign_material_other-leaflet.pdf'])
    votes.session.flush()

    def count(**kwargs):
        # Helper: number of votes matching the given filters
        return SwissVoteCollection(swissvotes_app, **kwargs).query().count()
    assert count() == 3
    # Attachment content is only searched with full_text=True
    assert count(term='Abstimmungstext') == 0
    assert count(term='Abstimmungstext', full_text=True) == 1
    assert count(term='Abst*', full_text=True) == 1
    assert count(term='conseil', full_text=True) == 1
    assert count(term='Parlamentdebatte', full_text=True) == 1
    assert count(term='Réalisation', full_text=True) == 1
    assert count(term='booklet', full_text=True) == 0
    assert count(term='Abhandlung', full_text=True) == 1
    # Italian stemming: all inflections of "volantino" match the leaflet
    assert count(term='Volantino', full_text=True) == 1
    assert count(term='volantinare', full_text=True) == 1
    assert count(term='Volantini', full_text=True) == 1
def test_votes_order(swissvotes_app):
    """Sorting: defaults (date descending), toggling direction by
    re-selecting the same key, locale-aware title ordering, and fallbacks
    for invalid sort keys/orders."""
    votes = SwissVoteCollection(swissvotes_app)
    # Three votes whose sortable fields all increase with the index;
    # the French titles are the reversed strings, so title order differs
    # by locale.
    for index, title in enumerate(('Firsţ', 'Śecond', 'Thirḓ'), start=1):
        votes.add(
            id=index,
            bfs_number=Decimal(str(index)),
            date=date(1990, 6, index),
            title_de=title,
            title_fr=''.join(reversed(title)),
            short_title_de=title,
            short_title_fr=''.join(reversed(title)),
            short_title_en=''.join(reversed(title)),
            _legal_form=index,
            _result=index,
            result_people_yeas_p=index / 10,
            result_turnout=index / 10
        )
    # Only the active sort key reports a direction
    assert votes.sort_order_by_key('date') == 'descending'
    assert votes.sort_order_by_key('legal_form') == 'unsorted'
    assert votes.sort_order_by_key('result') == 'unsorted'
    assert votes.sort_order_by_key('result_people_yeas_p') == 'unsorted'
    assert votes.sort_order_by_key('result_turnout') == 'unsorted'
    assert votes.sort_order_by_key('title') == 'unsorted'
    assert votes.sort_order_by_key('invalid') == 'unsorted'
    # Invalid sort_by/sort_order fall back to date descending
    votes = SwissVoteCollection(swissvotes_app, sort_by='', sort_order='')
    assert votes.current_sort_by == 'date'
    assert votes.current_sort_order == 'descending'
    votes = SwissVoteCollection(swissvotes_app, sort_by='xx', sort_order='yy')
    assert votes.current_sort_by == 'date'
    assert votes.current_sort_order == 'descending'
    votes = SwissVoteCollection(swissvotes_app, sort_by='date',
                                sort_order='yy')
    assert votes.current_sort_by == 'date'
    assert votes.current_sort_order == 'descending'
    votes = SwissVoteCollection(swissvotes_app, sort_by='xx',
                                sort_order='ascending')
    assert votes.current_sort_by == 'date'
    assert votes.current_sort_order == 'descending'
    # A valid sort_by with an invalid order defaults to ascending
    votes = SwissVoteCollection(swissvotes_app, sort_by='result',
                                sort_order='yy')
    assert votes.current_sort_by == 'result'
    assert votes.current_sort_order == 'ascending'
    votes = SwissVoteCollection(swissvotes_app)
    assert votes.current_sort_by == 'date'
    assert votes.current_sort_order == 'descending'
    assert 'date' in str(votes.order_by)
    assert 'DESC' in str(votes.order_by)
    assert [vote.id for vote in votes.query()] == [3, 2, 1]
    # by_order on the current key flips the direction
    votes = votes.by_order('date')
    assert votes.current_sort_by == 'date'
    assert votes.current_sort_order == 'ascending'
    assert 'date' in str(votes.order_by)
    assert [vote.id for vote in votes.query()] == [1, 2, 3]
    votes = votes.by_order('legal_form')
    assert votes.current_sort_by == 'legal_form'
    assert votes.current_sort_order == 'ascending'
    assert 'legal_form' in str(votes.order_by)
    assert [vote.id for vote in votes.query()] == [1, 2, 3]
    votes = votes.by_order('legal_form')
    assert votes.current_sort_by == 'legal_form'
    assert votes.current_sort_order == 'descending'
    assert 'legal_form' in str(votes.order_by)
    assert 'DESC' in str(votes.order_by)
    assert [vote.id for vote in votes.query()] == [3, 2, 1]
    votes = votes.by_order('result')
    assert votes.current_sort_by == 'result'
    assert votes.current_sort_order == 'ascending'
    assert 'result' in str(votes.order_by)
    assert [vote.id for vote in votes.query()] == [1, 2, 3]
    votes = votes.by_order('result')
    assert votes.current_sort_by == 'result'
    assert votes.current_sort_order == 'descending'
    assert 'result' in str(votes.order_by)
    assert 'DESC' in str(votes.order_by)
    assert [vote.id for vote in votes.query()] == [3, 2, 1]
    votes = votes.by_order('result_people_yeas_p')
    assert votes.current_sort_by == 'result_people_yeas_p'
    assert votes.current_sort_order == 'ascending'
    assert 'result_people_yeas_p' in str(votes.order_by)
    assert [vote.id for vote in votes.query()] == [1, 2, 3]
    votes = votes.by_order('result_people_yeas_p')
    assert votes.current_sort_by == 'result_people_yeas_p'
    assert votes.current_sort_order == 'descending'
    assert 'result_people_yeas_p' in str(votes.order_by)
    assert 'DESC' in str(votes.order_by)
    assert [vote.id for vote in votes.query()] == [3, 2, 1]
    votes = votes.by_order('result_turnout')
    assert votes.current_sort_by == 'result_turnout'
    assert votes.current_sort_order == 'ascending'
    assert 'result_turnout' in str(votes.order_by)
    assert [vote.id for vote in votes.query()] == [1, 2, 3]
    votes = votes.by_order('result_turnout')
    assert votes.current_sort_by == 'result_turnout'
    assert votes.current_sort_order == 'descending'
    assert 'result_turnout' in str(votes.order_by)
    assert 'DESC' in str(votes.order_by)
    assert [vote.id for vote in votes.query()] == [3, 2, 1]
    votes = votes.by_order('title')
    assert votes.current_sort_by == 'title'
    assert votes.current_sort_order == 'ascending'
    assert 'title' in str(votes.order_by)
    assert [vote.id for vote in votes.query()] == [1, 2, 3]
    votes = votes.by_order('title')
    assert votes.current_sort_by == 'title'
    assert votes.current_sort_order == 'descending'
    assert 'title' in str(votes.order_by)
    assert 'DESC' in str(votes.order_by)
    assert [vote.id for vote in votes.query()] == [3, 2, 1]
    # Title sorting is locale-aware: the French titles order differently
    votes.app.session_manager.current_locale = 'fr_CH'
    assert [vote.id for vote in votes.query()] == [1, 3, 2]
    # Falsy/invalid keys reset to the default sort
    votes = votes.by_order(None)
    assert votes.current_sort_by == 'date'
    assert votes.current_sort_order == 'descending'
    votes = votes.by_order('')
    assert votes.current_sort_by == 'date'
    assert votes.current_sort_order == 'descending'
    votes = votes.by_order('xxx')
    assert votes.current_sort_by == 'date'
    assert votes.current_sort_order == 'descending'
def test_votes_available_descriptors(swissvotes_app):
    """available_descriptors aggregates the distinct policy-area codes of
    all votes, one set per descriptor level."""
    votes = SwissVoteCollection(swissvotes_app)
    assert votes.available_descriptors == [set(), set(), set()]
    votes.add(
        id=1,
        bfs_number=Decimal('1'),
        date=date(1990, 6, 2),
        title_de="Vote",
        title_fr="Vote",
        short_title_de="Vote",
        short_title_fr="Vote",
        short_title_en="Vote",
        _legal_form=1,
        descriptor_1_level_1=Decimal('4'),
        descriptor_1_level_2=Decimal('4.2'),
        descriptor_1_level_3=Decimal('4.21'),
        descriptor_2_level_1=Decimal('10'),
        descriptor_2_level_2=Decimal('10.3'),
        descriptor_2_level_3=Decimal('10.35'),
        descriptor_3_level_1=Decimal('10'),
        descriptor_3_level_2=Decimal('10.3'),
        descriptor_3_level_3=Decimal('10.33'),
    )
    votes.add(
        id=2,
        bfs_number=Decimal('2'),
        date=date(1990, 6, 2),
        title_de="Vote",
        title_fr="Vote",
        short_title_de="Vote",
        short_title_fr="Vote",
        short_title_en="Vote",
        _legal_form=1,
        descriptor_1_level_1=Decimal('10'),
        descriptor_1_level_2=Decimal('10.3'),
        descriptor_1_level_3=Decimal('10.35'),
        descriptor_2_level_1=Decimal('1'),
        descriptor_2_level_2=Decimal('1.6'),
        descriptor_2_level_3=Decimal('1.62'),
    )
    votes.add(
        id=3,
        bfs_number=Decimal('3'),
        date=date(1990, 6, 2),
        title_de="Vote",
        title_fr="Vote",
        short_title_de="Vote",
        short_title_fr="Vote",
        short_title_en="Vote",
        _legal_form=1,
        descriptor_3_level_1=Decimal('8'),
        descriptor_3_level_2=Decimal('8.3'),
    )
    # Duplicates collapse; values come back normalized to two decimals
    assert SwissVoteCollection(swissvotes_app).available_descriptors == [
        {Decimal('1.00'), Decimal('4.00'), Decimal('8.00'), Decimal('10.00')},
        {Decimal('1.60'), Decimal('4.20'), Decimal('8.30'), Decimal('10.30')},
        {Decimal('1.62'), Decimal('4.21'), Decimal('10.33'), Decimal('10.35')}
    ]
def test_votes_update(swissvotes_app):
    """``update()`` inserts unseen votes and reports genuine changes only.

    The returned tuple is ``(added, updated)``: re-submitting identical data
    counts as neither, while changed data counts as an update.
    """
    votes = SwissVoteCollection(swissvotes_app)

    def make_vote(number, title):
        # Minimal valid vote; only the bfs number and title texts vary here.
        return SwissVote(
            bfs_number=Decimal(number),
            date=date(1990, 6, 1),
            title_de=title,
            title_fr=title,
            short_title_de=title,
            short_title_fr=title,
            short_title_en=title,
            _legal_form=1,
        )

    # Two new votes are added.
    added, updated = votes.update(
        [make_vote('1', "First"), make_vote('2', "Second")]
    )
    assert (added, updated) == (2, 0)
    assert votes.query().count() == 2

    # Re-submitting identical data changes nothing.
    added, updated = votes.update([make_vote('1', "First")])
    assert (added, updated) == (0, 0)

    # A changed title is detected and applied as an update.
    added, updated = votes.update([make_vote('1', "First vote")])
    assert (added, updated) == (0, 1)
    assert votes.by_bfs_number(Decimal('1')).title == 'First vote'
def test_votes_update_metadata(swissvotes_app):
    """``update_metadata()`` merges campaign-material metadata per vote.

    Returns ``(added, updated)`` counted per file entry; entries for bfs
    numbers without a matching vote are counted but not attached anywhere,
    and re-submitting identical entries does not count again.
    """
    votes = SwissVoteCollection(swissvotes_app)

    def add_vote(number, title):
        # Minimal valid vote fixture differing only in number and titles.
        return votes.add(
            bfs_number=Decimal(number),
            date=date(1990, 6, 1),
            title_de=title,
            title_fr=title,
            short_title_de=title,
            short_title_fr=title,
            short_title_en=title,
            _legal_form=1,
        )

    vote_1 = add_vote('1', "First")
    vote_2 = add_vote('2', "Second")

    # First batch: two files for vote 1; bfs number 3 has no vote.
    # NOTE(review): 'leafet.pdf' looks like a typo for 'leaflet.pdf' —
    # kept as-is since it is consistent test data.
    added, updated = votes.update_metadata({
        Decimal('1'): {
            'essay.pdf': {'a': 10, 'b': 11},
            'leafet.pdf': {'a': 20, 'c': 21},
        },
        Decimal('3'): {
            'article.pdf': {'a': 30, 'b': 31},
        },
    })
    assert (added, updated) == (2, 0)

    # Second batch: one changed file, one new file, one new vote entry,
    # one unchanged entry for the missing vote.
    added, updated = votes.update_metadata({
        Decimal('1'): {
            'essay.pdf': {'a': 10, 'b': 12},
            'letter.pdf': {'a': 40},
        },
        Decimal('2'): {
            'legal.pdf': {'a': 40},
        },
        Decimal('3'): {
            'article.pdf': {'a': 30, 'b': 31},
        },
    })
    assert (added, updated) == (2, 1)

    # Metadata is merged per file name, not replaced wholesale.
    assert vote_1.campaign_material_metadata == {
        'essay.pdf': {'a': 10, 'b': 12},
        'leafet.pdf': {'a': 20, 'c': 21},
        'letter.pdf': {'a': 40}
    }
    assert vote_2.campaign_material_metadata == {
        'legal.pdf': {'a': 40}
    }
def test_votes_export(swissvotes_app):
votes = SwissVoteCollection(swissvotes_app)
vote = votes.add(
bfs_number=Decimal('100.1'),
date=date(1990, 6, 2),
title_de="Vote DE",
title_fr="Vote FR",
short_title_de="V D",
short_title_fr="V F",
short_title_en="V E",
keyword="Keyword",
_legal_form=1,
_parliamentary_initiated=1,
initiator="Initiator",
anneepolitique="anneepolitique",
descriptor_1_level_1=Decimal('4'),
descriptor_1_level_2=Decimal('4.2'),
descriptor_1_level_3=Decimal('4.21'),
descriptor_2_level_1=Decimal('10'),
descriptor_2_level_2=Decimal('10.3'),
descriptor_2_level_3=Decimal('10.35'),
descriptor_3_level_1=Decimal('10'),
descriptor_3_level_2=Decimal('10.3'),
descriptor_3_level_3=Decimal('10.33'),
_result=1,
result_turnout=Decimal('20.01'),
_result_people_accepted=1,
result_people_yeas_p=Decimal('40.01'),
_result_cantons_accepted=1,
result_cantons_yeas=Decimal('1.5'),
result_cantons_nays=Decimal('24.5'),
brief_description_title='Kurzbeschreibung'
)
vote._result_ag_accepted = 0
vote._result_ai_accepted = 0
vote._result_ar_accepted = 0
vote._result_be_accepted = 0
vote._result_bl_accepted = 0
vote._result_bs_accepted = 0
vote._result_fr_accepted = 0
vote._result_ge_accepted = 0
vote._result_gl_accepted = 0
vote._result_gr_accepted = 0
vote._result_ju_accepted = 0
vote._result_lu_accepted = 0
vote._result_ne_accepted = 0
vote._result_nw_accepted = 0
vote._result_ow_accepted = 0
vote._result_sg_accepted = 0
vote._result_sh_accepted = 0
vote._result_so_accepted = 0
vote._result_sz_accepted = 0
vote._result_tg_accepted = 0
vote._result_ti_accepted = 0
vote._result_ur_accepted = 0
vote._result_vd_accepted = 0
vote._result_vs_accepted = 0
vote._result_zg_accepted = 0
vote._result_zh_accepted = 0
vote.procedure_number = '24.557'
vote._position_federal_council = 1
vote._position_parliament = 1
vote._position_national_council = 1
vote.position_national_council_yeas = 10
vote.position_national_council_nays = 20
vote._position_council_of_states = 1
vote.position_council_of_states_yeas = 30
vote.position_council_of_states_nays = 40
vote.duration_federal_assembly = 30
vote.duration_initative_collection = 32
vote.duration_referendum_collection = 35
vote.signatures_valid = 40
vote.recommendations = {
'fdp': 1,
'cvp': 1,
'sps': 1,
'svp': 1,
'lps': 2,
'ldu': 2,
'evp': 2,
'csp': 3,
'pda': 3,
'poch': 3,
'gps': 4,
'sd': 4,
'rep': 4,
'edu': 5,
'fps': 5,
'lega': 5,
'kvp': 66,
'glp': 66,
'bdp': None,
'mcg': 9999,
'mitte': 9999,
'sav': 1,
'eco': 2,
'sgv': 3,
'sbv-usp': 3,
'sgb': 3,
'travs': 3,
'vsa': 9999,
'vpod': 9999,
'ssv': 9999,
'gem': 9999,
'kdk': 9999,
'vdk': 9999,
'endk': 9999,
'fdk': 9999,
'edk': 9999,
'gdk': 9999,
'ldk': 9999,
'sodk': 9999,
'kkjpd': 9999,
'bpuk': 9999,
'sbk': 9999,
'acs': 9999,
'tcs': 9999,
'vcs': 9999,
'voev': 9999,
}
vote.recommendations_other_yes = "Pro Velo"
vote.recommendations_other_no = "Biosuisse"
vote.recommendations_other_free = "Pro Natura, Greenpeace"
vote.recommendations_other_counter_proposal = "Pro Juventute"
vote.recommendations_other_popular_initiative = "Pro Senectute"
vote.recommendations_divergent = {
'bdp_ag': 1,
'bdp_ai': 1,
'bdp_ar': 1,
'bdp_be': 1,
'bdp_bl': 1,
'bdp_bs': 1,
'bdp_fr': 1,
'bdp_ge': 1,
'bdp_gl': 1,
'bdp_gr': 1,
'bdp_ju': 1,
'bdp_lu': 1,
'bdp_ne': 1,
'bdp_nw': 1,
'bdp_ow': 1,
'bdp_sg': 1,
'bdp_sh': 1,
'bdp_so': 1,
'bdp_sz': 1,
'bdp_tg': 1,
'bdp_ti': 1,
'bdp_ur': 1,
'bdp_vd': 1,
'bdp_vs': 1,
'bdp_vsr': 1,
'bdp_vso': 1,
'bdp_zg': 1,
'bdp_zh': 1,
'jbdp_ch': 1,
'csp_fr': 1,
'csp_gr': 1,
'csp_ju': 1,
'csp_lu': 1,
'csp_ow': 1,
'csp_sg': 1,
'csp_vs': 1,
'csp_vsr': 1,
'csp_vso': 1,
'csp_zh': 1,
'cvp-fr_ch': 1,
'cvp_ag': 1,
'cvp_ai': 1,
'cvp_ar': 1,
'cvp_be': 1,
'cvp_bl': 1,
'cvp_bs': 1,
'cvp_fr': 1,
'cvp_ge': 1,
'cvp_gl': 1,
'cvp_gr': 1,
'cvp_ju': 1,
'cvp_lu': 1,
'cvp_ne': 1,
'cvp_nw': 1,
'cvp_ow': 1,
'cvp_sg': 1,
'cvp_sh': 1,
'cvp_so': 1,
'cvp_sz': 1,
'cvp_tg': 1,
'cvp_ti': 1,
'cvp_ur': 1,
'cvp_vd': 1,
'cvp_vs': 1,
'cvp_vsr': 1,
'cvp_vso': 1,
'cvp_zg': 1,
'cvp_zh': 1,
'jcvp_ch': 1,
'jcvp_ag': 1,
'jcvp_be': 1,
'jcvp_gr': 1,
'jcvp_lu': 1,
'jcvp_so': 1,
'jcvp_zh': 1,
'edu_ag': 1,
'edu_ai': 1,
'edu_ar': 1,
'edu_be': 1,
'edu_bl': 1,
'edu_bs': 1,
'edu_fr': 1,
'edu_ge': 1,
'edu_gl': 1,
'edu_gr': 1,
'edu_ju': 1,
'edu_lu': 1,
'edu_ne': 1,
'edu_nw': 1,
'edu_ow': 1,
'edu_sg': 1,
'edu_sh': 1,
'edu_so': 1,
'edu_sz': 1,
'edu_tg': 1,
'edu_ti': 1,
'edu_ur': 1,
'edu_vd': 1,
'edu_vs': 1,
'edu_vsr': 1,
'edu_vso': 1,
'edu_zg': 1,
'edu_zh': 1,
'evp_ag': 1,
'evp_ai': 1,
'evp_ar': 1,
'evp_be': 1,
'evp_bl': 1,
'evp_bs': 1,
'evp_fr': 1,
'evp_ge': 1,
'evp_gl': 1,
'evp_gr': 1,
'evp_ju': 1,
'evp_lu': 1,
'evp_ne': 1,
'evp_nw': 1,
'evp_ow': 1,
'evp_sg': 1,
'evp_sh': 1,
'evp_so': 1,
'evp_sz': 1,
'evp_tg': 1,
'evp_ti': 1,
'evp_ur': 1,
'evp_vd': 1,
'evp_vs': 1,
'evp_zg': 1,
'evp_zh': 1,
'jevp_ch': 1,
'fdp-fr_ch': 1,
'fdp_ag': 1,
'fdp_ai': 1,
'fdp_ar': 1,
'fdp_be': 1,
'fdp_bl': 1,
'fdp_bs': 1,
'fdp_fr': 1,
'fdp_ge': 1,
'fdp_gl': 1,
'fdp_gr': 1,
'fdp_ju': 1,
'fdp_lu': 1,
'fdp_ne': 1,
'fdp_nw': 1,
'fdp_ow': 1,
'fdp_sg': 1,
'fdp_sh': 1,
'fdp_so': 1,
'fdp_sz': 1,
'fdp_tg': 1,
'fdp_ti': 1,
'fdp_ur': 1,
'fdp_vd': 1,
'fdp_vs': 1,
'fdp_vsr': 1,
'fdp_vso': 1,
'fdp_zg': 1,
'fdp_zh': 1,
'jfdp_ch': 1,
'jfdp_ag': 1,
'jfdp_bl': 1,
'jfdp_fr': 1,
'jfdp_gr': 1,
'jfdp_ju': 1,
'jfdp_lu': 1,
'jfdp_sh': 1,
'jfdp_ti': 1,
'jfdp_vd': 1,
'jfdp_zh': 1,
'fps_ag': 1,
'fps_ai': 1,
'fps_be': 1,
'fps_bl': 1,
'fps_bs': 1,
'fps_sg': 1,
'fps_sh': 1,
'fps_so': 1,
'fps_tg': 1,
'fps_zh': 1,
'glp_ag': 1,
'glp_ai': 1,
'glp_ar': 1,
'glp_be': 1,
'glp_bl': 1,
'glp_bs': 1,
'glp_fr': 1,
'glp_ge': 1,
'glp_gl': 1,
'glp_gr': 1,
'glp_ju': 1,
'glp_lu': 1,
'glp_ne': 1,
'glp_nw': 1,
'glp_ow': 1,
'glp_sg': 1,
'glp_sh': 1,
'glp_so': 1,
'glp_sz': 1,
'glp_tg': 1,
'glp_ti': 1,
'glp_ur': 1,
'glp_vd': 1,
'glp_vs': 1,
'glp_vsr': 1,
'glp_vso': 1,
'glp_zg': 1,
'glp_zh': 1,
'jglp_ch': 1,
'gps_ag': 66,
'gps_ai': 66,
'gps_ar': 66,
'gps_be': 66,
'gps_bl': 66,
'gps_bs': 66,
'gps_fr': 66,
'gps_ge': 66,
'gps_gl': 66,
'gps_gr': 66,
'gps_ju': 66,
'gps_lu': 66,
'gps_ne': 66,
'gps_nw': 66,
'gps_ow': 66,
'gps_sg': 66,
'gps_sh': 66,
'gps_so': 66,
'gps_sz': 66,
'gps_tg': 66,
'gps_ti': 66,
'gps_ur': 66,
'gps_vd': 66,
'gps_vs': 66,
'gps_vsr': 66,
'gps_vso': 66,
'gps_zg': 66,
'gps_zh': 66,
'jgps_ch': 66,
'kvp_sg': 1,
'lps_be': 1,
'lps_bl': 1,
'lps_bs': 1,
'lps_fr': 1,
'lps_ge': 1,
'lps_ju': 1,
'lps_ne': 1,
'lps_sg': 1,
'lps_ti': 1,
'lps_vd': 1,
'lps_vs': 1,
'lps_zh': 1,
'jlps_ch': 1,
'jlps_so': 1,
'jlps_ti': 1,
'ldu_ag': 1,
'ldu_ar': 1,
'ldu_be': 1,
'ldu_bl': 1,
'ldu_bs': 1,
'ldu_gr': 1,
'ldu_lu': 1,
'ldu_ne': 1,
'ldu_sg': 1,
'ldu_sh': 1,
'ldu_so': 1,
'ldu_tg': 1,
'ldu_vd': 1,
'ldu_zg': 1,
'ldu_zh': 1,
'jldu_ch': 1,
'poch_so': 2,
'poch_zh': 2,
'pda_be': 1,
'pda_bl': 1,
'pda_bs': 1,
'pda_ge': 1,
'pda_ju': 1,
'pda_ne': 1,
'pda_sg': 1,
'pda_ti': 1,
'pda_vd': 1,
'pda_zh': 1,
'jpda_ch': 1,
'rep_ag': 1,
'rep_ge': 1,
'rep_ne': 1,
'rep_tg': 1,
'rep_vd': 1,
'rep_zh': 1,
'sd_ag': 1,
'sd_be': 1,
'sd_bl': 1,
'sd_bs': 1,
'sd_fr': 1,
'sd_ge': 1,
'sd_gr': 1,
'sd_lu': 1,
'sd_ne': 1,
'sd_sg': 1,
'sd_so': 1,
'sd_tg': 1,
'sd_ti': 1,
'sd_vd': 1,
'sd_zh': 1,
'jsd_ch': 1,
'sps_ag': 1,
'sps_ai': 1,
'sps_ar': 1,
'sps_be': 1,
'sps_bl': 1,
'sps_bs': 1,
'sps_fr': 1,
'sps_ge': 1,
'sps_gl': 1,
'sps_gr': 1,
'sps_ju': 1,
'sps_lu': 1,
'sps_ne': 1,
'sps_nw': 1,
'sps_ow': 1,
'sps_sg': 1,
'sps_sh': 1,
'sps_so': 1,
'sps_sz': 1,
'sps_tg': 1,
'sps_ti': 1,
'sps_ur': 1,
'sps_vd': 1,
'sps_vs': 1,
'sps_vsr': 1,
'sps_vso': 1,
'sps_zg': 1,
'sps_zh': 1,
'juso_ch': 1,
'juso_be': 1,
'juso_ge': 1,
'juso_ju': 1,
'juso_ti': 1,
'juso_vs': 1,
'juso_zh': 1,
'svp_ag': 3,
'svp_ai': 3,
'svp_ar': 3,
'svp_be': 3,
'svp_bl': 3,
'svp_bs': 3,
'svp_fr': 3,
'svp_ge': 3,
'svp_gl': 3,
'svp_gr': 3,
'svp_ju': 3,
'svp_lu': 3,
'svp_ne': 3,
'svp_nw': 3,
'svp_ow': 3,
'svp_sg': 3,
'svp_sh': 3,
'svp_so': 3,
'svp_sz': 3,
'svp_tg': 3,
'svp_ti': 3,
'svp_ur': 3,
'svp_vd': 3,
'svp_vs': 3,
'svp_vsr': 3,
'svp_vso': 3,
'svp_zg': 3,
'svp_zh': 3,
'jsvp_ch': 3,
'jsvp_ag': 3,
'jsvp_be': 3,
'jsvp_ge': 3,
'jsvp_sh': 3,
'jsvp_ur': 3,
'jsvp_zh': 3,
'sgb_ag': 1,
'sgb_ju': 1,
'sgb_vs': 1,
'sgv_ag': 1,
'sgv_bs': 1,
'sgv_sh': 1,
'vpod_ge': 1,
'vpod_vd': 1,
'mitte-fr_ch': 2,
'mitte_ag': 2,
'mitte_ai': 2,
'mitte_ar': 2,
'mitte_be': 2,
'mitte_bl': 2,
'mitte_bs': 2,
'mitte_fr': 2,
'mitte_ge': 2,
'mitte_gl': 2,
'mitte_gr': 2,
'mitte_ju': 2,
'mitte_lu': 2,
'mitte_ne': 2,
'mitte_nw': 2,
'mitte_ow': 2,
'mitte_sg': 2,
'mitte_sh': 2,
'mitte_so': 2,
'mitte_sz': 2,
'mitte_tg': 2,
'mitte_ti': 2,
'mitte_ur': 2,
'mitte_vd': 2,
'mitte_vs': 2,
'mitte_vsr': 2,
'mitte_vso': 2,
'mitte_zg': 2,
'mitte_zh': 2,
'jmitte_ch': 2,
'jmitte_ag': 2,
'jmitte_ai': 2,
'jmitte_ar': 2,
'jmitte_be': 2,
'jmitte_bl': 2,
'jmitte_bs': 2,
'jmitte_fr': 2,
'jmitte_ge': 2,
'jmitte_gl': 2,
'jmitte_gr': 2,
'jmitte_ju': 2,
'jmitte_lu': 2,
'jmitte_ne': 2,
'jmitte_nw': 2,
'jmitte_ow': 2,
'jmitte_sg': 2,
'jmitte_sh': 2,
'jmitte_so': 2,
'jmitte_sz': 2,
'jmitte_tg': 2,
'jmitte_ti': 2,
'jmitte_ur': 2,
'jmitte_vd': 2,
'jmitte_vs': 2,
'jmitte_vsr': 2,
'jmitte_vso': 2,
'jmitte_zg': 2,
'jmitte_zh': 2,
}
vote.national_council_election_year = 1990
vote.national_council_share_fdp = Decimal('01.10')
vote.national_council_share_cvp = Decimal('02.10')
vote.national_council_share_sps = Decimal('03.10')
vote.national_council_share_svp = Decimal('04.10')
vote.national_council_share_lps = Decimal('05.10')
vote.national_council_share_ldu = Decimal('06.10')
vote.national_council_share_evp = Decimal('07.10')
vote.national_council_share_csp = Decimal('08.10')
vote.national_council_share_pda = Decimal('09.10')
vote.national_council_share_poch = Decimal('10.10')
vote.national_council_share_gps = Decimal('11.10')
vote.national_council_share_sd = Decimal('12.10')
vote.national_council_share_rep = Decimal('13.10')
vote.national_council_share_edu = Decimal('14.10')
vote.national_council_share_fps = Decimal('15.10')
vote.national_council_share_lega = Decimal('16.10')
vote.national_council_share_kvp = Decimal('17.10')
vote.national_council_share_glp = Decimal('18.10')
vote.national_council_share_bdp = Decimal('19.10')
vote.national_council_share_mcg = Decimal('20.20')
vote.national_council_share_mitte = Decimal('20.10')
vote.national_council_share_ubrige = Decimal('21.20')
vote.national_council_share_yeas = Decimal('22.20')
vote.national_council_share_nays = Decimal('23.20')
vote.national_council_share_neutral = Decimal('24.20')
vote.national_council_share_none = Decimal('25.20')
vote.national_council_share_empty = Decimal('26.20')
vote.national_council_share_free_vote = Decimal('27.20')
vote.national_council_share_unknown = Decimal('28.20')
vote.bfs_map_de = 'map de'
vote.bfs_map_fr = 'map fr'
vote.link_curia_vista_de = 'https://curia.vista/de'
vote.link_curia_vista_fr = 'https://curia.vista/fr'
vote.link_easyvote_de = 'https://easy.vote/de'
vote.link_easyvote_fr = 'https://easy.vote/fr'
vote.link_bk_results_de = 'https://bk.results/de'
vote.link_bk_results_fr = 'https://bk.results/fr'
vote.link_bk_chrono_de = 'https://bk.chrono/de'
vote.link_bk_chrono_fr = 'https://bk.chrono/fr'
vote.link_federal_council_de = 'https://federal.council/de'
vote.link_federal_council_fr = 'https://federal.council/fr'
vote.link_federal_council_en = 'https://federal.council/en'
vote.link_federal_departement_de = 'https://federal.departement/de'
vote.link_federal_departement_fr = 'https://federal.departement/fr'
vote.link_federal_departement_en = 'https://federal.departement/en'
vote.link_federal_office_de = 'https://federal.office/de'
vote.link_federal_office_fr = 'https://federal.office/fr'
vote.link_federal_office_en = 'https://federal.office/en'
vote.posters_mfg_yea = (
'https://museum.ch/objects/1 '
'https://museum.ch/objects/2'
)
vote.posters_mfg_nay = (
'https://museum.ch/objects/3 '
'https://museum.ch/objects/4'
)
vote.posters_sa_yea = (
'https://sozialarchiv.ch/objects/1 '
'https://sozialarchiv.ch/objects/2'
)
vote.posters_sa_nay = (
'https://sozialarchiv.ch/objects/3 '
'https://sozialarchiv.ch/objects/4'
)
vote.link_post_vote_poll_de = 'https://post.vote.poll/de'
vote.link_post_vote_poll_fr = 'https://post.vote.poll/fr'
vote.link_post_vote_poll_en = 'https://post.vote.poll/en'
vote.media_ads_total = 1001
vote.media_ads_yea_p = Decimal('10.06')
vote.media_coverage_articles_total = 1007
vote.media_coverage_tonality_total = Decimal('10.10')
votes.session.flush()
votes.session.expire_all()
file = StringIO()
votes.export_csv(file)
file.seek(0)
rows = list(DictReader(file))
assert len(rows) == 1
csv = dict(rows[0])
expected = {
'anr': '100,1',
'datum': '02.06.1990',
'titel_off_d': 'Vote DE',
'titel_off_f': 'Vote FR',
'titel_kurz_d': 'V D',
'titel_kurz_f': 'V F',
'titel_kurz_e': 'V E',
'kurzbetitel': 'Kurzbeschreibung',
'stichwort': 'Keyword',
'rechtsform': '1',
'pa-iv': '1',
'd1e1': '4',
'd1e2': '4,2',
'd1e3': '4,21',
'd2e1': '10',
'd2e2': '10,3',
'd2e3': '10,35',
'd3e1': '10',
'd3e2': '10,3',
'd3e3': '10,33',
'volk': '1',
'stand': '1',
'annahme': '1',
'bet': '20,01',
'volkja-proz': '40,01',
'kt-ja': '1,5',
'kt-nein': '24,5',
'ag-annahme': '0',
'ai-annahme': '0',
'ar-annahme': '0',
'be-annahme': '0',
'bkchrono-de': 'https://bk.chrono/de',
'bkchrono-fr': 'https://bk.chrono/fr',
'bkresults-de': 'https://bk.results/de',
'bkresults-fr': 'https://bk.results/fr',
'bl-annahme': '0',
'bs-annahme': '0',
'curiavista-de': 'https://curia.vista/de',
'curiavista-fr': 'https://curia.vista/fr',
'easyvideo_de': 'https://easy.vote/de',
'easyvideo_fr': 'https://easy.vote/fr',
'info_br-de': 'https://federal.council/de',
'info_br-fr': 'https://federal.council/fr',
'info_br-en': 'https://federal.council/en',
'info_dep-de': 'https://federal.departement/de',
'info_dep-fr': 'https://federal.departement/fr',
'info_dep-en': 'https://federal.departement/en',
'info_amt-de': 'https://federal.office/de',
'info_amt-fr': 'https://federal.office/fr',
'info_amt-en': 'https://federal.office/en',
'fr-annahme': '0',
'ge-annahme': '0',
'gl-annahme': '0',
'gr-annahme': '0',
'ju-annahme': '0',
'lu-annahme': '0',
'ne-annahme': '0',
'nw-annahme': '0',
'ow-annahme': '0',
'sg-annahme': '0',
'sh-annahme': '0',
'so-annahme': '0',
'sz-annahme': '0',
'tg-annahme': '0',
'ti-annahme': '0',
'ur-annahme': '0',
'vd-annahme': '0',
'vs-annahme': '0',
'zg-annahme': '0',
'zh-annahme': '0',
'gesch_nr': '24.557',
'br-pos': '1',
'bv-pos': '1',
'nr-pos': '1',
'nrja': '10',
'nrnein': '20',
'sr-pos': '1',
'srja': '30',
'srnein': '40',
'dauer_bv': '30',
'i-dauer_samm': '32',
'fr-dauer_samm': '35',
'unter_g': '40',
'p-fdp': '1',
'p-cvp': '1',
'p-sps': '1',
'p-svp': '1',
'p-lps': '2',
'p-ldu': '2',
'p-evp': '2',
'p-ucsp': '3',
'p-pda': '3',
'p-poch': '3',
'p-gps': '4',
'p-sd': '4',
'p-rep': '4',
'p-edu': '5',
'p-fps': '5',
'p-lega': '5',
'p-kvp': '66',
'p-glp': '66',
'p-bdp': '.',
'p-mcg': '9999',
'p-mitte': '9999',
'p-sav': '1',
'p-eco': '2',
'p-sgv': '3',
'p-sbv': '3',
'p-sgb': '3',
'p-travs': '3',
'p-vsa': '9999',
'p-vpod': '9999',
'p-ssv': '9999',
'p-gem': '9999',
'p-kdk': '9999',
'p-vdk': '9999',
'p-endk': '9999',
'p-fdk': '9999',
'p-edk': '9999',
'p-gdk': '9999',
'p-ldk': '9999',
'p-sodk': '9999',
'p-kkjpd': '9999',
'p-bpuk': '9999',
'p-sbk': '9999',
'p-acs': '9999',
'p-tcs': '9999',
'p-vcs': '9999',
'p-voev': '9999',
'p-others_yes': 'Pro Velo',
'p-others_no': 'Biosuisse',
'p-others_free': 'Pro Natura, Greenpeace',
'p-others_counterp': 'Pro Juventute',
'p-others_init': 'Pro Senectute',
'pdev-bdp_AG': '1',
'pdev-bdp_AI': '1',
'pdev-bdp_AR': '1',
'pdev-bdp_BE': '1',
'pdev-bdp_BL': '1',
'pdev-bdp_BS': '1',
'pdev-bdp_FR': '1',
'pdev-bdp_GE': '1',
'pdev-bdp_GL': '1',
'pdev-bdp_GR': '1',
'pdev-bdp_JU': '1',
'pdev-bdp_LU': '1',
'pdev-bdp_NE': '1',
'pdev-bdp_NW': '1',
'pdev-bdp_OW': '1',
'pdev-bdp_SG': '1',
'pdev-bdp_SH': '1',
'pdev-bdp_SO': '1',
'pdev-bdp_SZ': '1',
'pdev-bdp_TG': '1',
'pdev-bdp_TI': '1',
'pdev-bdp_UR': '1',
'pdev-bdp_VD': '1',
'pdev-bdp_VS': '1',
'pdev-bdp_VSr': '1',
'pdev-bdp_VSo': '1',
'pdev-bdp_ZG': '1',
'pdev-bdp_ZH': '1',
'pdev-jbdp_CH': '1',
'pdev-csp_FR': '1',
'pdev-csp_GR': '1',
'pdev-csp_JU': '1',
'pdev-csp_LU': '1',
'pdev-csp_OW': '1',
'pdev-csp_SG': '1',
'pdev-csp_VS': '1',
'pdev-csp_VSr': '1',
'pdev-csp_VSo': '1',
'pdev-csp_ZH': '1',
'pdev-cvp_frauen': '1',
'pdev-cvp_AG': '1',
'pdev-cvp_AI': '1',
'pdev-cvp_AR': '1',
'pdev-cvp_BE': '1',
'pdev-cvp_BL': '1',
'pdev-cvp_BS': '1',
'pdev-cvp_FR': '1',
'pdev-cvp_GE': '1',
'pdev-cvp_GL': '1',
'pdev-cvp_GR': '1',
'pdev-cvp_JU': '1',
'pdev-cvp_LU': '1',
'pdev-cvp_NE': '1',
'pdev-cvp_NW': '1',
'pdev-cvp_OW': '1',
'pdev-cvp_SG': '1',
'pdev-cvp_SH': '1',
'pdev-cvp_SO': '1',
'pdev-cvp_SZ': '1',
'pdev-cvp_TG': '1',
'pdev-cvp_TI': '1',
'pdev-cvp_UR': '1',
'pdev-cvp_VD': '1',
'pdev-cvp_VS': '1',
'pdev-cvp_VSr': '1',
'pdev-cvp_VSo': '1',
'pdev-cvp_ZG': '1',
'pdev-cvp_ZH': '1',
'pdev-jcvp_CH': '1',
'pdev-jcvp_AG': '1',
'pdev-jcvp_BE': '1',
'pdev-jcvp_GR': '1',
'pdev-jcvp_LU': '1',
'pdev-jcvp_SO': '1',
'pdev-jcvp_ZH': '1',
'pdev-edu_AG': '1',
'pdev-edu_AI': '1',
'pdev-edu_AR': '1',
'pdev-edu_BE': '1',
'pdev-edu_BL': '1',
'pdev-edu_BS': '1',
'pdev-edu_FR': '1',
'pdev-edu_GE': '1',
'pdev-edu_GL': '1',
'pdev-edu_GR': '1',
'pdev-edu_JU': '1',
'pdev-edu_LU': '1',
'pdev-edu_NE': '1',
'pdev-edu_NW': '1',
'pdev-edu_OW': '1',
'pdev-edu_SG': '1',
'pdev-edu_SH': '1',
'pdev-edu_SO': '1',
'pdev-edu_SZ': '1',
'pdev-edu_TG': '1',
'pdev-edu_TI': '1',
'pdev-edu_UR': '1',
'pdev-edu_VD': '1',
'pdev-edu_VS': '1',
'pdev-edu_VSr': '1',
'pdev-edu_VSo': '1',
'pdev-edu_ZG': '1',
'pdev-edu_ZH': '1',
'pdev-evp_AG': '1',
'pdev-evp_AI': '1',
'pdev-evp_AR': '1',
'pdev-evp_BE': '1',
'pdev-evp_BL': '1',
'pdev-evp_BS': '1',
'pdev-evp_FR': '1',
'pdev-evp_GE': '1',
'pdev-evp_GL': '1',
'pdev-evp_GR': '1',
'pdev-evp_JU': '1',
'pdev-evp_LU': '1',
'pdev-evp_NE': '1',
'pdev-evp_NW': '1',
'pdev-evp_OW': '1',
'pdev-evp_SG': '1',
'pdev-evp_SH': '1',
'pdev-evp_SO': '1',
'pdev-evp_SZ': '1',
'pdev-evp_TG': '1',
'pdev-evp_TI': '1',
'pdev-evp_UR': '1',
'pdev-evp_VD': '1',
'pdev-evp_VS': '1',
'pdev-evp_ZG': '1',
'pdev-evp_ZH': '1',
'pdev-jevp_CH': '1',
'pdev-fdp_Frauen': '1',
'pdev-fdp_AG': '1',
'pdev-fdp_AI': '1',
'pdev-fdp_AR': '1',
'pdev-fdp_BE': '1',
'pdev-fdp_BL': '1',
'pdev-fdp_BS': '1',
'pdev-fdp_FR': '1',
'pdev-fdp_GE': '1',
'pdev-fdp_GL': '1',
'pdev-fdp_GR': '1',
'pdev-fdp_JU': '1',
'pdev-fdp_LU': '1',
'pdev-fdp_NE': '1',
'pdev-fdp_NW': '1',
'pdev-fdp_OW': '1',
'pdev-fdp_SG': '1',
'pdev-fdp_SH': '1',
'pdev-fdp_SO': '1',
'pdev-fdp_SZ': '1',
'pdev-fdp_TG': '1',
'pdev-fdp_TI': '1',
'pdev-fdp_UR': '1',
'pdev-fdp_VD': '1',
'pdev-fdp_VS': '1',
'pdev-fdp_VSr': '1',
'pdev-fdp_Vso': '1',
'pdev-fdp_ZG': '1',
'pdev-fdp_ZH': '1',
'pdev-jfdp_CH': '1',
'pdev-jfdp_AG': '1',
'pdev-jfdp_BL': '1',
'pdev-jfdp_FR': '1',
'pdev-jfdp_GR': '1',
'pdev-jfdp_JU': '1',
'pdev-jfdp_LU': '1',
'pdev-jfdp_SH': '1',
'pdev-jfdp_TI': '1',
'pdev-jfdp_VD': '1',
'pdev-jfdp_ZH': '1',
'pdev-fps_AG': '1',
'pdev-fps_AI': '1',
'pdev-fps_BE': '1',
'pdev-fps_BL': '1',
'pdev-fps_BS': '1',
'pdev-fps_SG': '1',
'pdev-fps_SH': '1',
'pdev-fps_SO': '1',
'pdev-fps_TG': '1',
'pdev-fps_ZH': '1',
'pdev-glp_AG': '1',
'pdev-glp_AI': '1',
'pdev-glp_AR': '1',
'pdev-glp_BE': '1',
'pdev-glp_BL': '1',
'pdev-glp_BS': '1',
'pdev-glp_FR': '1',
'pdev-glp_GE': '1',
'pdev-glp_GL': '1',
'pdev-glp_GR': '1',
'pdev-glp_JU': '1',
'pdev-glp_LU': '1',
'pdev-glp_NE': '1',
'pdev-glp_NW': '1',
'pdev-glp_OW': '1',
'pdev-glp_SG': '1',
'pdev-glp_SH': '1',
'pdev-glp_SO': '1',
'pdev-glp_SZ': '1',
'pdev-glp_TG': '1',
'pdev-glp_TI': '1',
'pdev-glp_UR': '1',
'pdev-glp_VD': '1',
'pdev-glp_VS': '1',
'pdev-glp_VSr': '1',
'pdev-glp_VSo': '1',
'pdev-glp_ZG': '1',
'pdev-glp_ZH': '1',
'pdev-jglp_CH': '1',
'pdev-gps_AG': '66',
'pdev-gps_AI': '66',
'pdev-gps_AR': '66',
'pdev-gps_BE': '66',
'pdev-gps_BL': '66',
'pdev-gps_BS': '66',
'pdev-gps_FR': '66',
'pdev-gps_GE': '66',
'pdev-gps_GL': '66',
'pdev-gps_GR': '66',
'pdev-gps_JU': '66',
'pdev-gps_LU': '66',
'pdev-gps_NE': '66',
'pdev-gps_NW': '66',
'pdev-gps_OW': '66',
'pdev-gps_SG': '66',
'pdev-gps_SH': '66',
'pdev-gps_SO': '66',
'pdev-gps_SZ': '66',
'pdev-gps_TG': '66',
'pdev-gps_TI': '66',
'pdev-gps_UR': '66',
'pdev-gps_VD': '66',
'pdev-gps_VS': '66',
'pdev-gps_VSr': '66',
'pdev-gps_VSo': '66',
'pdev-gps_ZG': '66',
'pdev-gps_ZH': '66',
'pdev-jgps_CH': '66',
'pdev-kvp_SG': '1',
'pdev-lps_BE': '1',
'pdev-lps_BL': '1',
'pdev-lps_BS': '1',
'pdev-lps_FR': '1',
'pdev-lps_GE': '1',
'pdev-lps_JU': '1',
'pdev-lps_NE': '1',
'pdev-lps_SG': '1',
'pdev-lps_TI': '1',
'pdev-lps_VD': '1',
'pdev-lps_VS': '1',
'pdev-lps_ZH': '1',
'pdev-jlps_CH': '1',
'pdev-jlps_SO': '1',
'pdev-jlps_TI': '1',
'pdev-ldu_AG': '1',
'pdev-ldu_AR': '1',
'pdev-ldu_BE': '1',
'pdev-ldu_BL': '1',
'pdev-ldu_BS': '1',
'pdev-ldu_GR': '1',
'pdev-ldu_LU': '1',
'pdev-ldu_NE': '1',
'pdev-ldu_SG': '1',
'pdev-ldu_SH': '1',
'pdev-ldu_SO': '1',
'pdev-ldu_TG': '1',
'pdev-ldu_VD': '1',
'pdev-ldu_ZG': '1',
'pdev-ldu_ZH': '1',
'pdev-jldu_CH': '1',
'pdev-poch_SO': '2',
'pdev-poch_ZH': '2',
'pdev-pda_BE': '1',
'pdev-pda_BL': '1',
'pdev-pda_BS': '1',
'pdev-pda_GE': '1',
'pdev-pda_JU': '1',
'pdev-pda_NE': '1',
'pdev-pda_SG': '1',
'pdev-pda_TI': '1',
'pdev-pda_VD': '1',
'pdev-pda_ZH': '1',
'pdev-jpda_CH': '1',
'pdev-rep_AG': '1',
'pdev-rep_GE': '1',
'pdev-rep_NE': '1',
'pdev-rep_TG': '1',
'pdev-rep_VD': '1',
'pdev-rep_ZH': '1',
'pdev-sd_AG': '1',
'pdev-sd_BE': '1',
'pdev-sd_BL': '1',
'pdev-sd_BS': '1',
'pdev-sd_FR': '1',
'pdev-sd_GE': '1',
'pdev-sd_GR': '1',
'pdev-sd_LU': '1',
'pdev-sd_NE': '1',
'pdev-sd_SG': '1',
'pdev-sd_SO': '1',
'pdev-sd_TG': '1',
'pdev-sd_TI': '1',
'pdev-sd_VD': '1',
'pdev-sd_ZH': '1',
'pdev-jsd_CH': '1',
'pdev-sps_AG': '1',
'pdev-sps_AI': '1',
'pdev-sps_AR': '1',
'pdev-sps_BE': '1',
'pdev-sps_BL': '1',
'pdev-sps_BS': '1',
'pdev-sps_FR': '1',
'pdev-sps_GE': '1',
'pdev-sps_GL': '1',
'pdev-sps_GR': '1',
'pdev-sps_JU': '1',
'pdev-sps_LU': '1',
'pdev-sps_NE': '1',
'pdev-sps_NW': '1',
'pdev-sps_OW': '1',
'pdev-sps_SG': '1',
'pdev-sps_SH': '1',
'pdev-sps_SO': '1',
'pdev-sps_SZ': '1',
'pdev-sps_TG': '1',
'pdev-sps_TI': '1',
'pdev-sps_UR': '1',
'pdev-sps_VD': '1',
'pdev-sps_VS': '1',
'pdev-sps_VSr': '1',
'pdev-sps_VSo': '1',
'pdev-sps_ZG': '1',
'pdev-sps_ZH': '1',
'pdev-juso_CH': '1',
'pdev-juso_BE': '1',
'pdev-juso_GE': '1',
'pdev-juso_JU': '1',
'pdev-juso_TI': '1',
'pdev-juso_VS': '1',
'pdev-juso_ZH': '1',
'pdev-svp_AG': '3',
'pdev-svp_AI': '3',
'pdev-svp_AR': '3',
'pdev-svp_BE': '3',
'pdev-svp_BL': '3',
'pdev-svp_BS': '3',
'pdev-svp_FR': '3',
'pdev-svp_GE': '3',
'pdev-svp_GL': '3',
'pdev-svp_GR': '3',
'pdev-svp_JU': '3',
'pdev-svp_LU': '3',
'pdev-svp_NE': '3',
'pdev-svp_NW': '3',
'pdev-svp_OW': '3',
'pdev-svp_SG': '3',
'pdev-svp_SH': '3',
'pdev-svp_SO': '3',
'pdev-svp_SZ': '3',
'pdev-svp_TG': '3',
'pdev-svp_TI': '3',
'pdev-svp_UR': '3',
'pdev-svp_VD': '3',
'pdev-svp_VS': '3',
'pdev-svp_VSr': '3',
'pdev-svp_VSo': '3',
'pdev-svp_ZG': '3',
'pdev-svp_ZH': '3',
'pdev-jsvp_CH': '3',
'pdev-jsvp_AG': '3',
'pdev-jsvp_BE': '3',
'pdev-jsvp_GE': '3',
'pdev-jsvp_SH': '3',
'pdev-jsvp_UR': '3',
'pdev-jsvp_ZH': '3',
'pdev-sgb_AG': '1',
'pdev-sgb_JU': '1',
'pdev-sgb_VS': '1',
'pdev-sgv_AG': '1',
'pdev-sgv_BS': '1',
'pdev-sgv_SH': '1',
'pdev-vpod_GE': '1',
'pdev-vpod_VD': '1',
'pdev-mitte_frauen': '2',
'pdev-mitte_AG': '2',
'pdev-mitte_AI': '2',
'pdev-mitte_AR': '2',
'pdev-mitte_BE': '2',
'pdev-mitte_BL': '2',
'pdev-mitte_BS': '2',
'pdev-mitte_FR': '2',
'pdev-mitte_GE': '2',
'pdev-mitte_GL': '2',
'pdev-mitte_GR': '2',
'pdev-mitte_JU': '2',
'pdev-mitte_LU': '2',
'pdev-mitte_NE': '2',
'pdev-mitte_NW': '2',
'pdev-mitte_OW': '2',
'pdev-mitte_SG': '2',
'pdev-mitte_SH': '2',
'pdev-mitte_SO': '2',
'pdev-mitte_SZ': '2',
'pdev-mitte_TG': '2',
'pdev-mitte_TI': '2',
'pdev-mitte_UR': '2',
'pdev-mitte_VD': '2',
'pdev-mitte_VS': '2',
'pdev-mitte_VSr': '2',
'pdev-mitte_VSo': '2',
'pdev-mitte_ZG': '2',
'pdev-mitte_ZH': '2',
'pdev-jmitte_CH': '2',
'pdev-jmitte_AG': '2',
'pdev-jmitte_AI': '2',
'pdev-jmitte_AR': '2',
'pdev-jmitte_BE': '2',
'pdev-jmitte_BL': '2',
'pdev-jmitte_BS': '2',
'pdev-jmitte_FR': '2',
'pdev-jmitte_GE': '2',
'pdev-jmitte_GL': '2',
'pdev-jmitte_GR': '2',
'pdev-jmitte_JU': '2',
'pdev-jmitte_LU': '2',
'pdev-jmitte_NE': '2',
'pdev-jmitte_NW': '2',
'pdev-jmitte_OW': '2',
'pdev-jmitte_SG': '2',
'pdev-jmitte_SH': '2',
'pdev-jmitte_SO': '2',
'pdev-jmitte_SZ': '2',
'pdev-jmitte_TG': '2',
'pdev-jmitte_TI': '2',
'pdev-jmitte_UR': '2',
'pdev-jmitte_VD': '2',
'pdev-jmitte_VS': '2',
'pdev-jmitte_VSr': '2',
'pdev-jmitte_VSo': '2',
'pdev-jmitte_ZG': '2',
'pdev-jmitte_ZH': '2',
'nr-wahl': '1990',
'w-fdp': '1,1',
'w-cvp': '2,1',
'w-sp': '3,1',
'w-svp': '4,1',
'w-lps': '5,1',
'w-ldu': '6,1',
'w-evp': '7,1',
'w-csp': '8,1',
'w-pda': '9,1',
'w-poch': '10,1',
'w-gps': '11,1',
'w-sd': '12,1',
'w-rep': '13,1',
'w-edu': '14,1',
'w-fps': '15,1',
'w-lega': '16,1',
'w-kvp': '17,1',
'w-glp': '18,1',
'w-bdp': '19,1',
'w-mcg': '20,2',
'w-mitte': '20,1',
'w-ubrige': '21,2',
'ja-lager': '22,2',
'nein-lager': '23,2',
'keinepar-summe': '25,2',
'leer-summe': '26,2',
'freigabe-summe': '27,2',
'neutral-summe': '24,2',
'unbekannt-summe': '28,2',
'urheber': 'Initiator',
'anneepolitique': 'anneepolitique',
'bfsmap-de': 'map de',
'bfsmap-fr': 'map fr',
'poster_ja_mfg': (
'https://museum.ch/objects/1 '
'https://museum.ch/objects/2'
),
'poster_nein_mfg': (
'https://museum.ch/objects/3 '
'https://museum.ch/objects/4'
),
'poster_ja_sa': (
'https://sozialarchiv.ch/objects/1 '
'https://sozialarchiv.ch/objects/2'
),
'poster_nein_sa': (
'https://sozialarchiv.ch/objects/3 '
'https://sozialarchiv.ch/objects/4'
),
'nach_cockpit_d': 'https://post.vote.poll/de',
'nach_cockpit_f': 'https://post.vote.poll/fr',
'nach_cockpit_e': 'https://post.vote.poll/en',
'inserate-total': '1001',
'inserate-jaanteil': '10,06',
'mediares-tot': '1007',
'mediaton-tot': '10,1',
}
assert csv == expected
file = BytesIO()
votes.export_xlsx(file)
file.seek(0)
workbook = load_workbook(file)
sheet = workbook['DATA']
xlsx = dict(
zip(
[cell.value for cell in tuple(sheet.rows)[0]],
[cell.value for cell in tuple(sheet.rows)[1]]
)
)
expected = {
'anr': 100.1,
'datum': datetime(1990, 6, 2),
'titel_off_d': 'Vote DE',
'titel_off_f': 'Vote FR',
'titel_kurz_d': 'V D',
'titel_kurz_f': 'V F',
'titel_kurz_e': 'V E',
'kurzbetitel': 'Kurzbeschreibung',
'stichwort': 'Keyword',
'rechtsform': 1.0,
'pa-iv': 1.0,
'd1e1': 4.0,
'd1e2': 4.2,
'd1e3': 4.21,
'd2e1': 10.0,
'd2e2': 10.3,
'd2e3': 10.35,
'd3e1': 10.0,
'd3e2': 10.3,
'd3e3': 10.33,
'volk': 1.0,
'stand': 1.0,
'annahme': 1.0,
'bet': 20.01,
'volkja-proz': 40.01,
'kt-ja': 1.5,
'kt-nein': 24.5,
'ag-annahme': 0.0,
'ai-annahme': 0.0,
'ar-annahme': 0.0,
'be-annahme': 0.0,
'bkchrono-de': 'https://bk.chrono/de',
'bkchrono-fr': 'https://bk.chrono/fr',
'bkresults-de': 'https://bk.results/de',
'bkresults-fr': 'https://bk.results/fr',
'bl-annahme': 0.0,
'bs-annahme': 0.0,
'curiavista-de': 'https://curia.vista/de',
'curiavista-fr': 'https://curia.vista/fr',
'easyvideo_de': 'https://easy.vote/de',
'easyvideo_fr': 'https://easy.vote/fr',
'info_br-de': 'https://federal.council/de',
'info_br-fr': 'https://federal.council/fr',
'info_br-en': 'https://federal.council/en',
'info_dep-de': 'https://federal.departement/de',
'info_dep-fr': 'https://federal.departement/fr',
'info_dep-en': 'https://federal.departement/en',
'info_amt-de': 'https://federal.office/de',
'info_amt-fr': 'https://federal.office/fr',
'info_amt-en': 'https://federal.office/en',
'fr-annahme': 0.0,
'ge-annahme': 0.0,
'gl-annahme': 0.0,
'gr-annahme': 0.0,
'ju-annahme': 0.0,
'lu-annahme': 0.0,
'ne-annahme': 0.0,
'nw-annahme': 0.0,
'ow-annahme': 0.0,
'sg-annahme': 0.0,
'sh-annahme': 0.0,
'so-annahme': 0.0,
'sz-annahme': 0.0,
'tg-annahme': 0.0,
'ti-annahme': 0.0,
'ur-annahme': 0.0,
'vd-annahme': 0.0,
'vs-annahme': 0.0,
'zg-annahme': 0.0,
'zh-annahme': 0.0,
'gesch_nr': '24.557',
'br-pos': 1.0,
'bv-pos': 1.0,
'nr-pos': 1.0,
'nrja': 10.0,
'nrnein': 20.0,
'sr-pos': 1.0,
'srja': 30.0,
'srnein': 40.0,
'dauer_bv': 30.0,
'i-dauer_samm': 32.0,
'fr-dauer_samm': 35.0,
'unter_g': 40.0,
'p-fdp': 1.0,
'p-cvp': 1.0,
'p-sps': 1.0,
'p-svp': 1.0,
'p-lps': 2.0,
'p-ldu': 2.0,
'p-evp': 2.0,
'p-ucsp': 3.0,
'p-pda': 3.0,
'p-poch': 3.0,
'p-gps': 4.0,
'p-sd': 4.0,
'p-rep': 4.0,
'p-edu': 5.0,
'p-fps': 5.0,
'p-lega': 5.0,
'p-kvp': 66.0,
'p-glp': 66.0,
'p-bdp': None,
'p-mcg': 9999.0,
'p-mitte': 9999.0,
'p-sav': 1.0,
'p-eco': 2.0,
'p-sgv': 3.0,
'p-sbv': 3.0,
'p-sgb': 3.0,
'p-travs': 3.0,
'p-vsa': 9999.0,
'p-vsa': 9999.0,
'p-vpod': 9999.0,
'p-ssv': 9999.0,
'p-gem': 9999.0,
'p-kdk': 9999.0,
'p-vdk': 9999.0,
'p-endk': 9999.0,
'p-fdk': 9999.0,
'p-edk': 9999.0,
'p-gdk': 9999.0,
'p-ldk': 9999.0,
'p-sodk': 9999.0,
'p-kkjpd': 9999.0,
'p-bpuk': 9999.0,
'p-sbk': 9999.0,
'p-acs': 9999.0,
'p-tcs': 9999.0,
'p-vcs': 9999.0,
'p-voev': 9999.0,
'p-others_yes': 'Pro Velo',
'p-others_no': 'Biosuisse',
'p-others_free': 'Pro Natura, Greenpeace',
'p-others_counterp': 'Pro Juventute',
'p-others_init': 'Pro Senectute',
'pdev-bdp_AG': 1.0,
'pdev-bdp_AI': 1.0,
'pdev-bdp_AR': 1.0,
'pdev-bdp_BE': 1.0,
'pdev-bdp_BL': 1.0,
'pdev-bdp_BS': 1.0,
'pdev-bdp_FR': 1.0,
'pdev-bdp_GE': 1.0,
'pdev-bdp_GL': 1.0,
'pdev-bdp_GR': 1.0,
'pdev-bdp_JU': 1.0,
'pdev-bdp_LU': 1.0,
'pdev-bdp_NE': 1.0,
'pdev-bdp_NW': 1.0,
'pdev-bdp_OW': 1.0,
'pdev-bdp_SG': 1.0,
'pdev-bdp_SH': 1.0,
'pdev-bdp_SO': 1.0,
'pdev-bdp_SZ': 1.0,
'pdev-bdp_TG': 1.0,
'pdev-bdp_TI': 1.0,
'pdev-bdp_UR': 1.0,
'pdev-bdp_VD': 1.0,
'pdev-bdp_VS': 1.0,
'pdev-bdp_VSr': 1.0,
'pdev-bdp_VSo': 1.0,
'pdev-bdp_ZG': 1.0,
'pdev-bdp_ZH': 1.0,
'pdev-jbdp_CH': 1.0,
'pdev-csp_FR': 1.0,
'pdev-csp_GR': 1.0,
'pdev-csp_JU': 1.0,
'pdev-csp_LU': 1.0,
'pdev-csp_OW': 1.0,
'pdev-csp_SG': 1.0,
'pdev-csp_VS': 1.0,
'pdev-csp_VSr': 1.0,
'pdev-csp_VSo': 1.0,
'pdev-csp_ZH': 1.0,
'pdev-cvp_frauen': 1.0,
'pdev-cvp_AG': 1.0,
'pdev-cvp_AI': 1.0,
'pdev-cvp_AR': 1.0,
'pdev-cvp_BE': 1.0,
'pdev-cvp_BL': 1.0,
'pdev-cvp_BS': 1.0,
'pdev-cvp_FR': 1.0,
'pdev-cvp_GE': 1.0,
'pdev-cvp_GL': 1.0,
'pdev-cvp_GR': 1.0,
'pdev-cvp_JU': 1.0,
'pdev-cvp_LU': 1.0,
'pdev-cvp_NE': 1.0,
'pdev-cvp_NW': 1.0,
'pdev-cvp_OW': 1.0,
'pdev-cvp_SG': 1.0,
'pdev-cvp_SH': 1.0,
'pdev-cvp_SO': 1.0,
'pdev-cvp_SZ': 1.0,
'pdev-cvp_TG': 1.0,
'pdev-cvp_TI': 1.0,
'pdev-cvp_UR': 1.0,
'pdev-cvp_VD': 1.0,
'pdev-cvp_VS': 1.0,
'pdev-cvp_VSr': 1.0,
'pdev-cvp_VSo': 1.0,
'pdev-cvp_ZG': 1.0,
'pdev-cvp_ZH': 1.0,
'pdev-jcvp_CH': 1.0,
'pdev-jcvp_AG': 1.0,
'pdev-jcvp_BE': 1.0,
'pdev-jcvp_GR': 1.0,
'pdev-jcvp_LU': 1.0,
'pdev-jcvp_SO': 1.0,
'pdev-jcvp_ZH': 1.0,
'pdev-edu_AG': 1.0,
'pdev-edu_AI': 1.0,
'pdev-edu_AR': 1.0,
'pdev-edu_BE': 1.0,
'pdev-edu_BL': 1.0,
'pdev-edu_BS': 1.0,
'pdev-edu_FR': 1.0,
'pdev-edu_GE': 1.0,
'pdev-edu_GL': 1.0,
'pdev-edu_GR': 1.0,
'pdev-edu_JU': 1.0,
'pdev-edu_LU': 1.0,
'pdev-edu_NE': 1.0,
'pdev-edu_NW': 1.0,
'pdev-edu_OW': 1.0,
'pdev-edu_SG': 1.0,
'pdev-edu_SH': 1.0,
'pdev-edu_SO': 1.0,
'pdev-edu_SZ': 1.0,
'pdev-edu_TG': 1.0,
'pdev-edu_TI': 1.0,
'pdev-edu_UR': 1.0,
'pdev-edu_VD': 1.0,
'pdev-edu_VS': 1.0,
'pdev-edu_VSr': 1.0,
'pdev-edu_VSo': 1.0,
'pdev-edu_ZG': 1.0,
'pdev-edu_ZH': 1.0,
'pdev-evp_AG': 1.0,
'pdev-evp_AI': 1.0,
'pdev-evp_AR': 1.0,
'pdev-evp_BE': 1.0,
'pdev-evp_BL': 1.0,
'pdev-evp_BS': 1.0,
'pdev-evp_FR': 1.0,
'pdev-evp_GE': 1.0,
'pdev-evp_GL': 1.0,
'pdev-evp_GR': 1.0,
'pdev-evp_JU': 1.0,
'pdev-evp_LU': 1.0,
'pdev-evp_NE': 1.0,
'pdev-evp_NW': 1.0,
'pdev-evp_OW': 1.0,
'pdev-evp_SG': 1.0,
'pdev-evp_SH': 1.0,
'pdev-evp_SO': 1.0,
'pdev-evp_SZ': 1.0,
'pdev-evp_TG': 1.0,
'pdev-evp_TI': 1.0,
'pdev-evp_UR': 1.0,
'pdev-evp_VD': 1.0,
'pdev-evp_VS': 1.0,
'pdev-evp_ZG': 1.0,
'pdev-evp_ZH': 1.0,
'pdev-jevp_CH': 1.0,
'pdev-fdp_Frauen': 1.0,
'pdev-fdp_AG': 1.0,
'pdev-fdp_AI': 1.0,
'pdev-fdp_AR': 1.0,
'pdev-fdp_BE': 1.0,
'pdev-fdp_BL': 1.0,
'pdev-fdp_BS': 1.0,
'pdev-fdp_FR': 1.0,
'pdev-fdp_GE': 1.0,
'pdev-fdp_GL': 1.0,
'pdev-fdp_GR': 1.0,
'pdev-fdp_JU': 1.0,
'pdev-fdp_LU': 1.0,
'pdev-fdp_NE': 1.0,
'pdev-fdp_NW': 1.0,
'pdev-fdp_OW': 1.0,
'pdev-fdp_SG': 1.0,
'pdev-fdp_SH': 1.0,
'pdev-fdp_SO': 1.0,
'pdev-fdp_SZ': 1.0,
'pdev-fdp_TG': 1.0,
'pdev-fdp_TI': 1.0,
'pdev-fdp_UR': 1.0,
'pdev-fdp_VD': 1.0,
'pdev-fdp_VS': 1.0,
'pdev-fdp_VSr': 1.0,
'pdev-fdp_Vso': 1.0,
'pdev-fdp_ZG': 1.0,
'pdev-fdp_ZH': 1.0,
'pdev-jfdp_CH': 1.0,
'pdev-jfdp_AG': 1.0,
'pdev-jfdp_BL': 1.0,
'pdev-jfdp_FR': 1.0,
'pdev-jfdp_GR': 1.0,
'pdev-jfdp_JU': 1.0,
'pdev-jfdp_LU': 1.0,
'pdev-jfdp_SH': 1.0,
'pdev-jfdp_TI': 1.0,
'pdev-jfdp_VD': 1.0,
'pdev-jfdp_ZH': 1.0,
'pdev-fps_AG': 1.0,
'pdev-fps_AI': 1.0,
'pdev-fps_BE': 1.0,
'pdev-fps_BL': 1.0,
'pdev-fps_BS': 1.0,
'pdev-fps_SG': 1.0,
'pdev-fps_SH': 1.0,
'pdev-fps_SO': 1.0,
'pdev-fps_TG': 1.0,
'pdev-fps_ZH': 1.0,
'pdev-glp_AG': 1.0,
'pdev-glp_AI': 1.0,
'pdev-glp_AR': 1.0,
'pdev-glp_BE': 1.0,
'pdev-glp_BL': 1.0,
'pdev-glp_BS': 1.0,
'pdev-glp_FR': 1.0,
'pdev-glp_GE': 1.0,
'pdev-glp_GL': 1.0,
'pdev-glp_GR': 1.0,
'pdev-glp_JU': 1.0,
'pdev-glp_LU': 1.0,
'pdev-glp_NE': 1.0,
'pdev-glp_NW': 1.0,
'pdev-glp_OW': 1.0,
'pdev-glp_SG': 1.0,
'pdev-glp_SH': 1.0,
'pdev-glp_SO': 1.0,
'pdev-glp_SZ': 1.0,
'pdev-glp_TG': 1.0,
'pdev-glp_TI': 1.0,
'pdev-glp_UR': 1.0,
'pdev-glp_VD': 1.0,
'pdev-glp_VS': 1.0,
'pdev-glp_VSr': 1.0,
'pdev-glp_VSo': 1.0,
'pdev-glp_ZG': 1.0,
'pdev-glp_ZH': 1.0,
'pdev-jglp_CH': 1.0,
'pdev-gps_AG': 66.0,
'pdev-gps_AI': 66.0,
'pdev-gps_AR': 66.0,
'pdev-gps_BE': 66.0,
'pdev-gps_BL': 66.0,
'pdev-gps_BS': 66.0,
'pdev-gps_FR': 66.0,
'pdev-gps_GE': 66.0,
'pdev-gps_GL': 66.0,
'pdev-gps_GR': 66.0,
'pdev-gps_JU': 66.0,
'pdev-gps_LU': 66.0,
'pdev-gps_NE': 66.0,
'pdev-gps_NW': 66.0,
'pdev-gps_OW': 66.0,
'pdev-gps_SG': 66.0,
'pdev-gps_SH': 66.0,
'pdev-gps_SO': 66.0,
'pdev-gps_SZ': 66.0,
'pdev-gps_TG': 66.0,
'pdev-gps_TI': 66.0,
'pdev-gps_UR': 66.0,
'pdev-gps_VD': 66.0,
'pdev-gps_VS': 66.0,
'pdev-gps_VSr': 66.0,
'pdev-gps_VSo': 66.0,
'pdev-gps_ZG': 66.0,
'pdev-gps_ZH': 66.0,
'pdev-jgps_CH': 66.0,
'pdev-kvp_SG': 1.0,
'pdev-lps_BE': 1.0,
'pdev-lps_BL': 1.0,
'pdev-lps_BS': 1.0,
'pdev-lps_FR': 1.0,
'pdev-lps_GE': 1.0,
'pdev-lps_JU': 1.0,
'pdev-lps_NE': 1.0,
'pdev-lps_SG': 1.0,
'pdev-lps_TI': 1.0,
'pdev-lps_VD': 1.0,
'pdev-lps_VS': 1.0,
'pdev-lps_ZH': 1.0,
'pdev-jlps_CH': 1.0,
'pdev-jlps_SO': 1.0,
'pdev-jlps_TI': 1.0,
'pdev-ldu_AG': 1.0,
'pdev-ldu_AR': 1.0,
'pdev-ldu_BE': 1.0,
'pdev-ldu_BL': 1.0,
'pdev-ldu_BS': 1.0,
'pdev-ldu_GR': 1.0,
'pdev-ldu_LU': 1.0,
'pdev-ldu_NE': 1.0,
'pdev-ldu_SG': 1.0,
'pdev-ldu_SH': 1.0,
'pdev-ldu_SO': 1.0,
'pdev-ldu_TG': 1.0,
'pdev-ldu_VD': 1.0,
'pdev-ldu_ZG': 1.0,
'pdev-ldu_ZH': 1.0,
'pdev-jldu_CH': 1.0,
'pdev-poch_SO': 2.0,
'pdev-poch_ZH': 2.0,
'pdev-pda_BE': 1.0,
'pdev-pda_BL': 1.0,
'pdev-pda_BS': 1.0,
'pdev-pda_GE': 1.0,
'pdev-pda_JU': 1.0,
'pdev-pda_NE': 1.0,
'pdev-pda_SG': 1.0,
'pdev-pda_TI': 1.0,
'pdev-pda_VD': 1.0,
'pdev-pda_ZH': 1.0,
'pdev-jpda_CH': 1.0,
'pdev-rep_AG': 1.0,
'pdev-rep_GE': 1.0,
'pdev-rep_NE': 1.0,
'pdev-rep_TG': 1.0,
'pdev-rep_VD': 1.0,
'pdev-rep_ZH': 1.0,
'pdev-sd_AG': 1.0,
'pdev-sd_BE': 1.0,
'pdev-sd_BL': 1.0,
'pdev-sd_BS': 1.0,
'pdev-sd_FR': 1.0,
'pdev-sd_GE': 1.0,
'pdev-sd_GR': 1.0,
'pdev-sd_LU': 1.0,
'pdev-sd_NE': 1.0,
'pdev-sd_SG': 1.0,
'pdev-sd_SO': 1.0,
'pdev-sd_TG': 1.0,
'pdev-sd_TI': 1.0,
'pdev-sd_VD': 1.0,
'pdev-sd_ZH': 1.0,
'pdev-jsd_CH': 1.0,
'pdev-sps_AG': 1.0,
'pdev-sps_AI': 1.0,
'pdev-sps_AR': 1.0,
'pdev-sps_BE': 1.0,
'pdev-sps_BL': 1.0,
'pdev-sps_BS': 1.0,
'pdev-sps_FR': 1.0,
'pdev-sps_GE': 1.0,
'pdev-sps_GL': 1.0,
'pdev-sps_GR': 1.0,
'pdev-sps_JU': 1.0,
'pdev-sps_LU': 1.0,
'pdev-sps_NE': 1.0,
'pdev-sps_NW': 1.0,
'pdev-sps_OW': 1.0,
'pdev-sps_SG': 1.0,
'pdev-sps_SH': 1.0,
'pdev-sps_SO': 1.0,
'pdev-sps_SZ': 1.0,
'pdev-sps_TG': 1.0,
'pdev-sps_TI': 1.0,
'pdev-sps_UR': 1.0,
'pdev-sps_VD': 1.0,
'pdev-sps_VS': 1.0,
'pdev-sps_VSr': 1.0,
'pdev-sps_VSo': 1.0,
'pdev-sps_ZG': 1.0,
'pdev-sps_ZH': 1.0,
'pdev-juso_CH': 1.0,
'pdev-juso_BE': 1.0,
'pdev-juso_GE': 1.0,
'pdev-juso_JU': 1.0,
'pdev-juso_TI': 1.0,
'pdev-juso_VS': 1.0,
'pdev-juso_ZH': 1.0,
'pdev-svp_AG': 3.0,
'pdev-svp_AI': 3.0,
'pdev-svp_AR': 3.0,
'pdev-svp_BE': 3.0,
'pdev-svp_BL': 3.0,
'pdev-svp_BS': 3.0,
'pdev-svp_FR': 3.0,
'pdev-svp_GE': 3.0,
'pdev-svp_GL': 3.0,
'pdev-svp_GR': 3.0,
'pdev-svp_JU': 3.0,
'pdev-svp_LU': 3.0,
'pdev-svp_NE': 3.0,
'pdev-svp_NW': 3.0,
'pdev-svp_OW': 3.0,
'pdev-svp_SG': 3.0,
'pdev-svp_SH': 3.0,
'pdev-svp_SO': 3.0,
'pdev-svp_SZ': 3.0,
'pdev-svp_TG': 3.0,
'pdev-svp_TI': 3.0,
'pdev-svp_UR': 3.0,
'pdev-svp_VD': 3.0,
'pdev-svp_VS': 3.0,
'pdev-svp_VSr': 3.0,
'pdev-svp_VSo': 3.0,
'pdev-svp_ZG': 3.0,
'pdev-svp_ZH': 3.0,
'pdev-jsvp_CH': 3.0,
'pdev-jsvp_AG': 3.0,
'pdev-jsvp_BE': 3.0,
'pdev-jsvp_GE': 3.0,
'pdev-jsvp_SH': 3.0,
'pdev-jsvp_UR': 3.0,
'pdev-jsvp_ZH': 3.0,
'pdev-sgb_AG': 1.0,
'pdev-sgb_JU': 1.0,
'pdev-sgb_VS': 1.0,
'pdev-sgv_AG': 1.0,
'pdev-sgv_BS': 1.0,
'pdev-sgv_SH': 1.0,
'pdev-vpod_GE': 1.0,
'pdev-vpod_VD': 1.0,
'pdev-mitte_frauen': 2.0,
'pdev-mitte_AG': 2.0,
'pdev-mitte_AI': 2.0,
'pdev-mitte_AR': 2.0,
'pdev-mitte_BE': 2.0,
'pdev-mitte_BL': 2.0,
'pdev-mitte_BS': 2.0,
'pdev-mitte_FR': 2.0,
'pdev-mitte_GE': 2.0,
'pdev-mitte_GL': 2.0,
'pdev-mitte_GR': 2.0,
'pdev-mitte_JU': 2.0,
'pdev-mitte_LU': 2.0,
'pdev-mitte_NE': 2.0,
'pdev-mitte_NW': 2.0,
'pdev-mitte_OW': 2.0,
'pdev-mitte_SG': 2.0,
'pdev-mitte_SH': 2.0,
'pdev-mitte_SO': 2.0,
'pdev-mitte_SZ': 2.0,
'pdev-mitte_TG': 2.0,
'pdev-mitte_TI': 2.0,
'pdev-mitte_UR': 2.0,
'pdev-mitte_VD': 2.0,
'pdev-mitte_VS': 2.0,
'pdev-mitte_VSr': 2.0,
'pdev-mitte_VSo': 2.0,
'pdev-mitte_ZG': 2.0,
'pdev-mitte_ZH': 2.0,
'pdev-jmitte_CH': 2.0,
'pdev-jmitte_AG': 2.0,
'pdev-jmitte_AI': 2.0,
'pdev-jmitte_AR': 2.0,
'pdev-jmitte_BE': 2.0,
'pdev-jmitte_BL': 2.0,
'pdev-jmitte_BS': 2.0,
'pdev-jmitte_FR': 2.0,
'pdev-jmitte_GE': 2.0,
'pdev-jmitte_GL': 2.0,
'pdev-jmitte_GR': 2.0,
'pdev-jmitte_JU': 2.0,
'pdev-jmitte_LU': 2.0,
'pdev-jmitte_NE': 2.0,
'pdev-jmitte_NW': 2.0,
'pdev-jmitte_OW': 2.0,
'pdev-jmitte_SG': 2.0,
'pdev-jmitte_SH': 2.0,
'pdev-jmitte_SO': 2.0,
'pdev-jmitte_SZ': 2.0,
'pdev-jmitte_TG': 2.0,
'pdev-jmitte_TI': 2.0,
'pdev-jmitte_UR': 2.0,
'pdev-jmitte_VD': 2.0,
'pdev-jmitte_VS': 2.0,
'pdev-jmitte_VSr': 2.0,
'pdev-jmitte_VSo': 2.0,
'pdev-jmitte_ZG': 2.0,
'pdev-jmitte_ZH': 2.0,
'nr-wahl': 1990.0,
'w-fdp': 1.1,
'w-cvp': 2.1,
'w-sp': 3.1,
'w-svp': 4.1,
'w-lps': 5.1,
'w-ldu': 6.1,
'w-evp': 7.1,
'w-csp': 8.1,
'w-pda': 9.1,
'w-poch': 10.1,
'w-gps': 11.1,
'w-sd': 12.1,
'w-rep': 13.1,
'w-edu': 14.1,
'w-fps': 15.1,
'w-lega': 16.1,
'w-kvp': 17.1,
'w-glp': 18.1,
'w-bdp': 19.1,
'w-mcg': 20.2,
'w-mitte': 20.1,
'w-ubrige': 21.2,
'ja-lager': 22.2,
'nein-lager': 23.2,
'keinepar-summe': 25.2,
'leer-summe': 26.2,
'freigabe-summe': 27.2,
'neutral-summe': 24.2,
'unbekannt-summe': 28.2,
'urheber': 'Initiator',
'anneepolitique': 'anneepolitique',
'bfsmap-de': 'map de',
'bfsmap-fr': 'map fr',
'poster_ja_mfg': (
'https://museum.ch/objects/1 '
'https://museum.ch/objects/2'
),
'poster_nein_mfg': (
'https://museum.ch/objects/3 '
'https://museum.ch/objects/4'
),
'poster_ja_sa': (
'https://sozialarchiv.ch/objects/1 '
'https://sozialarchiv.ch/objects/2'
),
'poster_nein_sa': (
'https://sozialarchiv.ch/objects/3 '
'https://sozialarchiv.ch/objects/4'
),
'nach_cockpit_d': 'https://post.vote.poll/de',
'nach_cockpit_f': 'https://post.vote.poll/fr',
'nach_cockpit_e': 'https://post.vote.poll/en',
'inserate-total': 1001,
'inserate-jaanteil': 10.06,
'mediares-tot': 1007,
'mediaton-tot': 10.10,
}
assert xlsx == expected
assert csv.keys() == xlsx.keys()
|
"""Admin registration for the reports app."""
from django.contrib import admin
from authors.apps.reports.models import ReportArticle
# Register your models here.
# Expose ReportArticle in the Django admin with the default ModelAdmin.
admin.site.register(ReportArticle)
|
#!/usr/bin/python2
#-*- coding:utf-8 -*-
import MySQLdb
import logging
from pprint import pprint
from datetime import timedelta
from datetime import datetime
logger = logging.getLogger(__name__)
def get_conn(host,user,passwd,db):
try:
conn = MySQLdb.connect(host=host,user=user,passwd=passwd,db=db)
except Exception as e:
print "connection error",e
raise
return conn
def init_cursor(conn):
    """Return a dictionary cursor for conn (rows come back as dicts, not tuples).

    The original created a plain cursor first and immediately overwrote it
    with the DictCursor, leaking the first cursor object; create the
    dictionary cursor directly instead.
    """
    return conn.cursor(MySQLdb.cursors.DictCursor)
def fetchall(sql, cursor, data=None):
    """Execute sql (optionally parameterised with data) and return all rows."""
    cursor.execute(sql, data)
    return cursor.fetchall()
def fetchone(sql, cursor, data=None):
    """Execute sql (optionally parameterised with data) and return one row."""
    cursor.execute(sql, data)
    return cursor.fetchone()
def processlist(cursor):
    """Return the rows of SHOW PROCESSLIST via the shared fetchall helper."""
    return fetchall("""show processlist""", cursor)
if __name__ == '__main__':
    # Record and show the wall-clock start time of the run.
    now = datetime.now()
    print now
    # Try the primary credentials first and fall back to the secondary
    # password if the first connection attempt fails.
    try:
        conn = get_conn('localhost','root','abc123!!','kx')
    except Exception as e:
        conn = get_conn('localhost','root','mrmuxl','kx')
    cursor = init_cursor(conn)
    # NOTE(review): this rebinds the name `processlist` from the helper
    # function to its result; harmless here, but confusing to readers.
    processlist = processlist(cursor)
    if processlist:
        for i in processlist:
            print i
    # Tidy up: close the cursor, commit, and close the connection.
    cursor.close()
    print "cursor close"
    conn.commit()
    conn.close()
    print "conn close"
|
import os.path
from jinja2 import Environment, FileSystemLoader
import py.path
def generate_static(tmpl_path, out_path):
    """Render every template under tmpl_path/site into out_path.

    tmpl_path and out_path are py.path.local objects.  The Jinja loader is
    rooted at tmpl_path (not tmpl_path/site) so templates can extend or
    include siblings that live outside the site/ subtree.
    """
    jinja_env = Environment(loader=FileSystemLoader(str(tmpl_path)))
    site_path = tmpl_path.join('site')
    # Start from a clean output tree.
    if os.path.exists(str(out_path)):
        out_path.remove()
    out_path.mkdir()
    # NOTE(review): reversed() over visit() is presumably meant to ensure
    # parent directories are created before the files inside them — confirm
    # py.path.local.visit() traversal order.
    for subfile in reversed(list(site_path.visit())):
        out_file_path = out_path.join(subfile.relto(site_path))
        if os.path.isdir(str(subfile)):
            out_file_path.mkdir()
            continue
        # Template names are relative to tmpl_path, the loader root.
        tmpl_name = str(subfile.relto(tmpl_path))
        out_file_data = jinja_env.get_template(tmpl_name).render()
        with out_file_path.open('wb') as f:
            f.write(out_file_data.encode('utf-8'))
def main():
    """CLI entry point: render templates from argv[1] into argv[2]."""
    import sys
    src, dst = sys.argv[1:]
    generate_static(py.path.local(src), py.path.local(dst))


if __name__ == '__main__':
    main()
|
# Placeholder test module: mark everything here as skipped until the
# corresponding functionality is implemented.
from openspending.ui.test import helpers as h
h.skip("Not yet implemented.")
|
class Book:
    """Simple value object describing a book record.

    Attributes:
        book_id: unique identifier of the book.
        book_name: title of the book.
        details: free-form description text.
        author: author name.
    """

    def __init__(self, book_id, book_name, details, author):
        self.book_id = book_id
        self.book_name = book_name
        self.details = details
        self.author = author

    def __repr__(self):
        # Debug-friendly representation (the original class had none).
        return (f"{type(self).__name__}(book_id={self.book_id!r}, "
                f"book_name={self.book_name!r}, details={self.details!r}, "
                f"author={self.author!r})")
import os
import utils
import nltk
from loguru import logger
from pathlib import Path
class Glove():
    """Builds GloVe word vectors for one dataset under input/<name>/."""

    def __init__(self, dataset_name) -> None:
        """Resolve paths for dataset_name and make sure GloVe is compiled."""
        raw_path = 'input/' + dataset_name + '/raw.txt'
        if not os.path.isfile(raw_path):
            raise Exception('Could not find ' + raw_path)
        self.dataset_name = dataset_name
        self.input_file_path = raw_path
        self.dataset_path = 'input/' + dataset_name
        self.corpus_file_path = self.dataset_path + '/corpus_for_glove.txt'
        # Build the GloVe binary on first use.
        if not os.path.isfile('lib/glove/build/glove'):
            self.setup_glove()

    def setup_glove(self):
        """Clone and compile Stanford GloVe, and make the helper script executable."""
        os.system('cd lib && git clone https://github.com/stanfordnlp/glove && cd glove && make')
        os.system('chmod +x lib/glove-for-rhymes.sh')

    def generate_vectors(self, vectors_file_path):
        """Create the corpus file if needed, then run GloVe to write vectors."""
        if not os.path.isfile(self.corpus_file_path):
            self.__generate_corpus_file()
        self.write_vectors(vectors_file_path)

    def __generate_corpus_file(self):
        """Tokenise raw.txt into blank-line-separated documents and save them."""
        logger.info('Reading corpus file and preprocessing')
        raw_lines = utils.read_file(self.input_file_path).split("\n")
        documents = []
        current_doc = []
        for raw_line in raw_lines:
            if not raw_line.strip():
                # A blank line closes the current document (even an empty one,
                # matching the original behaviour on consecutive blank lines).
                documents.append(" ".join(current_doc))
                current_doc = []
                continue
            # The Quran corpus keeps every token; other corpora keep only
            # purely alphabetic tokens.
            if self.dataset_name == 'quran':
                tokens = [tok.lower() for tok in nltk.word_tokenize(raw_line) if tok]
            else:
                tokens = [tok.lower() for tok in nltk.word_tokenize(raw_line) if tok.isalpha()]
            current_doc.append(" ".join(tokens))
        if current_doc:
            documents.append(" ".join(current_doc))
        utils.write_file(self.corpus_file_path, "\n\n".join(documents))
        logger.info('Finished. Saving sentences for later use in Glove')
        return True

    def write_vectors(self, vectors_file_path):
        """Invoke the glove-for-rhymes.sh helper on this dataset's corpus."""
        input_dir = os.path.realpath(self.dataset_path)
        output_dir = os.path.realpath(os.path.dirname(vectors_file_path))
        command = 'cd lib && ./glove-for-rhymes.sh ' + input_dir + ' ' + output_dir
        os.system(command)
|
import numpy as np
import matplotlib.pyplot as plt
# Parameter values (r) sampled for the bifurcation diagram.
x=[]
# For each r in x: the distinct post-transient values of the logistic map.
y=[]
# Flattened (r, value) pairs that actually get plotted.
bigx=[]
bigy=[]
def func1(r1, r2, d):
    """Iterate the logistic map s <- r*s*(1-s) for every r in [r1, r2).

    Fills the module-global x with the sampled r values and, for each r,
    stores in the module-global y the set of values visited after the
    transient (iterations 50..69, starting from s = 0.5).

    Returns:
        (x, y): the two module-global lists.
    """
    global x, y
    x = list(np.arange(r1, r2, d))
    y = list(range(len(x)))
    for idx, r in enumerate(x):
        s = 0.5
        tail = []
        for step in range(70):
            s = r * s * (1 - s)
            # Keep only the post-transient values.
            if step >= 50:
                tail.append(s)
        y[idx] = list(set(tail))
    return x, y
def func2():
    """Scatter-plot the bifurcation data previously gathered by func1."""
    global x, y, bigx, bigy
    bigx = []
    bigy = []
    # Flatten (r, attractor-set) pairs; points with more than 16 distinct
    # values are skipped.
    for r_val, attractors in zip(x, y):
        count = len(attractors)
        if count <= 16:
            bigx.extend([r_val] * count)
            bigy.extend(attractors)
    plt.plot(bigx, bigy, 'b.', ms=5)
    plt.show()
|
import webapp2
import os
import jinja2
import json
import datetime
import time
import urllib
import urllib2
import soundcloud
import sys
import random
import math
from google.appengine.ext import db
from google.appengine.api import memcache, urlfetch
from google.appengine.api.urlfetch import fetch
from secret import client_id, client_secret
# Jinja2 environment rooted next to this file; autoescape protects templates.
template_dir = os.path.dirname(__file__)
jinja_env = jinja2.Environment(loader = jinja2.FileSystemLoader(template_dir),
                               autoescape = True)
#handler for the jinja2 env. allows us to use templates! c/p this code all you want for other projects
# SoundCloud API client built from the credentials in secret.py.
client = soundcloud.Client(client_id=client_id, client_secret=client_secret)
# Window length in seconds used when scoring comment density.
segmentLen = 3
isRequesting = False #global var
class Handler(webapp2.RequestHandler):
    """Base request handler with jinja2 template-rendering helpers."""

    def write(self, *a, **kw):
        # Thin wrapper over the response output stream.
        self.response.out.write(*a, **kw)

    def render_str(self, template, **params):
        # Render the named template to a string.
        return jinja_env.get_template(template).render(params)

    def render(self, template, **kw):
        # Render and write in one step.
        self.write(self.render_str(template, **kw))
class SegmentHandler(Handler):
    """Placeholder endpoint; responds with a fixed greeting."""

    def get(self):
        self.write('hello world')
class ReqJSON(db.Model):
    """Datastore cache of the filtered track JSON for one genre."""
    # Genre slug this cached payload belongs to.
    genre = db.StringProperty(required = True)
    # JSON-encoded list of filtered track dicts.
    json_str = db.TextProperty(required = True)
    # Creation timestamp; entries older than a day are deleted and refreshed.
    created = db.DateTimeProperty(auto_now_add = True)
class RandomHandler(Handler):
    """Serve the filtered/scored track list for a single genre as JSON.

    Results are cached in memcache (raw and filtered track lists plus a
    lastUpdated timestamp) and in the ReqJSON datastore model; both are
    refreshed once they are more than a day old.
    """
    def asyncFetch(self, urlToFetch, genre):
        '''
        async requests for concurrent requests:
        store in memcache a boolean value for whether it's requesting or not
        if not requesting: send soundcloud api request and set bool var to true, callback function is to update the memcache and database with new info and set bool var to false
        meanwhile, retrieve info from memcache and return it
        '''
        # NOTE(review): this assignment creates a LOCAL isRequesting; the
        # module-level flag read by get() below is never updated by this
        # method, so the "is requesting" coordination is incomplete.
        isRequesting = memcache.get('isRequesting')
        if not isRequesting:
            memcache.set('isRequesting', True)
            #async request here, set it equal to req
            def handle_result(rpc):
                result = rpc.get_result()
                # ... Do something with result...
            # Use a helper function to define the scope of the callback.
            def create_callback(rpc):
                return lambda: handle_result(rpc)
            rpc = urlfetch.create_rpc()
            rpc.callback = create_callback(rpc)
            urlfetch.make_fetch_call(rpc, urlToFetch)
            #retrieve from memcache and return
            #comments = json.loads(fetch(link, deadline=200).content) #retrieve comments

    def get(self, genre1):
        """Return JSON tracks for genre1, optionally '<genre>/<sort option>'."""
        genre = urllib.quote(genre1) #to make sure it's url valid
        if '/' in genre:
            genre, sortOption = genre.split('/')
        else:
            sortOption = 'random' #random by default
        arr = []
        comments = []
        url = 'https://api-v2.soundcloud.com/explore/' + genre + '?limit=200'
        self.response.headers['Content-Type'] = 'application/json; charset=UTF-8'
        # Try the memcache layer first.
        tracks = memcache.get('tracks_' + genre)
        tracks_filtered = memcache.get('tracks_filtered_' + genre) #type string or None
        lastUpdated = memcache.get('lastUpdated_' + genre) #type string or None
        if tracks:
            tracks = json.loads(tracks)
        filter_change_needed = False
        #if is requesting, skip this
        if lastUpdated is None or int(time.time()) - float(lastUpdated) > 3600*24 or isRequesting: #if memcache needs to update bc too old
            ##################################
            req = json.loads(fetch(url, deadline=200).content) #ASYNC PLS, fetching tracks
            tracks = req.get('tracks')
            # print req.get('next_href')
            memcache.set('tracks_'+genre, json.dumps(tracks))
            memcache.set('lastUpdated_'+genre, int(time.time()))
            filter_change_needed = True
            #if memcache needs to update (or not found in memcache)
            query = db.GqlQuery('SELECT * FROM ReqJSON') #query db to check if we already did this before
            query = list(query)
            # print "DB QUERY"
            in_db = False
            tooOld = False #check if db needs to update as well
            for q in query:
                if q.genre == genre: #if found in db. USE THIS TO IMPLEMENT MULTIPLE GENRE FEATURE
                    in_db = True
                    if time.time() - time.mktime(q.created.timetuple()) > 3600*24: #if the db entry is more than a day old, delete and refresh. ***CHANGE THIS
                        q.delete() #delete old entry
                        tooOld = True
                    tracks_filtered = json.loads(q.json_str)
            if not in_db or tooOld: #if not in db or db needs to be updated(along with memcache), we send http requests, and then store to db
                tracks_filtered = [] #going to generate list of track objects
                for a in range(len(tracks)):
                    if tracks[a].get('streamable') == True and \
                       tracks[a].get('duration') > 120000 and \
                       tracks[a].get('duration') < 360000 and \
                       tracks[a].get('commentable') == True and \
                       tracks[a].get('playback_count') > 1000 and \
                       tracks[a].get('comment_count') > 5 and \
                       tracks[a].get('likes_count') > 50: #if this track isn't spam and can be applicable to the app
                        intrack = {}
                        startTime = 0
                        greatestSum = 0
                        #now we find best part based on comment density
                        #retrieve comments
                        #instantiate array with length = length of song in seconds
                        #parse through comments, increment index of list that it appears in
                        #parse through array, set starting index as startTime if sum is greater than greatestSum
                        link = tracks[a].get('uri') + "/comments?client_id=" + client_id
                        comments = json.loads(fetch(link, deadline=200).content) #retrieve comments
                        #are we retrieving comments correctly? sanity check
                        # for b in range(len(comments)):
                        #     arr.append(comments[b].get('timestamp'))
                        #okay this works
                        #calculating startTime based on comment density now
                        arr = [0] * (int(tracks[a].get('duration')/1000)+10)
                        for b in range(len(comments)):
                            if comments[b].get('timestamp') and comments[b].get('timestamp') < len(arr)*1000:
                                arr[int(comments[b].get('timestamp'))/1000] += 1
                        # Sliding window of segmentLen seconds: densest window wins.
                        for index in range(1,len(arr)-segmentLen):
                            tempsum = sum(arr[index:(index+segmentLen)])
                            if tempsum>greatestSum:
                                greatestSum = tempsum
                                startTime = index
                        # how about reddit's hot algorithm? include a hotness attr
                        # hotness value = log(num_likes * 20*num_comments) + time_elapsed/45000
                        if tracks[a].get('release_day'):
                            time_track = datetime.datetime(tracks[a].get('release_year'), tracks[a].get('release_month'), tracks[a].get('release_day'))
                        else:
                            time_track = datetime.datetime(2011,5,1)
                        time_obj = time_track - datetime.datetime(2007, 8, 1)
                        time_dif = time_obj.days*3600*24 + time_obj.seconds
                        hotness = math.log(20*len(comments) * tracks[a].get('likes_count'), 10) + time_dif/45000
                        intrack['hotness'] = hotness
                        # var title: String
                        # var id: Int
                        # var duration: Int
                        # var stream_url: String
                        # var start_time: Int
                        # var permalink_url: String
                        # // Optional Variables (could be nil if not there)
                        # var genre: String?
                        # var subtitle: String?
                        # var artwork_url: String?
                        #extracting only the necessary json parts
                        intrack['start_time'] = startTime*1000
                        attributes = ['id', 'duration', 'stream_url', 'permalink_url', 'genre', 'description', 'artwork_url', 'title', 'comment_count']
                        for attr in attributes:
                            if attr == 'artwork_url': #exception since we want the highest quality album art
                                intrack[attr] = str(tracks[a].get(attr)).replace('large', 't500x500')
                            else:
                                intrack[attr] = tracks[a].get(attr)
                        tracks_filtered.append(intrack)
                track = ReqJSON(genre = genre, json_str=json.dumps(tracks_filtered)) #add to db
                track.put()
                memcache.set('tracks_filtered_'+genre, json.dumps(tracks_filtered))
        ###################################################
        #ABOVE PORTION CAN PROBABLY BE PUT IN CALLBACK FUNCTION. ALSO NEED CALLBACK FUNCTION WITHIN THIS ONE TO HANDLE COMMENT REQUESTS
        if tracks_filtered and not filter_change_needed: #if the filtered tracks list exists in memcache and change isn't needed
            tracks_filtered = json.loads(tracks_filtered) #convert to list of track objects
        #now, to return json
        #just return tracks_filtered list of objects, each one with an additional start time for most popular segment
        #sort randomly (shuffle)
        if tracks_filtered and sortOption == 'random':
            random.shuffle(tracks_filtered)
        #or sort based on reddit's hot algorithm?
        elif tracks_filtered:
            tracks_filtered.sort(key=lambda x: x.get('hotness'), reverse=True)
        self.write(json.dumps(tracks_filtered))
class APIHandler(Handler):
    """Echo endpoint: writes back the path fragment it was given."""

    def get(self, inp):
        self.write(inp)
class MultiGenreHandler(Handler):
    """Aggregate tracks across several genres.

    New URL format: soundsieve-backend.appspot.com/api/<sort option>?genre=<genre>&genre=<genre>...etc
    """

    def get(self):
        self.response.headers['Content-Type'] = 'application/json; charset=UTF-8'
        genre_values = self.request.get_all('genre')
        sort_option = self.request.get('sort')
        combined = []
        # Fan out one backend request per requested genre and merge results.
        for raw_genre in genre_values:
            quoted = urllib.quote(raw_genre)
            fetched = json.loads(fetch('https://soundsieve-backend.appspot.com/api/randomTrack/' + str(quoted), deadline=1500).content)
            if fetched:
                combined = combined + list(fetched)
        if sort_option == 'hot':
            combined.sort(key=lambda t: t.get('hotness'), reverse=True)
        else:
            # Default ordering is a random shuffle.
            random.shuffle(combined)
        self.write(json.dumps(combined))
|
def findall(pattern: str, string_to_search_in: str):
    """Yield every index at which *pattern* occurs in *string_to_search_in*.

    Occurrences may overlap: the search resumes one character after each
    match start, not after the whole match.  (The original docstring still
    referred to the old parameter names ``p`` and ``s``.)

    Yields:
        int: 0-based start index of each occurrence, in increasing order.
    """
    i = string_to_search_in.find(pattern)
    while i != -1:
        yield i
        i = string_to_search_in.find(pattern, i + 1)
|
'''Use this for development'''
from .base import *
from decouple import config
import dj_database_url
# Development host(s) added on top of whatever base settings define.
ALLOWED_HOSTS += ['127.0.0.1']
DEBUG = True
WSGI_APPLICATION = 'home.wsgi.dev.application'
# Database comes from the DATABASE_URL environment variable,
# parsed by dj-database-url.
DATABASES = {
    'default': dj_database_url.config(default=config('DATABASE_URL'))
}
# Local front-end dev server allowed for CORS.
CORS_ORIGIN_WHITELIST = (
    'http://localhost:3000',
)
# Stripe
# Test-mode Stripe keys, read from the environment.
STRIPE_PUBLIC_KEY = config('STRIPE_TEST_PUBLIC_KEY')
STRIPE_SECRET_KEY = config('STRIPE_TEST_SECRET_KEY')
|
import unittest
import time
from BSTestRunner import BSTestRunner
'''
# @Author : minpanpan
# @content : python自带的unittest测试框架
# @File : run.py
# @Software: PyCharm
suite = unittest.TestSuite()#创建测试套件
all_cases = unittest.defaultTestLoader.discover('.','test_*.py')
#找到某个目录下所有的以test开头的Python文件里面的测试用例
for case in all_cases:
suite.addTests(case)#把所有的测试用例添加进来
fp = open('res.html','wb')
runner = HTMLTestRunner.HTMLTestRunner(stream=fp,title='all_tests',description='所有测试情况')
runner.run(suite)
#运行测试
test_dir='./testcase'
report_dir='./report'
discover = unittest.defaultTestLoader.discover(test_dir, pattern='test*.py')
now = time.strftime("%Y-%m-%d-%H_%M_%S",time.localtime(time.time()))
report_name=report_dir+'/'+now+'report.html'
with open(report_name,'wb')as f:
runner=BSTestRunner(stream=f,title='ETM Api Test Report',description='All the Etanmo api test')
runner.run(discover)
'''
import unittest
if __name__=='__main__':
    # Discover every test*.py module under ./testcase and run it with
    # BSTestRunner, writing a timestamped HTML report into ./report.
    test_dir = './testcase'
    report_dir = './report'
    discover = unittest.defaultTestLoader.discover(test_dir, pattern='test*.py')
    now = time.strftime("%Y-%m-%d-%H_%M_%S", time.localtime(time.time()))
    report_name = report_dir + '/' + now + 'report.html'
    # Create the report directory if it is missing; the original crashed
    # with IOError/FileNotFoundError when ./report did not exist.
    import os
    if not os.path.isdir(report_dir):
        os.makedirs(report_dir)
    with open(report_name, 'wb') as f:
        runner = BSTestRunner(stream=f, title='ETM Api Test Report', description='All the Etanmo api test')
        runner.run(discover)
|
import io, os, argparse, random, statistics, codecs

if __name__ == '__main__':
    # Compute descriptive statistics (distinct morph count and average morphs
    # per word) for each data-set size folder under --input, with and
    # without replacement, averaged over 50 random splits.
    parser = argparse.ArgumentParser()
    parser.add_argument('--input', type = str, help = 'input to language folder (e.g. persian)')
    parser.add_argument('--output', type = str, help = 'output file for descriptive statistics')
    parser.add_argument('--lang', type = str, help = 'language')
    args = parser.parse_args()
    lang = args.lang
    choices = ['A'] ### number of morphs and average morphs per word are the same across the five random splits of each data set)
    replacement = ['with', 'without']
    # Target files of split 1A for train/dev/test.
    files = [lang + '_train_tgt_' + '1A', lang + '_dev_tgt_' + '1A', lang + '_test_tgt_' + '1A']
    # NOTE(review): `data` is never used below.
    data = []
    outfile = io.open(args.output, 'w', encoding = 'utf-8')
    outfile.write(' '.join(w for w in ['Language', 'Size', 'Replacement', 'Morphs', 'morphs_per_word']) + '\n')
    # Each directory d is presumably a data-set size (it is later used as
    # int(d) when averaging morphs per word) — confirm directory layout.
    for d in os.listdir(args.input):
        for r in replacement:
            all_morphs = 0
            all_morphs_per_word = 0
            for n in range(1, 51):
                morphs = []
                morphs_per_word = 0
                for choice in choices:
                    split = str(n) + choice
                    train = []
                    dev = []
                    test = []
                    # NOTE(review): the loop variable f is immediately
                    # shadowed by the file handle below.
                    for f in files:
                        with io.open(args.input + d + '/' + r + '/' + f, encoding = 'utf-8') as f:
                            for line in f:
                                # Words are whitespace-joined; '!' separates morphs.
                                toks = ''.join(c for c in line.split())
                                toks = toks.split('!')
                                for morph in toks:
                                    morphs.append(morph)
                                morphs_per_word += len(toks)
                # Collapse to the number of DISTINCT morphs for this split.
                morphs = len(set(morphs))
                all_morphs += morphs
                morphs_per_word = morphs_per_word / int(d)
                all_morphs_per_word += morphs_per_word
            # Average over the 50 random splits.
            all_morphs = round(all_morphs / 50, 2)
            all_morphs_per_word = round(all_morphs_per_word / 50, 2)
            outfile.write(' '.join(str(w) for w in [lang, d, r, all_morphs, all_morphs_per_word]) + '\n')
|
# coding: utf-8
"""
Lilt REST API
The Lilt REST API enables programmatic access to the full-range of Lilt backend services including: * Training of and translating with interactive, adaptive machine translation * Large-scale translation memory * The Lexicon (a large-scale termbase) * Programmatic control of the Lilt CAT environment * Translation memory synchronization Requests and responses are in JSON format. The REST API only responds to HTTPS / SSL requests. ## Authentication Requests are authenticated via REST API key, which requires the Business plan. Requests are authenticated using [HTTP Basic Auth](https://en.wikipedia.org/wiki/Basic_access_authentication). Add your REST API key as both the `username` and `password`. For development, you may also pass the REST API key via the `key` query parameter. This is less secure than HTTP Basic Auth, and is not recommended for production use. # noqa: E501
The version of the OpenAPI document: v2.0
Contact: support@lilt.com
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import lilt
from lilt.api.projects_api import ProjectsApi # noqa: E501
from lilt.rest import ApiException
class TestProjectsApi(unittest.TestCase):
    """ProjectsApi unit test stubs"""
    # NOTE: generated by openapi-generator; the test bodies below are
    # intentionally empty stubs to be filled in by hand.

    def setUp(self):
        # Fresh API client for each test case.
        self.api = lilt.api.projects_api.ProjectsApi()  # noqa: E501

    def tearDown(self):
        pass

    def test_create_project(self):
        """Test case for create_project

        Create a Project  # noqa: E501
        """
        pass

    def test_delete_project(self):
        """Test case for delete_project

        Delete a Project  # noqa: E501
        """
        pass

    def test_get_project(self):
        """Test case for get_project

        Retrieve a Project  # noqa: E501
        """
        pass

    def test_get_project_report(self):
        """Test case for get_project_report

        Retrieve Project report  # noqa: E501
        """
        pass

    def test_get_project_status(self):
        """Test case for get_project_status

        Retrieve Project status  # noqa: E501
        """
        pass

    def test_update_project(self):
        """Test case for update_project

        Update a Project  # noqa: E501
        """
        pass


if __name__ == '__main__':
    unittest.main()
|
'''
To-do : If points <=1024, add random noise
for points >2048, split into two
in both cases, make sure the labels and indices are also taken care
'''
from airplane_kd_helpers import *
import glob
import numpy as np
import os
# ShapeNet synset id -> human-readable class folder name.
class_names = {
    "02691156":"Airplane_02691156",
    "02773838":"Bag_02773838",
    "02954340":"Cap_02954340",
    "02958343":"Car_02958343",
    "03001627":"Chair_03001627",
    "03261776":"Earphone_03261776",
    "03467517":"Guitar_03467517",
    "03624134":"Knife_03624134",
    "03636649":"Lamp_03636649",
    "03642806":"Laptop_03642806",
    "03790512":"Motorbike_03790512",
    "03797390":"Mug_03797390",
    "03948459":"Pistol_03948459",
    "04099429":"Rocket_04099429",
    "04225987":"Skateboard_04225987",
    "04379243":"Table_04379243"}
# Candidate kd-tree input sizes (see module docstring about splitting /
# padding point clouds).
INP_SZ_1 = 1024
INP_SZ_2 = 2048
INP_SZ_3 = 4096
# Fixed number of (padded) points per model in the saved arrays.
NUM_PTS = 2996
def get_fname(folder_name, suffix):
    """Map a synset folder path to '<ClassName>_<suffix>' via class_names."""
    global class_names
    synset_id = folder_name.split('/')[-1]
    return class_names[synset_id] + '_' + suffix
def check_class_equality(c1, c2):
    """Return True when two paths end in the same class (last path segment).

    Used to verify that glob picked up data and label class folders in the
    same order.  (Idiom fix: negative indexing replaces the original
    ``split[len(split)-1]``.)
    """
    return c1.split('/')[-1] == c2.split('/')[-1]
def check_file_equality(f1, f2):
    """Return True when two paths share the same base file name.

    Only the text before the first '.' of the final path segment is
    compared, so 'x/m1.pts' matches 'y/m1.seg'.  (Idiom fix: negative
    indexing replaces the original ``split[len(split)-1]``.)
    """
    stem1 = f1.split('/')[-1].split('.')[0]
    stem2 = f2.split('/')[-1].split('.')[0]
    return stem1 == stem2
# Collecting all of the data
data_folders = ["./data/train_data/*","./data/val_data/*","./data/test_data/*"]
label_folders = ["./data/train_label/*","./data/val_label/*"]
data_fnames = ["X_train.npy","X_val.npy","X_test.npy"]
ind_map_fnames = ["ind_map_train.npy","ind_map_val.npy","ind_map_test.npy"]
label_fnames = ["y_train.npy","y_val.npy"]
print("Processing data..")
for i in range(2): #iterating over train, val
    main_data_folder = data_folders[i]
    main_label_folder = label_folders[i]
    data_classes = sorted(glob.glob(main_data_folder))
    label_classes = sorted(glob.glob(main_label_folder))
    for data_class,label_class in zip(data_classes,label_classes):
        print(data_class)
        # if os.path.exists(get_fname(data_class,data_fnames[i])):
        #     continue
        if(check_class_equality(data_class,label_class) != True):
            print("Glob picks up in different order. Re-write code!")
            exit()
        model_files = sorted(glob.glob(data_class + '/*'))
        label_files = sorted(glob.glob(label_class + '/*'))
        data = []
        ind_maps = []
        labels = []
        for model_file,label_file in zip(model_files,label_files):
            print(model_file)
            if(check_file_equality(model_file,label_file) != True):
                print("Glob picks up in different order. Re-write code!")
                exit()
            pts = read_pts(model_file)
            lbls = read_labels(label_file)
            kd_leaves,kd_inds = create_kd_tree(pts)
            kd_leaves_placed = np.zeros((NUM_PTS,4))
            # Centre the kd leaves inside the fixed-size buffer.  Integer
            # division is required: the original used '/', which yields a
            # float under Python 3 and crashes when used as a slice index.
            ind_start = (NUM_PTS - len(kd_leaves))//2
            kd_leaves_placed[ind_start:ind_start + len(kd_leaves), 0:3] = kd_leaves
            # Fourth column is a validity mask: 1 for real points, 0 for padding.
            kd_leaves_placed[ind_start:ind_start + len(kd_leaves), 3] = np.ones(len(kd_leaves))
            # Padding slots are marked -1 (valid point indices are >= 0).
            # The original wrote (-1)*np.zeros(...), which is all zeros and
            # made padding indistinguishable from point index 0.
            kd_inds_placed = (-1)*np.ones(NUM_PTS)
            kd_inds_placed[ind_start:ind_start + len(kd_leaves)] = kd_inds
            inds_for_lbls = [int(f) for f in kd_inds]
            # Same -1 padding marker for the per-point labels.
            kd_labels_placed = (-1)*np.ones((NUM_PTS,1))
            kd_labels_placed[ind_start:ind_start + len(kd_leaves)] = lbls[inds_for_lbls]
            data.append(kd_leaves_placed)
            ind_maps.append(kd_inds_placed)
            labels.append(kd_labels_placed)
        np.save(get_fname(data_class,data_fnames[i]),data)
        np.save(get_fname(data_class,ind_map_fnames[i]),ind_maps)
        np.save(get_fname(label_class,label_fnames[i]),labels)
# Processing the test set (only points)
# print("Processing test data...")
# test_folder = data_folders[2]
# data_classes = sorted(glob.glob(test_folder))
# for data_class in data_classes:
# print(data_class)
# model_files = sorted(glob.glob(data_class + '/*'))
# data = []
# ind_maps = []
# for model_file in model_files:
# print(model_file)
# pts = read_pts(model_file)
# kd_leaves,kd_inds = create_kd_tree(pts)
# kdl,kdi = augment_kd(kd_leaves,kd_inds)
# for l,i in zip(kdl,kdi):
# data.append(l)
# ind_maps.append(i)
# np.save(get_fname(data_class,data_fnames[2]),data)
# np.save(get_fname(data_class,ind_map_fnames[2]),ind_maps)
|
# -*- coding: utf-8 -*-
from tkinter import *
from getFiles import get_file_size
import picture_factory
RESULT_DELETE = -2
RESULT_SKIP = -1
RESULT_RENAME = 1
RESULT_REPLACE = 2


class Preview:
    """Modal Toplevel that previews one photo (asking for a target directory)
    or two photos side by side so the user can resolve a duplicate
    (delete / replace / rename / skip).

    After the window's event loop returns, the caller reads ``self.result``:
    the chosen directory string in single-photo mode, or a
    ``(RESULT_*, new_name_or_None)`` tuple in two-photo mode.
    """

    def _show_photo(self, photo_lbl, name_lbl, picture, img, path):
        """Render *img* into *photo_lbl* and caption *name_lbl* with the
        file's name, directory and size in kb."""
        photo_lbl.configure(image=img)
        # Keep a Python reference on the label: Tk stores only a weak handle
        # and the image vanishes if Python garbage-collects it.
        photo_lbl.image = img
        name = picture.file_name(path)
        f_dir = picture_factory.dirs(path)
        f_size = get_file_size(path)
        name_lbl.configure(text=str(name + "\n" + f_dir + "\n" + str(f_size / 1000) + " kb"))

    def set_photos(self, new_photo1=None, new_photo2=None, size=None):
        """Show the given photo paths; with no arguments, re-show the photos
        stored at construction time at the current zoom size."""
        if size:
            self.__size = size
        # photo 1: a new path reloads at the current size, otherwise re-show
        if new_photo1:
            img = self.img1.pic(new_photo1, size=self.__size)
            self._show_photo(self.photo1_lbl, self.photo1_name_lbl, self.img1, img, new_photo1)
        else:
            self._show_photo(self.photo1_lbl, self.photo1_name_lbl, self.img1,
                             self.img1.pic(), self.photo1_path)
        # photo 2: same, but optional (absent in single-photo mode)
        if new_photo2:
            img = self.img2.pic(new_photo2, size=self.__size)
            self._show_photo(self.photo2_lbl, self.photo2_name_lbl, self.img2, img, new_photo2)
        elif self.photo2_path:
            self._show_photo(self.photo2_lbl, self.photo2_name_lbl, self.img2,
                             self.img2.pic(), self.photo2_path)

    def select_dir_dis(self):
        """Accept the directory typed into the entry and close the dialog."""
        print("Preview : select_dir_dis : set_dir_dis = ", self.set_dir_dis.get())
        self.result = self.set_dir_dis.get()
        self.win.quit()

    # def on_key_down_photo1_dir_name_entry(self, event):
    #     print("on_key_down_photo1_dir_name_entry : event = ", event)

    def on_key_up_photo1_dir_name_entry(self, event):
        # print("on_key_up_photo1_dir_name_entry : event = ", event)
        # keycode 36 is <Return> on X11 keyboards.
        # NOTE(review): keycodes are platform specific — confirm on Windows/macOS.
        if event.keycode == 36:
            self.select_dir_dis()

    def __init__(self, width=None, height=None, photo_1=None, photo_2=None, index=0):
        """Build either the single-photo or the two-photo layout depending on
        which photo paths are supplied."""
        self.new_pic_name = StringVar()
        self.set_dir_dis = StringVar()
        self.result = None
        # BUGFIX: the old code assigned self.width/self.height only when the
        # argument was None or below the minimum, so e.g. width=600 left
        # self.width unset and win.minsize() raised AttributeError.
        # Clamp to the 500x300 minimum instead.
        self.width = 500 if width is None else max(width, 500)
        self.height = 300 if height is None else max(height, 300)
        self.__size = 2
        self.photo1_path = photo_1
        self.photo2_path = photo_2
        if self.photo1_path:
            self.img1 = picture_factory.Picture(self.photo1_path)
        if self.photo2_path:
            self.img2 = picture_factory.Picture(self.photo2_path)
        self.win = Toplevel()
        self.win.minsize(self.width, self.height)
        self.win.title("preview")
        # call self.close_mod when the close button is pressed
        self.win.protocol("WM_DELETE_WINDOW", self.close_mod)
        self.photo1_lbl = None
        self.photo2_lbl = None
        self.photo1_name_lbl = None
        self.photo2_name_lbl = None
        self.action_lbl = None
        self.rename_btn = None
        self.rename_txt = None
        self.replace_btn = None
        self.skip_btn = None
        self.set_dir_dis.set("")
        self.new_pic_name.set("")
        # add widgets for preview mode and compare preview mode
        if photo_1 and photo_2 is None:
            # --- single-photo mode: photo, metadata, directory entry ---
            self.frame = Frame(self.win)
            self.frame.grid(row=0, column=0, padx=80, pady=80, sticky=NSEW)
            self.photo1_lbl = Label(self.frame)
            self.photo1_name_lbl = Label(self.frame, text="name1")
            self.photo1_creation_date_lbl = Label(self.frame, text="creation date1")
            self.photo1_dir_name_entry = Entry(self.frame, textvariable=self.set_dir_dis)
            self.ok_select_dir_name_btn = Button(self.frame, text="OK", command=self.select_dir_dis)
            self.photo1_lbl.grid(row=0, column=0, padx=10, pady=10, sticky=NSEW)
            self.photo1_name_lbl.grid(row=1, column=0, padx=10, pady=10, sticky=EW)
            self.photo1_creation_date_lbl.grid(row=2, column=0, padx=10, pady=10, sticky=EW)
            self.photo1_dir_name_entry.grid(row=3, column=0, padx=10, pady=10, sticky=EW)
            self.photo1_dir_name_entry.focus_set()
            # self.photo1_dir_name_entry.bind("<KeyPress>", self.on_key_down_photo1_dir_name_entry)
            self.photo1_dir_name_entry.bind("<KeyRelease>", self.on_key_up_photo1_dir_name_entry)
            self.ok_select_dir_name_btn.grid(row=4, column=0, ipadx=10, ipady=10)
            f_c_d = picture_factory.Picture(self.photo1_path)
            self.photo1_creation_date_lbl.configure(text=str(f_c_d.take_date_per()))
            self.photo1_dir_name_entry.focus_set()
        elif photo_1 and photo_2:
            # --- two-photo (duplicate-resolution) mode ---
            self.pic_frame = Frame(self.win)
            self.pic_frame.grid(row=0, column=0, padx=10, pady=10, sticky=NSEW)
            self.photo1_lbl = Label(self.pic_frame)
            self.photo1_name_lbl = Label(self.pic_frame, text="name1")
            self.photo2_lbl = Label(self.pic_frame)
            self.photo2_name_lbl = Label(self.pic_frame, text="name2")
            self.photo1_lbl.grid(row=0, column=0, padx=10, pady=10, sticky=NSEW)
            self.photo2_lbl.grid(row=0, column=2, padx=10, pady=10, sticky=NSEW)
            self.action_lbl = Label(self.pic_frame, text=" > ")
            self.action_lbl.grid(row=0, column=1, pady=5, sticky=NS)
            self.photo1_name_lbl.grid(row=1, column=0, padx=10, pady=10, sticky=EW)
            self.photo2_name_lbl.grid(row=1, column=2, padx=10, pady=10, sticky=EW)
            self.btn_frame = Frame(self.win)
            self.btn_frame.grid(row=1, column=0, padx=10, pady=10, sticky=NSEW)
            # BUGFIX: rsplit keeps dots inside the base name ("img.v2.jpg")
            # intact; the old split('.', -1) produced >2 parts and crashed
            # the unpacking for such names.
            name, extension = picture_factory.name(self.photo1_path).rsplit('.', 1)
            s = (name + '_' + str(index) + '.' + extension)
            self.new_pic_name.set(s)
            # print('preview_gui : Preview : __init__ : new_pic_name = ', self.new_pic_name.get())
            self.delete_btn = Button(self.btn_frame, text="Delete", command=self.on_click_delete_btn)
            self.replace_btn = Button(self.btn_frame, text="Replace", command=self.on_click_replace_btn)
            self.rename_txt = Entry(self.btn_frame, text=self.new_pic_name)
            self.rename_btn = Button(self.btn_frame, text="Rename", command=self.on_click_rename_btn)
            self.skip_btn = Button(self.btn_frame, text="Skip", command=self.on_click_skip_btn)
            self.rename_txt.grid(row=0, column=2, padx=10, pady=10, sticky=EW)
            self.delete_btn.grid(row=1, column=0, padx=20, pady=10, sticky=EW)
            self.replace_btn.grid(row=1, column=1, padx=10, pady=10, sticky=EW)
            self.rename_btn.grid(row=1, column=2, padx=20, pady=10, sticky=EW)
            self.skip_btn.grid(row=1, column=3, padx=20, pady=10, sticky=EW)
            self.skip_btn.focus_set()
        self.set_photos(new_photo1=photo_1, new_photo2=photo_2)

    # remove this function and the call to protocol
    # then the close button will act normally
    def close_mod(self):
        """Window-manager close: in two-photo mode it counts as a skip."""
        if self.photo1_path and self.photo2_path:
            self.result = RESULT_SKIP, None
        self.win.quit()

    def on_click_rename_btn(self):
        self.result = RESULT_RENAME, self.new_pic_name.get()
        self.win.quit()

    def on_click_replace_btn(self):
        self.result = RESULT_REPLACE, None
        self.win.quit()

    def on_click_skip_btn(self):
        self.result = RESULT_SKIP, None
        self.win.quit()

    def on_click_delete_btn(self):
        self.result = RESULT_DELETE, None
        self.win.quit()
import json
import os
import sys
DEFAULT_OPTION_FILE = 'option.json'
ERROR_FORMAT = '\n* ERROR: [%s] [%s]\n'


class Option:
    """Loads ``option.json`` and exposes its ``source_path`` entry.

    All lookup methods follow the ``(error, value)`` convention: ``error`` is
    True on failure and the value is then empty.
    """

    def __init__(self):
        self.option = ''       # parsed JSON object once set_option() succeeds
        self.source_path = ''  # cached value of option["source_path"]

    def get_option(self):
        """Return the raw parsed option object ('' until set_option runs)."""
        return self.option

    def set_option(self):
        """Read and parse DEFAULT_OPTION_FILE. Returns True on error."""
        if not os.path.isfile(DEFAULT_OPTION_FILE):
            print(ERROR_FORMAT % ('set_option', '%s not found' % (DEFAULT_OPTION_FILE)))
            return True
        try:
            # Context manager closes the handle; the old open().read() leaked
            # it until garbage collection.
            with open(DEFAULT_OPTION_FILE, 'r') as fp:
                self.option = json.load(fp)
        except json.decoder.JSONDecodeError as e:
            print(ERROR_FORMAT % ('set_option', e))
            return True
        return False

    def Source_path(self):
        """Return (error, source_path) straight from the parsed options."""
        if 'source_path' not in self.option:
            return True, ''
        return False, self.option["source_path"]

    def get_source_path(self):
        """Return (error, source_path) from the cached value."""
        if self.source_path == '':
            print(ERROR_FORMAT % ('source_path', 'there is no source_path in %s' % DEFAULT_OPTION_FILE))
            return True, ''
        return False, self.source_path

    def run(self):
        """Load the option file and return (error, source_path)."""
        error = self.set_option()
        if error:
            return True, ''
        error, result = self.Source_path()
        self.source_path = result
        # BUGFIX: propagate the lookup error; the old code always returned
        # False even when source_path was missing.
        return error, result
|
print "hello"
"""
Material from James' Discussion
* Open Office Hours *Every* Tuesday at the NYC Python Meetup
* Python has particular libraries that add tremendous values
* Python is:
* Readable
* Highly restrictable
* Coordinating a number of pieces
* Exceptionally performant
* A model for OOP in Python
* Real World
* Problem Model
* i.e. What's my P&L
* Analytical Model
* Written in Code (CS) Primitives
* i.e. Pseudo-Code
* The Code
* In language primitives
* OOP allows you to employ
* Modularizations
* Abstractions
* Python vs. R
* Python can do R
* (R can't do Python)
* Python is better with strings
* Python can be used as an orchestrator
"""
|
""" Given an array of points where points[i] = (xi,yi) represents a point on the X-Y plane and an integer k, return the k closest points to the origin (0,0).
The distance between two points on the X-Y plane is the Euclidean distance (i.e., √((x1 - x2)2 + (y1 - y2)2)).
You may return the answer in any order. The answer is guaranteed to be unique (except for the order that it is in).
You may use the built-in sorted function. """
from math import sqrt
def kClosest(points, k):
points.sort(key = lambda x: sqrt((x[0])**2+(x[1])**2))
return points[:k]
print(kClosest([(1,3),(-2,2)], 1)) #[(-2, 2)]
print(kClosest([(3,3),(5,-1),(-2,4)], 2)) #[(-2,4),(3,3)]
|
from datetime import timedelta, datetime
import numpy as np
class Video:
    """One YouTube video's metadata plus engagement features derived from its
    statistics (rates per minute of the video's lifetime)."""
    # Class-level defaults; __init__ overrides them per instance when the
    # corresponding field is present in the API payload.
    title =""
    url = ""
    description = ""
    #feature
    viewCount = 0
    likeCount = 0
    dislikeCount = 0
    commentCount = 0
    thumbnailUrl = ""
    # NOTE(review): this default is the *function object* datetime.now (no
    # call). Harmless because __init__ always overwrites it, but likely a typo.
    publishedAt = datetime.now
    postedTime = timedelta(days=1)
    #calculation
    X1commentPerView = 0.0
    X2viewScore = 0
    X3likePerView = 0
    X4likePerLikeAndDislike = 0
    X5similarity = 0
    # features = []
    def __init__(self, id, snippetData, statisticsData):
        """Populate counters from statisticsData, metadata from snippetData,
        and compute the per-minute engagement scores."""
        # Counters can be absent (comments/ratings disabled); the class-level
        # zeros then remain in effect.
        if "viewCount" in statisticsData:
            self.viewCount = int(statisticsData["viewCount"])
        if "commentCount" in statisticsData:
            self.commentCount = int(statisticsData["commentCount"])
        if "likeCount" in statisticsData:
            self.likeCount = int(statisticsData["likeCount"])
        if "dislikeCount" in statisticsData:
            self.dislikeCount = int(statisticsData["dislikeCount"])
        self.title = snippetData["title"]
        self.description = snippetData["description"]
        # Assumes the timestamp always ends in ".000Z" — TODO confirm against
        # the API; other fractional seconds would raise ValueError here.
        self.publishedAt = datetime.strptime(snippetData["publishedAt"],"%Y-%m-%dT%H:%M:%S.000Z")
        # Age of the video in whole days.
        self.postedTime = int((datetime.today() - self.publishedAt).days)
        self.url = "http://youtu.be/" + id
        if self.postedTime != 0:
            # Views per minute of lifetime (postedTime*24*60 minutes).
            self.X2viewScore = float(self.viewCount/float(self.postedTime*24*60))
        #print self.X2viewScore
        if self.X2viewScore != 0:
            # Comment/like rates normalised by the view rate.
            self.X1commentPerView = float((self.commentCount/float(self.postedTime*24*60))/(float(self.X2viewScore)))
            self.X3likePerView = float((self.likeCount/float(self.postedTime*24*60))/(float(self.X2viewScore)))
        if self.likeCount+self.dislikeCount != 0:
            # NOTE(review): the parentheses place *100 in the denominator, so
            # this is like/(total*100), not a percentage — confirm intent.
            self.X4likePerLikeAndDislike = float(self.likeCount/(float(self.likeCount+self.dislikeCount)*100))
        # print np.array([self.X1commentPerView, self.X2viewScore, self.X3likePerView, self.X4likePerLikeAndDislike, self.X5similarity])
        # self.features = np.array([self.X1commentPerView, self.X2viewScore, self.X3likePerView, self.X4likePerLikeAndDislike, self.X5similarity])
|
from sense_hat import SenseHat, ACTION_PRESSED
from time import sleep
import requests
import json
red = [255, 0, 0]
green = [0, 255, 0]

sense = SenseHat()

# Full 8x8 (64 pixel) solid-colour frames.  The inner lists are shared, which
# is fine because set_pixels only reads them.
redMatrix = [red] * 64
greenMatrix = [green] * 64


def fetch_and_show_user():
    """Fetch one random user, cache the raw payload to data.json, and scroll
    the user's first name across the LED matrix."""
    r = requests.get('https://randomuser.me/api/?results=1').json()
    with open('data.json', 'w') as outfile:
        json.dump(r, outfile)
    sense.show_message(r["results"][0]["name"]["first"])


fetch_and_show_user()
while True:
    for event in sense.stick.get_events():
        # Use the imported ACTION_PRESSED constant instead of a bare string.
        if event.action == ACTION_PRESSED:
            if event.direction == "left":
                sense.set_pixels(redMatrix)
            elif event.direction == "right":
                sense.set_pixels(greenMatrix)
            fetch_and_show_user()
            sleep(0.5)
            sense.clear()
from collections import deque
class TreeNode(object):
    """A binary-tree node: a value plus optional left/right children."""

    def __init__(self, x):
        self.val = x
        self.left = None
        self.right = None

    def __repr__(self):
        return 'TreeNode({!r})'.format(self.val)

    def __str__(self):
        return repr(self)
def from_list(lst):
    """ Construct and return a tree. We use the leetcode binary tree
    representation here. See https://leetcode.com/faq/#binary-tree """
    if not lst or lst[0] is None:
        return None
    root = TreeNode(lst[0])
    pending = deque([root])
    pos = 1
    while pos < len(lst):
        # Consume the next two list slots as the left/right children of the
        # oldest node still waiting for children (BFS order).
        children = []
        for _ in range(2):
            child = None
            if pos < len(lst) and lst[pos] is not None:
                child = TreeNode(lst[pos])
                pending.append(child)
            pos += 1
            children.append(child)
        parent = pending.popleft()
        parent.left, parent.right = children
    return root
def test_from_list():
    """Sanity checks for from_list on empty, complete, and sparse inputs."""
    # Empty / all-None inputs yield no tree at all.
    assert from_list([]) is None
    assert from_list([None]) is None

    # A complete two-level tree.
    full = from_list([1, 2, 3])
    assert full.val == 1
    assert (full.left.val == 2 and full.left.left is None and full.left.right is None)
    assert (full.right.val == 3 and full.right.left is None and full.right.right is None)

    # A sparse tree: None entries leave gaps in the level-order layout.
    sparse = from_list([1, None, 2, 3, None, None, 4])
    assert sparse.val == 1 and sparse.left is None
    node2 = sparse.right
    assert node2.val == 2 and node2.right is None
    node3 = node2.left
    assert node3.val == 3 and node3.left is None
    node4 = node3.right
    assert node4.val == 4 and node4.left is None and node4.right is None
|
""" foxtail/appointments/views.py """
from django.shortcuts import redirect, render
from django.views.generic import TemplateView, UpdateView
from .models import Appointment
def AppointmentWaiverUploadView(request, token):
    """Waiver-upload page reached through a signed token link.

    Redirects home when the token does not resolve to an appointment;
    otherwise renders (GET) or processes (POST) the upload form.
    """
    appointment = Appointment.verify_waiver_upload_token(token)  # Get the relevant Appointment object instance.
    if not appointment:
        return redirect('home')
    # NOTE(review): AppointmentWaiverUploadForm is not imported in this
    # module — confirm where it is defined before shipping.
    if request.method == 'POST':
        form = AppointmentWaiverUploadForm(request.POST)
        if form.is_valid():
            # process data
            # send a message to the message network
            return redirect('success page wherever that is')
    else:
        # BUGFIX: GET requests previously reached render() with `form`
        # unbound, raising NameError; bind an empty form instead.
        form = AppointmentWaiverUploadForm()
    return render(request, 'appointments/upload_waiver_form.html', {'token': token, 'form': form})
# Later see: https://stackoverflow.com/questions/40250533/the-view-manager-views-login-didnt-return-an-httpresponse-object-it-returned-n?rq=1
#class AppointmentWaiverUploadView(TemplateView):
# model = Appointment
# fields = ['waiver']
|
from pkg_resources import resource_filename # @UnresolvedImport
from tornado.web import RequestHandler
import cairosvg
import json
import os
class ColoursHandler(RequestHandler):
    ''' returns a .png for an svg template '''

    def get_template_path(self):
        ''' overrides the template path to use this module '''
        return resource_filename('spddo.mongo', "svg")

    def get(self, path=None):
        """Render colours.svg with the requested palette and reply as PNG.

        Query arguments: one/two/three/letter are colour ids looked up in
        colours.json; initial is the letter to draw; w/h set the output
        size in pixels (default 64).
        """
        # Context manager closes the file; json.load(open(...)) leaked the
        # handle until garbage collection.
        with open(os.path.join(self.get_template_path(),
                               "colours.json")) as fp:
            colours = json.load(fp)

        def colour_by_id(arg, default):
            # Map the query argument to the colour whose "id" matches.
            id_ = int(self.get_argument(arg, default))
            for colour in colours.get("colours", []):
                if colour.get("id") == id_:
                    return colour['colour']
            raise Exception("No such colour {}".format(id_))

        svg = self.render_string(
            "colours.svg",
            colour_one=colour_by_id("one", "4"),
            colour_two=colour_by_id("two", "4"),
            colour_three=colour_by_id("three", "4"),
            colour_letter=colour_by_id("letter", "5"),
            initial=self.get_argument("initial", ""))
        width = int(self.get_argument("w", 64))
        height = int(self.get_argument("h", 64))
        self.set_header("content-type", "image/png")
        self.write(cairosvg.svg2png(svg,  # @UndefinedVariable
                                    parent_width=width,
                                    parent_height=height))
|
# -*- coding: utf-8 -*-
# @Author: vivekpatel99
# @Date: 2018-10-06 15:44:29
# @Last Modified by: vivekpatel99
# @Last Modified time: 2018-10-06 15:44:29
"""
This script intializes the logger and contains all the info and functions for logger
"""
import logging
# ------------------------------------------------------------------------------
# """ logger_init """
# ------------------------------------------------------------------------------
def logger_init(log_filepath, project_name):
    """Create (or fetch) the project logger.

    Attaches a file handler (INFO+, truncating *log_filepath*) and a console
    handler (DEBUG+). Configuration happens only once per logger name; later
    calls return the already-configured logger unchanged.
    """
    logger = logging.getLogger(project_name)
    if not logger.handlers:
        # First call for this name: configure from scratch.
        logger.propagate = 0  # keep records out of the root logger
        logger.setLevel(logging.DEBUG)
        formatter = logging.Formatter("%(name)s - %(levelname)s - %(message)s")
        # File handler: INFO and above, fresh file each run.
        file_handler = logging.FileHandler(log_filepath, "w+")
        file_handler.setLevel(logging.INFO)
        file_handler.setFormatter(formatter)
        # Console handler: everything from DEBUG up.
        console_handler = logging.StreamHandler()
        console_handler.setLevel(logging.DEBUG)
        console_handler.setFormatter(formatter)
        logger.addHandler(file_handler)
        logger.addHandler(console_handler)
    return logger


if __name__ == "__main__":
    pass
#!/usr/bin/python
#\file slider_style1.py
#\brief Test styles of slider.
#\author Akihiko Yamaguchi, info@akihikoy.net
#\version 0.1
#\date Apr.15, 2021
import sys
from PyQt4 import QtCore,QtGui
#from PyQt5 import QtCore,QtWidgets
#QtGui= QtWidgets
def Print(*s):
    # Python 2 print statement: emit every argument separated by spaces
    # (the trailing comma suppresses the newline), then end the line.
    for ss in s: print ss,
    print ''
# QProxyStyle exists only in Qt5-era bindings; under PyQt4 this class
# definition fails and the except branch disables the Style-5 option.
try:
    class SliderProxyStyle(QtGui.QProxyStyle):
        # Enlarge the slider metrics the style engine queries, producing a
        # much bigger handle/groove than the default.
        def pixelMetric(self, metric, option, widget):
            if metric==QtGui.QStyle.PM_SliderThickness:
                return 68
            elif metric==QtGui.QStyle.PM_SliderLength:
                return 68
            return super(SliderProxyStyle, self).pixelMetric(metric, option, widget)
except:
    # NOTE(review): bare except also hides unrelated errors — acceptable here
    # only because any failure means the proxy style is unavailable.
    SliderProxyStyle= None
class TSlider(QtGui.QWidget):
    """Demo window: a combo box switches a tick-marked horizontal slider
    between five visual styles; a button exits when the slider's mapped
    value is below 1500."""
    def __init__(self):
        QtGui.QWidget.__init__(self)
        self.InitUI()
    def SetSliderStyle(self, style):
        """Apply the style sheet (Style-1..4) or QProxyStyle (Style-5)
        selected in the combo box to self.slider1."""
        if style=='Style-1':
            # Default platform look: clear any custom sheet.
            self.slider1.setStyleSheet('')
        elif style=='Style-2':
            # Slim groove with a large rectangular handle.
            self.slider1.setStyleSheet('''
          QSlider {
            min-height: 68px;
            max-height: 68px;
            height: 68px;
          }
          QSlider::groove:horizontal {
            background: transparent;
            border: 2px solid #333;
            height: 6px;
            padding: -0 -2px;
            margin: -0 0px;
          }
          QSlider::handle:horizontal {
            background: qlineargradient(x1:0, y1:0, x2:1, y2:1, stop:0 #b4b4b4, stop:1 #8f8f8f);
            border: 1px solid #5c5c5c;
            width: 60px;
            margin: -30px 0;
            border-radius: 3px;
          }
        ''')
        elif style=='Style-3':
            # Tall groove that fully encloses the handle.
            self.slider1.setStyleSheet('''
          QSlider {
            height: 68px;
          }
          QSlider::groove:horizontal {
            background: transparent;
            border: 2px solid #aaa;
            height: 60px;
            margin: -0 0px;
          }
          QSlider::handle:horizontal {
            background: qlineargradient(x1:0, y1:0, x2:1, y2:1, stop:0 #b4b4b4, stop:1 #8f8f8f);
            border: 1px solid #5c5c5c;
            width: 60px;
            margin: -0px 0;
            border-radius: 3px;
          }
        ''')
        elif style=='Style-4':
            # Thin track with a round handle and hover/pressed colours.
            self.slider1.setStyleSheet('''
          QSlider {
            height: 68px;
          }
          QSlider::groove:horizontal {
            border-radius: 1px;
            height: 3px;
            margin: 0px;
            background-color: rgb(52, 59, 72);
          }
          QSlider::handle:horizontal {
            background-color: rgb(85, 170, 255);
            border: none;
            height: 40px;
            width: 40px;
            margin: -20px 0;
            border-radius: 20px;
            padding: -20px 0px;
          }
          QSlider::handle:horizontal:hover {
            background-color: rgb(155, 180, 255);
          }
          QSlider::handle:horizontal:pressed {
            background-color: rgb(65, 255, 195);
          }
        ''')
        elif style=='Style-5':
            # Requires the QProxyStyle subclass (PyQt5 only; see module top).
            if SliderProxyStyle is None:
                print 'Style-5 uses PyQt5. Comment out the PyQt4 import line and uncomment the PyQt import lines.'
                return
            self.slider1.setStyleSheet('')
            style= SliderProxyStyle(self.slider1.style())
            self.slider1.setStyle(style)
    def InitUI(self):
        """Build the window: style combo box, slider with value label and
        tick labels, and the exit button."""
        # Set window size.
        self.resize(600, 300)
        # Set window title
        self.setWindowTitle('Slider')
        mainlayout= QtGui.QVBoxLayout()
        self.setLayout(mainlayout)
        # Combo box selecting one of the five slider styles.
        cmbbx1= QtGui.QComboBox(self)
        cmbbx1.addItem('Style-1')
        cmbbx1.addItem('Style-2')
        cmbbx1.addItem('Style-3')
        cmbbx1.addItem('Style-4')
        cmbbx1.addItem('Style-5')
        #cmbbx1.setCurrentIndex(0)
        cmbbx1.activated[str].connect(lambda:(self.SetSliderStyle(self.cmbbx1.currentText()), Print('Selected',self.cmbbx1.currentText())))
        self.cmbbx1= cmbbx1
        mainlayout.addWidget(cmbbx1)
        vspacer0= QtGui.QSpacerItem(10, 10, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Expanding)
        mainlayout.addItem(vspacer0)
        slidergroup= QtGui.QGridLayout()
        #slidergroup.move(10, 60)
        #slidergroup.resize(10, 60)
        #self.setLayout(slidergroup)
        mainlayout.addLayout(slidergroup)
        #slider1= QtGui.QSlider(QtCore.Qt.Vertical, self)
        # Slider over 0..8 steps; toValue maps a step to 1000 + 100*step.
        slider1= QtGui.QSlider(QtCore.Qt.Horizontal, self)
        slider1.setTickPosition(QtGui.QSlider.TicksBothSides)
        slider1.setRange(0, 8)
        slider1.setTickInterval(1)
        slider1.setSingleStep(1)
        slider1.setValue(6)
        #slider1.move(10, 60)
        slider1.resize(100, 20)
        slider1.toValue= lambda: 1000 + 100*self.slider1.value()
        slider1.valueChanged.connect(lambda:self.label1.setText(str(self.slider1.toValue())))
        self.slider1= slider1
        slidergroup.addWidget(slider1, 0, 0, 1, 5)
        # Live numeric readout of the mapped value.
        label1= QtGui.QLabel('0',self)
        self.label1= label1
        slidergroup.addWidget(label1, 0, 5, 1, 1)
        self.label1.setText(str(self.slider1.toValue()))
        # Static tick labels under the slider (1000..1800).
        labelt1= QtGui.QLabel('1000',self)
        slidergroup.addWidget(labelt1, 1, 0, 1, 1, QtCore.Qt.AlignLeft)
        labelt2= QtGui.QLabel('1200',self)
        slidergroup.addWidget(labelt2, 1, 1, 1, 1, QtCore.Qt.AlignLeft)
        labelt3= QtGui.QLabel('1400',self)
        slidergroup.addWidget(labelt3, 1, 2, 1, 1, QtCore.Qt.AlignCenter)
        labelt4= QtGui.QLabel('1600',self)
        slidergroup.addWidget(labelt4, 1, 3, 1, 1, QtCore.Qt.AlignRight)
        labelt5= QtGui.QLabel('1800',self)
        slidergroup.addWidget(labelt5, 1, 4, 1, 1, QtCore.Qt.AlignRight)
        vspacer1= QtGui.QSpacerItem(10, 10, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Expanding)
        slidergroup.addItem(vspacer1, 2, 0, 1, 6)
        mainlayout.addItem(vspacer0)
        # Add a button
        btn1= QtGui.QPushButton('_________Exit?_________', self)
        #btn1.setFlat(True)
        btn1.setToolTip('Click to make something happen')
        # Exits only when the mapped value is below 1500; otherwise hints.
        btn1.clicked.connect(lambda:self.close() if self.slider1.toValue()<1500 else Print('Hint: Set value less than 1500 to exit'))
        btn1.resize(btn1.sizeHint())
        #btn1.move(100, 150)
        self.btn1= btn1
        mainlayout.addWidget(btn1)
        # Show window
        self.show()
# Create an PyQT4 application object.
a = QtGui.QApplication(sys.argv)
# The QWidget widget is the base class of all user interface objects in PyQt4.
w = TSlider()
# Hand control to Qt's event loop and propagate its exit status.
sys.exit(a.exec_())
|
"""
Time/Space Complexity = O(N)
"""
class Solution:
    def rotate(self, nums: 'List[int]', k: int) -> None:
        """
        Do not return anything, modify nums in-place instead.

        Rotates the list right by k positions in O(n) time and O(n) extra
        space.  (The annotation is quoted because `typing.List` is never
        imported here; unquoted it raised NameError at class definition.)
        """
        # Nothing to rotate: empty list or k == 0.
        if not nums or not k:
            return nums
        k = k % len(nums)
        # Slice-and-splice: the last k elements move to the front.  Assigning
        # through nums[:] keeps the change visible to the caller.
        nums[:] = nums[-k:] + nums[:-k]
import re
from ctypes import (
c_int,
c_long,
c_longlong,
c_uint,
c_ulong,
c_ulonglong,
c_float,
c_double,
c_char,
c_char_p,
)
from twitter.common.lang import Compatibility
class ScanfResult(object):
    """Container for the values captured by ScanfParser.parse().

    Named captures live in a dict (also exposed as attributes); anonymous
    captures accumulate in a list, iterable directly.
    """

    def __init__(self):
        self._dict = {}
        self._list = []

    def groups(self):
        """
        Matched named parameters.
        """
        return self._dict

    def __getattr__(self, key):
        # Expose named captures as attributes; EAFP lookup.
        try:
            return self._dict[key]
        except KeyError:
            raise AttributeError('Could not find attribute: %s' % key)

    def ungrouped(self):
        """
        Matched unnamed parameters.
        """
        return self._list

    def __iter__(self):
        return iter(self._list)
class ScanfParser(object):
    class ParseError(Exception): pass
    """
    Partial scanf emulator.
    """
    # Maps a scanf conversion (without '%') to (regex fragment, ctype used to
    # coerce the captured text).
    CONVERSIONS = {
        "c": (".", c_char),
        "d": ("[-+]?\d+", c_int),
        "ld": ("[-+]?\d+", c_long),
        "lld": ("[-+]?\d+", c_longlong),
        "f": (r"[-+]?[0-9]*\.?[0-9]*(?:[eE][-+]?[0-9]+)?", c_float),
        "s": ("\S+", c_char_p),
        "u": ("\d+", c_uint),
        "lu": ("\d+", c_ulong),
        "llu": ("\d+", c_ulonglong),
    }
    # ctypes don't do str->int conversion, so must preconvert for non-string types
    PRECONVERSIONS = {
        c_char: str, # to cover cases like unicode
        c_int: int,
        c_long: int,
        c_longlong: long if Compatibility.PY2 else int,
        c_uint: int,
        c_ulong: int,
        c_ulonglong: long if Compatibility.PY2 else int,
        c_float: float,
        c_double: float
    }
    def _preprocess_format_string(self, string):
        """Translate a scanf-style format string into (regex, applicators).

        Each applicator consumes one captured group and stores the converted
        value on a ScanfResult: into its dict for named captures, its list
        for anonymous ones, or nowhere for suppressed ('%*') captures.
        """
        def match_conversion(string, k):
            # Longest-match lookup of the conversion starting at string[k]
            # so multi-character specs like 'lld' win over 'd'.
            MAX_CONVERSION_LENGTH = 3
            for offset in range(MAX_CONVERSION_LENGTH, 0, -1):
                k_offset = k + offset
                if string[k:k_offset] in ScanfParser.CONVERSIONS:
                    re, converter = ScanfParser.CONVERSIONS[string[k:k_offset]]
                    if converter in ScanfParser.PRECONVERSIONS:
                        # Wrap so the captured str is first coerced to the
                        # matching Python type, then to the ctype.
                        return (re, lambda val: converter(ScanfParser.PRECONVERSIONS[converter](val))), k_offset
                    else:
                        return (re, converter), k_offset
            raise ScanfParser.ParseError('%s is an invalid format specifier' % (
                string[k]))
        def extract_specifier(string, k):
            # '%%' is a literal percent sign: emits '%', no applicator.
            if string[k] == '%':
                return '%', None, k+1
            # '%*spec' matches but discards the value.
            if string[k] == '*':
                def null_apply(scan_object, value):
                    pass
                (regex, preconversion), k = match_conversion(string, k+1)
                return '(%s)' % regex, null_apply, k
            # '%(name)spec' stores the value under scan_object._dict[name].
            if string[k] == '(':
                offset = string[k+1:].find(')')
                if offset == -1:
                    raise ScanfParser.ParseError("Unmatched (")
                if offset == 0:
                    raise ScanfParser.ParseError("Empty label string")
                name = string[k+1:k+1+offset]
                (regex, preconversion), k = match_conversion(string, k+1+offset+1)
                def dict_apply(scan_object, value):
                    scan_object._dict[name] = preconversion(value).value
                return '(%s)' % regex, dict_apply, k
            # Plain '%spec' appends to the anonymous capture list.
            (regex, preconversion), k = match_conversion(string, k)
            def list_apply(scan_object, value):
                scan_object._list.append(preconversion(value).value)
            return '(%s)' % regex, list_apply, k
        re_str = ""
        k = 0
        applicators = []
        while k < len(string):
            if string[k] == '%' and len(string) > k+1:
                regex, applicator, k = extract_specifier(string, k+1)
                re_str += regex
                if applicator:
                    applicators.append(applicator)
            else:
                # Every non-specifier character must match literally.
                re_str += re.escape(string[k])
                k += 1
        return re_str, applicators
    def parse(self, line, allow_extra=False):
        """
        Given a line of text, parse it and return a ScanfResult object.

        Raises ScanfParser.ParseError when the line does not match the
        format, or (unless allow_extra) when trailing text remains.
        """
        if not isinstance(line, Compatibility.string):
            raise TypeError("Expected line to be a string, got %s" % type(line))
        sre_match = self._re.match(line)
        if sre_match is None:
            raise ScanfParser.ParseError("Failed to match pattern: %s against %s" % (
                self._re_pattern, line))
        groups = list(sre_match.groups())
        if len(groups) != len(self._applicators):
            raise ScanfParser.ParseError("Did not parse all groups! Missing %d" % (
                len(self._applicators) - len(groups)))
        if sre_match.end() != len(line) and not allow_extra:
            raise ScanfParser.ParseError("Extra junk on the line! '%s'" % (
                line[sre_match.end():]))
        so = ScanfResult()
        # Feed each captured group to its applicator in order.
        for applicator, group in zip(self._applicators, groups):
            applicator(so, group)
        return so
    def __init__(self, format_string):
        """
        Given a format string, construct a parser.
        The format string takes:
           %c %d %u %f %s
        %d and %u take l or ll modifiers
        you can name parameters %(hey there)s and the string value will be keyed by "hey there"
        you can parse but not save parameters by specifying %*f
        """
        if not isinstance(format_string, Compatibility.string):
            raise TypeError('format_string should be a string, instead got %s' % type(format_string))
        self._re_pattern, self._applicators = self._preprocess_format_string(format_string)
        self._re = re.compile(self._re_pattern)
|
#!/usr/bin/env python3
# Created by: Crestel Ong
# Created on: Sept 2021
# This programs calculates the perimeter of a hexagon
# user inputs the length of one side
import constants
def main():
    """Ask for a hexagon's side length and print its perimeter."""
    print("We will be calculating the perimeter of a hexagon.")
    # input
    print("")
    try:
        side_length = int(input("Enter the length of one side (cm) : "))
    except ValueError:
        # A non-integer entry used to crash with a traceback.
        print("Please enter a whole number of centimetres.")
        return
    # process
    perimeter = constants.NUMBER_OF_SIDES * side_length
    # output
    print("The perimeter of this hexagon is {0} cm.".format(perimeter))
    print("\nDone.")


if __name__ == "__main__":
    main()
|
from functools import reduce
from function import *
from utils import *
from splines_interpolation import spline_intepolation_coefs, spline_interpolation
def generate_grid(n, x_start, x_end, y_start, y_end):
    """Return n equally spaced nodes along each axis as an (xs, ys) pair."""
    return (find_equally_spaced(x_start, x_end, n),
            find_equally_spaced(y_start, y_end, n))
def lagrange_basis(i, nodes, point):
    """Evaluate the i-th Lagrange basis polynomial for *nodes* at *point*.

    The basis is 1 at nodes[i] and 0 at every other node.  Uses math.prod,
    which also handles the single-node case (empty product = 1) that the
    old reduce() call crashed on with TypeError.
    """
    from math import prod  # local import keeps the module's imports untouched
    return prod((point - nodes[p]) / (nodes[i] - nodes[p])
                for p in range(len(nodes)) if p != i)
def interpolate_two_variables_function(g, grid):
    """Build the 2-D Lagrange interpolation polynomial for g on *grid*.

    Samples g at every grid node once, then returns a closure evaluating
    the tensor-product Lagrange interpolant at any (x, y).
    """
    x_nodes, y_nodes = grid
    samples = [[g(x, y) for y in y_nodes] for x in x_nodes]

    def lagrange_polynom_function(x, y):
        return sum(samples[i][j]
                   * lagrange_basis(i, x_nodes, x)
                   * lagrange_basis(j, y_nodes, y)
                   for i in range(len(x_nodes))
                   for j in range(len(y_nodes)))

    return lagrange_polynom_function
def two_variables_spline_builder(alpha, beta, gamma, delta, xi):
    """Wrap per-y coefficient functions into a cubic-in-x spline around xi.

    spline(x, y) = a(y) + b(y)*dx + g(y)/2*dx^2 + d(y)/6*dx^3, dx = x - xi.
    """
    def spline(x, y):
        dx = x - xi
        return alpha(y) + beta(y) * dx + (gamma(y) / 2) * dx ** 2 + \
            (delta(y) / 6) * dx ** 3
    return spline
def two_variables_spline_system_builder(splines, nodes):
    """Combine per-segment splines into one piecewise function of (x, y).

    splines[i-1] covers the segment [nodes[i-1], nodes[i]]; x outside every
    segment raises.
    """
    def system(x, y):
        for right_idx in range(1, len(nodes)):
            if nodes[right_idx - 1] <= x <= nodes[right_idx]:
                return splines[right_idx - 1](x, y)
        raise Exception("x is out of range")
    return system
def splines_two_variables_function_interpolation(g, grid):
    """Build a spline surface for g: a cubic spline in x whose per-segment
    coefficients are themselves spline-interpolated along y."""
    x_nodes, y_nodes = grid
    # One set of 1-D spline coefficients (in x) per fixed y node.
    splines_for_y = []
    for y in y_nodes:
        splines_for_y.append(spline_intepolation_coefs(lambda x: g(x, y), x_nodes))
    n = len(x_nodes) - 1
    coefs_interpolated = [[], [], [], []]
    for i in range(4):
        # alphas, betas, gammas, deltas
        for j in range(n):
            # for each segment
            coefs = [splines_for_y[k][i][j] for k in range(len(splines_for_y))]
            # NOTE(review): `coef` closes over the loop variable `coefs`
            # (late binding).  This is only correct if spline_interpolation
            # evaluates `coef` eagerly at the y nodes before the next
            # iteration rebinds `coefs` — confirm in splines_interpolation.
            def coef(y):
                return coefs[y_nodes.index(y)]
            coefs_interpolated[i].append(spline_interpolation(coef, y_nodes))
    # Assemble one spline per x-segment, expanded around its right endpoint.
    splines = [two_variables_spline_builder(coefs_interpolated[0][i], coefs_interpolated[1][i], \
        coefs_interpolated[2][i], coefs_interpolated[3][i], x_nodes[i + 1]) for i in range(n)]
    return two_variables_spline_system_builder(splines, x_nodes)
if __name__ == "__main__":
grid = generate_grid(18, START, END, SECOND_VARIABLE_START, SECOND_VARIABLE_END)
interpolation = interpolate_two_variables_function(g, grid)
splines = splines_two_variables_function_interpolation(g, grid)
grid_to_draw = generate_grid(20, START, END, SECOND_VARIABLE_START, SECOND_VARIABLE_END)
draw_two_variables_fuctions(grid_to_draw, (splines, "Splines"), \
(g, "Original function"), (interpolation, "Lagrange"))
|
class PurchasedGood:
    """A single purchased item: price, category, and sales-tax rate."""

    def __init__(self, price, category="General", tax=0.07):
        self.price = price
        self.category = category
        self.tax = tax

    def calculate_total(self):
        """Return the price including sales tax, rounded to cents."""
        gross = self.price + self.price * self.tax
        return round(gross, 2)
# Demo: defaults (General category, 7% tax).
good_1 = PurchasedGood(5.00)
print(good_1.price)
print(good_1.category)
print(good_1.tax)
print(good_1.calculate_total())
# Demo: overriding the category and tax rate.
good_2 = PurchasedGood(5.00, category = "Grocery", tax = 0.03)
print(good_2.price)
print(good_2.category)
print(good_2.tax)
print(good_2.calculate_total())
|
#.-*- coding:utf-8 .-*-
import sys
import pika
def Main():
    """Publish one message to RabbitMQ on localhost.

    argv[1] is the routing key (default "test"); argv[2:] are the message
    words, joined with spaces (default "test words").
    """
    connection = pika.BlockingConnection(pika.ConnectionParameters("localhost"))
    try:
        channel = connection.channel()
        routing_key = sys.argv[1] if len(sys.argv) > 1 else "test"
        # BUGFIX: keep the fallback as a word list; the old plain-string
        # default made " ".join() split it into single characters
        # ("t e s t   w o r d s").
        message = sys.argv[2:] if len(sys.argv) > 2 else ["test", "words"]
        message_str = " ".join(message)
        channel.basic_publish(exchange="", routing_key=routing_key, body=message_str)
    finally:
        # Close the whole connection (which closes its channels); the old
        # code closed only the channel and leaked the socket.
        connection.close()


if __name__ == '__main__':
    Main()
from espnffl import FFLeague
# Connect to the ESPN fantasy-football league "BTownsFinest" (league id 1083362).
lg = FFLeague('BTownsFinest', 1083362)
|
import unittest
import numpy as np
import pyqg
class LayeredModelTester(unittest.TestCase):
    """Checks pyqg.LayeredModel's stretching matrix, background PV gradients,
    and inversion matrix against hand-built 3-layer references."""
    def setUp(self):
        # Small 3-layer model with no planetary vorticity gradient (beta=0).
        self.m = pyqg.LayeredModel(
            nz = 3,
            U = [.1,.05,.0],
            V = [.1,.05,.0],
            rho= [.1,.3,.5],
            H = [.1,.1,.3],
            f = 1.,
            beta = 0.)
        self.atol=1.e-16
        # creates stretching matrix from scratch
        self.S = np.zeros((self.m.nz,self.m.nz))
        # Coupling coefficients f^2 / (gpi[interface] * Hi[layer]).
        F11 = self.m.f2/self.m.gpi[0]/self.m.Hi[0]
        F12 = self.m.f2/self.m.gpi[0]/self.m.Hi[1]
        F22 = self.m.f2/self.m.gpi[1]/self.m.Hi[1]
        F23 = self.m.f2/self.m.gpi[1]/self.m.Hi[2]
        # Tridiagonal rows for the top, middle and bottom layers.
        self.S[0,0], self.S[0,1] = -F11, F11
        self.S[1,0], self.S[1,1], self.S[1,2] = F12, -(F12+F22), F22
        self.S[2,1], self.S[2,2] = F23, -F23
    def test_stretching(self):
        """ Check if stretching matrix is consistent
        and satisfies basic properties """
        # the columns of the S must add to zero (i.e, S is singular)
        err_msg = ' Zero is not an eigenvalue of S'
        assert np.all(self.m.S.sum(axis=1)==0.) , err_msg
        # the matrix Hi * S must by a symmetric matrix
        HS = np.dot(np.diag(self.m.Hi),self.m.S)
        np.testing.assert_allclose(HS,HS.T,atol=self.atol,
            err_msg=' Hi*S is not symmetric')
        # The model's matrix must equal the reference built in setUp.
        np.testing.assert_allclose(self.m.S,self.S,atol=self.atol,
            err_msg= ' Unmatched stretching matrix')
    def test_init_background(self):
        """ Check the initialization of the mean PV gradiends """
        # Qy = -S U and Qx = S V, computed from the reference S.
        Qy = -np.einsum('ij,j->i',self.S,self.m.Ubg)
        Qx = np.einsum('ij,j->i',self.S,self.m.Vbg)
        np.testing.assert_allclose(Qy,self.m.Qy,atol=self.atol,
            err_msg=' Unmatched Qy')
        np.testing.assert_allclose(Qx,self.m.Qx,atol=self.atol,
            err_msg=' Unmatched Qx ')
    def test_inversion_matrix(self):
        """ Check the inversion matrix """
        # it suffices to test for a single wavenumber
        M = self.S - np.eye(self.m.nz)*self.m.wv2[5,5]
        Minv = np.zeros_like(M)
        detM = np.linalg.det(M)
        # Explicit 3x3 adjugate (cofactor transpose), divided by det below.
        Minv[0,0] = M[1,1]*M[2,2] - M[1,2]*M[2,1]
        Minv[0,1] = M[0,2]*M[2,1] - M[0,1]*M[2,2]
        Minv[0,2] = M[0,1]*M[1,2] - M[0,2]*M[1,1]
        Minv[1,0] = M[1,2]*M[2,0] - M[1,0]*M[2,2]
        Minv[1,1] = M[0,0]*M[2,2] - M[0,2]*M[2,0]
        Minv[1,2] = M[0,2]*M[1,0] - M[0,0]*M[1,2]
        Minv[2,0] = M[1,0]*M[2,1] - M[1,1]*M[2,0]
        Minv[2,1] = M[0,1]*M[2,0] - M[0,0]*M[2,1]
        Minv[2,2] = M[0,0]*M[1,1] - M[0,1]*M[1,0]
        Minv = Minv/detM
        np.testing.assert_allclose(self.m.a[:,:,5,5], Minv,atol=self.atol,
            err_msg= ' Unmatched inversion matrix ')


if __name__ == "__main__":
    unittest.main()
|
from django.conf import settings
from django.urls import reverse
from allauth.account.adapter import DefaultAccountAdapter
from allauth.socialaccount.adapter import DefaultSocialAccountAdapter
class AccountAdapter(DefaultAccountAdapter):
    """Account adapter that gates signup behind a settings flag and points
    confirmation e-mails at the client frontend domain."""

    def is_open_for_signup(self, request):
        # Registration is open unless explicitly disabled in settings.
        return getattr(settings, "ACCOUNT_ALLOW_REGISTRATION", True)

    def send_mail(self, template_prefix, email, context):
        # Rewrite the activation link so it targets the client app instead
        # of the default allauth endpoint.
        confirm_path = "confirm-email/{}/".format(context["key"])
        context["activate_url"] = settings.CLIENT_REDIRECT_DOMAIN + confirm_path
        message = self.render_mail(template_prefix, email, context)
        message.send()

    # def get_email_confirmation_url(self, request, emailconfirmation):
    #     url = reverse(
    #         "account_confirm_email",
    #         args=[emailconfirmation.key])
    #     return settings.CLIENT_REDIRECT_DOMAIN + url
class SocialAccountAdapter(DefaultSocialAccountAdapter):
    """Applies the same ACCOUNT_ALLOW_REGISTRATION gate to social logins."""
    def is_open_for_signup(self, request, sociallogin):
        # Open unless explicitly disabled in settings.
        return getattr(settings, "ACCOUNT_ALLOW_REGISTRATION", True)
|
from adapters.adapter_with_battery import AdapterWithBattery
from devices.sensor.smoke import SmokeSensor
class GasSensorAdapter(AdapterWithBattery):
    """Battery-backed adapter exposing a gas-detection sensor.

    NOTE(review): despite the class name this registers a SmokeSensor —
    confirm SmokeSensor is the intended generic detector device.
    """
    def __init__(self, devices):
        super().__init__(devices)
        # Register the sensor under id and name 'gas'.
        self.devices.append(SmokeSensor(devices, 'gas', 'gas'))
|
import pandas as pd
import os
import glob

# Paths: raw per-file autotag tables and the astrophotography subset.
DIR = "/data/damoncrockett/flickr_data/"
TAGS = DIR + "raw/autotags/"
ASTRO = DIR + "astrophotography/"
DATA = ASTRO + "astro_tag_exif.csv"

df = pd.read_csv(DATA)
ids = list(df['id'])

# BUG FIX / modernization: the original grew `metadata` with
# DataFrame.append() inside the loop (quadratic, and removed in pandas 2.0)
# and used the Python 2 print statement.  Collect the filtered frames in a
# list and concatenate once at the end instead.
frames = []
for counter, path in enumerate(glob.glob(os.path.join(TAGS, "*"))):
    tmp = pd.read_table(path, sep="\t", header=None)
    tmp = tmp[tmp[0].isin(ids)]  # keep only rows for photos in our subset
    frames.append(tmp)
    print(counter, len(tmp))
metadata = pd.concat(frames)

# Column 0 is the photo id, column 1 the autotags blob.
metadata.rename(columns={0: "id", 1: "autotags"}, inplace=True)
metadata.set_index("id", inplace=True)
df = df.join(metadata, on="id")
df.to_csv(ASTRO + "astro_tag_exif_autotags.csv", index=False)
import pymysql
from Database import Database
# Read MySQL connection settings once at import time from the project's
# Database configuration helper.
database = Database()
_host = database.gethost()
_port = database.getport()
_sql_user = database.getuser()
_sql_password = database.getpassword()
_database = database.getdatabase()
# get username:string,password:string,useremail:string, return id
def register(login_name, username, password, useremail, bio):
connection = pymysql.connect(
_host, _port, _sql_user, _sql_password, _sql_password)
# End
try:
with connection.cursor() as cursor:
# Create a new record
# TODO add sql line in here
sql1 = "INSERT INTO `Auth` (`login_name`,`password`,`user_email`) VALUES (%s, %s, %s)"
cursor.execute(sql1, (login_name, password, useremail))
sql2 = "INSERT INTO `User` (`user_name`, `bio`) VALUES (%s, %s)"
cursor.execute(sql2, (username, bio))
# !connection is not autocommit by default. So you must commit to save
# your changes.
connection.commit()
except Exception as e:
print("Wrong", e)
finally:
connection.close()
# Auth
# Auth
def login(username, password):  # get username:string,password:string, return id
    """Return the (user_id,) row matching the credentials, or None."""
    # BUG FIX: keyword connection arguments; the database name was
    # previously replaced by a second copy of the password.
    connection = pymysql.connect(
        host=_host, port=_port, user=_sql_user,
        password=_sql_password, database=_database)
    try:
        with connection.cursor() as cursor:
            # BUG FIX: `Auth.user_id` inside a single pair of backticks is
            # one (non-existent) identifier; quote table and column
            # separately.
            sql = "SELECT `Auth`.`user_id` FROM `Auth` WHERE `Auth`.`login_name` = %s AND `Auth`.`password` = %s"
            cursor.execute(sql, (username, password))
            result = cursor.fetchone()
            return result
    except Exception as e:
        print("Wrong", e)
    finally:
        connection.close()
def getuserinfo(id):  # Auth:User_id , User:id TODO need to warp in to the login activity
    """Return {'user_name': ..., 'bio': ...} for the given user id, or None
    if the user does not exist or the query fails."""
    connection = pymysql.connect(
        host=_host, port=_port, user=_sql_user,
        password=_sql_password, database=_database)
    try:
        with connection.cursor() as cursor:
            # BUG FIXES: `*` must not be backtick-quoted, pymysql only
            # understands the %s placeholder (never %d), and execute()
            # needs a real tuple — (id) is just a parenthesised value.
            sql = "SELECT * FROM `User` WHERE `User`.`id` = %s"
            cursor.execute(sql, (id,))
            results = cursor.fetchall()
            for row in results:
                # Columns: 0 = id, 1 = user_name, 2 = bio.
                user_name = row[1]
                bio = row[2]
                return {"user_name": user_name, "bio": bio}
    except Exception as e:
        print("Wrong", e)
    finally:
        connection.close()
def getcollection(id):  # get userid return user activity
    """Return the first collected activity_id for the given user, or None."""
    connection = pymysql.connect(
        host=_host, port=_port, user=_sql_user,
        password=_sql_password, database=_database)
    try:
        with connection.cursor() as cursor:
            # BUG FIX: use the %s placeholder (pymysql has no %d) and pass
            # the parameter as a one-element tuple.
            sql = "SELECT Collection.activity_id FROM Collection WHERE Collection.user_id = %s"
            cursor.execute(sql, (id,))
            # Only the first collected activity is returned.
            result = cursor.fetchone()
            return result
    except Exception as e:
        print("Wrong", e)
    finally:
        connection.close()
#! get act name ,introducation
# TODO add restor function
# def restoreuser(useremail):
# db = pymysql.connect(_host, _port, _sql_user, _sql_password, _sql_password)
# cursor = db.cursor()
# # TODO add sql line in here
# sql = ""
# # End
# cursor.execute(sql)
# db.close()
|
#encoding:utf-8
import urllib
import urllib2
import re
import thread
import time
URL = 'http://www.qiushibaike.com/hot/page/'
class Qsbk:
    """Python 2 console scraper for qiushibaike.com "hot" pages.

    Prefetches joke pages and prints one story per <Enter> press;
    typing Q or q quits.
    """
    def __init__(self):
        # pageIndex: next page number to fetch; stories: buffered pages.
        self.pageIndex = 1
        self.user_agent = 'Mozilla/4.0 (compatible; MSIE 5.5; Windows NT)'
        self.headers = { 'User-Agent' : self.user_agent }
        self.stories = []
        self.enable = False
    # Fetch the raw HTML of the page with the given page number.
    def getPage(self, pageIndex):
        try:
            url = URL + str(pageIndex)
            request = urllib2.Request(url, headers = self.headers)
            response = urllib2.urlopen(request)
            # Decode the response body as UTF-8.
            pageCode = response.read().decode('utf-8')
            return pageCode
        except urllib2.URLError as e:
            if hasattr(e, "reason"):
                print u'连接糗事百科失败,错误原因:', e.reason
                return None
    # Parse one page into [author, text, vote count, comment count] items.
    def getPageItems(self, pageIndex):
        pageCode = self.getPage(pageIndex)
        if not pageCode:
            print "页面加载失败!"
            return None
        pattern = re.compile('<div.*?author clearfix">.*?<h2>(.*?)</h2>.*?content">.*?span>(.*?)</span>(.*?)number">(.*?)</i>.*?number">(.*?)</i>',re.S)
        items = re.findall(pattern, pageCode)
        pageStories = []
        for item in items:
            # Skip stories that embed an image (this is a text-only reader).
            haveImg = re.search("img", item[2])
            if not haveImg:
                replaceBr = re.compile("<br/>")
                text = re.sub(replaceBr, "\n", item[1])
                # item[0] = author, item[1] = body, item[3] = votes, item[4] = comments
                pageStories.append([item[0].strip(), text.strip(), item[3].strip(), item[4].strip()])
        return pageStories
    # Load and parse the next page, appending it to the buffer.
    def loadPage(self):
        if self.enable == True:
            # Keep up to two pages buffered ahead of the reader.
            if len(self.stories) < 2:
                pageStories = self.getPageItems(self.pageIndex)
                if pageStories:
                    self.stories.append(pageStories)
                    self.pageIndex += 1
    # Print one buffered page, one story per <Enter>; Q/q stops the reader.
    def printPage(self, pageStories, page):
        for story in pageStories:
            # Wait for user input before showing the next story.
            input = raw_input()
            # Each keypress also tops up the page buffer.
            self.loadPage()
            # Q or q exits.
            if input == "Q" or input == "q":
                self.enable = False
                return
            print u'第%d页\t发布人:%s\t点赞数:%s\t评论数:%s\n%s' %(page, story[0], story[2], story[3], story[1])
    def start(self):
        print u'正在读取糗事百科,按回车查看新内容,输入Q或q退出'
        # Enable the main loop.
        self.enable = True
        # Preload the first page.
        self.loadPage()
        currentPage = 0
        while self.enable:
            if len(self.stories) > 0:
                # Consume the oldest buffered page.
                pageStories = self.stories[0]
                currentPage += 1
                del self.stories[0]
                self.printPage(pageStories, currentPage)
# Run the interactive reader.
spider = Qsbk()
spider.start()
# Generated by Django 2.2.4 on 2019-12-01 16:54
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: redefines Career.time as a CharField (max 30 chars,
    verbose name 'Horário') with default '10:00/11:00'."""
    dependencies = [
        ('school', '0010_auto_20191201_1353'),
    ]
    operations = [
        migrations.AlterField(
            model_name='career',
            name='time',
            field=models.CharField(default='10:00/11:00', max_length=30, verbose_name='Horário'),
        ),
    ]
|
# -*- coding: utf-8 -*-
# 同餘式求解
# 任意模的同餘式的求解
from .NTLChineseRemainderTheorem import CHNRemainderTheorem
from .NTLCongruenceSimplification import congruenceSimplification
from .NTLExceptions import SolutionError
from .NTLGreatestCommonDivisor import greatestCommonDivisor
from .NTLPrimeFactorisation import primeFactorisation
from .NTLRepetiveSquareModulo import repetiveSquareModulo
from .NTLTrivialDivision import trivialDivision
from .NTLUtilities import jsrange
from .NTLValidations import int_check, list_check, pos_check
__all__ = ['polynomialCongruence',
'prmMCS', 'cpsMCS',
'prmMCSLite', 'prmMCSPro',
'makePolynomial', 'polyDerivative']
nickname = 'congsolve'
'''Usage sample:
# 1st set:
cgcExp = [20140515, 201405, 2014, 8, 6, 3, 1, 0]
cgcCoe = [20140515, 201495, 2014, 8, 1, 4, 1, 1]
modulo = 343
# 2nd set:
cgcExp = [2, 0]
cgcCoe = [1, -46]
modulo = 105
# 3rd set:
cgcExp = [2, 0]
cgcCoe = [1, -1219]
modulo = 2310
for ptr in range(len(cgcExp)):
print('%dx^%d' % (cgcCoe[ptr], cgcExp[ptr]), end=' ')
if ptr < len(cgcExp) - 1:
print('+', end=' ')
print('≡ 0 (mod %d)' % modulo)
remainder = congsolve(cgcExp, cgcCoe, modulo)
print('The solution of the above polynomial congruence is\n\tx ≡', end=' ')
for rst in remainder:
print('%d' % rst, end=' ')
print('(mod %d)' % modulo)
'''
def polynomialCongruence(cgcExp, cgcCoe, modulo):
    """Solve f(x) ≡ 0 (mod modulo) where f is given by the parallel
    exponent/coefficient lists; returns the sorted list of solutions.

    Raises SolutionError when no integral solution exists.
    """
    list_check(cgcExp, cgcCoe)
    int_check(modulo)
    pos_check(modulo)
    # Prime moduli are solved directly; composite moduli are factored and
    # the partial solutions recombined via the Chinese Remainder Theorem.
    solver = prmMCS if trivialDivision(modulo) else cpsMCS
    remainder = solver(cgcExp, cgcCoe, modulo)
    if not remainder:
        raise SolutionError('The polynomial congruence has no integral solution.')
    return sorted(remainder)
# 素數模的同餘式求解
# Congruence solver for prime moduli.
def prmMCS(cgcExp, cgcCoe, modulo):
    """Solve the congruence for a prime modulus by exhaustive search over
    all residue classes."""
    # Reduce the polynomial modulo the prime before evaluating.
    rmdExp, rmdCoe = congruenceSimplification(cgcExp, cgcCoe, modulo)
    polyCgc = makePolynomial(rmdExp, rmdCoe)
    evaluate = lambda x: eval(polyCgc)  # evaluates the generated expression in x
    # Keep every residue whose polynomial value vanishes mod p.
    return [x for x in jsrange(modulo) if evaluate(x) % modulo == 0]
# 生成多項式
# Build the polynomial expression string.
def makePolynomial(expList, coeList):
    """Render parallel exponent/coefficient lists as a Python expression in
    ``x``, e.g. '1*x**2 + -46*x**0' (consumed via eval by the solvers)."""
    terms = (str(coeList[ptr]) + '*x**' + str(expList[ptr])
             for ptr in range(len(expList)))
    return ' + '.join(terms)
# 合數模的同餘式求解
# Congruence solver for composite moduli.
def cpsMCS(cgcExp, cgcCoe, modulo):
    """Solve the congruence for a composite modulus via its prime-power
    factorisation and the Chinese Remainder Theorem."""
    # Factor the modulus; p = prime factors, q = matching exponents.
    (p, q) = primeFactorisation(modulo, wrap=True)
    if len(p) == 1:  # single prime power: lift the prime solution directly
        tmpMod = p[0]
        tmpExp = q[0]
        remainder = prmMCSLite(cgcExp, cgcCoe, tmpMod, tmpExp)
    else:  # several prime powers: solve each, then recombine with the CRT
        tmpRmd = []
        tmpMod = []
        for ptr in jsrange(len(p)):
            tmpModVar = p[ptr]
            tmpExpVar = q[ptr]
            tmpMod.append(tmpModVar ** tmpExpVar)
            tmpRmd.append(prmMCSLite(cgcExp, cgcCoe, tmpModVar, tmpExpVar))
        # Combine per-prime-power solutions into solutions mod `modulo`.
        remainder = CHNRemainderTheorem(*zip(tmpMod, tmpRmd))
    return remainder
# 單素數的次冪模同餘式求解
# Congruence solver for a single prime-power modulus.
def prmMCSLite(cgcExp, cgcCoe, mod, exp):
    """Solve the congruence modulo mod**exp for prime ``mod``."""
    # Solutions modulo the prime itself.
    baseSolutions = prmMCS(cgcExp, cgcCoe, mod)
    if exp == 1:
        return baseSolutions
    # Hensel-style lifting from mod p up to mod p**exp.
    return prmMCSPro(cgcExp, cgcCoe, baseSolutions, mod, exp)
# 高次同餘式的提升
# Lift a solution mod p to a solution mod p**exp (Hensel lifting).
def prmMCSPro(cgcExp, cgcCoe, rmd, mod, exp):
    """Lift a root of f mod p to a root mod p**exp.

    NOTE(review): several steps look fragile — ``1 / polyDrvMod`` and
    ``fx(x) / (mod**ctr)`` are *true* division under Python 3 and yield
    floats where a modular inverse / exact quotient seems intended; and if
    no root satisfies gcd(f'(x1), p) == 1 the loop below uses ``x`` and
    ``polyDrvRcp`` unbound.  Confirm against the library's test suite.
    """
    # Derivative f' of the original polynomial.
    (drvExp, drvCoe) = polyDerivative(cgcExp, cgcCoe)
    polyDrv = makePolynomial(drvExp, drvCoe)
    drv = lambda x: eval(polyDrv)
    for tmpRmd in rmd:
        # Find an x1 with (f'(x1), p) = 1 so the lift is well-defined.
        if greatestCommonDivisor(drv(tmpRmd), mod) == 1:
            polyDrvMod = 0
            # Evaluate f'(x1) (mod p) term-by-term via repeated squaring.
            for ptr in jsrange(len(drvExp)):
                polyDrvMod += repetiveSquareModulo(drvCoe[ptr]*tmpRmd, drvExp[ptr], mod)
            x = tmpRmd
            polyDrvMod = polyDrvMod % mod - mod
            polyDrvRcp = 1 / polyDrvMod
            break
    for ctr in jsrange(0, exp):
        poly = makePolynomial(cgcExp, cgcCoe)
        fx = lambda x: eval(poly)
        # t_(i-1) ≡ (-f(x_i)/p^i) * (f'(x1) (mod p)) (mod p)
        t = ((-1 * fx(x) / (mod**ctr)) * polyDrvRcp) % mod
        # x_i ≡ x_(i-1) + t_(i-1) * p^(i-1) (mod p^i)
        x += (t * (mod**ctr)) % (mod**(ctr+1))
    return x  # remainder = x
# 求取多項式的導式
# Differentiate a polynomial given as parallel exponent/coefficient lists.
def polyDerivative(cgcExp, cgcCoe):
    """Return (exponents, coefficients) of the derivative; constant terms
    (exponent 0) vanish and are dropped."""
    drvExp = []
    drvCoe = []
    for exponent, coefficient in zip(cgcExp, cgcCoe):
        # d/dx (c * x**e) = (c*e) * x**(e-1)
        if exponent != 0:
            drvExp.append(exponent - 1)
            drvCoe.append(coefficient * exponent)
    return drvExp, drvCoe
|
from sqlalchemy import create_engine, Integer, ForeignKey
from sqlalchemy import Column, String, Boolean, DateTime
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker, relationship
from sqlalchemy.sql import func
class IssuesDb():
    """Small data-access layer for issues stored via SQLAlchemy.

    Connection settings are given at construction time; sessions are handed
    out wrapped in :class:`WrappedSession`.  The declarative ``base`` must
    remain a class attribute because the Issue/IssueFileChange models below
    subclass ``IssuesDb.base`` at import time.
    """
    base = declarative_base()

    def __init__(self, db_type, user_name, password, host, port, db_name):
        self.db_type = db_type
        self.user_name = user_name
        self.password = password
        self.host = host
        self.port = port
        self.db_name = db_name
        self.initialize()

    def initialize(self):
        """Create the engine and session factory from the stored settings."""
        db_string = "{0}://{1}:{2}@{3}:{4}/{5}".format(self.db_type,
                                                       self.user_name,
                                                       self.password,
                                                       self.host,
                                                       self.port,
                                                       self.db_name)
        self.db = create_engine(db_string)
        self.Session = sessionmaker(self.db)

    def get_session(self):
        """Return a new session wrapped for attribute delegation."""
        session = self.Session()
        return WrappedSession(session)

    def create_defined_tables(self):
        """Create all tables declared on the shared declarative base."""
        self.base.metadata.create_all(self.db)

    def add_issue(self, session, repository, issue_number, link, status, creation_time, close_time, labels):
        """Stage a new Issue row on the session (caller commits)."""
        issue = Issue(repository=repository,
                      issue_number=issue_number,
                      link=link,
                      status=status,
                      creation_time=creation_time,
                      close_time=close_time,
                      labels=labels)
        session.add(issue)

    def get_all_issues(self, session):
        """Return every Issue row."""
        return session.query(Issue).all()

    def get_first_issue(self, session):
        """Return the first Issue row, or None."""
        return session.query(Issue).first()

    def delete_issue(self, session, id):
        """Delete the Issue with the given primary key; returns row count."""
        return session.query(Issue).filter(Issue.id == id).delete()

    def add_issue_file_change(self, session, issue_number, filename, good_code, bad_code):
        """Stage a new IssueFileChange row on the session.

        BUG FIX: IssueFileChange has no ``issue_number`` column, so passing
        it as a constructor keyword raised TypeError; the value now goes
        into the ``issue_id`` foreign-key column.
        NOTE(review): the parameter is named *issue_number* — confirm
        callers actually pass the Issue.id primary key the FK expects.
        """
        issue_file_change = IssueFileChange(issue_id=issue_number,
                                            filename=filename,
                                            good_code=good_code,
                                            bad_code=bad_code)
        session.add(issue_file_change)

    def get_all_issue_file_changes(self, session):
        """Return every IssueFileChange row."""
        return session.query(IssueFileChange).all()

    def get_first_issue_file_change(self, session):
        """Return the first IssueFileChange row, or None."""
        return session.query(IssueFileChange).first()

    def delete_issue_file_change(self, session, id):
        """Delete the IssueFileChange with the given primary key."""
        return session.query(IssueFileChange).filter(IssueFileChange.id == id).delete()

    def commit_session(self, session):
        """Commit pending work on the given session."""
        session.commit()
class WrappedSession(object):
    """Thin proxy that forwards attribute access to a wrapped session."""

    def __init__(self, session):
        self.session = session

    def __getattr__(self, name):
        # Anything not defined on the wrapper falls through to the session.
        return getattr(self.session, name)

    def __enter__(self):
        return self.session

    def __exit__(self, exc_type, exc_value, traceback):
        # NOTE(review): rolls back unconditionally on exit, even without an
        # exception — callers must commit explicitly before leaving.
        self.session.rollback()
class Issue(IssuesDb.base):
    """ORM model for a tracked repository issue."""
    __tablename__ = 'issues'
    # Surrogate primary key.
    id = Column(Integer, primary_key=True, unique=True, nullable=False)
    repository = Column(String)
    # Issue number within its repository (not globally unique).
    issue_number = Column(Integer)
    link = Column(String)
    status = Column(String)
    # Row insertion timestamp, set by the database.
    insertion_time = Column(DateTime(timezone=True), server_default=func.now())
    creation_time = Column(DateTime(timezone=True))
    close_time = Column(DateTime(timezone=True))
    labels = Column(String)
class IssueFileChange(IssuesDb.base):
    """ORM model for one file's before/after code attached to an issue."""
    __tablename__ = 'issue_file_changes'
    # Surrogate primary key.
    id = Column(Integer, primary_key=True, unique=True, nullable=False)
    # Foreign key to Issue.id; `issue` below gives object-level access and
    # a `file_changes` backref on Issue.
    issue_id = Column(Integer, ForeignKey(Issue.id))
    filename = Column(String)
    good_code = Column(String)
    bad_code = Column(String)
    # Row insertion timestamp, set by the database.
    insertion_time = Column(DateTime(timezone=True), server_default=func.now())
    issue = relationship(Issue, backref="file_changes")
# Function: print_book_title — no parameters, no return value.
# Prints the Python textbook's title.
def print_book_title() :
    print('파이썬 정복')
# Function: print_book_publisher — no parameters, no return value.
# Prints the textbook's publisher name.
def print_book_publisher() :
    print('한빛미디어')
# Call print_book_title() three times ...
print_book_title()
print_book_title()
print_book_title()
'''
# case2
for _ in range(3) :
    print_book_title()
'''
# ... and print_book_publisher() five times.
print_book_publisher()
print_book_publisher()
print_book_publisher()
print_book_publisher()
print_book_publisher()
'''
# case2
for _ in range(5) :
    print_book_publisher()
'''
import os
print gv.InstrumentMgr
##print [h.name() for h in gv.InstrumentMgr._instrumentHandles]
##
print os.
print os.path.isfile('dummy.py')
print os.path.exists('dummy.py')
##
print gv.InstrumentMgr.loadInstrument(name=None, server=None, moduleFileOrDir='dummy.py', args=[], kwargs={}, new=False, forceReload=False) |
# Tasks
# Clean the data by replacing any missing values and removing duplicate rows.
# In this dataset, each customer is identified by a unique customer ID.
# The most recent version of a duplicated record should be retained.
# Explore the data by calculating summary and descriptive statistics for
# the features in the dataset, calculating correlations between features,
# and creating data visualizations to determine apparent relationships
# in the data.
# Based on your analysis of the customer data after removing all duplicate
# customer records, answer the questions below.
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
import numpy.random as nr
import math
import seaborn as sns
# Import data for customer information
aw_custs = pd.read_csv('AdvWorksCusts.csv')
print( 'Load AdvWorksCusts.csv')
print( aw_custs.head(20) )
print( aw_custs.columns )
print( aw_custs.dtypes)
#print( (aw_custs.astype(np.object) == np.nan).any() )
# Count missing values per object column: pandas stores missing strings as
# float NaN, so counting float-typed entries counts the NaNs.
for col in aw_custs.columns:
    if aw_custs[col].dtype == object:
        count = 0
        count = [count + 1 for x in aw_custs[col] if type(x) == float]
        print(col + ' ' + str(sum(count)))
#for i in range(20):
#    print(aw_custs['MiddleName'][i], type(aw_custs['MiddleName'][i]))
# Drop column with too many missing values
aw_custs.drop(['Title', 'MiddleName', 'Suffix', 'AddressLine2'], axis = 1, inplace = True)
print(aw_custs.columns)
print(aw_custs.shape)
print(aw_custs['CustomerID'].unique().shape)
# Keep the most recent record for each duplicated CustomerID.
aw_custs.drop_duplicates(subset = 'CustomerID', keep = 'last', inplace = True)
print(aw_custs.shape)
aw_custs.to_csv('AdvWorksCusts_preped.csv', index=False)
aw_custs = pd.read_csv('AdvWorksCusts_preped.csv')
print( aw_custs.columns )
#print( aw_custs.groupby('Occupation').median() )
## new column 'age' by data collected date 1st January 1998 - 'BirthDate'
# NOTE(review): astype('<m8[Y]') (timedelta -> whole years) is deprecated
# in newer pandas/numpy — confirm it still works with the pinned versions.
aw_custs['Age'] = (pd.to_datetime('1998-01-01') - pd.to_datetime(aw_custs['BirthDate'], errors='coerce')).astype('<m8[Y]')
## AgeGroup <25 , 25-45, 45-55, >55
aw_custs['AgeGroup'] = pd.cut(aw_custs['Age'], bins = [0,25,45,55,1000], \
    labels = ['<25','25-45','45-55','>55'], right=False)
# Import data from AveMonthSpend
aw_ams = pd.read_csv('AW_AveMonthSpend.csv')
print('Load AW_AveMonthSpend.csv')
print( aw_ams.columns )
print( aw_ams.dtypes )
print( aw_ams.shape )
print( aw_ams['CustomerID'].unique().shape )
aw_ams.drop_duplicates(subset = 'CustomerID', keep = 'last', inplace = True)
print( aw_ams.shape )
print( aw_ams['CustomerID'].unique().shape )
aw_ams.to_csv('AW_AveMonthSpend_Preped.csv', index=False)
#
aw_ams = pd.read_csv('AW_AveMonthSpend_Preped.csv')
print(aw_ams.columns)
# Import data from AW_BikeBuyer.csv
aw_bb = pd.read_csv('AW_BikeBuyer.csv')
print('Load AW_BikeBuyer.csv')
print( aw_bb.columns )
print( aw_bb.dtypes )
print( aw_bb.shape )
print( aw_bb.CustomerID.unique().shape )
aw_bb.drop_duplicates(subset = 'CustomerID', keep = 'last', inplace = True)
print( aw_bb.shape )
print( aw_bb.CustomerID.unique().shape )
aw_bb.to_csv('AW_BikeBuyer_Preped.csv', index=False)
aw_bb = pd.read_csv('AW_BikeBuyer_Preped.csv')
print(aw_bb.describe())
aw_bb_counts = aw_bb['BikeBuyer'].value_counts()
print(aw_bb_counts)
# Join aw_custs aw_ams on CustomerID
aw_join=aw_custs.join(aw_ams.set_index('CustomerID'), on='CustomerID', how='inner')
print(aw_join.shape)
print(aw_join.columns)
print(aw_join.head(2))
# Summary statistics for the analysis questions.
print( aw_join.groupby(['Gender', 'AgeGroup']).mean() )
print( aw_join.groupby(['Gender', 'AgeGroup']).sum() )
print( aw_join.groupby('MaritalStatus').median() )
print( aw_join['NumberCarsOwned'].unique())
# Bucket car ownership: none, one or two, three or more.
aw_join['NCarsGroup'] = pd.cut(aw_join['NumberCarsOwned'], \
    bins = [-1,0,2,10], labels = ['No','1-2','>=3'], right=True)
print( aw_join[['NumberCarsOwned','NCarsGroup']].head())
print( aw_join.groupby('NCarsGroup').median() )
print( aw_join[['Gender','AveMonthSpend']].groupby('Gender').describe() )
# Bucket children at home: none vs. at least one.
aw_join['NChildrenAtHomeGroup'] = pd.cut(aw_join['NumberChildrenAtHome'], \
    bins = [-1,0,100], labels = ['No','>=1'], right=True)
print( aw_join[['NChildrenAtHomeGroup','AveMonthSpend']].groupby('NChildrenAtHomeGroup').describe() )
aw_join=aw_join.join(aw_bb.set_index('CustomerID'), on='CustomerID', how='inner')
print( aw_join[['BikeBuyer','YearlyIncome']].groupby('BikeBuyer').median() )
print( aw_join[['BikeBuyer','NumberCarsOwned']].groupby('BikeBuyer').median() )
print( aw_join[['BikeBuyer','Occupation']].groupby('Occupation').count() )
print( aw_join[['BikeBuyer','Gender']].groupby('Gender').mean() )
print( aw_join[['BikeBuyer','MaritalStatus']].groupby('MaritalStatus').mean() )
print( aw_join[['CustomerID','Gender']].groupby('Gender').count() )
print( aw_join[['CustomerID','MaritalStatus']].groupby('MaritalStatus').count() )
aw_join.to_csv('AW_join.csv', index=False)
'''
def hist_plot(vals, lab):
    ## Distribution plot of Bike Buyer
    sns.distplot(vals)
    plt.title('Histogram of ' + lab)
    plt.xlabel('Value')
    plt.ylabel('Density')
#
hist_plot(aw_bb['
'''
|
class Process:
    """Base description of a WPS process.

    Inputs and outputs are registered per instance via :meth:`add_input` /
    :meth:`add_output` and looked up by identifier.
    """
    # Descriptive metadata; set per instance in __init__.
    identifier = None
    title = None
    abstract = None
    language = None

    def __init__(self, identifier,
                 title = None,
                 abstract = None,
                 inputs = None,
                 outputs = None):
        self.identifier = identifier
        self.title = title
        self.abstract = abstract
        # BUG FIX: these registries used to be mutable *class* attributes,
        # so every Process instance shared one dict and an input registered
        # on one process leaked into all others.  They are now per-instance.
        self.inputs = {}
        self.outputs = {}
        self.__inputs_defs__ = {}
        self.__outputs_defs__ = {}
        # NOTE(review): the ``inputs``/``outputs`` constructor arguments
        # were accepted but never used in the original; that behavior is
        # preserved here — confirm whether they should be registered.

    def add_input(self, inpt):
        """Register new input type.
        inpt - pywps.process.Input
        """
        self.__inputs_defs__[inpt.identifier] = inpt

    def add_output(self, output):
        """Register new output type."""
        self.__outputs_defs__[output.identifier] = output

    def get_input_type(self, identifier):
        """Return the type ("literal", "bbox", "complex") of the input
        registered under ``identifier``, or None if unknown."""
        if identifier in self.__inputs_defs__:
            return self.__inputs_defs__[identifier].type
        return None
|
from onegov.core.orm.types import UTCDateTime
from sedate import to_timezone
from sqlalchemy import Column
from sqlalchemy import String
from sqlalchemy import Text
from sqlalchemy.dialects.postgresql import HSTORE
from sqlalchemy.ext.mutable import MutableDict
class OccurrenceMixin:
    """ Contains all attributes events and ocurrences share.
    The ``start`` and ``end`` date and times are stored in UTC - that is, they
    are stored internally without a timezone and are converted to UTC when
    getting or setting, see :class:`UTCDateTime`. Use the properties
    ``localized_start`` and ``localized_end`` to get the localized version of
    the date and times.
    """
    #: Title of the event
    title = Column(Text, nullable=False)
    #: A nice id for the url, readable by humans
    name = Column(Text)
    #: Description of the location of the event
    location = Column(Text, nullable=True)
    #: Tags/Categories of the event, stored as HSTORE keys (values unused)
    _tags = Column(  # type:ignore[call-overload]
        MutableDict.as_mutable(HSTORE), nullable=True, name='tags')
    @property
    def tags(self):
        """ Tags/Categories of the event. """
        return list(self._tags.keys()) if self._tags else []
    @tags.setter
    def tags(self, value):
        # Store each stripped tag as an HSTORE key with an empty value.
        self._tags = dict(((key.strip(), '') for key in value))
    #: Timezone of the event (IANA name used by ``to_timezone``)
    timezone = Column(String, nullable=False)
    #: Start date and time of the event (of the first event if recurring)
    start = Column(UTCDateTime, nullable=False)
    @property
    def localized_start(self):
        """ The localized version of the start date/time. """
        return to_timezone(self.start, self.timezone)
    #: End date and time of the event (of the first event if recurring)
    end = Column(UTCDateTime, nullable=False)
    @property
    def localized_end(self):
        """ The localized version of the end date/time. """
        return to_timezone(self.end, self.timezone)
|
# 19 May 2021
# Interactive demo of datetime parsing and timedelta arithmetic.
from datetime import datetime
from datetime import timedelta
# user input request format in 20 Dec 1978
date_input = input("Please enter you DOB in the format DD Mmm YYYY: ie. 20 Dec 1978")
# cast to a datetime object (raises ValueError on a malformed date)
date_object = datetime.strptime(date_input, '%d %b %Y')
# output some confirmation
print ("The year entered is ", date_object.year, "\n")
# do a calculation: age as a timedelta
my_age = datetime.today() - date_object
# show the result in different formats
print ("My exact age is ", my_age, "\n")
print ("My exact age just in days is ", my_age.days, "days\n")
# NOTE: /365 ignores leap years, so this is an approximation.
print ("My exact age just in years is ", int(my_age.days/365), "years\n")
# add 10 days to my current age
print("In 10 days time my age will be ", datetime.today() + timedelta(days=10), ".\n")
# add my current age to today's date: the date on which age has doubled
print("I will be double my age in ", datetime.today()+ my_age, ".")
|
# Generated by Django 3.0.2 on 2020-03-28 14:38
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated: makes Order's bike/car/customer foreign keys nullable
    (CASCADE on delete) and constrains Order.status to a fixed choice set
    defaulting to 'Pending'."""
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('account', '0006_auto_20200324_1551'),
    ]
    operations = [
        migrations.AlterField(
            model_name='order',
            name='bike',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='account.Bikes'),
        ),
        migrations.AlterField(
            model_name='order',
            name='car',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='account.Cars'),
        ),
        migrations.AlterField(
            model_name='order',
            name='customer',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
        ),
        migrations.AlterField(
            model_name='order',
            name='status',
            field=models.CharField(choices=[('Confirmed', 'Confirmed'), ('Delivered', 'Delivered'), ('Returned', 'Returned'), ('Canceled', 'Canceled'), ('Pending', 'Pending')], default='Pending', max_length=255),
        ),
    ]
|
import numpy as np
from sklearn.datasets import load_iris
from sklearn.preprocessing import MinMaxScaler, StandardScaler
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
from sklearn.svm import LinearSVC, SVC
from sklearn.neighbors import KNeighborsClassifier
from sklearn.linear_model import LogisticRegression # 분류
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
# 1. Data: the iris dataset, split 80/20 into train/test.
dataset = load_iris()
x = dataset.data
y = dataset.target
print(x.shape) # (150, 4)
print(y.shape) # (150,)
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.2)
# Sweep two scalers x six classifiers.
# NOTE(review): x_train/x_test are overwritten with the scaled values, so
# on the second pass StandardScaler is fitted on data that is already
# MinMax-scaled — probably unintended; confirm against the transcript below.
for j in [MinMaxScaler, StandardScaler]:
    scaler = j()
    scaler.fit(x_train)
    x_train = scaler.transform(x_train)
    x_test = scaler.transform(x_test)
    print(j.__name__)
    # 2. Models
    for i in [LinearSVC, SVC, KNeighborsClassifier, LogisticRegression, DecisionTreeClassifier, RandomForestClassifier]:
        print()
        model = i()
        # Train
        model.fit(x_train,y_train)
        y_pred = model.predict(x_test)
        # print('y_test :', y_test)
        # print('y_pred :', y_pred)
        result = model.score(x_test,y_test)
        print(i.__name__ + '\'s score(acc) :', result)
        acc = accuracy_score(y_test, y_pred)
        print(i.__name__ + '\'s accuracy_score :', acc)
        # NOTE(review): this breaks out of the *model* loop after the first
        # model whenever j is StandardScaler, yet the recorded transcript
        # below shows all six models — the break placement looks unintended.
        if j == StandardScaler:
            break
    print('=================================================================')
'''
MinMaxScaler
LinearSVC's score(acc) : 0.9666666666666667
LinearSVC's accuracy_score : 0.9666666666666667
SVC's score(acc) : 1.0
SVC's accuracy_score : 1.0
KNeighborsClassifier's score(acc) : 1.0
KNeighborsClassifier's accuracy_score : 1.0
LogisticRegression's score(acc) : 1.0
LogisticRegression's accuracy_score : 1.0
DecisionTreeClassifier's score(acc) : 1.0
DecisionTreeClassifier's accuracy_score : 1.0
RandomForestClassifier's score(acc) : 1.0
RandomForestClassifier's accuracy_score : 1.0
=================================================================
StandardScaler
LinearSVC's score(acc) : 1.0
LinearSVC's accuracy_score : 1.0
SVC's score(acc) : 1.0
SVC's accuracy_score : 1.0
KNeighborsClassifier's score(acc) : 1.0
KNeighborsClassifier's accuracy_score : 1.0
LogisticRegression's score(acc) : 1.0
LogisticRegression's accuracy_score : 1.0
DecisionTreeClassifier's score(acc) : 0.9666666666666667
DecisionTreeClassifier's accuracy_score : 0.9666666666666667
RandomForestClassifier's score(acc) : 1.0
RandomForestClassifier's accuracy_score : 1.0
'''
'''
Tensorflow's acc : 1.0
'''
#this file should come after the bgprocess to show the questions in the time defined in trigger from the tempdata.csv temporary file
#it should not open more than once for the same question
#it should open a prompt ask the question, retrieve info and add it to the major diary.txt file.
#And then clean the datatemp file
import os
import datetime
import time
# Global holding the question text read from tempdata.csv.
strv=""
# debug value: hard-coded sample question ("almoco" = Portuguese for lunch)
strv= "almoco"
def mainbody():
    """Interactive menu: show the pending question from tempdata.csv, let
    the user answer it, and append the answer to diary.txt.

    NOTE(review): this nesting is fragile — if the first menu input is not
    numeric, the bare ``except`` retries via recursion but the outer call
    frame then reaches z1() with ``ma`` unbound (NameError); same pattern
    for ``afs``.  ``b`` is computed but never used.  Flagging rather than
    restructuring.
    """
    # print("test")
    # Read the last line of tempdata.csv into the global question string.
    def r():
        with open("tempdata.csv", "r") as data:
            # f.write(str(print(a, localq[a])))
            for i in data:
                global strv
                strv = i
    r()
    # remove file and recreate it, clearing the data for the next bgprocess
    # strv variable is already stored in warn.py
    os.remove("tempdata.csv")
    open('tempdata.csv', 'w+')
    print("")
    print("DIARYHELPER MENU")
    # print(" QUESTION IS:", strv)
    print("[1] ANSWER QUESTION:")
    print("[2] EXIT WITHOUT ANSWERING")
    # a() reads and dispatches the menu choice
    def a():
        try:
            # ma = menu answer
            ma = input(":")
            ma = int(ma)
            b = ma - 1
        except:
            print("numbers only")
            a()
        # z1() handles the chosen menu entry
        def z1():
            if ma == 1:
                print(" QUESTION IS:", strv)
                answer = input(": ")
                print(answer, "' ok? , [1] yes, [0] No")
                try:
                    # afs = confirmation answer (1 = save, 0 = retry)
                    afs = input(": ")
                    afs = int(afs)
                    b = afs - 1
                except:
                    print("numbers only")
                    z1()
                if afs == 1:
                    print("#1")
                    # Append "date: question" and the answer to the diary.
                    now = datetime.datetime.now()
                    snow = now.strftime("%x")
                    f = open("diary.txt","a") #opens file with name of "test.txt"
                    f.write(str(snow)+":"+" "+"question: "+strv+": "+'\n')
                    f.write(str(answer)+'\n')
                    f.close()
                elif afs == 0:
                    print("#0")
                    print("")
                    print("")
                    # Restart the whole menu on rejection.
                    mainbody()
                    print("##F")
            elif ma == 2:
                print("#2")
                pass
            else:
                print("SHOULD BE 1 OR 2")
                a()
        z1()
    a()
if __name__ == '__main__':
    mainbody()
|
import numpy as np
from cs285.infrastructure import pytorch_util as ptu
from .base_policy import BasePolicy
from torch import nn
import torch
import pickle
def create_linear_layer(W, b) -> nn.Linear:
    """Build an nn.Linear initialised from pickled expert weight/bias arrays.

    NOTE(review): the unpacking labels W as (out_features, in_features),
    yet the weight is assigned W.T — one of the two conventions is
    mislabelled; confirm the layout of the pickled expert weights.
    """
    out_features, in_features = W.shape
    linear_layer = nn.Linear(
        in_features,
        out_features,
    )
    linear_layer.weight.data = ptu.from_numpy(W.T)
    # b is stored with a leading singleton dimension; take row 0.
    linear_layer.bias.data = ptu.from_numpy(b[0])
    return linear_layer
def read_layer(l):
    """Extract (W, b) as float32 arrays from a pickled AffineLayer dict."""
    # The pickle stores exactly one layer type with exactly W and b inside.
    assert list(l.keys()) == ['AffineLayer']
    affine = l['AffineLayer']
    assert sorted(affine.keys()) == ['W', 'b']
    return affine['W'].astype(np.float32), affine['b'].astype(np.float32)
class LoadedGaussianPolicy(BasePolicy, nn.Module):
    """Expert policy reconstructed from a pickled Gaussian-policy snapshot.

    Rebuilds the MLP (observation normalisation, hidden affine layers with
    a fixed nonlinearity, and an output layer) from the pickle; it is
    query-only and cannot be trained.
    """
    def __init__(self, filename, **kwargs):
        super().__init__(**kwargs)
        # The pickle holds the nonlinearity name, the policy type and all
        # layer parameters.
        with open(filename, 'rb') as f:
            data = pickle.loads(f.read())
        self.nonlin_type = data['nonlin_type']
        if self.nonlin_type == 'lrelu':
            self.non_lin = nn.LeakyReLU(0.01)
        elif self.nonlin_type == 'tanh':
            self.non_lin = nn.Tanh()
        else:
            raise NotImplementedError()
        # Exactly one non-'nonlin_type' key names the policy type.
        policy_type = [k for k in data.keys() if k != 'nonlin_type'][0]
        assert policy_type == 'GaussianPolicy', (
            'Policy type {} not supported'.format(policy_type)
        )
        self.policy_params = data[policy_type]
        assert set(self.policy_params.keys()) == {
            'logstdevs_1_Da', 'hidden', 'obsnorm', 'out'
        }
        # Build the policy. First, observation normalization.
        assert list(self.policy_params['obsnorm'].keys()) == ['Standardizer']
        obsnorm_mean = self.policy_params['obsnorm']['Standardizer']['mean_1_D']
        obsnorm_meansq = self.policy_params['obsnorm']['Standardizer'][
            'meansq_1_D']
        # std = sqrt(E[x^2] - E[x]^2), clamped at 0 for numerical safety.
        obsnorm_stdev = np.sqrt(
            np.maximum(0, obsnorm_meansq - np.square(obsnorm_mean)))
        print('obs', obsnorm_mean.shape, obsnorm_stdev.shape)
        self.obs_norm_mean = nn.Parameter(ptu.from_numpy(obsnorm_mean))
        self.obs_norm_std = nn.Parameter(ptu.from_numpy(obsnorm_stdev))
        self.hidden_layers = nn.ModuleList()
        # Hidden layers next, in sorted layer-name order.
        assert list(self.policy_params['hidden'].keys()) == ['FeedforwardNet']
        layer_params = self.policy_params['hidden']['FeedforwardNet']
        for layer_name in sorted(layer_params.keys()):
            l = layer_params[layer_name]
            W, b = read_layer(l)
            linear_layer = create_linear_layer(W, b)
            self.hidden_layers.append(linear_layer)
        # Output layer
        W, b = read_layer(self.policy_params['out'])
        self.output_layer = create_linear_layer(W, b)
    def forward(self, obs):
        # Normalise observations, run the hidden stack, then the output
        # layer (no nonlinearity on the output).
        normed_obs = (obs - self.obs_norm_mean) / (self.obs_norm_std + 1e-6)
        h = normed_obs
        for layer in self.hidden_layers:
            h = layer(h)
            h = self.non_lin(h)
        return self.output_layer(h)
    ##################################
    def update(self, obs_no, acs_na, adv_n=None, acs_labels_na=None):
        raise NotImplementedError("""
            This policy class simply loads in a particular type of policy and
            queries it. Do not try to train it.
        """)
    def get_action(self, obs):
        # Promote a single observation to a batch of one before querying.
        if len(obs.shape) > 1:
            observation = obs
        else:
            observation = obs[None, :]
        observation = ptu.from_numpy(observation.astype(np.float32))
        action = self(observation)
        return ptu.to_numpy(action)
    def save(self, filepath):
        # Persist only the (fixed) parameters; the pickle remains the source.
        torch.save(self.state_dict(), filepath)
|
from onegov.core.templates import render_macro
from onegov.landsgemeinde.layouts import DefaultLayout
from onegov.landsgemeinde.models import AgendaItem
from onegov.landsgemeinde.models import Assembly
from onegov.landsgemeinde.models import Votum
from re import sub
def update_ticker(request, assembly, agenda_item=None, action='refresh'):
    """ Updates the ticker.

    Sends either a 'refresh' event to reload the whole ticker (in case the
    assembly has been changed or an agenda item has been added/deleted) or
    an 'update' event with the changed content of the agenda item.

    Also sets the modified timestamp on the assembly used for the polling
    fallback.
    """
    assembly.stamp()
    request.app.pages_cache.flush()
    if action == 'update' and agenda_item:
        layout = DefaultLayout(request.app, request)
        rendered = render_macro(
            layout.macros['ticker_agenda_item'],
            request,
            {'agenda_item': agenda_item, 'layout': layout}
        )
        # Minify the markup: collapse runs of whitespace, then drop the
        # remaining spaces that touch tag boundaries.
        rendered = sub(r'\s+', ' ', rendered)
        rendered = rendered.replace('> ', '>').replace(' <', '<')
        request.app.send_websocket({
            'event': 'update',
            'assembly': assembly.date.isoformat(),
            'node': f'agenda-item-{agenda_item.number}',
            'content': rendered
        })
    elif action == 'refresh':
        request.app.send_websocket({
            'event': 'refresh',
            'assembly': assembly.date.isoformat(),
        })
def ensure_states(item):
    """ Ensure that all the states are meaningful when changing the state of
    an assembly, agenda item or votum.

    States are 'scheduled' / 'ongoing' / 'completed'. Changing one item
    propagates: earlier siblings are completed, later ones rescheduled,
    and parents are derived from their children.
    """
    def set_by_children(parent, children):
        # Parent mirrors its children when they agree, otherwise 'ongoing'.
        if all(x.state == 'scheduled' for x in children):
            parent.state = 'scheduled'
        elif all(x.state == 'completed' for x in children):
            parent.state = 'completed'
        else:
            parent.state = 'ongoing'
    def set_vota(vota, state):
        for votum in vota:
            votum.state = state
    def set_agenda_items(agenda_items, state):
        # Setting an agenda item cascades to its vota.
        for agenda_item in agenda_items:
            agenda_item.state = state
            set_vota(agenda_item.vota, state)
    if isinstance(item, Assembly):
        if item.state in ('scheduled', 'completed'):
            set_agenda_items(item.agenda_items, item.state)
        if item.state == 'ongoing':
            # Deliberately left untouched: children keep their own states.
            pass
    if isinstance(item, AgendaItem):
        assembly = item.assembly
        prev = [x for x in assembly.agenda_items if x.number < item.number]
        next = [x for x in assembly.agenda_items if x.number > item.number]
        if item.state == 'scheduled':
            set_vota(item.vota, 'scheduled')
            set_agenda_items(next, 'scheduled')
            set_by_children(assembly, assembly.agenda_items)
        if item.state == 'ongoing':
            set_agenda_items(prev, 'completed')
            set_agenda_items(next, 'scheduled')
            assembly.state = 'ongoing'
        if item.state == 'completed':
            set_vota(item.vota, 'completed')
            set_agenda_items(prev, 'completed')
            set_by_children(assembly, assembly.agenda_items)
    if isinstance(item, Votum):
        agenda_item = item.agenda_item
        assembly = agenda_item.assembly
        prev_v = [x for x in agenda_item.vota if x.number < item.number]
        next_v = [x for x in agenda_item.vota if x.number > item.number]
        prev_a = [
            x for x in assembly.agenda_items if x.number < agenda_item.number
        ]
        next_a = [
            x for x in assembly.agenda_items if x.number > agenda_item.number
        ]
        if item.state == 'scheduled':
            set_vota(next_v, 'scheduled')
            set_agenda_items(next_a, 'scheduled')
            set_by_children(agenda_item, agenda_item.vota)
            set_by_children(assembly, assembly.agenda_items)
        if item.state == 'ongoing':
            set_vota(prev_v, 'completed')
            set_vota(next_v, 'scheduled')
            set_agenda_items(prev_a, 'completed')
            set_agenda_items(next_a, 'scheduled')
            agenda_item.state = 'ongoing'
            assembly.state = 'ongoing'
        if item.state == 'completed':
            set_vota(prev_v, 'completed')
            set_agenda_items(prev_a, 'completed')
            set_by_children(agenda_item, agenda_item.vota)
            set_by_children(assembly, assembly.agenda_items)
|
import os
from day_6 import main
current_dir = os.path.dirname(os.path.abspath(__file__))
test_input_file = os.path.join(current_dir, 'test_input.txt')
def test_transform_input():
    """transform_input should yield one list of answers per group in the fixture."""
    expected = [["a", "b", "c"], ["a", "b", "c"], ["a", "b", "c"], ["a"], ["b"]]
    result = main.transform_input(test_input_file)
    assert result == expected
    # Group sizes, checked pairwise against the fixture layout.
    for group, size in zip(result, (3, 3, 3, 1, 1)):
        assert len(group) == size
|
import math, random
import numpy as np
def sample_from_gaussian_mixture(batchsize, n_dim, n_labels):
    """Sample latent vectors from a 2-D Gaussian mixture laid out on a circle.

    Each consecutive pair of dimensions is an anisotropic Gaussian sample
    rotated to a randomly chosen one of `n_labels` angles and shifted
    outward, producing the classic "flower" prior used for adversarial
    autoencoders.

    Args:
        batchsize: number of vectors to draw.
        n_dim: dimensionality of each vector; must be even.
        n_labels: number of mixture components (petals).

    Returns:
        float32 array of shape (batchsize, n_dim).

    Raises:
        Exception: if n_dim is odd.
    """
    if n_dim % 2 != 0:
        raise Exception("n_dim must be a multiple of 2.")
    def sample(x, y, label, n_labels):
        shift = 1.4
        # Rotate the raw (x, y) sample by the component's angle, then push
        # it away from the origin along that angle.
        r = 2.0 * np.pi / float(n_labels) * float(label)
        new_x = x * math.cos(r) - y * math.sin(r)
        new_y = x * math.sin(r) + y * math.cos(r)
        new_x += shift * math.cos(r)
        new_y += shift * math.sin(r)
        return np.array([new_x, new_y]).reshape((2,))
    x_var = 0.5
    y_var = 0.05
    # Integer division: the original Python-2 `n_dim / 2` produces a float
    # shape (a TypeError) under Python 3.
    half = n_dim // 2
    x = np.random.normal(0, x_var, (batchsize, half))
    y = np.random.normal(0, y_var, (batchsize, half))
    z = np.empty((batchsize, n_dim), dtype=np.float32)
    for batch in range(batchsize):  # range: xrange is Python-2 only
        for zi in range(half):
            z[batch, zi*2:zi*2+2] = sample(x[batch, zi], y[batch, zi], random.randint(0, n_labels - 1), n_labels)
    return z
def sample_from_swiss_roll(batchsize, n_dim, n_labels):
    """Sample latent vectors from a 2-D swiss-roll distribution.

    Each consecutive pair of dimensions is drawn from a randomly chosen
    angular segment (one of `n_labels`) of a spiral.

    Args:
        batchsize: number of vectors to draw.
        n_dim: dimensionality of each vector (pairs of dims are filled;
            an odd trailing dimension stays zero, as in the original).
        n_labels: number of angular segments.

    Returns:
        float32 array of shape (batchsize, n_dim).
    """
    def sample(label, n_labels):
        # Uniform position within the chosen segment of the roll.
        uni = np.random.uniform(0.0, 3.0) / float(n_labels) + float(label) / float(n_labels)
        r = math.sqrt(uni) * 3.0
        rad = np.pi * 4.0 * math.sqrt(uni)
        x = r * math.cos(rad)
        y = r * math.sin(rad)
        return np.array([x, y]).reshape((2,))
    z = np.zeros((batchsize, n_dim), dtype=np.float32)
    # range/// replace Python-2 xrange and float division on shapes.
    for batch in range(batchsize):
        for zi in range(n_dim // 2):
            z[batch, zi*2:zi*2+2] = sample(random.randint(0, n_labels - 1), n_labels)
    return z
# -*- coding: UTF-8 -*-
import sys, os
sys.dont_write_bytecode = True
import shutil
def listFile(dir, nameList) :
    """Recursively append the relative paths of all '.sc' files under `dir`.

    Paths are normalized to forward slashes; the leading './' (two chars)
    is stripped, so `dir` is expected to be '.' at the top call.
    """
    assert(os.path.isdir(dir))
    for entry in os.listdir(dir) :
        full = os.path.join(dir, entry)
        if os.path.isdir(full) :
            listFile(full, nameList)
        elif os.path.isfile(full) and entry.endswith('.sc') :
            nameList.append(full[2:].replace('\\', '/'))
def headerLastMod() :
    """Return the newest mtime among '.sh'/'.def' files in the current dir.

    Returns 0 when no such file exists.
    """
    latest = 0
    for name in os.listdir('.') :
        if os.path.isfile(name) and name.endswith(('.sh', '.def')) :
            latest = max(latest, os.stat(name).st_mtime)
    return latest
if __name__ == '__main__' :
    # Resolve the script's own directory and work from '../shaders'.
    curDir = sys.argv[0].replace('\\', '/')
    curDir = curDir[:curDir.rfind('/')+1]
    os.chdir(curDir + '../shaders')
    sys.path.append(".")
    import fxc
    # Collect every '.sc' shader under the shaders directory.
    fileList = []
    listFile('.', fileList)
    # Newest header ('.sh'/'.def') mtime: headers force shader recompiles.
    lastModTime = headerLastMod()
    force = input("force all ? (y/n) : ")
    # Varying definitions shared by all shaders.
    vary = fxc.parseVaryDef('varying.predef')
    macros = None
    if len(sys.argv) > 1 :
        macros = sys.argv[1]
    force = force in ('y', 'Y', '1')
    for f in fileList :
        modTime = None
        if not force :
            # Use the newer of the shader's own mtime and the headers'.
            modTime = os.stat(f).st_mtime
            modTime = max(lastModTime, modTime)
        # Shader id: strip the directory prefix plus a 3-char kind prefix
        # and the '.sc' suffix -- presumably matches keys in varying.predef;
        # TODO confirm naming convention.
        scId = f[f.rfind('/')+4:-3]
        if scId in vary :
            v = vary[scId]
            fxc.proc(f, v, macros, modTime)
    #fxc.press_any_key_exit()
'''
Problem Statement
Given an unsorted array of numbers, find the top ‘K’ frequently occurring numbers in it.
Example 1:
Input: [1, 3, 5, 12, 11, 12, 11], K = 2
Output: [12, 11]
Explanation: Both '11' and '12' appeared twice.
Example 2:
Input: [5, 12, 11, 3, 11], K = 2
Output: [11, 5] or [11, 12] or [11, 3]
Explanation: Only '11' appeared twice, all other numbers appeared once.
'''
from heapq import *
def find_topk_frequent_numbers(nums, K):
    """Return the K most frequent values in `nums`.

    Uses a size-K min-heap over (frequency, value) pairs, so the result is
    ordered by increasing frequency. O(n log K).
    """
    counts = {}
    for value in nums:
        counts[value] = counts.get(value, 0) + 1
    heap = []
    for value, count in counts.items():
        heappush(heap, (count, value))
        # Evict the least frequent entry once the heap exceeds K.
        if len(heap) > K:
            heappop(heap)
    return [heappop(heap)[1] for _ in range(len(heap))]
def main():
    """Demo: print the 2 most frequent numbers of the example input."""
    nums = [1, 3, 5, 12, 11, 12, 11]
    K = 2
    print(find_topk_frequent_numbers(nums,K))
if __name__ == "__main__":
    main()
|
# -*- coding: utf-8 -*-
import requests
import time
import json
import os
def currentblock(akey):
    """Return the latest Polygon block number via the Polygonscan proxy API.

    Returns 0 (and prints a notice) on any request or parsing failure.
    """
    url = ('https://api.polygonscan.com/api?module=proxy'
           '&action=eth_blockNumber&apikey=' + akey)
    try:
        payload = requests.get(url).json()
        # The result is a hex string like '0x1a2b3c'; strip '0x', parse base 16.
        return int(payload['result'][2:], 16)
    except Exception:
        print("Error in accessing API for current block number.")
        return 0
def startblock(address, akey):
    """Return the block number of the first token transaction at `address`.

    Returns 0 (and prints a notice) on request failure, a non-200 status,
    or an empty result set.
    """
    url = ("https://api.polygonscan.com/api?module=account&action=tokentx&address="
           + address + "&startblock=0&endblock=999999999999&sort=asc&apikey=" + akey)
    try:
        response = requests.get(url)
    except Exception:
        print("Error in accessing API. Link may be out of date.")
        return 0
    if response.status_code != 200:
        print("API returned bad status " + str(response.status_code))
        return 0
    try:
        return int(response.json()['result'][0]['blockNumber'])
    except Exception:
        print('API response empty. Please retry.')
        return 0
def scanaddress(address, akey):
    """Scan `address` and return a dict mapping token symbol -> contract address.

    Walks the token-transfer history in `block_inc`-sized windows, halving
    the window whenever a response hits the API's row cap. Returns None if
    the API reports a bad status.

    BUG FIX: the original built the mapping with `zip()` over two *sets*,
    which pairs symbols with arbitrary contract addresses. The mapping is
    now taken directly from each transaction.
    """
    block_inc = 3000
    api_max = 10000
    token_contract = {}
    block = startblock(address, akey)
    end_block = currentblock(akey)
    while block < end_block:
        response = requests.get("https://api.polygonscan.com/api?module=account&action=tokentx&address=" + address + "&startblock=" + str(block) + "&endblock=" + str(block+block_inc-1) + "&sort=asc&apikey=" + akey)
        if response.status_code != 200:
            print('API returned bad status ' + str(response.status_code) + ' during reading loop.')
            return
        elif len(response.json()['result']) >= api_max:
            # Window too large for the API cap; shrink and retry (floor at 1
            # so the loop cannot stall with a zero increment).
            block_inc = max(1, block_inc // 2)
            continue
        for transaction in response.json()['result']:
            # Keep the first contract seen for each symbol.
            token_contract.setdefault(transaction['tokenSymbol'],
                                      transaction['contractAddress'])
        block += block_inc
    return token_contract
def main():
    """Incrementally download and persist token transfers for a pool address.

    Caches three JSON files next to the script: tokendict.json
    (symbol -> contract), transactions.json (per-symbol transfer lists plus
    a 'last_block' resume marker) and addresses.json (all counterparties).
    Re-running resumes from 'last_block'.
    """
    # NOTE(review): hard-coded API key checked into source -- should come
    # from an environment variable or config.
    api_key = 'ACXH82BAMGXAWA4FAD8SCHZKAWVCAG1TJH'
    pooladdress = '0xb5f383998d4e58c140c15c441c75bb79170b6b45'
    block_inc = 3000
    api_max = 10000
    end_block = currentblock(api_key)
    time.sleep(0.5)
    # Second call -- presumably a retry in case the first returned 0;
    # TODO confirm intent.
    end_block = currentblock(api_key)
    if os.path.isfile('tokendict.json'):
        with open('tokendict.json', 'r') as f:
            token_contract = json.loads(f.read())
    else:
        token_contract = scanaddress(pooladdress, api_key)
        with open('tokendict.json', 'w') as f:
            json.dump(token_contract, f)
    if os.path.isfile('transactions.json'):
        with open('transactions.json', 'r') as f:
            results = json.loads(f.read())
        block = int(results['last_block'])
    else:
        block = startblock(pooladdress, api_key)
        results = {x:[] for x in token_contract}
        results['last_block'] = block
    if os.path.isfile('addresses.json'):
        with open('addresses.json','r') as f:
            addresses = set(json.loads(f.read()))
    else:
        addresses = set()
    count = 0
    while block < end_block:
        response = requests.get("https://api.polygonscan.com/api?module=account&action=tokentx&address=" + pooladdress + "&startblock=" + str(block) + "&endblock=" + str(block+block_inc-1) + "&sort=asc&apikey=" + api_key)
        if response.status_code != 200:
            print('API returned bad status ' + str(response.status_code) + 'during reading loop.')
            break
        elif len(response.json()['result']) >= api_max:
            # Window hit the API row cap: halve it and retry the same range.
            block_inc = int(block_inc/2)
            continue
        for transaction in response.json()['result']:
            # Drop bulky fields we never use before persisting.
            [transaction.pop(x) for x in ['hash','nonce','blockHash','tokenName','transactionIndex','gas','gasPrice','gasUsed','cumulativeGasUsed','input','confirmations']]
            results[transaction['tokenSymbol']].append(transaction)
            results['last_block'] = transaction['blockNumber']
            [addresses.add(x) for x in [transaction['to'], transaction['from']]]
            count += 1
        print('Reading block ' + str(block) + ' of ' + str(end_block))
        block += block_inc
        time.sleep(0.2)  # throttle to respect the API rate limit
    print('Updated ' + str(count) + ' transaction entries')
    with open('transactions.json', 'w') as f:
        json.dump(results, f)
    with open('addresses.json','w') as f:
        json.dump(list(addresses), f)
if __name__ == '__main__':
    main()
# The Python list object is the most general sequence provided by the
# language. Lists are positionally ordered collections of arbitrarily typed
# objects, and they have no fixed size. They are also mutable -- unlike
# strings, lists can be modified in place by assignment to offsets as well
# as a variety of list method calls. Accordingly, they provide a very
# flexible tool for representing arbitrary collections -- lists of files in
# a folder, employees in a company, emails in your inbox, and so on.
# A list of three different-type objects
items = [123, 'spam', 1.23]
# Number of items in the list
len(items)
# Index by position
items[0]
# Slicing a list returns a new list
items[0:1]
# Concatenation
items + [1, 2]
# Repetition
items * 2
# Add an item to the end of the list
items.append('item')
# print(items)
# create a list of the numbers 0 to 99
# print(list(range(100)))
# Generated by Django 3.2.5 on 2021-07-29 07:10
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial auto-generated migration: creates the appointment, Modify
    and pharmacy_order tables. Do not edit by hand."""
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='appointment',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=50)),
                ('Phone_Number', models.BigIntegerField()),
                ('problem', models.TextField()),
                ('Time', models.DateTimeField(auto_now=True, null=True)),
            ],
        ),
        migrations.CreateModel(
            name='Modify',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=50)),
                ('Lab_img', models.ImageField(upload_to='pics')),
                ('Pharmacy_img', models.ImageField(upload_to='pics')),
                ('equipments_img1', models.ImageField(upload_to='pics')),
                ('equipments_img2', models.ImageField(upload_to='pics')),
                ('number', models.BigIntegerField()),
                ('add', models.CharField(max_length=200)),
            ],
        ),
        migrations.CreateModel(
            name='pharmacy_order',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('District', models.CharField(choices=[(1, 'Anantapur'), (2, 'Chittoor'), (3, 'East Godavari'), (4, 'Guntur'), (5, 'YSR Kadapa'), (6, 'Krishna'), (7, 'Kurnool'), (8, 'Nellore'), (9, 'Prakasam'), (10, 'Srikakulam'), (11, 'Vijayanagaram'), (12, 'Visakapatnam'), (13, 'West Godavari')], max_length=50)),
                ('First_Name', models.CharField(max_length=50)),
                ('Last_Name', models.CharField(max_length=50)),
                ('Address', models.CharField(max_length=500)),
                ('pincode', models.IntegerField()),
                ('Phone_Number', models.PositiveBigIntegerField()),
                ('order', models.TextField()),
            ],
        ),
    ]
|
import pyalps
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import pyalps.plot
import numpy as np
import pyalps.fit_wrapper as fw
from math import sqrt
# Prepare the input parameters: a temperature sweep for two inter-ladder
# couplings J2 of the coupled-ladder Heisenberg model.
parms = []
for j2 in [0.,1.]:
    for t in np.linspace(0.01,1.0,20):
        parms.append(
            {
                'LATTICE'        : "coupled ladders",
                'local_S'        : 0.5,
                'ALGORITHM'      : 'loop',
                'SEED'           : 0,
                'T'              : t,
                'J0'             : 1 ,
                'J1'             : 1,
                'J2'             : j2,
                'THERMALIZATION' : 5000,
                'SWEEPS'         : 50000,
                'MODEL'          : "spin",
                'L'              : 8,
                'W'              : 8
            }
    )
# Write the input file and run the QMC loop-algorithm simulation.
input_file = pyalps.writeInputFiles('parm8c',parms)
pyalps.runApplication('loop',input_file)
# Load the susceptibilities and group them by J2.
data = pyalps.loadMeasurements(pyalps.getResultFiles(prefix='parm8c'),['Staggered Susceptibility','Susceptibility'])
susc1=pyalps.collectXY(data,x='T',y='Susceptibility', foreach=['J2'])
lines = []
for data in susc1:
    pars = [fw.Parameter(1), fw.Parameter(1)]
    # Restrict the fit to T < 1.
    data.y= data.y[data.x < 1]
    data.x= data.x[data.x < 1]
    # Activated form chi(T) = (a/sqrt(T)) * exp(-gap/T).
    f = lambda self, x, pars: (pars[0]()/np.sqrt(x))*np.exp(-pars[1]()/x)
    fw.fit(None, f, pars, [v.mean for v in data.y], data.x)
    prefactor = pars[0].get()
    gap = pars[1].get()
    print prefactor,gap
    lines += plt.plot(data.x, f(None, data.x, pars))
    lines[-1].set_label('$J_2=%.4s$: $\chi = \\frac{%.4s}{T}\exp(\\frac{-%.4s}{T})$' % (data.props['J2'], prefactor,gap))
# Plot data with the fits overlaid and save to EPS.
plt.figure()
pyalps.plot.plot(susc1)
plt.xlabel(r'$T$')
plt.ylabel(r'$\chi$')
plt.title('Susceptibility')
plt.legend(loc='best')
plt.savefig('2D_heisenberg_ladder_7.eps',dpi=400)
import os
def get_root():
this_folder = os.path.dirname(__file__)
root_folder = os.path.realpath(f'{this_folder}/..')
return root_folder |
import mlrose_hiive
import numpy as np
import datetime
import matplotlib.pyplot as plt
from util import generate_graph
"""
np.random.seed(SEED)
N = 20
fitness = mlrose.Knapsack(weights=np.random.uniform(size=N), values=np.arange(1, N+1, 1),max_weight_pct=0.8)
problem_fit = mlrose.DiscreteOpt(length = N,fitness_fn = fitness,maximize = True,max_val = 2 # binary)
"""
# Count function evaluation counts
def ks_fitness_fn(state):
    """Knapsack fitness for a fixed 10-item instance, counting evaluations.

    Increments the module-global `eval_count` on every call so the search
    drivers can report how many fitness evaluations an optimizer performed.

    PERF FIX: the original rebuilt the mlrose Knapsack fitness object on
    every single evaluation; it is now created once and cached on the
    function object.
    """
    global eval_count
    if not hasattr(ks_fitness_fn, '_fitness'):
        N = 10
        weights = [0.11133083, 0.21076757, 0.23296249, 0.15194456, 0.83017814, 0.40791941,
                   0.5557906, 0.74552394, 0.24849976, 0.9686594 ]
        values = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        max_weight_pct = 0.8
        ks_fitness_fn._fitness = mlrose_hiive.Knapsack(
            weights=weights, values=values, max_weight_pct=max_weight_pct)
    eval_count += 1
    return ks_fitness_fn._fitness.evaluate(state)
def rhc(problem, iterations, random_seed, graph_file, graph_title):
    """Run random hill climbing for each iteration cap and plot fitness.

    Uses the module-global `eval_count` (incremented by the fitness
    function) to count evaluations. Prints the best score, its wall time
    and its evaluation count.
    """
    fitness = []
    fit_time = []
    fn_evals = []
    global eval_count
    for i in iterations:
        eval_count = 0
        start = datetime.datetime.now()
        best_state, best_fitness, _ = mlrose_hiive.random_hill_climb(problem,
                                        max_iters=i, random_state=random_seed)
        finish = datetime.datetime.now()
        fitness.append(best_fitness)
        fit_time.append((finish - start).total_seconds())
        fn_evals.append(eval_count)
    plt.plot(iterations, fitness, label="Fitness score")
    plt.legend(loc="best")
    plt.grid()
    generate_graph(graph_file + "rhc", graph_title + "Random Hill Climbing",
                   "Iterations", "Fitness")
    print('Best score achieved: ', max(fitness))
    index = fitness.index(max(fitness))
    print('Time taken to achieve that: ', fit_time[index])
    print('Function evaluations taken to achieve that: ', fn_evals[index])
def sa(problem, iterations, random_seed, graph_file, graph_title):
    """Sweep simulated-annealing cooling rates and plot fitness curves.

    For each arithmetic-decay rate, runs SA at every iteration cap,
    recording best fitness, wall time and evaluation counts (via the
    module-global `eval_count`), then emits comparison plots.
    """
    decays = [0.001, 0.002, 0.003, 0.004, 0.005]
    best_score = []
    time_taken = []
    fn_evals_taken = []
    # fig1, ax1 = plt.subplots()
    # fig2, ax2 = plt.subplots()
    global eval_count
    for decay in decays:
        schedule = mlrose_hiive.ArithDecay(init_temp=1.0, decay=decay)
        fitness = []
        fit_time = []
        fn_evals = []
        for i in iterations:
            eval_count = 0
            start = datetime.datetime.now()
            # Solve using simulated annealing - attempt 1
            best_state, best_fitness, _ = mlrose_hiive.simulated_annealing(problem, schedule=schedule,
                                            max_iters=i, random_state=random_seed)
            finish = datetime.datetime.now()
            fn_evals.append(eval_count)
            fitness.append(best_fitness)
            fit_time.append((finish - start).total_seconds())
            # print('iteration: ',i)
            # print('best_state:', best_state)
            # print('best_fitness: ', best_fitness)
        best_score.append(max(fitness))
        index = fitness.index(max(fitness))
        time_taken.append(fit_time[index])
        fn_evals_taken.append(fn_evals[index])
        # print('index: ', index)
        # print('time for that: ', fit_time[index])
        plt.plot(iterations, fitness, label="Cooling = " + str(decay))
        # ax2.plot(fn_evals, fitness, label="Cooling = " + str(decay))
    plt.legend(loc="best")
    plt.grid()
    generate_graph(graph_file + "sa_iter", graph_title + "Simulated Annealing", "Iterations", "Fitness")
    """
    ax2.legend(loc="best")
    ax2.grid()
    generate_graph("cp_sa_evals", "Continuous Peaks - Simulated Annealing", "Function evaluations", "Fitness")
    """
    # Decays best_score and time_taken
    plt.plot(decays, best_score)
    plt.grid()
    generate_graph(graph_file + "sa_decays", graph_title + "Simulated Annealing",
                   "Cooling Component", "Best Score Achieved")
    plt.plot(decays, time_taken)
    plt.grid()
    generate_graph(graph_file + "sa_decay_time", graph_title + "Simulated Annealing",
                   "Cooling Component", "Time taken to achieve that")
    plt.scatter(time_taken, best_score)
    for i, txt in enumerate(decays):
        plt.annotate(s=str(txt), xy=(time_taken[i], best_score[i]))
    plt.legend(loc='best', title='Cooling Component')
    plt.grid()
    generate_graph(graph_file + "sa_scatter", graph_title + "Simulated Annealing",
                   "Time Taken", "Best Score achieved")
    print('decays: ', decays)
    print('Best scores reached: ', best_score)
    print('Time taken to do that: ', time_taken)
    print('Function evaluations taken: ', fn_evals_taken)
def ga(problem, iterations, random_seed, graph_file, graph_title):
    """Sweep genetic-algorithm mutation probabilities and plot fitness.

    For each mutation probability, runs the GA at every iteration cap,
    recording best fitness, wall time and evaluation counts (via the
    module-global `eval_count`), then emits comparison plots.
    """
    mutation_prob = [0.1, 0.2, 0.3, 0.4, 0.5]
    best_score = []
    time_taken = []
    fn_evals_taken = []
    global eval_count
    for m in mutation_prob:
        fitness = []
        fit_time = []
        fn_evals = []
        for i in iterations:
            eval_count = 0
            start = datetime.datetime.now()
            best_state, best_fitness, _ = mlrose_hiive.genetic_alg(problem, mutation_prob=m,
                                            max_iters=i, random_state=random_seed)
            finish = datetime.datetime.now()
            fitness.append(best_fitness)
            fit_time.append((finish - start).total_seconds())
            fn_evals.append(eval_count)
        # Find the best score achieved in that mutation prob
        best_score.append(max(fitness))
        index = fitness.index(max(fitness))
        # find the time that was taken to achieve that
        time_taken.append(fit_time[index])
        fn_evals_taken.append(fn_evals[index])
        plt.plot(iterations, fitness, label="Mutation = " + str(m))
    plt.legend(loc="best", title='Mutation Probability')
    plt.grid()
    generate_graph(graph_file + "ga", graph_title + "Genetic Algorithm", "Iterations", "Fitness")
    # Decays best_score and time_taken
    plt.plot(mutation_prob, best_score)
    plt.grid()
    generate_graph(graph_file + "ga_mut", graph_title + "Genetic Algorithm",
                   "Mutation Probability", "Best Score Achieved")
    """
    plt.plot(mutation_prob, time_taken)
    plt.grid()
    generate_graph("cp_sa_decay_time", "Continuous Peaks - Genetic Algorithm", "Mutation Probability",
                   "Time taken to achieve that")
    """
    plt.scatter(time_taken, best_score)
    for i, txt in enumerate(mutation_prob):
        plt.annotate(s=str(txt), xy=(time_taken[i], best_score[i]))
    plt.legend(loc='best', title='Mutation Probability')
    plt.grid()
    generate_graph(graph_file + "ga_scatter", graph_title + "Genetic Algorithm",
                   "Time Taken", "Best Score achieved")
    print('Mutation prob: ', mutation_prob)
    print('Best scores reached: ', best_score)
    print('Time taken to do that: ', time_taken)
    print('Function evaluations taken: ', fn_evals_taken)
def mimic(problem, iterations, random_seed, graph_file, graph_title):
    """Sweep MIMIC keep percentages and plot fitness.

    For each keep_pct, runs MIMIC at every iteration cap, recording best
    fitness, wall time and evaluation counts (via the module-global
    `eval_count`), then emits comparison plots.
    """
    keep_pct = [0.1, 0.25, 0.50]
    best_score = []
    time_taken = []
    fn_evals_taken = []
    global eval_count
    for k in keep_pct:
        fitness = []
        fit_time = []
        fn_evals = []
        for i in iterations:
            eval_count = 0
            start = datetime.datetime.now()
            best_state, best_fitness, _ = mlrose_hiive.mimic(problem, keep_pct=k,
                                            max_iters=i, random_state=random_seed)
            finish = datetime.datetime.now()
            fitness.append(best_fitness)
            fit_time.append((finish - start).total_seconds())
            fn_evals.append(eval_count)
        # Find the best score achieved in that mutation prob
        best_score.append(max(fitness))
        index = fitness.index(max(fitness))
        # find the time that was taken to achieve that
        time_taken.append(fit_time[index])
        fn_evals_taken.append(fn_evals[index])
        plt.plot(iterations, fitness, label="keep_pct = " + str(k))
    plt.legend(loc="best", title='Proportion of samples kept')
    plt.grid()
    generate_graph(graph_file + "mimic", graph_title + "MIMIC: ", "Iterations", "Fitness")
    # Decays best_score and time_taken
    plt.plot(keep_pct, best_score)
    plt.grid()
    generate_graph(graph_file + "mimic_pct", graph_title + "MIMIC",
                   "Proportion of samples kept", "Best Score Achieved")
    """
    plt.plot(mutation_prob, time_taken)
    plt.grid()
    generate_graph("cp_sa_decay_time", "Continuous Peaks - Genetic Algorithm", "Mutation Probability",
                   "Time taken to achieve that")
    """
    plt.scatter(time_taken, best_score)
    for i, txt in enumerate(keep_pct):
        plt.annotate(s=str(txt), xy=(time_taken[i], best_score[i]))
    plt.legend(loc='best', title='Proportion of samples kept')
    plt.grid()
    generate_graph(graph_file + "mimic_scatter", graph_title + "MIMIC",
                   "Time Taken", "Best Score achieved")
    print('Proportion of samples kept: ', keep_pct)
    print('Best scores reached: ', best_score)
    print('Time taken to do that: ', time_taken)
    print('Function evaluations taken: ', fn_evals_taken)
if __name__ == "__main__":
# Initialize fitness function object using Custom function
fitness_fn = mlrose_hiive.CustomFitness(ks_fitness_fn)
# Define optimization problem object
N = 10
problem = mlrose_hiive.DiscreteOpt(length=N, fitness_fn=fitness_fn, maximize=True, max_val=2)
max_iters = 1500
iterations = range(0, max_iters, 50)
random_seed = 1
graph_file = 'ks_'
graph_title = 'Knapsack Problem - '
print('***************Knapsack Optimization Problem*****************')
# Random hill climbing
print('--------------Random Hill Climbing---------------')
rhc(problem, iterations, random_seed, graph_file, graph_title)
# simulate annealing
print('--------------Simulated Annealing---------------')
sa(problem, iterations, random_seed, graph_file, graph_title)
# Genetic Algorithm
print('--------------Genetic Algorithm---------------')
ga(problem,iterations,random_seed, graph_file, graph_title)
# MIMIC
print('--------------MIMIC---------------')
mimic(problem, iterations, random_seed, graph_file, graph_title)
|
#!/usr/bin/python2.7
import xml.etree.ElementTree as ET
from urllib2 import urlopen
import datetime
from time import time
import os.path
# Where per-route position logs are written.
data_path = os.path.expanduser('~/bus/data')
# InfoPoint endpoints serving live vehicle XML, keyed by route id.
uts_url = 'http://uts.pvta.com:81/InfoPoint/map/GetVehicleXml.ashx?RouteId=%s'
ntf_url = 'http://ntf.pvta.com:81/InfoPoint/map/GetVehicleXml.ashx?RouteId=%s'
routes = {}
routes.update({ r: uts_url%r for r in ['30', '31', '38', '39', '45', '46'] })
routes.update({ r: ntf_url%r for r in ['B43', 'R41', 'M40'] })
def fetch_route_vehicles(url):
    """Fetch the vehicle XML feed at `url`.

    Returns a list of (bus_name, (lat, lon)) tuples, one per vehicle
    element in the feed.
    """
    document = ET.fromstring(urlopen(url).read())
    return [
        (node.attrib['name'],
         (float(node.attrib['lat']), float(node.attrib['lng'])))
        for node in document
    ]
# Append one timestamped position line per vehicle to today's log file
# for every configured route.
for route,url in routes.items():
    vehicles = fetch_route_vehicles(url)
    t = time()
    p = os.path.join(data_path, 'route%s' % route)
    if not os.path.isdir(p):
        os.makedirs(p)
    # One file per day, opened in append mode.
    f = open(os.path.join(p, datetime.date.today().strftime('%Y%m%d')), 'a')
    for (bus_n, (lat,lon)) in vehicles:
        f.write('%5s\t%5s\t%20d\t%2.10f\t%2.10f\n' % (bus_n, route, t, lat, lon))
|
"""
DS=int(input("Enter the marks"))
Micro=int(input("Enter the number"))
Al=int(input("Enter the marks"))
Java=int(input("Enter the marks"))
net=int(input("Enter the marks"))
n=(DS+Micro+Al+Java+net)*100/500
if 0<=n<40:
print("D")
elif 40<=n<60:
print("C")
elif 60<=n<70:
print("B")
elif 70<=n<80:
print("A")
elif 80<=n<100:
print("A+")
else:
print("invalid percentag")
"""
marks=[int(m) for m in input("Enter the number").split()]
per=sum(marks)*100/500
print(per) |
import random
import math
import pygame
from pygame.locals import *
from items.armor import base as armor
from items.weapon import base as weapon
class baseEnemy(object):
    """Grid-based enemy entity: movement AI, item drops and melee attacks.

    Coordinates are tile indices; drawing multiplies by a 32px tile size.
    Subclasses are expected to override `dmg`, `hp` and `xp`.
    """
    def __init__(self, name, lvl, hostile, x, y, img, ID):
        self.name = name
        self.lvl = lvl
        self.img = pygame.image.load(img).convert_alpha()
        self.imgFile = img
        self.ID = ID
        self.x = x
        self.y = y
        # Drop chance (0..1) and candidate item list for spawnObjFunc().
        self.spawnObj = 0
        self.spawnObjs = []
        self.dmg = 0
        self.hp = 1
        self.xp = self.dmg + self.hp
        # Sets hostility and if the enemy is hostile,
        # it will set the entity to seeking
        self.hostile = hostile
        self.seeking = hostile
        self.alive = True
    def spawnObjFunc(self):
        """Return a random drop item, or None if the drop roll fails."""
        item = None
        if self.spawnObj > random.random():
            item = self.spawnObjs[random.randint(0, len(self.spawnObjs) - 1)]
        return item
    def getEnemyDistance(self, enemyX, enemyY):
        """Return the Euclidean distance to (enemyX, enemyY), rounded to 2dp."""
        distance = round(math.sqrt((enemyX - self.x)**2 + (enemyY - self.y)**2), 2)
        return distance
    def update(self, player, Map):
        """Advance one AI tick: die at 0 HP, chase/attack the player, move.

        Movement respects non-walkable map layers and other enemies; on a
        blocked move the direction is re-aimed at the player and retried
        (at most two attempts per tick).
        """
        if self.hp < 1:
            self.alive = False
        if self.alive == True:
            # Random direction by default (0 and 5 mean "stand still").
            DIR = random.randint(0,5)
            # If the entity is seaking the player, it will
            # move towards the player's X or Y, depending
            # on where the player is.
            # 1 = right
            # 2 = left
            # 3 = down
            # 4 = up
            if self.seeking == True:
                # Close the larger axis gap first; attack when adjacent.
                if abs(self.x - player.x) > abs(self.y - player.y):
                    if self.x < player.x - 1:
                        DIR = 1
                    elif self.x > player.x + 1:
                        DIR = 2
                    elif self.y < player.y - 1:
                        DIR = 3
                    elif self.y > player.y + 1:
                        DIR = 4
                    else:
                        # ADD ATTACK FUNCTION
                        self.attack(player)
                        DIR = 0
                else:
                    if self.y < player.y - 1:
                        DIR = 3
                    elif self.y > player.y + 1:
                        DIR = 4
                    elif self.x < player.x - 1:
                        DIR = 1
                    elif self.x > player.x + 1:
                        DIR = 2
                    else:
                        # ADD ATTACK FUNCTION
                        self.attack(player)
                        DIR = 0
            # Up to two movement attempts; a blocked attempt re-aims DIR.
            x = 0
            while x<2:
                if DIR == 1 and self.x < 20:
                    move = True
                    for layer in Map.layers:
                        if layer[self.x+1][self.y].walkable == False:
                            move = False
                    for enemy in Map.enemies:
                        if enemy.x == self.x+1 and self.y == enemy.y:
                            move = False
                    if move == True:
                        self.x += 1
                        break
                    else:
                        if player.y < self.y:
                            DIR = 4
                        elif player.y > self.y:
                            DIR = 3
                if DIR == 2 and self.x > 0:
                    move = True
                    for layer in Map.layers:
                        if layer[self.x-1][self.y].walkable == False:
                            move = False
                    for enemy in Map.enemies:
                        if enemy.x == self.x-1 and self.y == enemy.y:
                            move = False
                    if move == True:
                        self.x -= 1
                        break
                    else:
                        if player.y < self.y:
                            DIR = 4
                        elif player.y > self.y:
                            DIR = 3
                if DIR == 3 and self.y < 18:
                    move = True
                    for layer in Map.layers:
                        if layer[self.x][self.y+1].walkable == False:
                            move = False
                    for enemy in Map.enemies:
                        if enemy.y == self.y+1 and self.x == enemy.x:
                            move = False
                    if move == True:
                        self.y += 1
                        break
                    else:
                        if player.x < self.x:
                            DIR = 2
                        elif player.x > self.x:
                            DIR = 1
                if DIR == 4 and self.y > 0:
                    move = True
                    for layer in Map.layers:
                        if layer[self.x][self.y-1].walkable == False:
                            move = False
                    for enemy in Map.enemies:
                        if enemy.y == self.y-1 and self.x == enemy.x:
                            move = False
                    if move == True:
                        self.y -= 1
                        break
                    else:
                        if player.x < self.x:
                            DIR = 2
                        elif player.x > self.x:
                            DIR = 1
                else:
                    pass
                x += 1
    def draw(self, window):
        """Blit the sprite at its tile position (32px tiles)."""
        window.blit(self.img, (self.x*32, self.y*32))
    def attack(self, enemy):
        """Deal randint(min, max) damage to `enemy`, reduced by its armor.

        When armor fully absorbs the attack, damage is 0 or 1.
        """
        if self.dmg <= enemy.getMaxArmor():
            minatt = 0
        else:
            minatt = self.dmg - enemy.getMaxArmor()
        if self.dmg <= enemy.getMaxArmor():
            maxatt = 1
        else:
            maxatt = self.dmg - enemy.getMaxArmor()
        dmg = random.randint(minatt, maxatt)
        enemy.hp -= dmg
        # NOTE(review): prints the *attacker's* hp under the label
        # 'Enemy HP' -- looks mislabeled; confirm intent.
        print('Enemy HP: ' + str(self.hp) + ' | Enemy DMG:' + str(dmg))
class testEnemy(baseEnemy):
    """Simple test enemy whose combat stats scale linearly with level."""
    def __init__(self, name, lvl, hostile, x = 1, y = 1, ID = 0, img = 'img/test-enemy.png'):
        super(testEnemy, self).__init__(name, lvl, hostile, x, y, img, ID)
        # Level-scaled stats; xp is simply their sum.
        self.dmg = 2 * self.lvl
        self.hp = 3 * self.lvl
        self.xp = self.dmg + self.hp
|
# Generated by Django 3.0.8 on 2020-07-23 17:23
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: moves the `status` field from Trip to
    TripGuide. Do not edit by hand."""
    dependencies = [
        ('trips', '0005_auto_20200723_2323'),
    ]
    operations = [
        migrations.RemoveField(
            model_name='trip',
            name='status',
        ),
        migrations.AddField(
            model_name='tripguide',
            name='status',
            field=models.CharField(choices=[('PENDING', 'Pending for Acception'), ('ACCEPTED', 'Accepted by Guides'), ('REJECTED', 'Rejected by Guides'), ('IN PROGRESS', 'Planning/Guiding In Progress'), ('COMPLETED', 'Planning/Guiding Completed')], default='PENDING', max_length=32),
        ),
    ]
|
class Solution:
    def multiply(self, num1: str, num2: str) -> str:
        """Return the product of the decimal strings num1 and num2.

        Processes num2 digit by digit from the right, carrying the
        overflow of num1 * digit + carry into the next position.
        """
        if num1 == '0' or num2 == '0':
            return '0'
        factor = int(num1)
        carry = 0
        digits = []
        for ch in reversed(num2):
            total = factor * int(ch) + carry
            digits.append(str(total % 10))
            carry = total // 10
        if carry:
            digits.append(str(carry))
        return ''.join(reversed(digits))
print(Solution().multiply("666", "777"))
|
modo = raw_input("Cifrar(c) o descifrar(d)?: ")
cesar = input("Numero cesar: ")
cad = raw_input("Introduce cadena:")
def accion(modo, cadena, cesar):
    """Encrypt ('c') or decrypt ('d') `cadena` with a Caesar shift of `cesar`.

    Returns the transformed string.

    BUG FIX: the original reversed the alphabet into a comma-joined string,
    split it back, then passed `reversed(cifrado)` -- a one-shot iterator
    with no `.index()` -- into `cifrar`, which crashed with AttributeError.
    It now passes a real reversed list, so negative-index wraparound in
    `cifrar` yields the intended forward shift.

    Raises:
        ValueError: for any mode other than 'c' or 'd' (the original hit a
        NameError there instead).
    """
    cifrado = ["a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z"]
    if modo == "c":
        # Reversed alphabet + negative indexing in cifrar => shift forward.
        cadena_cifrada = cifrar(cadena, cesar, list(reversed(cifrado)))
    elif modo == "d":
        cadena_cifrada = descifrar(cadena, cesar, cifrado)
    else:
        raise ValueError("modo must be 'c' or 'd'")
    return cadena_cifrada
def descifrar(cadena, cesar, cifrado):
    """Decrypt `cadena`: each letter is replaced by the one `cesar`
    positions earlier in `cifrado`; Python's negative indexing makes the
    wrap-around at the start of the alphabet free.
    """
    return "".join(cifrado[cifrado.index(letra) - cesar] for letra in cadena)
def cifrar(cadena, cesar, cifrado):
    """Encrypt `cadena` against the given `cifrado` alphabet.

    Each letter maps to the entry `cesar` positions before its own index;
    negative indices wrap around, so no bounds check is needed.
    """
    salida = []
    for letra in cadena:
        pos = cifrado.index(letra)
        salida.append(cifrado[pos - cesar])
    return "".join(salida)
accion(modo, cad, cesar)
print cadena
|
def formatuj(*args, **kwargs):
    """Substitute ``$key`` placeholders in the given template strings.

    Each positional string that contains at least one ``$key`` placeholder
    (for a key present in kwargs) is kept, with *every* such placeholder
    replaced by ``str(value)``. Kept lines are joined with newlines.
    Strings containing no known placeholder are dropped, mirroring the
    original behaviour.

    Bug fixes vs the original:
    - duplicate template strings produced a trailing newline, because the
      last line was detected with ``lista.index(i)`` (first occurrence);
      ``'\\n'.join`` removes that fragile bookkeeping entirely;
    - a template containing several different keys was emitted once per
      key, each copy with only one placeholder substituted — now all
      placeholders are substituted in a single output line.
    """
    wiersze = []
    for tekst in args:
        trafiony = False  # did any known placeholder occur in this string?
        for klucz, wartosc in kwargs.items():
            znacznik = f'${klucz}'
            if znacznik in tekst:
                tekst = tekst.replace(znacznik, str(wartosc))
                trafiony = True
        if trafiony:
            wiersze.append(tekst)
    return '\n'.join(wiersze)
print(formatuj('koszt $cena PLN','kwota $cena brutto', 'blabla $volvo blabla', cena=10, volvo=25))
def test_formatuj_podstawowy():
    # One key shared by two templates; lines joined by '\n', no trailing newline.
    assert formatuj('koszt $cena PLN','kwota $cena brutto', cena=10) == 'koszt 10 PLN\nkwota 10 brutto'
def test_formatuj_wiecej_kluczy():
    # Two different keys across three templates.
    # Bug fix: removed a stray trailing " |" artifact that made the original
    # assert a syntax error.
    assert formatuj('koszt $cena PLN', 'kwota $cena brutto', 'blabla $volvo blabla', cena=10, volvo=25) == 'koszt 10 PLN\nkwota 10 brutto\nblabla 25 blabla'
#!/bin/python3
import sys
# Grab the number of test cases
def max_and_below_k(n, k):
    """Return max(a & b) over all pairs 1 <= a < b <= n with a & b < k.

    Brute-force O(n^2) scan; clear and adequate for the problem limits.
    (An O(1) alternative exists: with a = k - 1, the answer is a if some
    b <= n sets a's lowest clear bit, else a - 1.)
    """
    best = 0
    for a in range(1, n):
        for b in range(a + 1, n + 1):
            v = a & b
            if best < v < k:
                best = v
    return best


if __name__ == '__main__':
    # Input: first line = number of test cases, then one "n k" line each.
    t = int(input().strip())
    for _ in range(t):
        n, k = map(int, input().strip().split())
        print(max_and_below_k(n, k))
|
from base64 import b64encode
from IPython.display import HTML
from PIL import Image
from tqdm import tqdm
import cv2
import numpy as np
import os
# helper function to display video
def display_avi_video(path):
    '''Display a video in Colab by re-encoding it to browser-playable H.264.

    Re-encodes `path` to compressed_<name>.mp4 with ffmpeg, then returns an
    inline HTML5 <video> element with the file embedded as a base64 data URL.

    Fixes vs the original: os.path.splitext instead of split('.')[0]
    (which truncated any filename containing an extra dot), quoted shell
    paths, and the mp4 file handle is closed via a context manager.
    '''
    base, _ = os.path.splitext(path)
    compressed_path = 'compressed_' + base + '.mp4'
    if os.path.exists(compressed_path):
        os.remove(compressed_path)
    # Convert video; quote both paths so spaces survive the shell.
    os.system(f'ffmpeg -i "{path}" -vcodec libx264 "{compressed_path}"')
    # Show video
    with open(compressed_path, 'rb') as f:
        mp4 = f.read()
    data_url = "data:video/mp4;base64," + b64encode(mp4).decode()
    return HTML("""
    <video width=400 controls>
          <source src="%s" type="video/mp4">
    </video>
    """ % data_url)
def display_mp4_video(path):
    '''Return an inline HTML5 <video> element for an mp4 file.

    Embeds the file's bytes as a base64 data URL, so it renders directly
    in a notebook. Fix vs the original: the file handle is closed via a
    context manager instead of being leaked.
    '''
    with open(path, 'rb') as f:
        mp4 = f.read()
    data_url = "data:video/mp4;base64," + b64encode(mp4).decode()
    return HTML("""
    <video width=400 controls>
          <source src="%s" type="video/mp4">
    </video>
    """ % data_url)
# calculate the center of the bounding box predicted by the model
def center_distance(xyxy1, xyxy2):
    '''Distance between the centers of two (x1, y1, x2, y2) boxes.

    Returns (distance, cx1, cy1, cx2, cy2) where the centers are rounded
    down to integer pixel coordinates.
    '''
    left1, top1, right1, bottom1 = xyxy1
    cx1 = int(np.mean([left1, right1]))
    cy1 = int(np.mean([top1, bottom1]))
    left2, top2, right2, bottom2 = xyxy2
    cx2 = int(np.mean([left2, right2]))
    cy2 = int(np.mean([top2, bottom2]))
    separation = np.linalg.norm([cx1 - cx2, cy1 - cy2])
    return separation, cx1, cy1, cx2, cy2
# detect the people in the frame without bird's eye processing
def detect_people_on_frame(img, confidence, distance):
    '''Detect people on a frame and draw the rectangles and lines.

    NOTE(review): relies on a module-level `model` defined elsewhere whose
    results expose `.xyxy[0]` boxes (YOLOv5-style output — confirm against
    the caller).

    Args:
        img: BGR frame (the [:, :, ::-1] flip feeds the model RGB).
        confidence: minimum detection confidence to keep a box.
        distance: pixel threshold below which two people are flagged.

    Returns:
        The frame with green boxes for safe people, red boxes plus
        connecting red lines for pairs closer than `distance`.
    '''
    results = model([img[:, :, ::-1]])  # Pass the frame through the model and get the boxes
    xyxy = results.xyxy[0].cpu().numpy()  # rows: x1, y1, x2, y2, confidence, class
    xyxy = xyxy[xyxy[:, 4] >= confidence]  # Filter desired confidence
    xyxy = xyxy[xyxy[:, 5] == 0]  # Class 0 == person
    xyxy = xyxy[:, :4]
    colors = ['green'] * len(xyxy)
    for i in range(len(xyxy)):
        for j in range(i + 1, len(xyxy)):
            # Calculate distance of the centers (already integer pixels)
            dist, x1, y1, x2, y2 = center_distance(xyxy[i], xyxy[j])
            if dist < distance:
                # If dist < distance, boxes are red and a line is drawn
                colors[i] = 'red'
                colors[j] = 'red'
                img = cv2.line(img, (x1, y1), (x2, y2), (0, 0, 255), 2)
    for i, (x1, y1, x2, y2) in enumerate(xyxy):
        color = (0, 255, 0) if colors[i] == 'green' else (0, 0, 255)
        # Bug fix: xyxy holds floats, but cv2 drawing functions require
        # integer pixel coordinates — cast explicitly.
        img = cv2.rectangle(img, (int(x1), int(y1)), (int(x2), int(y2)), color, 2)
    return img
# detect the people in the image without bird's eye processing and inference the result on image
def detect_people_on_picture(aPath, aConfidence, aDistance):
    '''Run people detection on a single image file and display the result.

    Bug fix: cv2.imshow requires a window name as its first argument; the
    original passed only the image, which raises a TypeError. A short
    waitKey lets HighGUI actually paint the window.
    '''
    image = cv2.imread(aPath)
    annotated = detect_people_on_frame(image, confidence=aConfidence, distance=aDistance)
    cv2.imshow('detections', annotated)
    cv2.waitKey(1)
# detect the people in the video without bird's eye processing
def detect_people_on_video(filename, confidence=0.9, distance=60):
    '''Detect people on a video and draw the rectangles and lines.

    Reads `filename` frame by frame, annotates each frame with
    detect_people_on_frame, and writes the result to output.avi (any
    existing output.avi is replaced). Shows a tqdm progress bar.
    '''
    capture = cv2.VideoCapture(filename)
    # Mirror the source video's geometry and frame rate in the output.
    fps = capture.get(cv2.CAP_PROP_FPS)
    frame_w = int(capture.get(cv2.CAP_PROP_FRAME_WIDTH))
    frame_h = int(capture.get(cv2.CAP_PROP_FRAME_HEIGHT))
    codec = cv2.VideoWriter_fourcc(*'XVID')
    if os.path.exists('output.avi'):
        os.remove('output.avi')
    writer = cv2.VideoWriter('output.avi', codec, fps, (frame_w, frame_h))
    total_frames = int(capture.get(cv2.CAP_PROP_FRAME_COUNT))
    with tqdm(total=total_frames) as progress:
        while capture.isOpened():
            ok, frame = capture.read()
            if not ok:
                break
            writer.write(detect_people_on_frame(frame, confidence, distance))
            progress.update(1)
    # Release everything once the job is finished.
    capture.release()
    writer.release()
    cv2.destroyAllWindows()
# calculate the euclidean distance between 2 points
def calculate_distance(point1, point2):
    '''Euclidean distance between two 2-D points.'''
    dx = point1[0] - point2[0]
    dy = point1[1] - point2[1]
    return np.linalg.norm([dx, dy])
# convert into birds-eye view
def convert_to_bird(centers, M):
    '''Project each (x, y) center through the perspective matrix M.

    Returns a new list of [x, y] pairs in the bird's-eye coordinate frame;
    the input list is left untouched.
    '''
    projected = []
    for center in centers:
        warped = cv2.perspectiveTransform(np.float32([[center]]), M)
        projected.append(list(warped[0, 0]))
    return projected
# perform detection using bird's-eye view on frame/image
def bird_detect_people_on_frame(img, confidence, distance, width, height, model,
                                region=None, dst=None):
    """Detect people and flag social-distancing violations via a bird's-eye view.

    Args:
        img: BGR frame (the [:, :, ::-1] flip feeds the model RGB).
        confidence: minimum detection confidence to keep a box.
        distance: violation threshold, measured in bird's-eye-view pixels.
        width, height: frame dimensions, used for the default regions and warps.
        model: detector whose results expose `.xyxy[0]` boxes
            (YOLOv5-style output — confirm against the caller).
        region: 4 source points outlining the ground region; the default is
            hard-coded for a specific camera setup — TODO confirm.
        dst: 4 destination points of the rectified rectangle.

    Returns:
        (annotated frame — note it is converted to RGB at the end —,
         number of violating pairs found in this frame).
    """
    violation_count = 0  # counts offending PAIRS, not individual people
    results = model([img[:, :, ::-1]])  # Pass the frame through the model and get the boxes
    xyxy = results.xyxy[0].cpu().numpy()  # rows: x1, y1, x2, y2, confidence, class
    xyxy = xyxy[xyxy[:, 4] >= confidence]  # Filter desired confidence
    xyxy = xyxy[xyxy[:, 5] == 0]  # Class 0 == person
    xyxy = xyxy[:, :4]
    # Calculate the centers of the circles
    # They will be the centers of the bottom of the boxes (the feet),
    # which is the point that lies on the ground plane.
    centers = []
    for x1, y1, x2, y2 in xyxy:
        center = [np.mean([x1, x2]), y2]
        centers.append(center)
    # We create two transformations
    if region is None:
        # The region on the original image
        region = np.float32([[width, 0], [width - 300, 1000], [0, 550], [width - 750, 0]])
    if dst is None:
        # The rectangle we want the image to be trasnformed to
        dst = np.float32([[200, 0], [200, 600], [0, 600], [0, 0]])
    # The first transformation is straightforward: the region to the rectangle
    # as thin the example before
    M = cv2.getPerspectiveTransform(region, dst)
    # The second transformation is a trick, because, using the common transformation,
    # we can't draw circles at left of the region.
    # This way, we flip all things and draw the circle at right of the region,
    # because we can do it.
    region_flip = region*np.float32([-1, 1]) + np.float32([width, 0])
    dst_flip = dst*np.float32([-1, 1]) + np.float32([width, 0])
    M_flip = cv2.getPerspectiveTransform(region_flip, dst_flip)
    # Convert to bird
    # Now, the center of the circles will be positioned on the rectangle
    # and we can calculate the usual distance
    bird_centers = convert_to_bird(centers, M)
    # We verify if the circles colide
    # If so, they will be red
    colors = ['green']*len(bird_centers)
    for i in range(len(bird_centers)):
        for j in range(i+1, len(bird_centers)):
            dist = calculate_distance(bird_centers[i], bird_centers[j])
            if dist < distance:
                colors[i] = 'red'
                colors[j] = 'red'
                violation_count += 1
    # We draw the circles
    # Because we have two transformation, we will start with two empty
    # images ("overlay" images) to draw the circles
    # NOTE(review): the (3*width, 4*width) canvas presumably leaves room for
    # bird coordinates that fall outside the frame — confirm it is large
    # enough for all region/dst choices.
    overlay = np.zeros((3*width, 4*width, 3), np.uint8)
    overlay_flip = np.zeros((3*width, 4*width, 3), np.uint8)
    for i, bird_center in enumerate(bird_centers):
        if colors[i] == 'green':
            color = (0, 255, 0)
        else:
            color = (0, 0, 255)
        x, y = bird_center
        x = int(x)
        y = int(y)
        # The 15/2 margin presumably accounts for line thickness — TODO confirm.
        if x >= int(distance/2+15/2):
            # If it's the case the circle is inside or at right of our region
            # we can use the normal overlay image
            overlay = cv2.circle(overlay, (x, y), int(distance/2),
                                 color, 1, lineType=cv2.LINE_AA)
        else:
            # If the circle is at left of the region,
            # we draw the circle inverted on the other overlay image
            x = width - x
            overlay_flip = cv2.circle(overlay_flip, (x, y), int(distance/2),
                                      color, 1, lineType=cv2.LINE_AA)
    # We apply the inverse transformation to the overlay
    # NOTE(review): positionally, warpPerspective's 4th argument is `dst`,
    # so cv2.INTER_NEAREST is being passed in the dst slot and
    # cv2.WARP_INVERSE_MAP alone as flags — verify the intent was
    # flags=cv2.INTER_NEAREST | cv2.WARP_INVERSE_MAP.
    overlay = cv2.warpPerspective(overlay, M, (width, height),
                                  cv2.INTER_NEAREST, cv2.WARP_INVERSE_MAP)
    # We apply the inverse of the other transformation to the other overlay
    overlay_flip = cv2.warpPerspective(overlay_flip, M_flip, (width, height),
                                       cv2.INTER_NEAREST, cv2.WARP_INVERSE_MAP)
    # Now we "unflip" what the second overlay
    overlay_flip = cv2.flip(overlay_flip, 1)
    # We add all images
    img = cv2.addWeighted(img, 1, overlay, 1, 0)
    img = cv2.addWeighted(img, 1, overlay_flip, 1, 0)
    text = "Social Distancing Violations: {}".format(violation_count)
    cv2.putText(img, text, (40, height - 75),
                cv2.FONT_HERSHEY_DUPLEX, 2.5, (0, 0, 255), 3)
    # Swap channel order: the returned frame is RGB, not OpenCV's BGR.
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    return img, violation_count
# perform detection using bird's-eye view on video
def bird_detect_people_on_video(filename, confidence, distance, model):
    """Run bird's-eye social-distancing detection over a whole video.

    Annotates every frame of `filename` with bird_detect_people_on_frame
    and writes the result to bird_output.avi (replacing any existing one).

    Returns:
        A list with one violation count (number of offending pairs) per frame.
    """
    total_count = []
    # Capture video
    cap = cv2.VideoCapture(filename)
    # Get video properties so the output matches the source geometry/rate.
    fps = cap.get(cv2.CAP_PROP_FPS)
    width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
    # Define the codec and create VideoWriter object
    fourcc = cv2.VideoWriter_fourcc(*'XVID')
    if os.path.exists('bird_output.avi'):
        os.remove('bird_output.avi')
    out = cv2.VideoWriter('bird_output.avi', fourcc, fps, (width, height))
    # Iterate through frames
    vidlen = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
    with tqdm(total=vidlen) as pbar:
        while cap.isOpened():
            # Read frame
            ret, frame = cap.read()
            if ret == True:
                # Detect people as a bird
                frame, violation_count = bird_detect_people_on_frame(frame, confidence, distance,
                                                                     width, height, model)
                # bird_detect_people_on_frame returns RGB (it ends with a
                # BGR2RGB swap); this second swap restores BGR channel
                # order, which is what VideoWriter expects.
                frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
                total_count.append(violation_count)
                # Write frame to new video
                out.write(frame)
                pbar.update(1)
            else:
                break
    # Release everything if job is finished
    cap.release()
    out.release()
    cv2.destroyAllWindows()
    return total_count
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.