code stringlengths 3 1.05M | repo_name stringlengths 5 104 | path stringlengths 4 251 | language stringclasses 1 value | license stringclasses 15 values | size int64 3 1.05M |
|---|---|---|---|---|---|
#!/usr/bin/python3
# coding=utf-8
# License: GPL v3
# Author: Aleksey Butylkus, https://vk.com/butpub, https://www.youtube.com/user/butylkus
import os, sys, time
import Adafruit_DHT as dht
from datetime import datetime

# DHT11 sensor wired to GPIO pin 14.
sensor = 11
pin = 14

# Poll the sensor forever; print "dd.mm HH:MM:SS<TAB><temp>C<TAB><hum>%"
# for every successful reading, then wait 3 seconds between polls.
while True:
    humidity, temperature = dht.read_retry(sensor, pin)
    if humidity is not None and temperature is not None:
        stamp = datetime.strftime(datetime.now(), "%d.%m %H:%M:%S")
        print(stamp + "\t" + str(temperature) + "C\t" + str(humidity) + "%")
    time.sleep(3)
| Butylkus/BuRasPi | 05-dht11.py | Python | gpl-3.0 | 621 |
#!/usr/bin/env python
# Plot a graph of Data which is comming in on the fly
# uses pylab
# Author: Norbert Feurle
# Date: 12.1.2012
# License: if you get any profit from this then please share it with me
import pylab
from pylab import *
# X axis: sample indices 0..99; Y axis starts as 100 zeros.
xAchse=pylab.arange(0,100,1)
yAchse=pylab.array([0]*100)
# Figure/axes for the scrolling realtime waveform display.
fig = pylab.figure(1)
ax = fig.add_subplot(111)
ax.grid(True)
ax.set_title("Realtime Waveform Plot")
ax.set_xlabel("Time")
ax.set_ylabel("Amplitude")
ax.axis([0,100,-1.5,1.5])
line1=ax.plot(xAchse,yAchse,'-')
manager = pylab.get_current_fig_manager()
# Growing sample buffer, seeded with 100 zeros; only the last 100 are drawn.
values=[]
values = [0 for x in range(100)]
# Oscillator parameters: sampling period Ta, sampling rate fa, frequency fcos.
Ta=0.01
fa=1.0/Ta
fcos=3.5
# Recurrence coefficient for the cosine generator used in SinwaveformGenerator.
Konstant=cos(2*pi*fcos*Ta)
T0=1.0
T1=Konstant
def SinwaveformGenerator(arg):
    """Append one sample to the global buffer.

    Uses the cosine recurrence x[n+1] = 2*c*x[n] - x[n-1] (c = Konstant);
    for the last ~30 samples of every 100-sample window a random value in
    [-1, 1) is appended instead.  *arg* is unused (timer-callback signature).
    """
    global values, T1, Konstant, T0
    #ohmegaCos=arccos(T1)/Ta
    #print "fcos=", ohmegaCos/(2*pi), "Hz"
    nxt = ((Konstant * T1) * 2) - T0
    if len(values) % 100 > 70:
        sample = random() * 2 - 1
    else:
        sample = nxt
    values.append(sample)
    T0, T1 = T1, nxt
def RealtimePloter(arg):
    """Redraw the plot showing the most recent 100 samples of *values*.

    *arg* is unused (timer-callback signature).
    """
    global values
    window = pylab.array(values[-100:])
    xs = pylab.arange(len(values) - 100, len(values), 1)
    line1[0].set_data(xs, window)
    ax.axis([xs.min(), xs.max(), -1.5, 1.5])
    manager.canvas.draw()
    #manager.show()
# Two GUI timers, both firing every 50 ms: one redraws the plot, the
# other appends a fresh sample to the buffer.
timer = fig.canvas.new_timer(interval=50)
timer.add_callback(RealtimePloter, ())
timer2 = fig.canvas.new_timer(interval=50)
timer2.add_callback(SinwaveformGenerator, ())
timer.start()
timer2.start()
# Enter the GUI main loop (blocks until the window is closed).
pylab.show()
from django.db import models
# Create your models here.
class Currency(models.Model):
    """A currency; at most one instance is expected to be flagged as main."""

    name = models.CharField(verbose_name='Название', max_length=255)
    short_name = models.CharField(verbose_name='Сокращение', max_length=255)
    is_main = models.BooleanField(verbose_name='Основная валюта', default=False)

    def get_exchange_rate(self, date):
        """Return a queryset holding the most recent rate dated <= *date*
        (at most one row), via the 'exchange_rate' reverse relation."""
        return self.exchange_rate.filter(date__lte=date).order_by('-date')[:1]

    @staticmethod
    def get_main_currency():
        """Return the currency flagged is_main, or None when absent."""
        try:
            return Currency.objects.get(is_main=True)
        except Currency.DoesNotExist:
            return None

    @staticmethod
    def get_main_currency_id():
        """Return the primary key of the main currency, or None when absent."""
        main = Currency.get_main_currency()
        return main.id if main is not None else None

    def __str__(self):
        return self.name
class ExchangeRate(models.Model):
    """Rate quote for a currency at a point in time.

    Read by Currency.get_exchange_rate through the 'exchange_rate'
    reverse relation.
    """
    currency = models.ForeignKey(Currency, verbose_name='Валюта', related_name='exchange_rate')
    # Up to 10 digits total, 3 decimal places.
    rate = models.DecimalField(verbose_name='Курс', decimal_places=3, max_digits=10)
    # auto_now_add: stamped once when the row is created.
    date = models.DateTimeField(verbose_name='Дата', auto_now_add=True)
| juntatalor/qexx | payment/models.py | Python | mit | 1,246 |
"""Management utilities."""
from fabric.contrib.console import confirm
from fabric.api import abort, env, local, settings, task, sudo, cd, lcd, put, run, prefix, get
from fabric.contrib.files import exists
from etapi.settings import ProdConfig
########## CONFIG
# Paths on the local machine (fab is run from the project root).
local_app_dir = '.'
local_config_dir = './config'
# Paths on the remote Raspberry Pi host.
remote_app_home_dir = '/home/www'
remote_git_dir = '/home/git'
remote_app_dir = remote_app_home_dir + '/etapi'
remote_db_dir = '/var/apps/etapi/db'
remote_nginx_dir = '/etc/nginx/sites-enabled'
remote_supervisor_dir = '/etc/supervisor/conf.d'
# Fabric connection settings.
env.user = 'pi'
env.hosts = ['etapi.duckdns.org']
# Command that activates the remote virtualenv; used with prefix().
env.activate = "source %s/%s" % (remote_app_dir, "env/bin/activate")
########## END CONFIG
########## HELPERS
def package_installed(pkg_name):
    """Return True when dpkg reports *pkg_name* as installed.

    ref: http:superuser.com/questions/427318/#comment490784_427339
    """
    cmd = 'dpkg-query -l "%s" | grep -q ^.i' % (pkg_name)
    # warn_only: a non-zero exit (package missing) must not abort the run.
    with settings(warn_only=True):
        return sudo(cmd).succeeded
def yes_install(pkg_name):
    """Install *pkg_name* non-interactively via apt-get.

    ref: http://stackoverflow.com/a/10439058/1093087
    """
    command = 'apt-get --force-yes --yes install %s' % (pkg_name)
    sudo(command)
def install_node():
    """Install Node.js v0.12.0 (ARM / Raspberry Pi build) under /opt/node.

    Downloads and unpacks the tarball on first run, then (re)points
    /usr/local/bin/{node,npm} at the /opt/node binaries.
    """
    if not exists("/opt/node"):
        sudo("mkdir -p /opt/node")
        with cd('/tmp'):
            sudo("wget https://s3-eu-west-1.amazonaws.com/conoroneill.net/wp-content/uploads/2015/02/node-v0.12.0-linux-arm-pi.tar.gz")
            sudo("tar xzf node-v0.12.0-linux-arm-pi.tar.gz")
            sudo("cp -r node-v0.12.0-linux-arm-pi/* /opt/node")
            sudo("rm -rf node-v0.12.0-linux-arm-pi*")
    # Remove stale symlinks before re-linking (ln -s fails on existing links).
    if exists("/usr/local/bin/node"):
        sudo("rm /usr/local/bin/node")
    if exists("/usr/local/bin/npm"):
        sudo("rm /usr/local/bin/npm")
    sudo("ln -s /opt/node/bin/node /usr/local/bin/node")
    sudo("ln -s /opt/node/bin/npm /usr/local/bin/npm")
def add_remote():
    """Register the production Git repository as the 'production' remote."""
    command = 'git remote add production pi@192.168.0.120:/home/git/etapi.git'
    with lcd(local_app_dir):
        local(command)
def create_db():
    """Initialize the remote database (no-op when etapi.db already exists)."""
    with cd(remote_app_dir), prefix(env.activate):
        if not exists(remote_db_dir + "/etapi.db"):
            run('ETAPI_ENV=prod python manage.py createdb')
def drop_db():
    """Drop the remote database."""
    with cd(remote_app_dir), prefix(env.activate):
        run('ETAPI_ENV=prod python manage.py dropdb')
def push_changes_to_production():
    """Push the local master branch to the 'production' Git remote."""
    push_cmd = 'git push production master'
    with lcd(local_app_dir):
        local(push_cmd)
def install_pip_requirements():
    """Install Python dependencies inside the remote virtualenv."""
    with cd(remote_app_dir), prefix(env.activate):
        run('pip install -r requirements.txt')
def install_npm_packages():
    """Install Node.js packages declared in the remote app's package.json."""
    with cd(remote_app_dir):
        run('npm install')
def install_bower_packages():
    """Install front-end packages via the locally-installed bower binary."""
    with cd(remote_app_dir):
        run('./node_modules/.bin/bower install')
def make_migrations():
    """Apply pending database migrations on the remote host."""
    with cd(remote_app_dir), prefix(env.activate):
        run('ETAPI_ENV=prod python manage.py db upgrade')
def run_app():
    """ Run the app! """
    # Start the supervised 'etapi' process on the remote host.
    with cd(remote_app_dir):
        sudo('supervisorctl start etapi')
def restart_app():
    """Restart the supervised 'etapi' process on the remote host."""
    with cd(remote_app_dir):
        sudo('supervisorctl restart etapi')
def copydb():
    """Get the production database"""
    # Downloads the remote SQLite file (path from ProdConfig) to ./dev.db
    # for local development.
    get(ProdConfig.DB_PATH, 'dev.db')
########## END HELPERS
########## BOOTSTRAP
# Debian packages that must be present on the host before the app can run;
# install_requirements() installs any that are missing.
PACKAGES = (
    'python',
    'python-dev',
    'python-pip',
    'python-virtualenv',
    'nginx',
    'gunicorn',
    'supervisor',
    'git',
)
def install_requirements():
    """ Install required packages. """
    sudo('apt-get update')
    for package in PACKAGES:
        # Skip packages dpkg already knows about.
        if package_installed(package):
            continue
        yes_install(package)
def create_project_dir():
    """
    1. Create required project directories and files
    2. Create a virtualenv
    """
    if not exists(remote_app_home_dir):
        sudo('mkdir ' + remote_app_home_dir)
    if not exists(remote_app_dir):
        sudo('mkdir ' + remote_app_dir)
    if not exists(remote_app_dir + '/logs'):
        sudo('mkdir ' + remote_app_dir + '/logs')
    if not exists(remote_db_dir):
        sudo('mkdir -p ' + remote_db_dir)
        sudo('chown pi:pi ' + remote_db_dir + ' -R')
    with cd(remote_app_dir):
        if not exists(remote_app_dir + '/env'):
            sudo('virtualenv env')
    # Change permissions
    sudo('chown pi:pi ' + remote_app_home_dir + ' -R')
def configure_nginx():
    """
    1. Remove default nginx config file
    2. Create new config file
    3. Setup new symbolic link
    4. Copy local config to remote config
    5. Restart nginx
    """
    sudo('/etc/init.d/nginx start')
    if exists('/etc/nginx/sites-enabled/default'):
        sudo('rm /etc/nginx/sites-enabled/default')
    if not exists('/etc/nginx/sites-enabled/etapi'):
        sudo('ln -s /etc/nginx/sites-available/etapi' +
             ' /etc/nginx/sites-enabled/etapi')
    with lcd(local_config_dir), cd(remote_nginx_dir):
        put('./etapi_nginx.conf', './etapi', use_sudo=True)
    sudo('/etc/init.d/nginx restart')
def configure_supervisor():
    """
    1. Create new supervisor config file
    2. Copy local config to remote config
    3. Register new command
    """
    # Nothing to do when the app is already registered with supervisor.
    if not exists('/etc/supervisor/conf.d/etapi.conf'):
        with lcd(local_config_dir), cd(remote_supervisor_dir):
            put('./etapi.conf', './', use_sudo=True)
            sudo('supervisorctl reread')
            sudo('supervisorctl update')
def configure_git():
    """
    1. Setup bare Git repo
    2. Create post-receive hook
    """
    # NOTE(review): original indentation was lost; nesting below is a
    # reconstruction -- verify against the upstream fabfile.
    if exists(remote_git_dir) is False:
        sudo('mkdir ' + remote_git_dir)
        with cd(remote_git_dir):
            sudo('mkdir etapi.git')
            with cd('etapi.git'):
                sudo('git init --bare')
                with lcd(local_config_dir):
                    with cd('hooks'):
                        # Install the deploy hook into etapi.git/hooks.
                        put('./post-receive', './', use_sudo=True)
                        sudo('chmod +x post-receive')
        # Change permissions
        sudo('chown pi:pi ' + remote_git_dir + ' -R')
def bootstrap():
    """Provision a fresh host end-to-end.

    Order matters: system packages, directories and service configs are
    set up before code is pushed and Python dependencies installed.
    """
    install_requirements()
    install_node()
    # TODO: install bower
    create_project_dir()
    configure_nginx()
    configure_supervisor()
    configure_git()
    push_changes_to_production()
    install_pip_requirements()
    create_db()
########## END BOOSTRAP
########## DEPLOYMENT
def pack():
    """Create a new source distribution tarball (dist/*.tar.gz)."""
    # create a new source distribution as tarball
    local('python setup.py sdist --formats=gztar', capture=False)
def deploy():
    """
    1. Push changes to production
    2. Install dependencies
    3. Make migrations
    4. Restart gunicorn via supervisor
    """
    local("echo ------------------------")
    local("echo DEPLOYING APP TO PRODUCTION")
    local("echo Push changes to production")
    push_changes_to_production()
    # Install Python requirements
    local("echo Installing Python requirements")
    install_pip_requirements()
    # Install NPM packages
    local('echo Installing NPM packages')
    # NOTE(review): the npm step is currently disabled.
    #install_npm_packages()
    # Install Bower packages
    local('echo Installing Bower packages')
    install_bower_packages()
    # Make migrations
    local('echo Make migrations')
    make_migrations()
    # Restart app
    local('echo Restarting application')
    restart_app()
    local("echo DONE DEPLOYING APP TO PRODUCTION")
    local("echo ------------------------")
########## END DEPLOYMENT
########## MANAGEMENT
def status():
    """ Is our app live? """
    # Show supervisor's view of all managed processes on the remote host.
    sudo('supervisorctl status')
########## END MANAGEMENT
| hypebeast/etapi | fabfile.py | Python | bsd-3-clause | 7,752 |
#!/usr/bin/env python
"""
Copyright 2015 Scott Wales
author: Scott Wales <scott.wales@unimelb.edu.au>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from ftools import Parser, Format
def run_format(test):
parser = Parser().parse_string(test)
return str(Format(parser))
def test_basic():
test = """
PROGRAM foo
END PROGRAM
"""
expect = test
assert run_format(test) == expect
| ScottWales/ftools | test/format/test_base.py | Python | apache-2.0 | 892 |
import os
import os.path as op
from flask import Flask
from flask.ext.sqlalchemy import SQLAlchemy
from flask.ext import admin
from flask.ext.admin.contrib.sqla import ModelView
# Create application
app = Flask(__name__)
# Create dummy secrey key so we can use sessions
app.config['SECRET_KEY'] = '123456790'
# Create in-memory database
app.config['DATABASE_FILE'] = 'sample_db.sqlite'
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///' + app.config['DATABASE_FILE']
app.config['SQLALCHEMY_ECHO'] = True
db = SQLAlchemy(app)
# Models
class User(db.Model):
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.Unicode(64))
email = db.Column(db.Unicode(64))
def __unicode__(self):
return self.name
class Page(db.Model):
id = db.Column(db.Integer, primary_key=True)
title = db.Column(db.Unicode(64))
content = db.Column(db.UnicodeText)
def __unicode__(self):
return self.name
# Customized admin interface
class CustomView(ModelView):
list_template = 'list.html'
create_template = 'create.html'
edit_template = 'edit.html'
class UserAdmin(CustomView):
column_searchable_list = ('name',)
column_filters = ('name', 'email')
# Flask views
@app.route('/')
def index():
return '<a href="/admin/">Click me to get to Admin!</a>'
# Create admin with custom base template
admin = admin.Admin(app, base_template='layout.html')
# Add views
admin.add_view(UserAdmin(User, db.session))
admin.add_view(CustomView(Page, db.session))
def build_sample_db():
"""
Populate a small db with some example entries.
"""
db.drop_all()
db.create_all()
first_names = [
'Harry', 'Amelia', 'Oliver', 'Jack', 'Isabella', 'Charlie','Sophie', 'Mia',
'Jacob', 'Thomas', 'Emily', 'Lily', 'Ava', 'Isla', 'Alfie', 'Olivia', 'Jessica',
'Riley', 'William', 'James', 'Geoffrey', 'Lisa', 'Benjamin', 'Stacey', 'Lucy'
]
last_names = [
'Brown', 'Smith', 'Patel', 'Jones', 'Williams', 'Johnson', 'Taylor', 'Thomas',
'Roberts', 'Khan', 'Lewis', 'Jackson', 'Clarke', 'James', 'Phillips', 'Wilson',
'Ali', 'Mason', 'Mitchell', 'Rose', 'Davis', 'Davies', 'Rodriguez', 'Cox', 'Alexander'
]
for i in range(len(first_names)):
user = User()
user.name = first_names[i] + " " + last_names[i]
user.email = first_names[i].lower() + "@example.com"
db.session.add(user)
sample_text = [
{
'title': "de Finibus Bonorum et Malorum - Part I",
'content': "Lorem ipsum dolor sit amet, consectetur adipisicing elit, sed do eiusmod tempor \
incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud \
exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure \
dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. \
Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt \
mollit anim id est laborum."
},
{
'title': "de Finibus Bonorum et Malorum - Part II",
'content': "Sed ut perspiciatis unde omnis iste natus error sit voluptatem accusantium doloremque \
laudantium, totam rem aperiam, eaque ipsa quae ab illo inventore veritatis et quasi architecto \
beatae vitae dicta sunt explicabo. Nemo enim ipsam voluptatem quia voluptas sit aspernatur \
aut odit aut fugit, sed quia consequuntur magni dolores eos qui ratione voluptatem sequi \
nesciunt. Neque porro quisquam est, qui dolorem ipsum quia dolor sit amet, consectetur, \
adipisci velit, sed quia non numquam eius modi tempora incidunt ut labore et dolore magnam \
aliquam quaerat voluptatem. Ut enim ad minima veniam, quis nostrum exercitationem ullam \
corporis suscipit laboriosam, nisi ut aliquid ex ea commodi consequatur? Quis autem vel eum \
iure reprehenderit qui in ea voluptate velit esse quam nihil molestiae consequatur, vel illum \
qui dolorem eum fugiat quo voluptas nulla pariatur?"
},
{
'title': "de Finibus Bonorum et Malorum - Part III",
'content': "At vero eos et accusamus et iusto odio dignissimos ducimus qui blanditiis praesentium \
voluptatum deleniti atque corrupti quos dolores et quas molestias excepturi sint occaecati \
cupiditate non provident, similique sunt in culpa qui officia deserunt mollitia animi, id \
est laborum et dolorum fuga. Et harum quidem rerum facilis est et expedita distinctio. Nam \
libero tempore, cum soluta nobis est eligendi optio cumque nihil impedit quo minus id quod \
maxime placeat facere possimus, omnis voluptas assumenda est, omnis dolor repellendus. \
Temporibus autem quibusdam et aut officiis debitis aut rerum necessitatibus saepe eveniet \
ut et voluptates repudiandae sint et molestiae non recusandae. Itaque earum rerum hic tenetur \
a sapiente delectus, ut aut reiciendis voluptatibus maiores alias consequatur aut perferendis \
doloribus asperiores repellat."
}
]
for entry in sample_text:
page = Page()
page.title = entry['title']
page.content = entry['content']
db.session.add(page)
db.session.commit()
return
if __name__ == '__main__':
# Build a sample db on the fly, if one does not exist yet.
app_dir = op.realpath(os.path.dirname(__file__))
database_path = op.join(app_dir, app.config['DATABASE_FILE'])
if not os.path.exists(database_path):
build_sample_db()
# Start app
app.run(debug=True)
| stevehof/location-ninja | lib/examples/layout/simple.py | Python | gpl-3.0 | 6,066 |
#!/usr/bin/env python
from generator.actions import Actions
import random
import struct
import string
import sys
class carstate():
    """Local mirror of the challenge binary's car state.

    end() packs this into the same binary layout the CB reports so the
    poller can verify the final state.  Python 2 only: struct.pack
    returns str and bt_metadata is a str packed as "32s".
    """
    def __init__(self):
        # Gauges
        self.speed = 0
        self.fuel = 65535
        self.rpm = 0
        # Infotainment
        self.info_state = 0
        self.info_volume = 0
        # Power / drivetrain (TIACA constants; resolved at call time)
        self.car_power = 0
        self.gear = TIACA.GEARS_PARK
        self.gear_state = TIACA.GEARS_UNCLUTCH
        # Body
        self.window_state = TIACA.WINDOWS_UP
        self.moonroof_state = TIACA.MOONROOF_CLOSED
        self.trunk_state = TIACA.TRUNK_CLOSED
        self.alarm_state = TIACA.ALARM_UNSET
        self.trunk_load = 0
        # Set by TIACA.alarmTrunk after the stack-smash message is read;
        # end() then skips the final state comparison.
        self.boom = False
        self.bt_metadata = ""
    def setPowerState(self, state):
        # Written unconditionally; callers do the validity gating.
        self.car_power = state
    def __str__(self):
        # Layout "<BHhBBBBBBBBBI32s" must match the CB's state struct.
        return struct.pack("<BHhBBBBBBBBBI32s", self.speed, self.fuel, self.rpm, self.info_state, self.info_volume, self.car_power,
            self.gear, self.gear_state, self.window_state, self.moonroof_state, self.trunk_state, self.alarm_state, self.trunk_load, self.bt_metadata)
class TIACA(Actions):
    """CGC poller for the car-simulator CB.

    Each action writes a command over the Actions transport (self.write,
    from generator.actions) and mirrors the expected effect in
    self.carstate; end() reads the CB's state back and compares it
    against the mirror.

    NOTE(review): the original file lost its indentation; all nesting
    below is a reconstruction -- verify against the upstream poller.
    """
    # Gearbox commands
    GEARS_ACCEL = 0x70
    GEARS_SLOWDOWN = 0x71
    GEARS_SHIFTUP = 0x72
    GEARS_SHIFTDOWN = 0x73
    GEARS_CLUTCH = 0x74
    GEARS_UNCLUTCH = 0x75
    # Gear positions (ordered values, so < / > comparisons work)
    GEARS_PARK = 0x10
    GEARS_FIRST = 0x20
    GEARS_SECOND = 0x30
    GEARS_THIRD = 0x40
    GEARS_FOURTH = 0x50
    GEARS_FIFTH = 0x60
    # Ignition states
    CAR_OFF = 0x10
    CAR_ON = 0x20
    CAR_ACC = 0x30
    # Body controls
    WINDOWS_DOWN = 0xf0
    WINDOWS_UP = 0xe0
    MOONROOF_OPEN = 0xd0
    MOONROOF_CLOSED = 0xc0
    TRUNK_OPEN = 0xb0
    TRUNK_CLOSED = 0xa0
    TRUNK_LOAD = 0x90
    TRUNK_UNLOAD = 0x80
    # Infotainment sources and volume commands
    INFO_XM = 0x05
    INFO_AM = 0x01
    INFO_FM = 0x02
    INFO_AUX = 0x03
    INFO_BT = 0x04
    INFO_VOLUP = 0x81
    INFO_VOLDOWN = 0x91
    INFO_OFF = 0xff
    # Alarm
    ALARM_UNSET = 0xf9
    ALARM_SET = 0xf8
    # Misc protocol bytes
    CHANGE_STATE = 0xcc
    CAR_END = 0xdd
    # struct format-code scratch notes (kept from the original):
    #b
    #B
    #h
    #H
    #i
    #I unsigned
    def start(self):
        # Fresh mirror of the CB's initial state.
        self.carstate = carstate()
        # self.carstate.
        pass
    def carOff(self):
        # Ignition off is only honored while parked.
        self.write("%c\n" % self.CAR_OFF)
        if(self.carstate.gear == self.GEARS_PARK):
            self.carstate.setPowerState(self.CAR_OFF)
    def carOn(self):
        self.write("%c\n" % self.CAR_ON)
        self.carstate.setPowerState(self.CAR_ON)
    def carAcc(self):
        # Accessory mode: reachable from OFF, or from ON while parked.
        self.write("%c\n" % self.CAR_ACC)
        if(self.carstate.car_power == self.CAR_OFF or (self.carstate.car_power == self.CAR_ON and self.carstate.gear == self.GEARS_PARK)):
            self.carstate.setPowerState(self.CAR_ACC)
    def end(self):
        # After the stack smash there is no state left to verify.
        if(self.carstate.boom == True):
            return
        self.write("%c\n" % self.CAR_END)
        self.read(length=len(str(self.carstate)), expect=str(self.carstate))
    def windowsUp(self):
        self.write("%c\n" % self.WINDOWS_UP)
        self.carstate.window_state = self.WINDOWS_UP
    def windowsDown(self):
        self.write("%c\n" % self.WINDOWS_DOWN)
        self.carstate.window_state = self.WINDOWS_DOWN
    def changeInfo(self):
        # Pick a random source; the CB only applies it above 3rd gear
        # and over 30 speed (mirrored here).
        state = random.choice([self.INFO_OFF, self.INFO_AM, self.INFO_FM, self.INFO_XM, self.INFO_AUX])
        self.write("%c\n" % state)
        if(self.carstate.gear > self.GEARS_THIRD and self.carstate.speed > 30):
            self.carstate.info_state = state
            if(state == self.INFO_OFF):
                self.carstate.info_volume = 0
    def btInfo(self):
        # 31 random ASCII bytes of Bluetooth metadata.
        metadata = "".join(random.choice(string.ascii_letters + string.digits) for _ in range(31))
        tosend = struct.pack("<BB31s", int(self.INFO_BT), 31, metadata)
        self.write(tosend + "\n")
        if(self.carstate.gear > self.GEARS_THIRD and self.carstate.speed > 30):
            self.carstate.bt_metadata = metadata
            self.carstate.info_state = self.INFO_BT
    def carPowered(self):
        # Internal helper: updates the mirrored fuel/speed/rpm model;
        # never sends anything.
        # NOTE(review): nesting of the info-state branch reconstructed.
        if(self.carstate.gear != self.GEARS_PARK):
            amount = (self.carstate.rpm / 100) + self.carstate.speed + (self.carstate.trunk_load / 100)
            self.carstate.fuel -= amount
            if(self.carstate.window_state == self.WINDOWS_DOWN):
                self.carstate.fuel -= 5;
            if(self.carstate.moonroof_state == self.MOONROOF_OPEN):
                self.carstate.fuel -= 2;
            # NOTE(review): this condition is always true as written
            # ("!= OFF or != AM"); kept as-is to match the CB's behavior.
            if(self.carstate.info_state != self.INFO_OFF or self.carstate.info_state != self.INFO_AM):
                if(self.carstate.info_volume > 20):
                    self.carstate.speed += 1
                    self.carstate.rpm += 25
    def openTrunk(self):
        self.write("%c\n" % self.TRUNK_OPEN)
        self.carstate.trunk_state = self.TRUNK_OPEN
    def closeTrunk(self):
        self.write("%c\n" % self.TRUNK_CLOSED)
        self.carstate.trunk_state = self.TRUNK_CLOSED
    def loadTrunk(self):
        # Load a random 11..99 weight into the trunk.
        weight = random.choice(range(11,100))
        tosend = struct.pack("<BB", int(self.TRUNK_LOAD), weight)
        self.write(tosend + "\n")
        self.carstate.trunk_load += weight
    def unloadTrunk(self):
        # Unload a random weight; the mirrored load clamps at zero.
        weight = random.choice(range(11,100))
        tosend = struct.pack("<BB", int(self.TRUNK_UNLOAD), weight)
        self.write(tosend + "\n")
        if(self.carstate.trunk_load < weight):
            self.carstate.trunk_load = 0
        else:
            self.carstate.trunk_load -= weight
    def setAlarm(self):
        self.write("%c\n" % self.ALARM_SET)
        self.carstate.alarm_state = self.ALARM_SET
    def unsetAlarm(self):
        self.write("%c\n" % self.ALARM_UNSET)
        self.carstate.alarm_state = self.ALARM_UNSET
    def changeVolume(self):
        # Volume changes are only valid while a source is playing;
        # otherwise send filler bytes.
        if(self.carstate.info_state != self.INFO_OFF):
            adjustDirection = random.choice([self.INFO_VOLUP, self.INFO_VOLDOWN])
            amount = random.choice(range(10))
            tosend = struct.pack("<BB", int(adjustDirection), amount)
            self.write(tosend + "\n")
            if(adjustDirection == self.INFO_VOLDOWN):
                if(self.carstate.info_volume < amount):
                    self.carstate.info_volume = 0
                else:
                    self.carstate.info_volume -= amount
            else:
                self.carstate.info_volume += amount
        else:
            self.write("???\n")
    def alarmTrunk(self):
        # Alarm+trunk combo triggers the CB's stack-smash detector; mark
        # boom so end() skips the state comparison.
        self.write("%c%c\n" % (self.ALARM_SET, self.TRUNK_OPEN))
        self.read(expect="***** STACK SMASHING DETECTED! *****", delim="\n")
        self.carstate.boom = True
    def shiftUp(self):
        # Clutch in, rev to >= 800 rpm, shift, clutch out, then accelerate
        # until speed matches the (pre-shift) gear.
        # NOTE(review): attachment of the else-branch reconstructed.
        tosend = ""
        if(self.carstate.rpm > 500 and self.carstate.rpm < 6000):
            if(self.carstate.gear < self.GEARS_FIFTH):
                tosend += "%c" % self.GEARS_CLUTCH
                while(self.carstate.rpm < 800):
                    tosend+= "%c" % self.GEARS_ACCEL
                    self.carstate.rpm += 50
                tosend += "%c%c" % (self.GEARS_SHIFTUP, self.GEARS_UNCLUTCH)
                while(self.carstate.speed < ((self.carstate.gear / 0x16) * 10)):
                    tosend+= "%c" % self.GEARS_ACCEL
                    self.carstate.speed+=1
                tosend+="\n"
                self.write(tosend)
                self.carstate.rpm -= 750
                self.carstate.gear += 0x10
                self.carstate.gear_state = self.GEARS_UNCLUTCH
        else:
            self.write("@@@@@@@@@@@\n")
    def shiftDown(self):
        # Clutch in, slow to <= 5250 rpm, shift down, clutch out.
        if(self.carstate.gear > self.GEARS_PARK):
            self.write("%c" % self.GEARS_CLUTCH)
            while(self.carstate.rpm > 5250):
                self.write("%c" % self.GEARS_SLOWDOWN)
                self.carstate.rpm-=25
            self.write("%c%c\n" % (self.GEARS_SHIFTDOWN, self.GEARS_UNCLUTCH))
            self.carstate.rpm+= 750
            self.carstate.gear -= 0x10
            self.carstate.gear_state = self.GEARS_UNCLUTCH
    def revUp(self):
        # Accelerate until rpm reaches 550; engaged clutch revs faster
        # (no speed gain), released clutch also bumps speed.
        tosend = "?"
        while(self.carstate.rpm < 550):
            tosend+="%c" % self.GEARS_ACCEL
            if(self.carstate.gear_state == self.GEARS_UNCLUTCH):
                self.carstate.rpm+=25
                self.carstate.speed+=1
            else:
                self.carstate.rpm+=50
        self.write(tosend + "\n")
    def speedUp(self):
        # Single accelerate tick; effect depends on clutch state.
        self.write("%c\n" % self.GEARS_ACCEL)
        if(self.carstate.gear_state == self.GEARS_UNCLUTCH):
            self.carstate.speed+=1
            self.carstate.rpm+=25
        else:
            self.carstate.rpm+=50
| f0rki/cb-multios | original-challenges/TIACA/poller/for-testing/machine.py | Python | mit | 7,012 |
import functools
import os
import threading
from collections import defaultdict
from funcy import cached_property, wrap_prop
from dvc.progress import DEFAULT_CALLBACK
from dvc.scheme import Schemes
from .fsspec_wrapper import ObjectFSWrapper
# Default AWS CLI/SDK config location (~/.aws/config); overridable via the
# AWS_CONFIG_FILE environment variable (see _load_aws_config_file).
_AWS_CONFIG_PATH = os.path.join(os.path.expanduser("~"), ".aws", "config")
# pylint:disable=abstract-method
class BaseS3FileSystem(ObjectFSWrapper):
    """s3fs-backed S3 filesystem: maps DVC remote config onto s3fs/boto3
    arguments and lazily constructs the underlying s3fs filesystem."""

    scheme = Schemes.S3
    REQUIRES = {"s3fs": "s3fs", "boto3": "boto3"}
    PARAM_CHECKSUM = "etag"

    # DVC config option -> boto3 ExtraArgs key for explicit ACL grants.
    _GRANTS = {
        "grant_full_control": "GrantFullControl",
        "grant_read": "GrantRead",
        "grant_read_acp": "GrantReadACP",
        "grant_write_acp": "GrantWriteACP",
    }

    # AWS CLI "s3" config keys -> boto3 TransferConfig argument names.
    _TRANSFER_CONFIG_ALIASES = {
        "max_queue_size": "max_io_queue",
        "max_concurrent_requests": "max_concurrency",
        "multipart_threshold": "multipart_threshold",
        "multipart_chunksize": "multipart_chunksize",
    }

    def _split_s3_config(self, s3_config):
        """Splits the general s3 config into 2 different config
        objects, one for transfer.TransferConfig and other is the
        general session config"""
        from boto3.s3.transfer import TransferConfig

        from dvc.utils import conversions

        config, transfer_config = {}, {}
        for key, value in s3_config.items():
            if key in self._TRANSFER_CONFIG_ALIASES:
                if key in {"multipart_chunksize", "multipart_threshold"}:
                    # cast human readable sizes (like 24MiB) to integers
                    value = conversions.human_readable_to_bytes(value)
                else:
                    value = int(value)
                transfer_config[self._TRANSFER_CONFIG_ALIASES[key]] = value
            else:
                config[key] = value

        # pylint: disable=attribute-defined-outside-init
        self._transfer_config = TransferConfig(**transfer_config)
        return config

    def _load_aws_config_file(self, profile):
        """Return extra session config for *profile* from the AWS config
        file (AWS_CONFIG_FILE or ~/.aws/config), or {} when absent.

        Also resets ``_transfer_config``; ``_split_s3_config`` re-creates
        it from the profile's "s3" section when one exists.
        """
        from botocore.configloader import load_config

        # pylint: disable=attribute-defined-outside-init
        self._transfer_config = None
        config_path = os.environ.get("AWS_CONFIG_FILE", _AWS_CONFIG_PATH)
        if not os.path.exists(config_path):
            return {}

        config = load_config(config_path)
        profile_config = config["profiles"].get(profile or "default")
        if not profile_config:
            return {}

        s3_config = profile_config.get("s3", {})
        return self._split_s3_config(s3_config)

    def _prepare_credentials(self, **config):
        """Translate DVC remote config options into s3fs constructor
        arguments, returned as a nested dict with None values pruned.

        Raises dvc.config.ConfigError when `acl` is combined with any
        `grant_*` option.
        """
        from dvc.config import ConfigError
        from dvc.utils.flatten import flatten, unflatten

        login_info = defaultdict(dict)

        # credentials
        login_info["key"] = config.get("access_key_id")
        login_info["secret"] = config.get("secret_access_key")
        login_info["token"] = config.get("session_token")

        # session configuration
        login_info["profile"] = config.get("profile")
        login_info["use_ssl"] = config.get("use_ssl", True)

        # extra client configuration
        client = login_info["client_kwargs"]
        client["region_name"] = config.get("region")
        client["endpoint_url"] = config.get("endpointurl")
        client["verify"] = config.get("ssl_verify")

        # timeout configuration
        config_kwargs = login_info["config_kwargs"]
        config_kwargs["read_timeout"] = config.get("read_timeout")
        config_kwargs["connect_timeout"] = config.get("connect_timeout")

        # encryptions
        additional = login_info["s3_additional_kwargs"]
        additional["ServerSideEncryption"] = config.get("sse")
        additional["SSEKMSKeyId"] = config.get("sse_kms_key_id")
        additional["ACL"] = config.get("acl")
        for grant_option, grant_key in self._GRANTS.items():
            if config.get(grant_option):
                # A canned ACL and explicit grants cannot be combined.
                if additional["ACL"]:
                    raise ConfigError(
                        "`acl` and `grant_*` AWS S3 config options "
                        "are mutually exclusive"
                    )
                additional[grant_key] = config[grant_option]

        # config kwargs
        session_config = login_info["config_kwargs"]
        session_config["s3"] = self._load_aws_config_file(
            login_info["profile"]
        )

        shared_creds = config.get("credentialpath")
        if shared_creds:
            os.environ.setdefault("AWS_SHARED_CREDENTIALS_FILE", shared_creds)

        # Only cache bucket regions when no region was pinned anywhere.
        if (
            client["region_name"] is None
            and session_config["s3"].get("region_name") is None
            and os.getenv("AWS_REGION") is None
        ):
            # Enable bucket region caching
            login_info["cache_regions"] = config.get("cache_regions", True)

        config_path = config.get("configpath")
        if config_path:
            os.environ.setdefault("AWS_CONFIG_FILE", config_path)

        # Drop None-valued options so s3fs/boto3 fall back to their own
        # defaults.
        return unflatten(
            {
                key: value
                for key, value in flatten(login_info).items()
                if value is not None
            }
        )

    @wrap_prop(threading.Lock())
    @cached_property
    def fs(self):
        # Lazily constructed, lock-protected s3fs filesystem instance.
        from s3fs import S3FileSystem as _S3FileSystem

        return _S3FileSystem(**self.fs_args)

    @classmethod
    def _strip_protocol(cls, path: str) -> str:
        # "s3://bucket/key" -> "bucket/key"
        from fsspec.utils import infer_storage_options

        return infer_storage_options(path)["path"]

    def unstrip_protocol(self, path):
        # "bucket/key" -> "s3://bucket/key"
        return "s3://" + path.lstrip("/")
def _translate_exceptions(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
try:
func(*args, **kwargs)
except Exception as exc:
from s3fs.errors import translate_boto_error
raise translate_boto_error(exc)
return wrapper
class S3FileSystem(BaseS3FileSystem):  # pylint:disable=abstract-method
    """S3 filesystem that additionally keeps a boto3 resource so uploads
    and downloads go through boto3's transfer manager with progress
    callbacks."""

    @wrap_prop(threading.Lock())
    @cached_property
    def s3(self):
        # Lazily built boto3 S3 service resource, configured from the
        # same fs_args used for s3fs.
        import boto3

        login_info = self.fs_args
        client_kwargs = login_info.get("client_kwargs", {})
        session_opts = {
            "profile_name": login_info.get("profile"),
            "region_name": client_kwargs.get("region_name"),
        }

        # Only pass credentials that were explicitly configured so
        # boto3's normal credential resolution applies otherwise.
        if "key" in login_info:
            session_opts["aws_access_key_id"] = login_info["key"]
        if "secret" in login_info:
            session_opts["aws_secret_access_key"] = login_info["secret"]
        if "token" in login_info:
            session_opts["aws_session_token"] = login_info["token"]

        session = boto3.session.Session(**session_opts)

        return session.resource(
            "s3",
            endpoint_url=client_kwargs.get("endpoint_url"),
            use_ssl=login_info["use_ssl"],
            verify=client_kwargs.get("verify"),
        )

    def _get_obj(self, path):
        """Return the boto3 Object for an s3 path ("bucket/key" form)."""
        bucket_name, key, _ = self.fs.split_path(path)
        bucket = self.s3.Bucket(bucket_name)
        return bucket.Object(key)

    @_translate_exceptions
    def put_file(
        self, from_file, to_info, callback=DEFAULT_CALLBACK, **kwargs
    ):
        """Upload local *from_file* to *to_info*, reporting progress
        through *callback*."""
        callback.set_size(os.path.getsize(from_file))
        obj = self._get_obj(to_info)
        obj.upload_file(
            from_file,
            Callback=callback.relative_update,
            ExtraArgs=self.fs_args.get("s3_additional_kwargs"),
            Config=self._transfer_config,
        )
        # The cached parent listing is stale after the upload.
        self.fs.invalidate_cache(self.path.parent(to_info))

    @_translate_exceptions
    def get_file(
        self, from_info, to_info, callback=DEFAULT_CALLBACK, **kwargs
    ):
        """Download *from_info* into local path *to_info*, reporting
        progress through *callback*."""
        obj = self._get_obj(from_info)
        callback.set_size(obj.content_length)
        obj.download_file(to_info, Callback=callback.relative_update)
| efiop/dvc | dvc/fs/s3.py | Python | apache-2.0 | 7,787 |
#!/usr/bin/env python
"""
Local Schedule Object
"""
import calendar
from time import mktime as _mktime
from ..debugging import bacpypes_debugging, ModuleLogger
from ..core import deferred
from ..task import OneShotTask
from ..primitivedata import Atomic, Null, Unsigned, Date, Time
from ..constructeddata import Array
from ..object import get_datatype, ScheduleObject
from .object import CurrentPropertyListMixIn
# some debugging
# _debug is the module debug flag; _log is this module's bacpypes logger.
_debug = 0
_log = ModuleLogger(globals())
#
# match_date
#
def match_date(date, date_pattern):
    """
    Match a specific date, a four-tuple with no special values, with a date
    pattern, a four-tuple possibly having special values.

    Tuples are (year - 1900, month, day, day_of_week).  In the pattern,
    255 is a wildcard in every position; month 13/14 mean odd/even
    months; day 32/33/34 mean last/odd/even days of the month.
    """
    year, month, day, day_of_week = date
    year_p, month_p, day_p, day_of_week_p = date_pattern

    # year: wildcard or exact
    if year_p != 255 and year != year_p:
        return False

    # month: odd / even / wildcard / exact
    if month_p == 13:
        if month % 2 == 0:
            return False
    elif month_p == 14:
        if month % 2 == 1:
            return False
    elif month_p != 255 and month != month_p:
        return False

    # day: last-of-month / odd / even / wildcard / exact
    if day_p == 32:
        if day != calendar.monthrange(year + 1900, month)[1]:
            return False
    elif day_p == 33:
        if day % 2 == 0:
            return False
    elif day_p == 34:
        if day % 2 == 1:
            return False
    elif day_p != 255 and day != day_p:
        return False

    # day of week: wildcard or exact
    if day_of_week_p != 255 and day_of_week != day_of_week_p:
        return False

    return True
#
# match_date_range
#
def match_date_range(date, date_range):
    """
    Match a specific date, a four-tuple with no special values, with a
    DateRange object which has a start date and an end date (inclusive).

    Only the (year, month, day) prefix is compared; day-of-week is ignored.
    """
    start = date_range.startDate[:3]
    end = date_range.endDate[:3]
    return start <= date[:3] <= end
#
# match_weeknday
#
def match_weeknday(date, weeknday):
    """
    Match a specific date, a four-tuple with no special values, with a
    BACnetWeekNDay, an octet string with three (unsigned) octets.

    The octets are (month, week-of-month, day-of-week); 255 is a wildcard
    in every position, month 13/14 mean odd/even months, and week-of-month
    1..9 select day windows within the month.

    Bug fix: accepts the octet string as either ``str`` (Python 2 style)
    or ``bytes`` -- iterating ``bytes`` on Python 3 already yields ints,
    which made the original ``ord(c)`` raise TypeError.
    """
    # unpack the date
    year, month, day, day_of_week = date
    last_day = calendar.monthrange(year + 1900, month)[1]

    # unpack the date pattern octet string (str chars or bytes ints)
    weeknday_unpacked = [
        c if isinstance(c, int) else ord(c) for c in weeknday
    ]
    month_p, week_of_month_p, day_of_week_p = weeknday_unpacked

    # check the month
    if month_p == 255:
        # any month
        pass
    elif month_p == 13:
        # odd months
        if (month % 2) == 0:
            return False
    elif month_p == 14:
        # even months
        if (month % 2) == 1:
            return False
    elif month != month_p:
        # specific month
        return False

    # check the week of the month
    if week_of_month_p == 255:
        # any week
        pass
    elif week_of_month_p == 1:
        # days numbered 1-7
        if (day > 7):
            return False
    elif week_of_month_p == 2:
        # days numbered 8-14
        if (day < 8) or (day > 14):
            return False
    elif week_of_month_p == 3:
        # days numbered 15-21
        if (day < 15) or (day > 21):
            return False
    elif week_of_month_p == 4:
        # days numbered 22-28
        if (day < 22) or (day > 28):
            return False
    elif week_of_month_p == 5:
        # days numbered 29-31
        if (day < 29) or (day > 31):
            return False
    elif week_of_month_p == 6:
        # last 7 days of this month
        if (day < last_day - 6):
            return False
    elif week_of_month_p == 7:
        # any of the 7 days prior to the last 7 days of this month
        if (day < last_day - 13) or (day > last_day - 7):
            return False
    elif week_of_month_p == 8:
        # any of the 7 days prior to the last 14 days of this month
        if (day < last_day - 20) or (day > last_day - 14):
            return False
    elif week_of_month_p == 9:
        # any of the 7 days prior to the last 21 days of this month
        if (day < last_day - 27) or (day > last_day - 21):
            return False

    # check the day
    if day_of_week_p == 255:
        # any day
        pass
    elif day_of_week != day_of_week_p:
        # specific day
        return False

    # all tests pass
    return True
#
# date_in_calendar_entry
#
@bacpypes_debugging
def date_in_calendar_entry(date, calendar_entry):
    """Return True if *date* (a date four-tuple) matches *calendar_entry*,
    which holds exactly one of: date, dateRange, or weekNDay.

    Raises RuntimeError when none of the three choices is set (the
    original raised RuntimeError("") with no diagnostic message).
    """
    if _debug: date_in_calendar_entry._debug("date_in_calendar_entry %r %r", date, calendar_entry)
    match = False
    if calendar_entry.date:
        match = match_date(date, calendar_entry.date)
    elif calendar_entry.dateRange:
        match = match_date_range(date, calendar_entry.dateRange)
    elif calendar_entry.weekNDay:
        match = match_weeknday(date, calendar_entry.weekNDay)
    else:
        raise RuntimeError("calendar entry must have a date, dateRange, or weekNDay")
    if _debug: date_in_calendar_entry._debug(" - match: %r", match)
    return match
#
# datetime_to_time
#
def datetime_to_time(date, time):
    """Take the date and time 4-tuples and return the time in seconds since
    the epoch as a floating point number.

    Raises RuntimeError if either tuple contains the 255 wildcard octet.
    """
    if 255 in date or 255 in time:
        raise RuntimeError("specific date and time required")
    year, month, day = date[:3]
    hour, minute, second = time[:3]
    # struct_time: (year, mon, mday, hour, min, sec, wday, yday, isdst=-1)
    return _mktime((year + 1900, month, day, hour, minute, second, 0, 0, -1))
#
# LocalScheduleObject
#
@bacpypes_debugging
class LocalScheduleObject(CurrentPropertyListMixIn, ScheduleObject):
    """A ScheduleObject that is evaluated locally by an attached
    LocalScheduleInterpreter task; configuration changes re-run a
    reliability check that flags inconsistent schedule datatypes."""
    def __init__(self, **kwargs):
        """Initialize like a ScheduleObject.  'presentValue' is required and
        must be an Atomic value -- its class anchors the datatype that all
        schedule values (and write targets) must agree with."""
        if _debug: LocalScheduleObject._debug("__init__ %r", kwargs)
        # make sure present value was provided
        if 'presentValue' not in kwargs:
            raise RuntimeError("presentValue required")
        if not isinstance(kwargs['presentValue'], Atomic):
            raise TypeError("presentValue must be an Atomic value")
        # continue initialization
        ScheduleObject.__init__(self, **kwargs)
        # attach an interpreter task
        self._task = LocalScheduleInterpreter(self)
        # add some monitors to check the reliability if these change
        for prop in ('weeklySchedule', 'exceptionSchedule', 'scheduleDefault'):
            self._property_monitors[prop].append(self._check_reliability)
        # check it now
        self._check_reliability()
    def _check_reliability(self, old_value=None, new_value=None):
        """This function is called when the object is created and after
        one of its configuration properties has changed. The new and old value
        parameters are ignored, this is called after the property has been
        changed and this is only concerned with the current value.

        Sets self.reliability to 'noFaultDetected' on success, or to
        'configurationError' when any validation step raises."""
        if _debug: LocalScheduleObject._debug("_check_reliability %r %r", old_value, new_value)
        try:
            schedule_default = self.scheduleDefault
            if schedule_default is None:
                raise ValueError("scheduleDefault expected")
            if not isinstance(schedule_default, Atomic):
                raise TypeError("scheduleDefault must be an instance of an atomic type")
            # every schedule value must be this type (or Null to relinquish)
            schedule_datatype = schedule_default.__class__
            if _debug: LocalScheduleObject._debug(" - schedule_datatype: %r", schedule_datatype)
            if (self.weeklySchedule is None) and (self.exceptionSchedule is None):
                raise ValueError("schedule required")
            # check the weekly schedule values
            if self.weeklySchedule:
                for daily_schedule in self.weeklySchedule:
                    for time_value in daily_schedule.daySchedule:
                        if _debug: LocalScheduleObject._debug(" - daily time_value: %r", time_value)
                        if time_value is None:
                            pass
                        elif not isinstance(time_value.value, (Null, schedule_datatype)):
                            if _debug: LocalScheduleObject._debug(" - wrong type: expected %r, got %r",
                                schedule_datatype,
                                time_value.__class__,
                                )
                            raise TypeError("wrong type")
                        elif 255 in time_value.time:
                            if _debug: LocalScheduleObject._debug(" - wildcard in time")
                            raise ValueError("must be a specific time")
            # check the exception schedule values
            # NOTE(review): unlike the weekly schedule above, exception
            # schedule times are not checked for the 255 wildcard here --
            # confirm that is intentional
            if self.exceptionSchedule:
                for special_event in self.exceptionSchedule:
                    for time_value in special_event.listOfTimeValues:
                        if _debug: LocalScheduleObject._debug(" - special event time_value: %r", time_value)
                        if time_value is None:
                            pass
                        elif not isinstance(time_value.value, (Null, schedule_datatype)):
                            if _debug: LocalScheduleObject._debug(" - wrong type: expected %r, got %r",
                                schedule_datatype,
                                time_value.__class__,
                                )
                            raise TypeError("wrong type")
            # check list of object property references
            obj_prop_refs = self.listOfObjectPropertyReferences
            if obj_prop_refs:
                for obj_prop_ref in obj_prop_refs:
                    if obj_prop_ref.deviceIdentifier:
                        raise RuntimeError("no external references")
                    # get the datatype of the property to be written
                    obj_type = obj_prop_ref.objectIdentifier[0]
                    datatype = get_datatype(obj_type, obj_prop_ref.propertyIdentifier)
                    if _debug: LocalScheduleObject._debug(" - datatype: %r", datatype)
                    # for array properties with an index, the element (or
                    # length, index 0) datatype is what gets written
                    if issubclass(datatype, Array) and (obj_prop_ref.propertyArrayIndex is not None):
                        if obj_prop_ref.propertyArrayIndex == 0:
                            datatype = Unsigned
                        else:
                            datatype = datatype.subtype
                        if _debug: LocalScheduleObject._debug(" - datatype: %r", datatype)
                    if datatype is not schedule_datatype:
                        if _debug: LocalScheduleObject._debug(" - wrong type: expected %r, got %r",
                            datatype,
                            schedule_datatype,
                            )
                        raise TypeError("wrong type")
            # all good
            self.reliability = 'noFaultDetected'
            if _debug: LocalScheduleObject._debug(" - no fault detected")
        except Exception as err:
            if _debug: LocalScheduleObject._debug(" - exception: %r", err)
            self.reliability = 'configurationError'
#
# LocalScheduleInterpreter
#
@bacpypes_debugging
class LocalScheduleInterpreter(OneShotTask):
    """One-shot task that evaluates a LocalScheduleObject's schedule,
    updates its presentValue, pushes the value to the configured object
    property references, and re-installs itself for the next transition."""
    def __init__(self, sched_obj):
        """Attach to *sched_obj*, subscribe to its property monitors, and
        defer an initial schedule evaluation."""
        if _debug: LocalScheduleInterpreter._debug("__init__ %r", sched_obj)
        OneShotTask.__init__(self)
        # reference the schedule object to update
        self.sched_obj = sched_obj
        # add a monitor for the present value
        sched_obj._property_monitors['presentValue'].append(self.present_value_changed)
        sched_obj._property_monitors['weeklySchedule'].append(self.schedule_changed)
        sched_obj._property_monitors['exceptionSchedule'].append(self.schedule_changed)
        # call to interpret the schedule
        deferred(self.process_task)
    def present_value_changed(self, old_value, new_value):
        """This function is called when the presentValue of the local schedule
        object has changed, both internally by this interpreter, or externally
        by some client using WriteProperty.

        Writes the new (primitive) value to every local object property
        reference; write failures are logged and swallowed per reference."""
        if _debug: LocalScheduleInterpreter._debug("present_value_changed %s %s", old_value, new_value)
        # if this hasn't been added to an application, there's nothing to do
        if not self.sched_obj._app:
            if _debug: LocalScheduleInterpreter._debug(" - no application")
            return
        # process the list of [device] object property [array index] references
        obj_prop_refs = self.sched_obj.listOfObjectPropertyReferences
        if not obj_prop_refs:
            if _debug: LocalScheduleInterpreter._debug(" - no writes defined")
            return
        # primitive values just set the value part
        new_value = new_value.value
        # loop through the writes
        for obj_prop_ref in obj_prop_refs:
            # only local objects are supported; external device references
            # are skipped
            if obj_prop_ref.deviceIdentifier:
                if _debug: LocalScheduleInterpreter._debug(" - no externals")
                continue
            # get the object from the application
            obj = self.sched_obj._app.get_object_id(obj_prop_ref.objectIdentifier)
            if not obj:
                if _debug: LocalScheduleInterpreter._debug(" - no object")
                continue
            # try to change the value
            try:
                obj.WriteProperty(
                    obj_prop_ref.propertyIdentifier,
                    new_value,
                    arrayIndex=obj_prop_ref.propertyArrayIndex,
                    priority=self.sched_obj.priorityForWriting,
                    )
                if _debug: LocalScheduleInterpreter._debug(" - success")
            except Exception as err:
                if _debug: LocalScheduleInterpreter._debug(" - error: %r", err)
    def schedule_changed(self, old_value, new_value):
        """This function is called when the weeklySchedule or the exceptionSchedule
        property of the local schedule object has changed, both internally by
        this interpreter, or externally by some client using WriteProperty."""
        if _debug:
            LocalScheduleInterpreter._debug(
                "schedule_changed(%s) %s %s", self.sched_obj.objectName,
                old_value, new_value,
                )
        # if this hasn't been added to an application, there's nothing to do
        if not self.sched_obj._app:
            if _debug: LocalScheduleInterpreter._debug(" - no application")
            return
        # real work done by process_task
        self.process_task()
    def process_task(self):
        """Evaluate the schedule now, set presentValue, and schedule this
        task to run again at the next computed transition time."""
        if _debug: LocalScheduleInterpreter._debug("process_task(%s)", self.sched_obj.objectName)
        # check for a valid configuration
        if self.sched_obj.reliability != 'noFaultDetected':
            if _debug: LocalScheduleInterpreter._debug(" - fault detected")
            return
        # get the date and time from the device object in case it provides
        # some custom functionality
        if self.sched_obj._app and self.sched_obj._app.localDevice:
            current_date = self.sched_obj._app.localDevice.localDate
            if _debug: LocalScheduleInterpreter._debug(" - current_date: %r", current_date)
            current_time = self.sched_obj._app.localDevice.localTime
            if _debug: LocalScheduleInterpreter._debug(" - current_time: %r", current_time)
        else:
            # get the current date and time, as provided by the task manager
            current_date = Date().now().value
            if _debug: LocalScheduleInterpreter._debug(" - current_date: %r", current_date)
            current_time = Time().now().value
            if _debug: LocalScheduleInterpreter._debug(" - current_time: %r", current_time)
        # evaluate the time
        # NOTE(review): eval() returns None (not a tuple) when the date is
        # outside the effective period, which would make this unpacking
        # raise TypeError -- confirm effectivePeriod is expected to always
        # cover the current date
        current_value, next_transition = self.eval(current_date, current_time)
        if _debug: LocalScheduleInterpreter._debug(" - current_value, next_transition: %r, %r", current_value, next_transition)
        ### set the present value
        self.sched_obj.presentValue = current_value
        # compute the time of the next transition
        transition_time = datetime_to_time(current_date, next_transition)
        # install this to run again
        self.install_task(transition_time)
    def eval(self, edate, etime):
        """Evaluate the schedule according to the provided date and time and
        return the appropriate present value, or None if not in the effective
        period.

        On a match, returns a (value, next_transition_time) pair where the
        transition time is a time 4-tuple ((24, 0, 0, 0) meaning start of
        the next day)."""
        if _debug: LocalScheduleInterpreter._debug("eval %r %r", edate, etime)
        # reference the schedule object
        sched_obj = self.sched_obj
        if _debug: LocalScheduleInterpreter._debug(" sched_obj: %r", sched_obj)
        # verify the date falls in the effective period
        if not match_date_range(edate, sched_obj.effectivePeriod):
            return None
        # the event priority is a list of values that are in effect for
        # exception schedules with the special event priority, see 135.1-2013
        # clause 7.3.2.23.10.3.8, Revision 4 Event Priority Test
        event_priority = [None] * 16
        next_day = (24, 0, 0, 0)
        next_transition_time = [None] * 16
        # check the exception schedule values
        if sched_obj.exceptionSchedule:
            for special_event in sched_obj.exceptionSchedule:
                if _debug: LocalScheduleInterpreter._debug(" - special_event: %r", special_event)
                # check the special event period
                special_event_period = special_event.period
                if special_event_period is None:
                    raise RuntimeError("special event period required")
                match = False
                calendar_entry = special_event_period.calendarEntry
                if calendar_entry:
                    if _debug: LocalScheduleInterpreter._debug(" - calendar_entry: %r", calendar_entry)
                    match = date_in_calendar_entry(edate, calendar_entry)
                else:
                    # get the calendar object from the application
                    calendar_object = sched_obj._app.get_object_id(special_event_period.calendarReference)
                    if not calendar_object:
                        raise RuntimeError("invalid calendar object reference")
                    if _debug: LocalScheduleInterpreter._debug(" - calendar_object: %r", calendar_object)
                    for calendar_entry in calendar_object.dateList:
                        if _debug: LocalScheduleInterpreter._debug(" - calendar_entry: %r", calendar_entry)
                        match = date_in_calendar_entry(edate, calendar_entry)
                        if match:
                            break
                # didn't match the period, try the next special event
                if not match:
                    if _debug: LocalScheduleInterpreter._debug(" - no matching calendar entry")
                    continue
                # event priority array index
                priority = special_event.eventPriority - 1
                if _debug: LocalScheduleInterpreter._debug(" - priority: %r", priority)
                # look for all of the possible times
                # (times at or before etime set the in-effect value; the
                # first later time is that priority's next transition)
                for time_value in special_event.listOfTimeValues:
                    tval = time_value.time
                    if tval <= etime:
                        if isinstance(time_value.value, Null):
                            if _debug: LocalScheduleInterpreter._debug(" - relinquish exception @ %r", tval)
                            event_priority[priority] = None
                            next_transition_time[priority] = None
                        else:
                            if _debug: LocalScheduleInterpreter._debug(" - consider exception @ %r", tval)
                            event_priority[priority] = time_value.value
                            next_transition_time[priority] = next_day
                    else:
                        next_transition_time[priority] = tval
                        break
        # assume the next transition will be at the start of the next day
        earliest_transition = next_day
        # check if any of the special events came up with something
        for priority_value, next_transition in zip(event_priority, next_transition_time):
            if next_transition is not None:
                earliest_transition = min(earliest_transition, next_transition)
            if priority_value is not None:
                if _debug: LocalScheduleInterpreter._debug(" - priority_value: %r", priority_value)
                return priority_value, earliest_transition
        # start out with the default
        daily_value = sched_obj.scheduleDefault
        # check the daily schedule
        if sched_obj.weeklySchedule:
            # edate[3] is the day-of-week field of the date 4-tuple
            daily_schedule = sched_obj.weeklySchedule[edate[3]]
            if _debug: LocalScheduleInterpreter._debug(" - daily_schedule: %r", daily_schedule)
            # look for all of the possible times
            for time_value in daily_schedule.daySchedule:
                if _debug: LocalScheduleInterpreter._debug(" - time_value: %r", time_value)
                tval = time_value.time
                if tval <= etime:
                    if isinstance(time_value.value, Null):
                        if _debug: LocalScheduleInterpreter._debug(" - back to normal @ %r", tval)
                        daily_value = sched_obj.scheduleDefault
                    else:
                        if _debug: LocalScheduleInterpreter._debug(" - new value @ %r", tval)
                        daily_value = time_value.value
                else:
                    earliest_transition = min(earliest_transition, tval)
                    break
        # return what was matched, if anything
        return daily_value, earliest_transition
| JoelBender/bacpypes | py27/bacpypes/local/schedule.py | Python | mit | 22,187 |
"""Response classes.
The seek_wrapper code is not used if you're using UserAgent with
.set_seekable_responses(False), or if you're using the urllib2-level interface
HTTPEquivProcessor. Class closeable_response is instantiated by some handlers
(AbstractHTTPHandler), but the closeable_response interface is only depended
upon by Browser-level code. Function upgrade_response is only used if you're
using Browser.
Copyright 2006 John J. Lee <jjl@pobox.com>
This code is free software; you can redistribute it and/or modify it
under the terms of the BSD or ZPL 2.1 licenses (see the file COPYING.txt
included with the distribution).
"""
from cStringIO import StringIO
import copy, mimetools, urllib2
def len_of_seekable(file_):
    """Return the total length of *file_* (a seekable file-like object)
    without disturbing its current position.

    This function exists because evaluation of len(file_.getvalue()) on
    every .read() from seek_wrapper would be O(N**2) in number of .read()s.
    """
    pos = file_.tell()
    file_.seek(0, 2)  # to end
    try:
        return file_.tell()
    finally:
        file_.seek(pos)
# XXX Andrew Dalke kindly sent me a similar class in response to my request on
# comp.lang.python, which I then proceeded to lose.  I wrote this class
# instead, but I think he's released his code publicly since, could pinch the
# tests from it, at least...
# For testing seek_wrapper invariant (note that
# test_urllib2.HandlerTest.test_seekable is expected to fail when this
# invariant checking is turned on).  The invariant checking is done by module
# ipdc, which is available here:
# http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/436834
## from ipdbc import ContractBase
## class seek_wrapper(ContractBase):
class seek_wrapper:
    """Adds a seek method to a file object.

    This is only designed for seeking on readonly file-like objects.

    Wrapped file-like object must have a read method.  The readline method is
    only supported if that method is present on the wrapped object.  The
    readlines method is always supported.  xreadlines and iteration are
    supported only for Python 2.2 and above.

    Public attributes:

    wrapped: the wrapped file object
    is_closed: true iff .close() has been called

    WARNING: All other attributes of the wrapped object (ie. those that are not
    one of wrapped, read, readline, readlines, xreadlines, __iter__ and next)
    are passed through unaltered, which may or may not make sense for your
    particular file object.

    Seek semantics: whence 0 and 1 are the usual fseek() absolute/relative
    positions; whence 2 is non-standard -- offset must be NON-negative and
    counts backwards from the end of the wrapped file.
    """
    # General strategy is to check that cache is full enough, then delegate to
    # the cache (self.__cache, which is a cStringIO.StringIO instance).  A seek
    # position (self.__pos) is maintained independently of the cache, in order
    # that a single cache may be shared between multiple seek_wrapper objects.
    # Copying using module copy shares the cache in this way.
    def __init__(self, wrapped):
        self.wrapped = wrapped
        # single-element lists so copies made by __copy__ share the flags:
        # mutating the element is visible through every copy
        self.__read_complete_state = [False]
        self.__is_closed_state = [False]
        self.__have_readline = hasattr(self.wrapped, "readline")
        self.__cache = StringIO()
        self.__pos = 0  # seek position
    def invariant(self):
        # The end of the cache is always at the same place as the end of the
        # wrapped file (though the .tell() method is not required to be present
        # on wrapped file).
        return self.wrapped.tell() == len(self.__cache.getvalue())
    def close(self):
        """Close the wrapped file and mark this wrapper closed."""
        self.wrapped.close()
        self.is_closed = True
    def __getattr__(self, name):
        # is_closed / read_complete live in the shared state lists; anything
        # else is delegated to the wrapped object
        if name == "is_closed":
            return self.__is_closed_state[0]
        elif name == "read_complete":
            return self.__read_complete_state[0]
        wrapped = self.__dict__.get("wrapped")
        if wrapped:
            return getattr(wrapped, name)
        return getattr(self.__class__, name)
    def __setattr__(self, name, value):
        if name == "is_closed":
            self.__is_closed_state[0] = bool(value)
        elif name == "read_complete":
            # a closed wrapper's read_complete flag is frozen
            if not self.is_closed:
                self.__read_complete_state[0] = bool(value)
        else:
            self.__dict__[name] = value
    def seek(self, offset, whence=0):
        """Seek to a new position; see the class docstring for the
        non-standard whence == 2 convention."""
        assert whence in [0,1,2]
        # how much data, if any, do we need to read?
        if whence == 2:  # 2: relative to end of *wrapped* file
            if offset < 0: raise ValueError("negative seek offset")
            # since we don't know yet where the end of that file is, we must
            # read everything
            to_read = None
        else:
            if whence == 0:  # 0: absolute
                if offset < 0: raise ValueError("negative seek offset")
                dest = offset
            else:  # 1: relative to current position
                pos = self.__pos
                # bug fix: the old test (pos < offset) wrongly rejected
                # forward relative seeks and let negative offsets seek to
                # before the start of the file; the destination pos+offset
                # is what must be non-negative
                if pos + offset < 0:
                    raise ValueError("seek to before start of file")
                dest = pos + offset
            end = len_of_seekable(self.__cache)
            to_read = dest - end
            if to_read < 0:
                to_read = 0
        if to_read != 0:
            self.__cache.seek(0, 2)
            if to_read is None:
                assert whence == 2
                self.__cache.write(self.wrapped.read())
                self.read_complete = True
                self.__pos = self.__cache.tell() - offset
            else:
                data = self.wrapped.read(to_read)
                if not data:
                    self.read_complete = True
                else:
                    self.__cache.write(data)
                # Don't raise an exception even if we've seek()ed past the end
                # of .wrapped, since fseek() doesn't complain in that case.
                # Also like fseek(), pretend we have seek()ed past the end,
                # i.e. not:
                #self.__pos = self.__cache.tell()
                # but rather:
                self.__pos = dest
        else:
            self.__pos = dest
    def tell(self):
        return self.__pos
    def __copy__(self):
        # copies share the cache and the closed/read-complete state lists
        cpy = self.__class__(self.wrapped)
        cpy.__cache = self.__cache
        cpy.__read_complete_state = self.__read_complete_state
        cpy.__is_closed_state = self.__is_closed_state
        return cpy
    def get_data(self):
        """Return the entire body, restoring the current seek position."""
        pos = self.__pos
        try:
            self.seek(0)
            return self.read(-1)
        finally:
            self.__pos = pos
    def read(self, size=-1):
        pos = self.__pos
        end = len_of_seekable(self.__cache)
        available = end - pos
        # enough data already cached?
        if size <= available and size != -1:
            self.__cache.seek(pos)
            self.__pos = pos+size
            return self.__cache.read(size)
        # no, so read sufficient data from wrapped file and cache it
        self.__cache.seek(0, 2)
        if size == -1:
            self.__cache.write(self.wrapped.read())
            self.read_complete = True
        else:
            to_read = size - available
            assert to_read > 0
            data = self.wrapped.read(to_read)
            if not data:
                self.read_complete = True
            else:
                self.__cache.write(data)
        self.__cache.seek(pos)
        data = self.__cache.read(size)
        self.__pos = self.__cache.tell()
        assert self.__pos == pos + len(data)
        return data
    def readline(self, size=-1):
        if not self.__have_readline:
            raise NotImplementedError("no readline method on wrapped object")
        # line we're about to read might not be complete in the cache, so
        # read another line first
        pos = self.__pos
        self.__cache.seek(0, 2)
        data = self.wrapped.readline()
        if not data:
            self.read_complete = True
        else:
            self.__cache.write(data)
        self.__cache.seek(pos)
        data = self.__cache.readline()
        if size != -1:
            r = data[:size]
            self.__pos = pos+size
        else:
            r = data
            self.__pos = pos+len(data)
        return r
    def readlines(self, sizehint=-1):
        pos = self.__pos
        self.__cache.seek(0, 2)
        self.__cache.write(self.wrapped.read())
        self.read_complete = True
        self.__cache.seek(pos)
        data = self.__cache.readlines(sizehint)
        self.__pos = self.__cache.tell()
        return data
    def __iter__(self): return self
    def next(self):
        line = self.readline()
        if line == "": raise StopIteration
        return line
    xreadlines = __iter__
    def __repr__(self):
        return ("<%s at %s whose wrapped object = %r>" %
                (self.__class__.__name__, hex(abs(id(self))), self.wrapped))
class response_seek_wrapper(seek_wrapper):
    """Seekable wrapper for HTTP responses.

    Adds copying support and the ability to replace the cached response
    body (set_data) on top of seek_wrapper's caching seek machinery.
    """
    def __init__(self, wrapped):
        seek_wrapper.__init__(self, wrapped)
        self._headers = self.wrapped.info()
    def __copy__(self):
        new = seek_wrapper.__copy__(self)
        # the copy gets its own (shallow-copied) headers from the delegate
        new._headers = copy.copy(self.info())
        return new
    # .info() and .geturl() (the only two urllib2 response methods not
    # implemented by seek_wrapper) must be defined here explicitly rather
    # than via seek_wrapper's __getattr__ delegation, so that the dynamic
    # HTTPError subclasses built in get_seek_wrapper_class() pick up the
    # wrapped object's implementation and not HTTPError's.
    def info(self):
        return self._headers
    def geturl(self):
        return self.wrapped.geturl()
    def set_data(self, data):
        """Replace the cached response body with *data*."""
        # drain and close the wrapped response, then install a fresh cache
        self.seek(0)
        self.read()
        self.close()
        new_cache = StringIO()
        new_cache.write(data)
        # name-mangled attribute of the seek_wrapper base class
        self._seek_wrapper__cache = new_cache
        self.seek(0)
class eoffile:
# file-like object that always claims to be at end-of-file...
def read(self, size=-1): return ""
def readline(self, size=-1): return ""
def __iter__(self): return self
def next(self): return ""
def close(self): pass
class eofresponse(eoffile):
    """An eoffile that still reports the original response's metadata
    (URL, headers, code, msg) after the real response has been closed."""
    def __init__(self, url, headers, code, msg):
        self._url = url
        self._headers = headers
        self.code = code
        self.msg = msg
    def geturl(self):
        return self._url
    def info(self):
        return self._headers
class closeable_response:
    """Avoids unnecessarily clobbering urllib.addinfourl methods on .close().

    Only supports responses returned by mechanize.HTTPHandler.

    After .close(), the following methods are supported:

    .read()
    .readline()
    .info()
    .geturl()
    .__iter__()
    .next()
    .close()

    and the following attributes are supported:

    .code
    .msg

    Also supports pickling (but the stdlib currently does something to prevent
    it: http://python.org/sf/1144636).

    """
    # presence of this attr indicates is useable after .close()
    closeable_response = None
    def __init__(self, fp, headers, url, code, msg):
        # fp: the underlying file-like response body
        self._set_fp(fp)
        self._headers = headers
        self._url = url
        self.code = code
        self.msg = msg
    def _set_fp(self, fp):
        """Bind *fp* and re-expose its bound I/O methods on this instance
        (read/readline, plus readlines/fileno/iteration when present)."""
        self.fp = fp
        self.read = self.fp.read
        self.readline = self.fp.readline
        if hasattr(self.fp, "readlines"): self.readlines = self.fp.readlines
        if hasattr(self.fp, "fileno"):
            self.fileno = self.fp.fileno
        else:
            # keep the interface: a fileno that reports "no descriptor"
            self.fileno = lambda: None
        self.__iter__ = self.fp.__iter__
        self.next = self.fp.next
    def __repr__(self):
        return '<%s at %s whose fp = %r>' % (
            self.__class__.__name__, hex(abs(id(self))), self.fp)
    def info(self):
        return self._headers
    def geturl(self):
        return self._url
    def close(self):
        # close the real body, then swap in an always-EOF stand-in so the
        # documented subset of methods keeps working after close
        wrapped = self.fp
        wrapped.close()
        new_wrapped = eofresponse(
            self._url, self._headers, self.code, self.msg)
        self._set_fp(new_wrapped)
    def __getstate__(self):
        # There are three obvious options here:
        # 1. truncate
        # 2. read to end
        # 3. close socket, pickle state including read position, then open
        # again on unpickle and use Range header
        # XXXX um, 4. refuse to pickle unless .close()d. This is better,
        # actually ("errors should never pass silently"). Pickling doesn't
        # work anyway ATM, because of http://python.org/sf/1144636 so fix
        # this later
        # 2 breaks pickle protocol, because one expects the original object
        # to be left unscathed by pickling. 3 is too complicated and
        # surprising (and too much work ;-) to happen in a sane __getstate__.
        # So we do 1.
        state = self.__dict__.copy()
        new_wrapped = eofresponse(
            self._url, self._headers, self.code, self.msg)
        state["wrapped"] = new_wrapped
        return state
def test_response(data='test data', headers=[],
                  url="http://example.com/", code=200, msg="OK"):
    """Build a canned seekable response for tests; see make_response."""
    return make_response(data, headers, url, code, msg)
def test_html_response(data='test data', headers=[],
                       url="http://example.com/", code=200, msg="OK"):
    """Build a canned seekable text/html response for tests.

    Bug fix: builds a NEW header list instead of the old in-place
    ``headers += [...]``, which mutated the shared mutable default list
    and appended another Content-type header on every default call.
    """
    headers = headers + [("Content-type", "text/html")]
    return make_response(data, headers, url, code, msg)
def make_response(data, headers, url, code, msg):
    """Convenient factory for objects implementing response interface.

    data: string containing response body data
    headers: sequence of (name, value) pairs
    url: URL of response
    code: integer response code (e.g. 200)
    msg: string response code message (e.g. "OK")
    """
    msg_headers = make_headers(headers)
    raw_response = closeable_response(StringIO(data), msg_headers, url, code, msg)
    return response_seek_wrapper(raw_response)
def make_headers(headers):
    """Build a mimetools.Message from a sequence of (name, value) pairs."""
    header_lines = ["%s: %s" % name_value for name_value in headers]
    return mimetools.Message(StringIO("\n".join(header_lines)))
# Rest of this module is especially horrible, but needed, at least until fork
# urllib2. Even then, may want to preseve urllib2 compatibility.
def get_seek_wrapper_class(response):
    """Pick the wrapper class that adds seekability to *response*.

    Plain responses get response_seek_wrapper.  Unwrapped HTTPError
    instances must stay HTTPError subclasses (callers catch them), so for
    those a class deriving from both response_seek_wrapper and the error's
    own class is built dynamically.
    """
    if not (isinstance(response, urllib2.HTTPError) and
            not hasattr(response, "seek")):
        return response_seek_wrapper
    # name recorded only for repr() of the dynamically built class
    exc_cls = response.__class__
    if exc_cls.__module__ == "__builtin__":
        exc_class_name = exc_cls.__name__
    else:
        exc_class_name = "%s.%s" % (exc_cls.__module__, exc_cls.__name__)
    class httperror_seek_wrapper(response_seek_wrapper, exc_cls):
        # this only derives from HTTPError in order to be a subclass --
        # the HTTPError behaviour comes from delegation
        _exc_class_name = exc_class_name
        def __init__(self, wrapped):
            response_seek_wrapper.__init__(self, wrapped)
            # be compatible with undocumented HTTPError attributes :-(
            self.hdrs = wrapped.info()
            self.filename = wrapped.geturl()
        def __repr__(self):
            return (
                "<%s (%s instance) at %s "
                "whose wrapped object = %r>" % (
                self.__class__.__name__, self._exc_class_name,
                hex(abs(id(self))), self.wrapped)
                )
    return httperror_seek_wrapper
def seek_wrapped_response(response):
    """Return a copy of response that supports seekable response interface.

    Accepts responses from both mechanize and urllib2 handlers.

    Copes with both ordinary response instances and HTTPError instances (which
    can't be simply wrapped due to the requirement of preserving the exception
    base class).
    """
    if hasattr(response, "seek"):
        wrapped = response
    else:
        wrapped = get_seek_wrapper_class(response)(response)
    assert hasattr(wrapped, "get_data")
    return wrapped
def upgrade_response(response):
    """Return a copy of response that supports Browser response interface.

    Browser response interface is that of "seekable responses"
    (response_seek_wrapper), plus the requirement that responses must be
    useable after .close() (closeable_response).

    Accepts responses from both mechanize and urllib2 handlers.

    Copes with both ordinary response instances and HTTPError instances (which
    can't be simply wrapped due to the requirement of preserving the exception
    base class).
    """
    wrapper_class = get_seek_wrapper_class(response)
    if hasattr(response, "closeable_response"):
        # a mechanize closeable_response: wrap if needed and hand back a copy
        if not hasattr(response, "seek"):
            response = wrapper_class(response)
        assert hasattr(response, "get_data")
        return copy.copy(response)
    # a urllib2 handler constructed the response, i.e. the response is an
    # urllib.addinfourl or a urllib2.HTTPError, instead of a
    # _Util.closeable_response as returned by e.g. mechanize.HTTPHandler
    code = getattr(response, "code", None)
    msg = getattr(response, "msg", None)
    # may have already-.read() data from .seek() cache
    body = None
    get_data = getattr(response, "get_data", None)
    if get_data:
        body = get_data()
    upgraded = closeable_response(
        response.fp, response.info(), response.geturl(), code, msg)
    upgraded = wrapper_class(upgraded)
    if body:
        upgraded.set_data(body)
    return upgraded
| ppizarror/TerminalDictionary | lib/mechanize/_response.py | Python | gpl-2.0 | 17,803 |
#!/usr/bin/python3
import datetime #for current date and time
import os #getpid()
import paths #global paths
import queue #for sequencing output messages
import re #need some regex support
import threading #current thread identification
#log level for determining how important something is
class ll:
    """Log-level string constants: used both as the severity tag printed in
    each message prefix and as the key for color selection."""
    info = "info"
    warn = "warning"
    fatal = "fatal"
    system = "system"
    input = "input"
    reset = "reset"
#colors used for logging
class fg_colors:
    """ANSI SGR escape sequences for terminal foreground colors."""
    black = "\x1b[30m"
    red = "\x1b[31m"
    green = "\x1b[32m"
    yellow = "\x1b[33m"
    blue = "\x1b[34m"
    magenta = "\x1b[35m"
    cyan = "\x1b[36m"
    white = "\x1b[37m"
    reset = "\x1b[0m"  # restore default terminal attributes
#globals
g_proc_name = "unknown"  # sentinel meaning init() has not been called yet
g_log_name = None  # full path of the active log file (set by init())
g_log_fobj = None  # log file object (opened on demand)
g_output_queue = queue.Queue()  # messages pending console/file output
g_lock = threading.Lock()  # serializes console printing and file writes
###############################################################################
# initialization, only called once at startup
###############################################################################
def init(proc_name):
    """One-time logger start-up: pick a unique date-stamped log file name,
    create the log directory/file if needed, and switch to logged mode.

    proc_name: short process name used in every log prefix and in the log
    file name; must not be "unknown" (the uninitialized sentinel).

    Exits the process if the logger was already initialized.
    """
    global g_proc_name
    global g_log_name
    global g_log_fobj
    #make sure somebody else doesn't have this set up
    # bug fix: the guard must run BEFORE overwriting the sentinel and test
    # for != "unknown"; the original assigned g_proc_name first and tested
    # == "unknown", so a second init() was never detected
    if g_proc_name != "unknown":
        print("fatal: logger already initialized (%s)" % g_proc_name)
        quit()
    g_proc_name = proc_name
    count = 0
    today = datetime.date.today()
    # note: "extenstion" is the (misspelled) attribute name in the paths module
    g_log_name = paths.logfile_directory + "/" + proc_name + paths.logfile_extenstion + str(count)
    #make sure the logging directory exists
    if os.path.exists(paths.logfile_directory) != True:
        os.mkdir(paths.logfile_directory)
    g_log_name += ("_" + str(today))
    # bump the counter until we find a file name that is not taken yet
    while os.path.isfile(g_log_name) == True:
        count += 1
        g_log_name = paths.logfile_directory + \
                     "/" + \
                     proc_name + \
                     paths.logfile_extenstion + \
                     str(count) + \
                     "_" + \
                     str(today)
    print("logger: opening log file %s write/append" % g_log_name)
    # touch the file so later appends are guaranteed to work
    g_log_fobj = open(g_log_name, "a")
    g_log_fobj.close()
    print("logger: started ok, switching to logged mode")
    l(ll.system, "logger: system date and time at start: %s\n" % str(datetime.datetime.now()))
###############################################################################
# termination, clears the output queue mostly
###############################################################################
def shutdown():
    """Drain any queued log output to console and file, then announce
    logger termination."""
    global g_lock
    l(ll.info, "logger: got termination request\n")
    # bounded acquire: if a writer is unresponsive we proceed anyway after
    # the 2-second timeout -- it sucks to be them
    g_lock.acquire(True, 2.0)
    while not g_output_queue.empty():
        pending = g_output_queue.get()
        l_raw(pending)
        __write_to_file(pending)
    g_lock.release()
    print("logger: terminated")
###############################################################################
# log something to the given outputs
###############################################################################
def l(log_level, *args):
    """Queue a prefixed log message built from *args* (string fragments)
    and flush the whole queue to the console and the log file."""
    global g_output_queue
    global g_lock
    message = __create_prefix(log_level) + "".join(args)
    g_output_queue.put(message)
    # drain under the lock so concurrent threads don't interleave output
    g_lock.acquire()
    while not g_output_queue.empty():
        pending = g_output_queue.get()
        l_raw(pending)
        __write_to_file(pending)
    g_lock.release()
###############################################################################
# log to the given outputs without prepending info (useful for formatting)
###############################################################################
def l_raw(*args):
    """Write *args* to stdout exactly like print but without a trailing
    newline; used to emit queued log fragments verbatim."""
    print(*args, sep=" ", end="")
###############################################################################
# get input from stdin, with message
###############################################################################
def g(*args):
    """Show a prefixed prompt built from *args*, block reading one line
    from stdin, log the input to the file, and return it."""
    global g_lock
    prompt = __create_prefix(ll.input) + "".join(args)
    # flush anything queued first, then emit the prompt (console + file)
    g_lock.acquire()
    while not g_output_queue.empty():
        pending = g_output_queue.get()
        l_raw(pending)
        __write_to_file(pending)
    l_raw(prompt)
    __write_to_file(prompt)
    g_lock.release()
    # note: no lock held here -- we may be polling on the terminal for a
    # long time
    user_input = g_raw()
    # record the input on disk; input() strips the newline, so add it back
    g_lock.acquire()
    __write_to_file(user_input + "\n", False)
    g_lock.release()
    # give the caller what we got from the terminal
    return user_input
###############################################################################
# log to the given outputs without prepending info (useful for formatting)
###############################################################################
def g_raw():
    """Read and return one line from stdin (input() strips the newline)."""
    return input()
###############################################################################
# sets the output string format
###############################################################################
def __set_format(log_level):
    """Return the ANSI color code used to render messages of log_level."""
    color_by_level = {
        ll.info: fg_colors.white,
        ll.reset: fg_colors.reset,
        ll.warn: fg_colors.yellow,
        ll.fatal: fg_colors.red,
        ll.system: fg_colors.cyan,
        ll.input: fg_colors.magenta,
    }
    #any unrecognized level renders green, matching the old if/elif chain
    return color_by_level.get(log_level, fg_colors.green)
###############################################################################
# sets the output string format string
###############################################################################
def __get_message_string(log_level):
    # NOTE(review): this looks like a stale copy-paste of __set_format --
    # every branch except the first returns a *color code* from fg_colors,
    # while the name and the first branch suggest it should return the
    # level's message string.  It is not called anywhere visible in this
    # file; confirm intent before fixing or deleting.
    if (log_level == ll.info):
        return ll.info
    elif (log_level == ll.reset):
        return fg_colors.reset
    elif (log_level == ll.warn):
        return fg_colors.yellow
    elif (log_level == ll.fatal):
        return fg_colors.red
    elif (log_level == ll.system):
        return fg_colors.cyan
    elif (log_level == ll.input):
        return fg_colors.magenta
    else:
        return fg_colors.green
###############################################################################
# sets the output string format string
###############################################################################
def __create_prefix(log_level):
    """Build the standard "[proc:pid:thread] [level] " message prefix.

    The prefix is wrapped in the color for log_level followed by a color
    reset, so only the prefix itself is colorized.
    """
    new_message = __set_format(log_level)
    #Thread.getName() is deprecated; .name is the supported accessor
    new_message += ( \
        "[" + \
        g_proc_name + \
        ":" + \
        str(os.getpid()) + \
        ":" + \
        threading.current_thread().name + \
        "]")
    new_message += (" [" + \
        log_level + \
        "] ")
    new_message += __set_format(ll.reset)
    return new_message
###############################################################################
# write a given string to our logifle
###############################################################################
def __write_to_file(string, show_time = True):
    """Append string to the log file, stripped of ANSI color escapes.

    Args:
        string: the text to append.
        show_time: when True, prefix the line with the current timestamp
            (g() passes False when echoing user input).
    """
    global g_log_name
    global g_log_fobj
    now = datetime.datetime.now()
    str2 = str(now) + " "
    #remove the escape characters before we write to a file
    ansi_escape = re.compile(r"\x1b[^m]*m")
    string = ansi_escape.sub("", string)
    #'with' guarantees the handle is closed even if a write raises,
    #which the old open()/close() pair did not
    with open(g_log_name, "a") as g_log_fobj:
        #sometimes we don't want the timestamp (see g())
        if show_time == True:
            g_log_fobj.write(str2)
        g_log_fobj.write(string)
| zebra6/tux_2.0 | util/p_log.py | Python | apache-2.0 | 7,824 |
"""A selection of useful functions for optics, especially Fourier optics. The
documentation is designed to be used with sphinx (still lots to do)
Note that this comes directly from a preliminary version of the astro-optics
repository. TODO: Replace this with either a release version of astro-optics
or an appropriate link.
"""
from __future__ import print_function
import numpy as np
import matplotlib.pyplot as plt
from scipy import special
from scipy import optimize
def azimuthalAverage(image, center=None, stddev=False, returnradii=False, return_nr=False,
        binsize=0.5, weights=None, steps=False, interpnan=False, left=None, right=None, return_max=False):
    """
    Calculate the azimuthally averaged radial profile.
    NB: This was found online and should be properly credited! Modified by MJI

    image - The 2D image
    center - The [x,y] pixel coordinates used as the center. The default is
             None, which then uses the center of the image (including
             fractional pixels).
    stddev - if specified, return the azimuthal standard deviation instead of the average
    returnradii - if specified, return (radii_array,radial_profile)
    return_nr   - if specified, return number of pixels per radius *and* radius
    binsize - size of the averaging bin.  Can lead to strange results if
        non-binsize factors are used to specify the center and the binsize is
        too large
    weights - can do a weighted average instead of a simple average if this keyword parameter
        is set.  weights.shape must = image.shape.  weighted stddev is undefined, so don't
        set weights and stddev.
    steps - if specified, will return a double-length bin array and radial
        profile so you can plot a step-form radial profile (which more accurately
        represents what's going on)
    interpnan - Interpolate over NAN values, i.e. bins where there is no data?
    left,right - passed to interpnan; they set the extrapolated values
    return_max - (MJI) Return the maximum index.

    If a bin contains NO DATA, it will have a NAN value because of the
    divide-by-sum-of-weights component.  I think this is a useful way to denote
    lack of data, but users let me know if an alternative is prefered...
    """
    # Calculate the indices from the image
    y, x = np.indices(image.shape)
    if center is None:
        center = np.array([(x.max()-x.min())/2.0, (y.max()-y.min())/2.0])
    r = np.hypot(x - center[0], y - center[1])
    if weights is None:
        weights = np.ones(image.shape)
    elif stddev:
        raise ValueError("Weighted standard deviation is not defined.")
    # the 'bins' as initially defined are lower/upper bounds for each bin
    # so that values will be in [lower,upper)
    nbins = int(np.round(r.max() / binsize)+1)
    maxbin = nbins * binsize
    bins = np.linspace(0,maxbin,nbins+1)
    # but we're probably more interested in the bin centers than their left or right sides...
    bin_centers = (bins[1:]+bins[:-1])/2.0
    # Find out which radial bin each point in the map belongs to
    whichbin = np.digitize(r.flat,bins)
    # how many per bin (i.e., histogram)?
    # there are never any in bin 0, because the lowest index returned by digitize is 1
    nr = np.bincount(whichbin)[1:]
    # recall that bins are from 1 to nbins (which is expressed in array terms by arange(nbins)+1 or range(1,nbins+1) )
    # radial_prof.shape = bin_centers.shape
    # NOTE: the original used Python-2-only xrange(); range() is the
    # Python-3 equivalent.
    if stddev:
        radial_prof = np.array([image.flat[whichbin==b].std() for b in range(1,nbins+1)])
    elif return_max:
        radial_prof = np.array([np.append((image*weights).flat[whichbin==b],-np.inf).max() for b in range(1,nbins+1)])
    else:
        radial_prof = np.array([(image*weights).flat[whichbin==b].sum() / weights.flat[whichbin==b].sum() for b in range(1,nbins+1)])
    #import pdb; pdb.set_trace()
    if interpnan:
        radial_prof = np.interp(bin_centers,bin_centers[radial_prof==radial_prof],radial_prof[radial_prof==radial_prof],left=left,right=right)
    if steps:
        # zip() is lazy on Python 3; materialize it before np.array()
        xarr = np.array(list(zip(bins[:-1],bins[1:]))).ravel()
        yarr = np.array(list(zip(radial_prof,radial_prof))).ravel()
        return xarr,yarr
    elif returnradii:
        return bin_centers,radial_prof
    elif return_nr:
        return nr,bin_centers,radial_prof
    else:
        return radial_prof
def fresnel(wf, m_per_pix, d, wave):
    """Propagate a wave by Fresnel diffraction

    Parameters
    ----------
    wf: float array
        Wavefront, i.e. a complex electric field in the scalar approximation.
    m_per_pix: float
        Scale of the pixels in the input wavefront in metres.
    d: float
        Distance to propagate the wavefront.
    wave: float
        Wavelength in metres.

    Returns
    -------
    wf_new: float array
        Wavefront after propagating.
    """
    #Notation on Mike's board
    sz = wf.shape[0]
    if (wf.shape[0] != wf.shape[1]):
        print("ERROR: Input wavefront must be square")
        raise UserWarning
    #The code below came from the board, i.e. via Huygen's principle.
    #We got all mixed up when converting to Fourier transform co-ordinates.
    #Co-ordinate axis of the wavefront. Not that 0 must be in the corner.
    #x = (((np.arange(sz)+sz/2) % sz) - sz/2)*m_per_pix
    #xy = np.meshgrid(x,x)
    #rr =np.sqrt(xy[0]**2 + xy[1]**2)
    #h_func = np.exp(1j*np.pi*rr**2/wave/d)
    #h_ft = np.fft.fft2(h_func)
    #Co-ordinate axis of the wavefront Fourier transform. Not that 0 must be in the corner.
    #x is in cycles per wavefront dimension.
    # Spatial-frequency axis in cycles/metre, arranged in FFT order
    # (zero frequency in the corner).
    x = (((np.arange(sz)+sz/2) % sz) - sz/2)/m_per_pix/sz
    xy = np.meshgrid(x,x)
    # uu is the radial spatial frequency.
    uu =np.sqrt(xy[0]**2 + xy[1]**2)
    # Quadratic-phase transfer function applied in the Fourier domain
    # (transfer-function form of Fresnel propagation).
    h_ft = np.exp(1j*np.pi*uu**2*wave*d)
    # fftshift moves the array-centred wavefront to FFT (corner) order
    # before transforming; the inverse shift below restores centring.
    g_ft = np.fft.fft2(np.fft.fftshift(wf))*h_ft
    wf_new = np.fft.ifft2(g_ft)
    return np.fft.fftshift(wf_new)
def curved_wf(sz,m_per_pix,f_length,wave):
    """Return the (sz, sz) complex phase screen of an ideal lens of focal
    length f_length, centred on the *middle* pixel of the array.

    The phase at radius rr pixels is 0.5*m_per_pix**2/wave/f_length*rr**2
    waves, i.e. a quadratic (parabolic) wavefront.
    """
    axis = np.arange(sz) - sz/2
    xg, yg = np.meshgrid(axis, axis)
    rr = np.sqrt(xg**2 + yg**2)
    waves_of_phase = 0.5*m_per_pix**2/wave/f_length*rr**2
    return np.exp(2j*np.pi*waves_of_phase)
def kmf(sz):
    """This function creates a periodic wavefront produced by Kolmogorov turbulence.
    It SHOULD normalised so that the variance at a distance of 1 pixel is 1 radian^2,
    but this is totally wrong now. The correct normalisation comes from an
    empirical calculation, scaled like in the IDL code.

    Parameters
    ----------
    sz: int
        Size of the 2D array

    Returns
    -------
    wavefront: float array (sz,sz)
        2D array wavefront.
    """
    # Integer half-size: np.random.random needs an int shape; the old
    # 'sz/2+1' is a float on Python 3 and raises TypeError.
    hsz = sz // 2
    xy = np.meshgrid(np.arange(hsz + 1)/float(sz), (((np.arange(sz) + hsz) % sz) - hsz)/float(sz))
    # Clamp the zero frequency so the power law does not divide by zero.
    dist2 = np.maximum( xy[1]**2 + xy[0]**2, 1e-12)
    # Random phases with a Kolmogorov (-11/6 power in |u|^2) amplitude
    # envelope; 15.81 is the empirical normalisation noted above.
    ft_wf = np.exp(2j * np.pi * np.random.random((sz, hsz + 1)))*dist2**(-11.0/12.0)*sz/15.81
    ft_wf[0,0]=0
    return np.fft.irfft2(ft_wf)
def test_kmf(sz,ntests):
    """Empirically report the mean and scatter, over ntests realisations,
    of the 1-pixel phase structure function of kmf() screens."""
    sample_vars = np.zeros(ntests)
    for trial in range(ntests):
        screen = kmf(sz)
        dvar_x = np.mean((screen[1:,:] - screen[:-1,:])**2)
        dvar_y = np.mean((screen[:,1:] - screen[:,:-1])**2)
        sample_vars[trial] = 0.5 * (dvar_x + dvar_y)
    print("Mean var: {0:7.3e} Sdev var: {1:7.3e}".format(np.mean(sample_vars),np.std(sample_vars)))
def moffat(theta, hw, beta=4.0):
    """Moffat profile for simulating seeing, evaluated at angle(s) theta.

    The profile integrates to 1 when theta is sampled once per unit area
    (e.g. arange(100)).  From Racine (1996), beta=4 is a good
    approximation for seeing.

    Parameters
    ----------
    theta: float or float array
        Angle at which to calculate the moffat profile (same units as hw)
    hw: float
        Half-width of the profile
    beta: float
        beta parameter
    """
    core = 2.0**(1.0/beta) - 1
    falloff = (1 + core*(theta/hw)**2)**beta
    return core*(beta - 1)/np.pi/hw**2/falloff
def moffat2d(sz,hw, beta=4.0):
    """Return an (sz, sz) Moffat profile centred on the array middle."""
    axis = np.arange(sz) - sz/2.0
    xg, yg = np.meshgrid(axis, axis)
    return moffat(np.sqrt(xg**2 + yg**2), hw, beta=beta)
def circle(dim,width):
    """Return a (dim, dim) float mask: 1.0 strictly inside a centred
    circle of diameter width, 0.0 elsewhere.

    Parameters
    ----------
    dim: int
        Size of the 2D array
    width: int
        diameter of the circle
    """
    axis = np.arange(dim) - dim/2.0
    xg, yg = np.meshgrid(axis, axis)
    radius_sq = xg**2 + yg**2
    return (radius_sq < (width/2.0)**2).astype(float)
def square(dim, width):
    """Return a (dim, dim) float mask: 1.0 strictly inside a centred
    axis-aligned square of side width, 0.0 elsewhere.

    Parameters
    ----------
    dim: int
        Size of the 2D array
    width: int
        width of the square
    """
    axis = np.arange(dim) - dim/2.0
    xg, yg = np.meshgrid(axis, axis)
    half = width/2
    inside = (yg < half) & (yg > -half) & (xg < half) & (xg > -half)
    return inside.astype(float)
def hexagon(dim, width):
    """Return a (dim, dim) float mask: 1.0 strictly inside a centred
    hexagon of flat-to-flat width, 0.0 elsewhere.

    Parameters
    ----------
    dim: int
        Size of the 2D array
    width: int
        flat-to-flat width of the hexagon
    """
    axis = np.arange(dim) - dim/2.0
    # keep the original orientation: y varies along columns, x along rows
    yg, xg = np.meshgrid(axis, axis)
    root3 = np.sqrt(3)
    # two horizontal flats plus four +-60 degree edges
    inside = ((yg < width/2) & (yg > (-width/2)) &
              (yg < (width - root3*xg)) & (yg > (-width + root3*xg)) &
              (yg < (width + root3*xg)) & (yg > (-width - root3*xg)))
    # (the original stored this in a local named 'hex', shadowing the builtin)
    return inside.astype(float)
def snell(u, f, n_i, n_f):
    """Refract unit vector u through a dielectric interface with surface
    normal f, passing from refractive index n_i into n_f (Snell's law).

    Parameters
    ----------
    u: float array(3)
        Input unit vector
    f: float array(3)
        surface normal unit vector
    n_i: float
        initial refractive index
    n_f: float
        final refractive index.
    """
    # unit vector tangent to the surface, in the plane of incidence
    tangent = u - np.sum(u*f)*f
    tangent = tangent / np.sqrt(np.sum(tangent**2))
    theta_in = np.arccos(np.sum(u*f))
    theta_out = np.arcsin(n_i*np.sin(theta_in)/n_f)
    return tangent*np.sin(theta_out) + f*np.cos(theta_out)
def grating_sim(u, l, s, ml_d, refract=False):
    """Diffract an input unit vector off a grating, returning the output
    unit vector.

    The output v satisfies v.l = u.l and v.s = u.s + ml_d, with the
    remaining component along the grating normal (sign flipped for a
    refractive grating).  The blaze condition is ml_d = 2 sin(theta).

    Parameters
    ----------
    u: float array(3)
        initial unit vector
    l: float array(3)
        unit vector along grating lines
    s: float array(3)
        unit vector along grating surface, perpendicular to lines
    ml_d: float
        order * wavelength/d
    refract: bool
        Is the grating a refractive grating?
    """
    if (np.abs(np.sum(l*s)) > 1e-3):
        print('Error: input l and s must be orthogonal!')
        raise UserWarning
    normal = np.cross(s,l)
    if refract:
        normal = -normal
    along_l = np.sum(u*l)
    along_s = np.sum(u*s) + ml_d
    # remaining component, fixed by requiring |v| = 1
    along_n = np.sqrt(1 - along_l**2 - along_s**2)
    return along_l*l + along_s*s + along_n*normal
def rotate_xz(u, theta_deg):
    """Rotate vector u by theta_deg in the x-z plane, clockwise when x
    points up and z points right."""
    angle = np.radians(theta_deg)
    c, s = np.cos(angle), np.sin(angle)
    rot = np.array([[c, 0, s],
                    [0, 1, 0],
                    [-s, 0, c]])
    return np.dot(rot, u)
def nglass(l, glass='sio2'):
    """Sellmeier refractive index of fused silica and other glasses.

    Parameters
    ----------
    l: float or sequence of float
        Wavelength(s) in microns (the C coefficients are in microns^2).
    glass: string
        One of 'sio2', 'bk7' or 'nf2'.

    Returns
    -------
    n: float array
        Refractive index, one element per input wavelength.

    Raises
    ------
    UserWarning: if the glass name is not recognised.
    """
    # Scalars have no len(); wrap them so the loop below is uniform.
    # Catch only TypeError -- the original bare 'except' would also have
    # hidden unrelated bugs.
    try:
        nl = len(l)
    except TypeError:
        l = [l]
        nl = 1
    l = np.array(l)
    if (glass == 'sio2'):
        B = np.array([0.696166300, 0.407942600, 0.897479400])
        C = np.array([4.67914826e-3,1.35120631e-2,97.9340025])
    elif (glass == 'bk7'):
        B = np.array([1.03961212,0.231792344,1.01046945])
        C = np.array([6.00069867e-3,2.00179144e-2,1.03560653e2])
    elif (glass == 'nf2'):
        B = np.array( [1.39757037,1.59201403e-1,1.26865430])
        C = np.array( [9.95906143e-3,5.46931752e-2,1.19248346e2])
    else:
        print("ERROR: Unknown glass {0:s}".format(glass))
        raise UserWarning
    # Sellmeier equation: n^2 = 1 + sum_i B_i l^2 / (l^2 - C_i)
    n = np.ones(nl)
    for i in range(len(B)):
        n += B[i]*l**2/(l**2 - C[i])
    return np.sqrt(n)
def join_bessel(U,V,j):
    """Continuity mismatch between the Bessel-J (core) and Bessel-K
    (cladding) field solutions of a step-index fiber.

    The roots in U of this function, for normalised frequency V and
    azimuthal order j, give the guided modes: there both the field and
    its derivative are continuous at the core/cladding boundary."""
    W = np.sqrt(V**2 - U**2)
    core_term = U * special.jn(j+1, U) * special.kn(j, W)
    clad_term = W * special.kn(j+1, W) * special.jn(j, U)
    return core_term - clad_term
def neff(V, accurate_roots=True):
    """Find the effective indices of all modes for a given value of
    the fiber V number.

    Returns
    -------
    (all_roots, n_per_j): the mode parameters U (roots of join_bessel)
    concatenated over azimuthal order j, and the number of roots found
    for each j (index into all_roots via sum(n_per_j[0:j]) + n).
    """
    # Scan U on a coarse grid and look for sign changes of the
    # continuity function; each bracket contains one root.
    delu = 0.04
    U = np.arange(delu/2,V,delu)
    W = np.sqrt(V**2 - U**2)
    all_roots=np.array([])
    n_per_j=np.array([],dtype=int)
    n_modes=0
    for j in range(int(V+1)):
        f = U*special.jn(j+1,U)*special.kn(j,W) - W*special.kn(j+1,W)*special.jn(j,U)
        crossings = np.where(f[0:-1]*f[1:] < 0)[0]
        # Linear interpolation between the bracketing samples gives a
        # first estimate of each root.
        roots = U[crossings] - f[crossings]*( U[crossings+1] - U[crossings] )/( f[crossings+1] - f[crossings] )
        if accurate_roots:
            # Polish each estimate with Newton's method on join_bessel.
            for i,root in enumerate(roots):
                roots[i] = optimize.newton(join_bessel, root, args=(V,j))
        #import pdb; pdb.set_trace()
        # j > 0 modes are doubly degenerate (sin and cos azimuthal forms),
        # hence the factor of 2 in the mode count.
        if (j == 0):
            n_modes = n_modes + len(roots)
            n_per_j = np.append(n_per_j, len(roots))
        else:
            n_modes = n_modes + 2*len(roots)
            n_per_j = np.append(n_per_j, len(roots)) #could be 2*length(roots) to account for sin and cos.
        all_roots = np.append(all_roots,roots)
    return all_roots, n_per_j
def mode_2d(V, r, j=0, n=0, sampling=0.3, sz=1024):
    """Create a 2D mode profile.

    Parameters
    ----------
    V: Fiber V number
    r: core radius in microns
    sampling: microns per pixel
    n: radial order of the mode (0 is fundumental)
    j: azimuthal order of the mode (0 is pure radial modes)
    TODO: Nonradial modes."""
    # solve for the mode parameter U of the requested (j, n) mode
    u_all, n_per_j = neff(V)
    ix = np.sum(n_per_j[0:j]) + n
    U0 = u_all[ix]
    W0 = np.sqrt(V**2 - U0**2)
    # radial coordinate grid in units of the core radius
    axis = (np.arange(sz) - sz/2)*sampling/r
    xg, yg = np.meshgrid(axis, axis)
    rho = np.sqrt(xg**2 + yg**2)
    inside = np.where(rho < 1)
    outside = np.where(rho >= 1)
    profile = np.zeros( (sz,sz) )
    # Bessel J in the core, Bessel K in the cladding, with amplitudes
    # matched at the core boundary
    profile[inside] = special.jn(j, rho[inside]*U0)
    boundary_ratio = special.jn(j, U0)/special.kn(j, W0)
    profile[outside] = boundary_ratio * special.kn(j, rho[outside]*W0)
    return profile/np.sqrt(np.sum(profile**2))
def compute_v_number(wavelength_in_mm, core_radius, numerical_aperture):
    """Computes the V number (a normalized optical frequency) of a fibre.

    Parameters
    ----------
    wavelength_in_mm: float
        The wavelength of light in mm
    core_radius: float
        The core radius of the fibre in mm
    numerical_aperture: float
        The numerical aperture of the optical fibre, defined by the
        refractive indices of the core and cladding

    Returns
    -------
    v: float
        The v number of the fibre
    """
    wavenumber = 2 * np.pi / wavelength_in_mm
    return wavenumber * core_radius * numerical_aperture
def shift_and_ft(im):
    """Sub-pixel shift an image to the origin and Fourier-transform it

    Parameters
    ----------
    im: (ny,nx) float array
    ftpix: optional ( (nphi) array, (nphi) array) of Fourier sampling points.
    If included, the mean square Fourier phase will be minimised.

    Returns
    ----------
    ftim: (ny,nx/2+1)  complex array
    """
    ny = im.shape[0]
    nx = im.shape[1]
    # Upsample 3x via zero-padded FFT so the peak can be located to a
    # third of a pixel.
    im = regrid_fft(im,(3*ny,3*nx))
    shifts = np.unravel_index(im.argmax(), im.shape)
    # Roll the peak (almost) to the origin, then rebin back to the
    # original sampling before transforming.
    im = np.roll(np.roll(im,-shifts[0]+1,axis=0),-shifts[1]+1,axis=1)
    im = rebin(im,(ny,nx))
    ftim = np.fft.rfft2(im)
    return ftim
def rebin(a, shape):
    """Down-bin a 2D image to `shape` by summing rectangular blocks.

    a.shape must be an integer multiple of shape along each axis.
    Originally from:
    http://stackoverflow.com/questions/8090229/resize-with-averaging-or-rebin-a-numpy-2d-array

    Parameters
    ----------
    a: array
        Input image
    shape: (xshape,yshape)
        New shape
    """
    rows, cols = shape
    blocked = a.reshape(rows, a.shape[0]//rows, cols, a.shape[1]//cols)
    return blocked.sum(axis=3).sum(axis=1)
def regrid_fft(im,new_shape):
    """Regrid onto a larger number of pixels using an fft. This is optimal
    for Nyquist sampled data.

    Parameters
    ----------
    im: array
        The input image.
    new_shape: (new_y,new_x)
        The new shape

    Notes
    ------
    TODO: This should work with an arbitrary number of dimensions
    """
    ftim = np.fft.rfft2(im)
    # Integer arithmetic: the original used '/', which yields floats on
    # Python 3 and crashes both np.zeros and the slicing below.
    half_y = ftim.shape[0]//2
    new_ftim = np.zeros((new_shape[0], new_shape[1]//2 + 1),dtype='complex')
    # Positive y frequencies keep their position...
    new_ftim[0:half_y,0:ftim.shape[1]] = \
        ftim[0:half_y,0:ftim.shape[1]]
    # ...negative y frequencies wrap to the end of the larger array.
    new_ftim[new_shape[0]-half_y:,0:ftim.shape[1]] = \
        ftim[half_y:,0:ftim.shape[1]]
    return np.fft.irfft2(new_ftim)
| mikeireland/pymfe | pymfe/optics.py | Python | mit | 17,674 |
import webbrowser
import fresh_tomatoes
"""Defines the class movie to show the attributes of the movies represented by
the multiple instances of the class"""
class Movie():
    """A single movie: title, storyline, poster image URL and YouTube
    trailer URL."""

    def __init__(self, movie_title, movie_storyline, poster_image,
                 trailer_youtube):
        """Store the movie's display attributes."""
        self.title = movie_title
        self.storyline = movie_storyline
        self.poster_image_url = poster_image
        self.trailer_youtube_url = trailer_youtube

    def show_trailer(self):
        """Open this movie's trailer in the default web browser."""
        webbrowser.open(self.trailer_youtube_url)
""" Multiple instances of the Class Movie to represent the favorite movies;
Elements serially are the 'movie title', 'description', 'movie image',
and 'youtube url' for the trailer"""
# Instance for the movie 'Toy Story'
toy_story = Movie("Toy Story",
"A story of a boy and his toys that come to life",
"https://upload.wikimedia.org/wikipedia/en/1/13/Toy_Story.jpg", # noqa
"https://www.youtube.com/watch?v=vwyZH85NQC4") # noqa
# Instance for the movie 'Avatar'
avatar = Movie("Avatar",
"A marine on an alien planet",
"http://www.impawards.com/2009/posters/avatar_xlg.jpg", # noqa
"https://www.youtube.com/watch?v=5PSNL1qE6VY") # noqa
# Instance for the movie 'Interstellar'
interstellar = Movie("Interstellar",
"A group of astronauts who travel through a wormhole"
" in search of a new home for humanity.",
"https://upload.wikimedia.org/wikipedia/en/b/bc/Interstellar_film_poster.jpg", # noqa
"https://www.youtube.com/watch?v=zSWdZVtXT7E") # noqa
# Instance for the movie 'Bose-The forgotten hero'
netaji_bose = Movie("Netaji Bose, The forgotten hero",
"The film depicts the life of the Indian independence "
"leader Subhas Chandra Bose",
"https://upload.wikimedia.org/wikipedia/en/f/f8/Bosefilm.jpg", # noqa
"https://www.youtube.com/watch?v=dfzCuNElusk") # noqa
# Instance for the movie 'La La Land'
lala_land = Movie("La La Land",
"A jazz pianist and an aspiring actress who meet and"
" fall in love in Los Angeles.",
"https://upload.wikimedia.org/wikipedia/en/a/ab/La_La_Land_%28film%29.png", # noqa
"https://www.youtube.com/watch?v=0pdqf4P9MB8") # noqa
# Instance for the movie Inside Out
inside_out = Movie("Inside Out",
"The film is set in the mind of a young girl named Riley"
" Andersen where five personified emotions; Joy, Sadness,"
" Anger, Fear and Disgust try to lead her through life",
"https://upload.wikimedia.org/wikipedia/en/0/0a/Inside_Out_%282015_film%29_poster.jpg", # noqa
"https://www.youtube.com/watch?v=yRUAzGQ3nSY") # noqa
'''Grouping all instances in a list "movies"'''
movies = [toy_story, avatar, interstellar, netaji_bose, lala_land, inside_out]
'''Calling the Output'''
fresh_tomatoes.open_movies_page(movies)
| TanayChowdhury/movie_list | media.py | Python | bsd-3-clause | 3,199 |
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""JSON Web Token (JWT) in compact serialization format.
Jwt uses jws underneath. The difference between jws and jwt is that
jws only verifies the signature while jwt verifies both the signature and
claims as defined at https://tools.ietf.org/html/rfc7519#section-4.1. In
particular, in addition to signature verification, jwt does the following:
1. Verify expected issuer, subjects and list of audiences. However, the
verification is **optional** because one, jwt does not know what your expected
issuer, subject and list of audiences are and second, RFCs do not mandate
these claims. As a consequence, when you construct the verifier:
+ If you do not specify these fields, jwt does *not** know how to verify
them, and hence does **not** verify them.
+ If you specify these fields, the verification is automatic and mandatory.
2. When 'exp', 'nbf' are in the claims, jwt automatically verifies them.
3. If you use your own claims that aren't defined at
https://tools.ietf.org/html/rfc7519#section-4.1, jwt does not know how to
verify them. You have to verify them yourselves after signature verification
and RFC claims verification.
"""
__author__ = "quannguyen@google.com (Quan Nguyen)"
import json
import jws
from . import jwsutil
from .exceptions import SecurityException
import six
import datetime
import calendar
class JwtPublicKeyVerify(object):
  """JWT Public Key Verifier which verifies both the signature and claims."""

  def __init__(self,
               jwk_set,
               issuer=None,
               subject=None,
               audiences=None,
               clock_skew_tolerance=0):
    """Constructor for JwtPublicKeyVerify.

    Args:
      jwk_set: a JwkSet.
      issuer: string, the issuer claim as defined at
        https://tools.ietf.org/html/rfc7519#section-4.1.1.
      subject: string, the subject claim as defined at
        https://tools.ietf.org/html/rfc7519#section-4.1.2.
      audiences: list of string, the audiences claim as defined at
        https://tools.ietf.org/html/rfc7519#section-4.1.3.
      clock_skew_tolerance: integer, the clock skew that the verifier
        tolerates.

    Raises:
      UnsupportedAlgorithm: if the algorithm is not defined at
        https://tools.ietf.org/html/rfc7518#section-3.1 or if jwk is not Rsa
        or Ecdsa key.
    """
    self.verifier = jws.JwsPublicKeyVerify(jwk_set)
    self.issuer = issuer
    self.subject = subject
    self.audiences = audiences
    self.clock_skew_tolerance = clock_skew_tolerance

  def verify(self, token):
    """Verifies the token's signature and the validity of its claims.

    Args:
      token: bytes, the JWS compact serialization token as defined at
        https://tools.ietf.org/html/rfc7515#section-7.1.

    Returns:
      dict, the deserialized JSON payload in the token.

    Raises:
      SecurityException: when the token is invalid
    """
    try:
      payload = json.loads(self.verifier.verify(token).decode("utf-8"))
      if _verify_claims(payload, self.issuer, self.subject, self.audiences,
                        self.clock_skew_tolerance):
        return payload
      else:
        raise SecurityException("Invalid token")
    except SecurityException:
      # Bare 'raise' keeps the original traceback ('raise e' discarded it).
      raise
    except Exception:
      # Was a bare 'except:', which would also have converted SystemExit
      # and KeyboardInterrupt into SecurityException.
      raise SecurityException("Invalid token")
class JwtPublicKeySign(object):
  """Jwt public key signer that supports both Ecdsa and Rsa signature schemes.
  """

  def __init__(self, jwk_set):
    """Constructor for JwtPublicKeySign.

    Args:
      jwk_set: a JwkSet.

    Raises:
      UnsupportedAlgorithm: if the algorithm is not defined at
        https://tools.ietf.org/html/rfc7518#section-3.1 or if jwk is not Rsa or
        Ecdsa key.
    """
    self.signer = jws.JwsPublicKeySign(jwk_set)

  def sign(self, header, payload):
    """Computes the signed jwt as defined at rfc7515#section-7.1.

    Args:
      header: dict, dictionary of header to convert to JSON and sign.
      payload: dict, dictionary of the payload to conert to JSON and sign.

    Returns:
      bytes, the signed token as defined at
      https://tools.ietf.org/html/rfc7515#section-7.1.

    Raises:
      SecurityException: if the header's algorithm or kid does not match the
      key's.
    """
    # Signing a JWT is plain JWS signing; the claims checks only apply on
    # the verify side.
    return self.signer.sign(header, payload)
class JwtMacVerify(object):
  """Jwt Mac Verifier that verifies both the authentication code and claims."""

  def __init__(self,
               jwk_set,
               issuer=None,
               subject=None,
               audiences=None,
               clock_skew_tolerance=0):
    """Constructor for JwtMacVerify.

    Args:
      jwk_set: a JwkSet.
      issuer: string, the issuer claim as defined at
        https://tools.ietf.org/html/rfc7519#section-4.1.1.
      subject: string, the subject claim as defined at
        https://tools.ietf.org/html/rfc7519#section-4.1.2.
      audiences: list of string, the audiences claim as defined at
        https://tools.ietf.org/html/rfc7519#section-4.1.3.
      clock_skew_tolerance: integer, the clock skew that the verifier
        tolerates.

    Raises:
      UnsupportedAlgorithm: if the algorithm is not defined at
        https://tools.ietf.org/html/rfc7518#section-3.1 or if jwk is not a
        symmetric Hmac key.
    """
    self.verifier = jws.JwsMacVerify(jwk_set)
    self.issuer = issuer
    self.subject = subject
    self.audiences = audiences
    self.clock_skew_tolerance = clock_skew_tolerance

  def verify(self, token):
    """Verifies the token's mac and the validity of the payload's claims.

    Args:
      token: bytes, the JWS compact serialization token as defined at
        https://tools.ietf.org/html/rfc7515#section-7.1.

    Returns:
      dict, the deserialized JSON payload in the token.

    Raises:
      SecurityException: when the token is not valid.
    """
    try:
      payload = json.loads(self.verifier.verify(token).decode("utf-8"))
      if _verify_claims(payload, self.issuer, self.subject, self.audiences,
                        self.clock_skew_tolerance):
        return payload
      else:
        raise SecurityException("Invalid token")
    except SecurityException:
      # Bare 'raise' keeps the original traceback ('raise e' discarded it).
      raise
    except Exception:
      # Was a bare 'except:', which would also have converted SystemExit
      # and KeyboardInterrupt into SecurityException.
      raise SecurityException("Invalid token")
class JwtMacAuthenticator(object):
  """Jws Mac Authenticator that authenticates jwt token."""

  def __init__(self, jwk_set):
    """Constructor for JwtMacAuthenticator.

    Args:
      jwk_set: a JwkSet.

    Raises:
      UnsupportedAlgorithm: if the key.algorithm is not defined at
        https://tools.ietf.org/html/rfc7518#section-3.1 or if jwk is not symmetric
        Hmac key.
    """
    self.authenticator = jws.JwsMacAuthenticator(jwk_set)

  def authenticate(self, header, payload):
    """Computes the authenticated jwt as defined at rfc7515#section-7.1.

    Args:
      header: dict, dictionary of header to convert to JSON and sign.
      payload: dict, dictionary of payload to convert to JSON and sign.

    Returns:
      bytes, the authenticated token as defined at
      https://tools.ietf.org/html/rfc7515#section-7.1.

    Raises:
      SecurityException: if the header's algorithm or kid does not match the
      key's.
    """
    # Authenticating a JWT is plain JWS MAC computation; claims checks
    # only apply on the verify side.
    return self.authenticator.authenticate(header, payload)
def _get_unix_timestamp():
  """Current UTC time as integer seconds since the Unix epoch."""
  utc_now = datetime.datetime.utcnow()
  return calendar.timegm(utc_now.utctimetuple())
def _verify_claims(payload, issuer, subject, audiences, clock_skew_tolerance):
  """Returns True iff the registered JWT claims in payload are valid.

  'iss' and 'sub' must exactly equal the expected strings, and 'aud'
  must equal one of the strings in audiences; each of these checks is
  skipped when the corresponding expectation is None.  'exp' and 'nbf',
  when present as integers, are compared against the current time with
  clock_skew_tolerance seconds of slack.

  NOTE(review): 'aud' is only accepted as a single string here, but RFC
  7519 section 4.1.3 also allows an array of strings -- confirm this
  restriction is intentional.
  NOTE(review): a present but non-integer 'exp'/'nbf' is silently
  ignored (token accepted) -- confirm this is intentional.
  """
  if issuer is not None:
    if payload.get("iss", None) is None:
      return False
    if not isinstance(payload["iss"],
                      six.string_types) or payload["iss"] != issuer:
      return False
  if subject is not None:
    if payload.get("sub", None) is None:
      return False
    if not isinstance(payload["sub"],
                      six.string_types) or payload["sub"] != subject:
      return False
  if audiences is not None:
    if payload.get("aud", None) is None:
      return False
    if not isinstance(payload["aud"], six.string_types) or not any(
        payload["aud"] == s for s in audiences):
      return False
  now = _get_unix_timestamp()
  if payload.get("exp", None) is not None and isinstance(
      payload["exp"], six.integer_types):
    if now > int(payload["exp"]) + clock_skew_tolerance:
      return False
  if payload.get("nbf", None) is not None and isinstance(
      payload["nbf"], six.integer_types):
    if now < int(payload["nbf"]) - clock_skew_tolerance:
      return False
  return True
| google/jws | jws/jwt.py | Python | apache-2.0 | 9,120 |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Input pipeline.
Please see the @{$reading_data$reading data how-to}
for context.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import io_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import sparse_ops
from tensorflow.python.ops import variables
from tensorflow.python.summary import summary
from tensorflow.python.training import queue_runner
# pylint: disable=protected-access
_store_sparse = sparse_ops._add_sparse_to_tensors_map
_store_many_sparse = sparse_ops._add_many_sparse_to_tensors_map
_restore_sparse = sparse_ops._take_many_sparse_from_tensors_map
# pylint: enable=protected-access
def match_filenames_once(pattern, name=None):
  """Save the list of files matching pattern, so it is only computed once.

  Args:
    pattern: A file pattern (glob), or 1D tensor of file patterns.
    name: A name for the operations (optional).

  Returns:
    A variable that is initialized to the list of files matching the pattern(s).
  """
  with ops.name_scope(name, "matching_filenames", [pattern]) as name:
    # A non-trainable local variable, so the (potentially expensive) glob
    # runs once at initialization rather than on every read.
    # validate_shape=False because the number of matches is unknown until
    # runtime.
    return variables.Variable(io_ops.matching_files(pattern), trainable=False,
                              name=name, validate_shape=False,
                              collections=[ops.GraphKeys.LOCAL_VARIABLES])
def limit_epochs(tensor, num_epochs=None, name=None):
  """Returns tensor `num_epochs` times and then raises an `OutOfRange` error.

  Note: creates local counter `epochs`. Use `local_variables_initializer()` to
  initialize local variables.

  Args:
    tensor: Any `Tensor`.
    num_epochs: A positive integer (optional).  If specified, limits the number
      of steps the output tensor may be evaluated.
    name: A name for the operations (optional).

  Returns:
    tensor or `OutOfRange`.

  Raises:
    ValueError: if `num_epochs` is invalid.
  """
  if num_epochs is None:
    return tensor
  if num_epochs <= 0:
    raise ValueError("num_epochs must be > 0 not %d." % num_epochs)
  with ops.name_scope(name, "limit_epochs", [tensor]) as name:
    zero64 = constant_op.constant(0, dtype=dtypes.int64)
    # A local (non-trainable) counter; count_up_to raises OutOfRange once
    # it reaches num_epochs.
    epochs = variables.Variable(
        zero64, name="epochs", trainable=False,
        collections=[ops.GraphKeys.LOCAL_VARIABLES])
    counter = epochs.count_up_to(num_epochs)
    # The control dependency forces the counter to tick on every
    # evaluation of the returned tensor.
    with ops.control_dependencies([counter]):
      return array_ops.identity(tensor, name=name)
def input_producer(input_tensor,
                   element_shape=None,
                   num_epochs=None,
                   shuffle=True,
                   seed=None,
                   capacity=32,
                   shared_name=None,
                   summary_name=None,
                   name=None,
                   cancel_op=None):
  """Output the rows of `input_tensor` to a queue for an input pipeline.
  Note: if `num_epochs` is not `None`, this function creates local counter
  `epochs`. Use `local_variables_initializer()` to initialize local variables.
  Args:
    input_tensor: A tensor with the rows to produce. Must be at least
      one-dimensional. Must either have a fully-defined shape, or
      `element_shape` must be defined.
    element_shape: (Optional.) A `TensorShape` representing the shape of a
      row of `input_tensor`, if it cannot be inferred.
    num_epochs: (Optional.) An integer. If specified `input_producer` produces
      each row of `input_tensor` `num_epochs` times before generating an
      `OutOfRange` error. If not specified, `input_producer` can cycle through
      the rows of `input_tensor` an unlimited number of times.
    shuffle: (Optional.) A boolean. If true, the rows are randomly shuffled
      within each epoch.
    seed: (Optional.) An integer. The seed to use if `shuffle` is true.
    capacity: (Optional.) The capacity of the queue to be used for buffering
      the input.
    shared_name: (Optional.) If set, this queue will be shared under the given
      name across multiple sessions.
    summary_name: (Optional.) If set, a scalar summary for the current queue
      size will be generated, using this name as part of the tag.
    name: (Optional.) A name for queue.
    cancel_op: (Optional.) Cancel op for the queue
  Returns:
    A queue with the output rows. A `QueueRunner` for the queue is
    added to the current `QUEUE_RUNNER` collection of the current
    graph.
  Raises:
    ValueError: If the shape of the input cannot be inferred from the arguments.
  """
  with ops.name_scope(name, "input_producer", [input_tensor]):
    input_tensor = ops.convert_to_tensor(input_tensor, name="input_tensor")
    # Per-row shape: everything after the leading (row) dimension, merged
    # with the caller-supplied `element_shape` override if any.
    element_shape = input_tensor.get_shape()[1:].merge_with(element_shape)
    if not element_shape.is_fully_defined():
      raise ValueError("Either `input_tensor` must have a fully defined shape "
                       "or `element_shape` must be specified")
    if shuffle:
      input_tensor = random_ops.random_shuffle(input_tensor, seed=seed)
    # Adds the epoch counter; raises OutOfRange after num_epochs passes
    # (no-op when num_epochs is None).
    input_tensor = limit_epochs(input_tensor, num_epochs)
    q = data_flow_ops.FIFOQueue(capacity=capacity,
                                dtypes=[input_tensor.dtype.base_dtype],
                                shapes=[element_shape],
                                shared_name=shared_name, name=name)
    # The QueueRunner repeatedly runs this enqueue_many in a background
    # thread, refilling the queue with (re-evaluated) rows.
    enq = q.enqueue_many([input_tensor])
    queue_runner.add_queue_runner(
        queue_runner.QueueRunner(
            q, [enq], cancel_op=cancel_op))
    if summary_name is not None:
      # Queue fullness as a fraction of capacity, in [0, 1].
      summary.scalar(summary_name,
                     math_ops.cast(q.size(), dtypes.float32) * (1. / capacity))
    return q
def string_input_producer(string_tensor,
                          num_epochs=None,
                          shuffle=True,
                          seed=None,
                          capacity=32,
                          shared_name=None,
                          name=None,
                          cancel_op=None):
  """Output strings (e.g. filenames) to a queue for an input pipeline.
  Note: if `num_epochs` is not `None`, this function creates local counter
  `epochs`. Use `local_variables_initializer()` to initialize local variables.
  Args:
    string_tensor: A 1-D string tensor with the strings to produce.
    num_epochs: An integer (optional). If specified, `string_input_producer`
      produces each string from `string_tensor` `num_epochs` times before
      generating an `OutOfRange` error. If not specified,
      `string_input_producer` can cycle through the strings in `string_tensor`
      an unlimited number of times.
    shuffle: Boolean. If true, the strings are randomly shuffled within each
      epoch.
    seed: An integer (optional). Seed used if shuffle == True.
    capacity: An integer. Sets the queue capacity.
    shared_name: (optional). If set, this queue will be shared under the given
      name across multiple sessions.
    name: A name for the operations (optional).
    cancel_op: Cancel op for the queue (optional).
  Returns:
    A queue with the output strings. A `QueueRunner` for the Queue
    is added to the current `Graph`'s `QUEUE_RUNNER` collection.
  Raises:
    ValueError: If the string_tensor is a null Python list. At runtime,
    will fail with an assertion if string_tensor becomes a null tensor.
  """
  not_null_err = "string_input_producer requires a non-null input tensor"
  # Catch empty Python lists/tuples at graph-construction time; an empty
  # Tensor can only be detected at run time (see the Assert below).
  if not isinstance(string_tensor, ops.Tensor) and not string_tensor:
    raise ValueError(not_null_err)
  with ops.name_scope(name, "input_producer", [string_tensor]) as name:
    string_tensor = ops.convert_to_tensor(string_tensor, dtype=dtypes.string)
    with ops.control_dependencies([
        control_flow_ops.Assert(
            math_ops.greater(array_ops.size(string_tensor), 0),
            [not_null_err])]):
      string_tensor = array_ops.identity(string_tensor)
    # Delegate to the generic row producer; each string is a scalar row.
    return input_producer(
        input_tensor=string_tensor,
        element_shape=[],
        num_epochs=num_epochs,
        shuffle=shuffle,
        seed=seed,
        capacity=capacity,
        shared_name=shared_name,
        name=name,
        summary_name="fraction_of_%d_full" % capacity,
        cancel_op=cancel_op)
def range_input_producer(limit, num_epochs=None, shuffle=True, seed=None,
                         capacity=32, shared_name=None, name=None):
  """Produces the integers from 0 to limit-1 in a queue.

  Note: if `num_epochs` is not `None`, this function creates local counter
  `epochs`. Use `local_variables_initializer()` to initialize local variables.

  Args:
    limit: An int32 scalar tensor.
    num_epochs: An integer (optional). If specified, `range_input_producer`
      produces each integer `num_epochs` times before generating an
      OutOfRange error. If not specified, `range_input_producer` can cycle
      through the integers an unlimited number of times.
    shuffle: Boolean. If true, the integers are randomly shuffled within each
      epoch.
    seed: An integer (optional). Seed used if shuffle == True.
    capacity: An integer. Sets the queue capacity.
    shared_name: (optional). If set, this queue will be shared under the given
      name across multiple sessions.
    name: A name for the operations (optional).

  Returns:
    A Queue with the output integers. A `QueueRunner` for the Queue
    is added to the current `Graph`'s `QUEUE_RUNNER` collection.
  """
  with ops.name_scope(name, "input_producer", [limit]) as name:
    # Each integer is a scalar row of the [limit]-shaped range tensor.
    return input_producer(
        input_tensor=math_ops.range(limit),
        element_shape=[],
        num_epochs=num_epochs,
        shuffle=shuffle,
        seed=seed,
        capacity=capacity,
        shared_name=shared_name,
        summary_name="fraction_of_%d_full" % capacity,
        name=name)
def slice_input_producer(tensor_list, num_epochs=None, shuffle=True, seed=None,
                         capacity=32, shared_name=None, name=None):
  """Produces a slice of each `Tensor` in `tensor_list`.

  Implemented using a Queue -- a `QueueRunner` for the Queue
  is added to the current `Graph`'s `QUEUE_RUNNER` collection.

  Args:
    tensor_list: A list of `Tensor` objects. Every `Tensor` in
      `tensor_list` must have the same size in the first dimension.
    num_epochs: An integer (optional). If specified, `slice_input_producer`
      produces each slice `num_epochs` times before generating an
      `OutOfRange` error. If not specified, `slice_input_producer` can cycle
      through the slices an unlimited number of times.
    shuffle: Boolean. If true, the integers are randomly shuffled within each
      epoch.
    seed: An integer (optional). Seed used if shuffle == True.
    capacity: An integer. Sets the queue capacity.
    shared_name: (optional). If set, this queue will be shared under the given
      name across multiple sessions.
    name: A name for the operations (optional).

  Returns:
    A list of tensors, one for each element of `tensor_list`. If the tensor
    in `tensor_list` has shape `[N, a, b, .., z]`, then the corresponding
    output tensor will have shape `[a, b, ..., z]`.

  Raises:
    ValueError: if `slice_input_producer` produces nothing from `tensor_list`.
  """
  with ops.name_scope(name, "input_producer", tensor_list):
    tensor_list = ops.convert_n_to_tensor_or_indexed_slices(tensor_list)
    if not tensor_list:
      raise ValueError(
          "Expected at least one tensor in slice_input_producer().")
    # Produce shuffled / epoch-limited indices into the first dimension and
    # gather the matching slice out of every tensor.
    # TODO(josh11b): Add an assertion that the first dimension of
    # everything in TensorList matches. Maybe just check the inferred shapes?
    num_slices = array_ops.shape(tensor_list[0])[0]
    index_queue = range_input_producer(
        num_slices, num_epochs=num_epochs, shuffle=shuffle, seed=seed,
        capacity=capacity, shared_name=shared_name)
    index = index_queue.dequeue()
    return [array_ops.gather(t, index) for t in tensor_list]
# Helpers for the batching functions ------------------------------------------
def _flatten(tensor_list_list):
return [tensor for tensor_list in tensor_list_list for tensor in tensor_list]
class _SparseMetaData(object):
"""Store information about the Tensor: Is it sparse?, map_op, and rank."""
def __init__(self, sparse, map_op, rank):
"""Create the metadata.
Args:
sparse: Python boolean.
map_op: The `Operation` that created the `SparseTensorsMap` in question.
This Op contains information about the underlying Map object and the
dtype of the original data.
rank: The statically known rank of the `SparseTensor`.
"""
self._sparse = sparse
self._map_op = map_op
self._rank = rank
def __eq__(self, other):
if self.sparse != other.sparse:
return False
if not self.sparse:
return True
# If map_ops are not the same, the data source is not the same.
if (self.map_op is not None) != (other.map_op is not None):
return False
if self.map_op != other.map_op:
return False
if not self.rank.is_compatible_with(other.rank):
return False
return True
def __ne__(self, other):
return not self.__eq__(other)
def __str__(self):
return "[SparseMetaData(%s, %s, %s)]" % (self.sparse, self.map_op.name,
self.rank)
def merge_with(self, other):
if self != other:
raise ValueError("SparseMetaData objects are incompatible: %s vs. %s"
% (self, other))
if self.sparse:
self.rank.merge_with(other.rank)
return self
@property
def map_op(self):
return self._map_op
@property
def sparse(self):
return self._sparse
@property
def rank(self):
return self._rank
def _as_tensor_list(tensors):
if isinstance(tensors, dict):
return [tensors[k] for k in sorted(tensors)]
else:
return tensors
def _as_tensor_list_list(tensors_list):
  """Convert each element of `tensors_list` to a list via `_as_tensor_list`.

  Raises:
    ValueError: if `tensors_list` is empty or its dictionaries disagree on
      keys.
  """
  if not tensors_list:
    raise ValueError("Expected at least one set of tensors")
  if not isinstance(tensors_list[0], dict):
    return tensors_list
  # Dict inputs must all share one key set so the per-key ordering matches.
  expected_keys = set(tensors_list[0].keys())
  for tensors in tensors_list[1:]:
    if set(tensors.keys()) != expected_keys:
      raise ValueError("All dictionaries in tensors_list must have "
                       "the same keys")
  return [_as_tensor_list(tensors) for tensors in tensors_list]
def _as_original_type(original_tensors, tensor_list):
if isinstance(original_tensors, dict):
if len(original_tensors) == 1:
# tensor_list is bogusly returned as a single tensor if only one tensor
# was enqueued. Make it a list again. See b/28117485.
tensor_list = [tensor_list]
return {k: tensor_list[i]
for i, k in enumerate(sorted(original_tensors))}
else:
return tensor_list
def _smart_cond(pred, if_true, if_false):
  """A `tf.cond` that does nothing when the condition is static."""
  pred = ops.convert_to_tensor(pred)
  static_pred = tensor_util.constant_value(pred)
  if static_pred is None:
    # Truly dynamic predicate: defer the branch choice to graph execution.
    return control_flow_ops.cond(pred, if_true, if_false)
  # Statically known predicate: pick the branch at graph-construction time.
  return if_true() if static_pred else if_false()
def _store_sparse_tensors(tensor_list, enqueue_many, keep_input,
                          shared_map_ops=None):
  """Store SparseTensors for feeding into batch, etc.
  If `shared_map_ops` is provided, the underlying `SparseTensorsMap` objects
  are reused (shared). This argument is useful for, e.g., `batch_join`
  where multiple enqueue operations write to the same Queue component,
  and another (dequeue) thread reads from that same location and must then
  restore the associated `SparseTensor` objects. In this case, the sparse
  restore must have a single `SparseTensorMap` from which to read out the
  handles; so a single `SparseTensorMap` must be shared for storing
  across the multiple enqueue operations. This sharing is performed by
  calling `_store_sparse_tensors` the first time with `shared_map_ops=None`,
  and then in subsequent times with this value set to the list of `Operation`
  objects created in the first call.
  Args:
    tensor_list: List of `Tensor` and `SparseTensor` objects.
    enqueue_many: Python `Boolean`.
    keep_input: Must be a scalar bool Tensor (not a Python bool). If False,
      don't store.
    shared_map_ops: (optional) List of `Operation` objects from a previous
      call to `_store_sparse_tensors`. If not `None`, the op types should be
      one of `AddSparseToTensorsMap` or `AddManySparseToTensorsMap` in the
      locations corresponding to `SparseTensors` in `tensor_list`.
  Returns:
    A tuple `(stored_list, sparse_info_list)` where `stored_list` is a list
    of `Tensor` objects (same length as `tensor_list`) and `sparse_info_list`
    is a list of the same length of `_SparseMetaData` objects.
  """
  # One entry (a shared map op, or None) per position in `tensor_list`.
  maybe_shared_map_ops = shared_map_ops or [None] * len(tensor_list)
  def _sparse_meta_data(t, storing_op, map_op):
    # Dense tensors get placeholder metadata (sparse=False).
    if not isinstance(t, sparse_tensor.SparseTensor):
      return _SparseMetaData(False, None, None)
    rank = t.dense_shape.get_shape().with_rank(1)[0]
    if enqueue_many:
      # The leading dimension indexes examples; per-example rank is one less.
      rank -= 1
    # If a shared map_op was provided, use that. Otherwise use the name of
    # the operation used to store the SparseTensor.
    return _SparseMetaData(
        sparse=True, map_op=map_op or storing_op, rank=rank)
  def _maybe_store(t, shared_map_op):
    """Store Sparse tensor, if necessary."""
    if not isinstance(t, sparse_tensor.SparseTensor):
      return t
    map_op_name = shared_map_op.name if shared_map_op else None
    def _maybe_store_sparse(t, map_op_name, keep_input):
      """Conditionally store a single sparse Tensor."""
      # -1 is the sentinel handle for "not stored" (keep_input was False).
      return _smart_cond(
          keep_input,
          lambda: _store_sparse(t, shared_name=map_op_name),
          lambda: constant_op.constant(-1, dtypes.int64))
    def _maybe_store_many_sparse(t, map_op_name, keep_input):
      """Conditionally store multiple sparse Tensors."""
      out_tensor = _smart_cond(
          keep_input,
          lambda: _store_many_sparse(t, shared_name=map_op_name),
          lambda: -1 * array_ops.ones(array_ops.shape(t)[0:1], dtypes.int64))
      out_tensor.set_shape([None])  # necessary when t.ndims is unknown
      return out_tensor
    # Vector keep_input: filter the rows first, then store unconditionally;
    # otherwise pick the scalar-conditional variant matching enqueue_many.
    if keep_input.get_shape().ndims == 1:
      t = sparse_ops.sparse_retain(t, keep_input)
      store_f = lambda t, name, _: _store_many_sparse(t, shared_name=name)
    elif enqueue_many:
      store_f = _maybe_store_many_sparse
    else:
      store_f = _maybe_store_sparse
    return store_f(t, map_op_name, keep_input)
  stored_list = [
      _maybe_store(t, shared_map_op) for t, shared_map_op
      in zip(tensor_list, maybe_shared_map_ops)]
  # Since the output of `_store{_many}_sparse is wrapped in a tf.cond `Merge`,
  # we can't just get the Op of the resulting tensor.
  def _sparse_op(stored):
    # Walk the Merge op's inputs to find the actual storing op.
    for input_tensor in stored.op.inputs:
      if input_tensor.op.type in ("AddSparseToTensorsMap",
                                  "AddManySparseToTensorsMap"):
        return input_tensor.op
    # If there was no sparse input, then the original stored Tensor wasn't
    # sparse and we can just return the original Tensor's Op.
    return stored.op
  sparse_info_list = [
      _sparse_meta_data(t, _sparse_op(stored), shared_map_op)
      for t, stored, shared_map_op
      in zip(tensor_list, stored_list, maybe_shared_map_ops)]
  # Expand dims of stored tensors by 1 for proper enqueue shape
  stored_list = [
      array_ops.expand_dims(s, [-1]) if s_info.sparse else s
      for s, s_info in zip(stored_list, sparse_info_list)]
  return stored_list, sparse_info_list
def _store_sparse_tensors_join(tensor_list_list, enqueue_many, keep_input):
  """Store SparseTensors for feeding into batch_join, etc."""
  # Store the first list without sharing; this creates the SparseTensorsMap
  # ops that all subsequent lists will reuse.
  (s0, sparse_info_list) = _store_sparse_tensors(
      tensor_list_list[0], enqueue_many, keep_input)
  stored_list_list = [s0]
  for tensor_list in tensor_list_list[1:]:
    # Reuse the first list's map ops so every enqueue thread writes handles
    # into the same SparseTensorsMap that the dequeue side reads from.
    s, sparse_info_candidate = _store_sparse_tensors(
        tensor_list, enqueue_many, keep_input,
        [st.map_op for st in sparse_info_list])
    if sparse_info_list != sparse_info_candidate:
      raise ValueError("Inconsistent SparseTensors list: %s vs. %s"
                       % (tensor_list_list[0], tensor_list))
    # Merge refines statically-known ranks across the lists.
    sparse_info_list = [
        info.merge_with(candidate)
        for (info, candidate) in zip(sparse_info_list, sparse_info_candidate)]
    stored_list_list.append(s)
  return (stored_list_list, sparse_info_list)
def _restore_sparse_tensors(stored_list, sparse_info_list):
  """Restore SparseTensors after dequeue in batch, batch_join, etc.

  Args:
    stored_list: A `Tensor`, or a sequence of `Tensor`s, holding stored
      sparse handles (plus any dense tensors), as produced by
      `_store_sparse_tensors`.
    sparse_info_list: List of `_SparseMetaData`, parallel to `stored_list`.

  Returns:
    Tensors matching the container shape of `stored_list`, with sparse
    entries restored from their handles via `_restore_sparse`.
  """
  # `collections.Sequence` moved to `collections.abc` in Python 3.3 and the
  # top-level alias was removed in Python 3.10; resolve it compatibly so this
  # keeps working on both old and new interpreters.
  sequence_type = getattr(collections, "abc", collections).Sequence
  received_sequence = isinstance(stored_list, sequence_type)
  if not received_sequence:
    stored_list = (stored_list,)
  tensors = [
      # Stored handles carry a trailing size-1 dim (added for enqueue shape);
      # squeeze it back out.  Restoring re-adds the batch dimension, hence
      # rank + 1.
      _restore_sparse(sparse_map_op=info.map_op,
                      sparse_handles=array_ops.squeeze(s, [1]),
                      rank=(info.rank + 1).value)
      if info.sparse else s
      for (s, info) in zip(stored_list, sparse_info_list)]
  return tensors if received_sequence else tensors[0]
def _validate(tensor_list):
  """Convert `tensor_list` entries to tensors, requiring a non-empty list."""
  converted = ops.convert_n_to_tensor_or_indexed_slices(tensor_list)
  if not converted:
    raise ValueError("Expected at least one tensor in batch().")
  return converted
def _validate_join(tensor_list_list):
  """Convert each inner list to tensors, requiring a non-empty outer list."""
  converted = [ops.convert_n_to_tensor_or_indexed_slices(tl)
               for tl in tensor_list_list]
  if not converted:
    raise ValueError("Expected at least one input in batch_join().")
  return converted
def _validate_keep_input(keep_input, enqueue_many):
  """Validate `keep_input` argument to conditional batching functions."""
  keep_input = ops.convert_to_tensor(keep_input)
  ndims = keep_input.get_shape().ndims
  if ndims is None:
    raise ValueError(
        "`keep_input` dimensions must be known at graph construction.")
  # A per-example (vector) mask only makes sense with batched input.
  if ndims == 1 and not enqueue_many:
    raise ValueError(
        "`keep_input` cannot be a vector when `enqueue_many=False`.")
  if ndims > 1:
    raise ValueError("`keep_input` must be 0 or 1 dimensions.")
  return keep_input
def _dtypes(tensor_list_list):
all_types = [[t.dtype for t in tl] for tl in tensor_list_list]
types = all_types[0]
for other_types in all_types[1:]:
if other_types != types:
raise TypeError("Expected types to be consistent: %s vs. %s." %
(", ".join(x.name for x in types),
", ".join(x.name for x in other_types)))
return types
def _merge_shapes(shape_list, enqueue_many):
  """Merge the given shapes into one; drops the batch dim when enqueue_many.

  Args:
    shape_list: List of shapes (anything `tensor_shape.as_shape` accepts).
    enqueue_many: Python bool.  If true, strip each shape's leading (batch)
      dimension before merging, since only per-example shapes are enqueued.

  Returns:
    The merged shape, as a Python list.

  Raises:
    ValueError: if the shapes are incompatible.
  """
  shapes = [tensor_shape.as_shape(s) for s in shape_list]
  if enqueue_many:
    # We want the shapes without the leading batch dimension.
    shapes = [s.with_rank_at_least(1)[1:] for s in shapes]
  merged_shape = shapes[0]
  for shape in shapes[1:]:
    # `TensorShape.merge_with` returns the merged shape; it does not mutate
    # in place.  Keep the result so dimensions unknown in earlier shapes can
    # be refined by later ones (previously the return value was discarded,
    # so only the incompatibility check took effect).
    merged_shape = merged_shape.merge_with(shape)
  return merged_shape.as_list()
def _shapes(tensor_list_list, shapes, enqueue_many):
  """Calculate and merge the shapes of incoming tensors.

  Args:
    tensor_list_list: List of tensor lists.
    shapes: List of shape tuples corresponding to tensors within the lists.
    enqueue_many: Boolean describing whether shapes will be enqueued as
      batches or individual entries.

  Returns:
    A list of shapes aggregating shape inference info from `tensor_list_list`,
    or returning `shapes` if it is not `None`.

  Raises:
    ValueError: If any of the inferred shapes in `tensor_list_list` lack a
      well defined rank.
  """
  if shapes is not None:
    # Caller supplied explicit shapes; trust them.
    return shapes
  len0 = len(tensor_list_list[0])
  # Every tensor must have a statically known rank before merging.
  for tl in tensor_list_list:
    for i in xrange(len0):
      if tl[i].get_shape().ndims is None:
        raise ValueError("Cannot infer Tensor's rank: %s" % tl[i])
  return [
      _merge_shapes(
          [tl[i].get_shape().as_list() for tl in tensor_list_list],
          enqueue_many)
      for i in xrange(len0)]
def _select_which_to_enqueue(tensor_list, keep_input):
  """Select which examples to enqueue based on vector `keep_input`."""
  select_i = math_ops.cast(keep_input, dtypes.int32)
  # dynamic_partition puts kept rows (mask value 1) in partition 1.
  return [data_flow_ops.dynamic_partition(x, select_i, num_partitions=2)[1]
          for x in tensor_list]
def _enqueue_join(queue, tensor_list_list, enqueue_many, keep_input):
  """Enqueue `tensor_list_list` in `queue`.

  Builds one enqueue op per inner tensor list (one per reader thread) and
  registers a `QueueRunner` driving all of them.

  Args:
    queue: The queue to enqueue into.
    tensor_list_list: List of tensor lists; each list feeds one enqueue op.
    enqueue_many: Python bool; whether each tensor holds a batch of examples.
    keep_input: Scalar or vector bool `Tensor` selecting what to enqueue.
  """
  if enqueue_many:
    enqueue_fn = queue.enqueue_many
  else:
    enqueue_fn = queue.enqueue
  if keep_input.get_shape().ndims == 1:
    # Vector keep_input: filter the examples, then enqueue what remains.
    enqueue_ops = [enqueue_fn(_select_which_to_enqueue(x, keep_input))
                   for x in tensor_list_list]
  else:
    # Scalar keep_input: enqueue everything or nothing.  Bind `tl` as a
    # default argument so each lambda captures the current list instead of
    # the loop variable, avoiding the late-binding closure pitfall that
    # previously needed a pylint cell-var-from-loop suppression.
    enqueue_ops = [_smart_cond(
        keep_input,
        lambda tl=tl: enqueue_fn(tl),
        control_flow_ops.no_op) for tl in tensor_list_list]
  queue_runner.add_queue_runner(queue_runner.QueueRunner(queue, enqueue_ops))
def _enqueue(queue, tensor_list, threads, enqueue_many, keep_input):
  """Enqueue `tensor_list` in `queue`, replicated across `threads` ops."""
  enqueue_fn = queue.enqueue_many if enqueue_many else queue.enqueue
  if keep_input.get_shape().ndims == 1:
    # Vector keep_input: filter the examples, then enqueue what remains.
    op = enqueue_fn(_select_which_to_enqueue(tensor_list, keep_input))
  else:
    # Scalar keep_input: enqueue everything or nothing.
    op = _smart_cond(
        keep_input,
        lambda: enqueue_fn(tensor_list),
        control_flow_ops.no_op)
  queue_runner.add_queue_runner(
      queue_runner.QueueRunner(queue, [op] * threads))
def _which_queue(dynamic_pad):
  """Return the queue class: padding FIFO if `dynamic_pad`, else plain FIFO."""
  if dynamic_pad:
    return data_flow_ops.PaddingFIFOQueue
  return data_flow_ops.FIFOQueue
def _batch(tensors, batch_size, keep_input, num_threads=1, capacity=32,
           enqueue_many=False, shapes=None, dynamic_pad=False,
           allow_smaller_final_batch=False, shared_name=None,
           name=None):
  """Helper function for `batch` and `maybe_batch`."""
  tensor_list = _as_tensor_list(tensors)
  with ops.name_scope(name, "batch", list(tensor_list) + [keep_input]) as name:
    tensor_list = _validate(tensor_list)
    keep_input = _validate_keep_input(keep_input, enqueue_many)
    # Sparse tensors are converted to handles into a SparseTensorsMap so they
    # can flow through a dense queue; they are restored after dequeue below.
    (tensor_list, sparse_info) = _store_sparse_tensors(
        tensor_list, enqueue_many, keep_input)
    types = _dtypes([tensor_list])
    shapes = _shapes([tensor_list], shapes, enqueue_many)
    # TODO(josh11b,mrry): Switch to BatchQueue once it is written.
    queue = _which_queue(dynamic_pad)(
        capacity=capacity, dtypes=types, shapes=shapes, shared_name=shared_name)
    _enqueue(queue, tensor_list, num_threads, enqueue_many, keep_input)
    # Export queue fullness (fraction of capacity, in [0, 1]) as a summary.
    summary.scalar("fraction_of_%d_full" % capacity,
                   math_ops.cast(queue.size(), dtypes.float32) *
                   (1. / capacity))
    # dequeue_up_to permits a final partial batch when the queue closes.
    if allow_smaller_final_batch:
      dequeued = queue.dequeue_up_to(batch_size, name=name)
    else:
      dequeued = queue.dequeue_many(batch_size, name=name)
    dequeued = _restore_sparse_tensors(dequeued, sparse_info)
    return _as_original_type(tensors, dequeued)
# TODO(josh11b): Add a thread_multiplier or num_threads (that has to be
# a multiple of len(tensor_list_list)?) parameter, to address the use
# case where you want more parallelism than you can support different
# readers (either because you don't have that many files or can't
# read that many files in parallel due to the number of seeks required).
# Once this is done, batch() can be written as a call to batch_join().
def _batch_join(tensors_list, batch_size, keep_input, capacity=32,
                enqueue_many=False, shapes=None, dynamic_pad=False,
                allow_smaller_final_batch=False, shared_name=None, name=None):
  """Helper function for `batch_join` and `maybe_batch_join`."""
  tensor_list_list = _as_tensor_list_list(tensors_list)
  with ops.name_scope(name, "batch_join",
                      _flatten(tensor_list_list) + [keep_input]) as name:
    tensor_list_list = _validate_join(tensor_list_list)
    keep_input = _validate_keep_input(keep_input, enqueue_many)
    # Sparse tensors are stored as handles into a shared SparseTensorsMap so
    # every enqueue thread writes to, and the dequeue reads from, one map.
    tensor_list_list, sparse_info = _store_sparse_tensors_join(
        tensor_list_list, enqueue_many, keep_input)
    types = _dtypes(tensor_list_list)
    shapes = _shapes(tensor_list_list, shapes, enqueue_many)
    # TODO(josh11b,mrry): Switch to BatchQueue once it is written.
    queue = _which_queue(dynamic_pad)(
        capacity=capacity, dtypes=types, shapes=shapes, shared_name=shared_name)
    # One enqueue op (hence one runner thread) per inner tensor list.
    _enqueue_join(queue, tensor_list_list, enqueue_many, keep_input)
    # Export queue fullness (fraction of capacity, in [0, 1]) as a summary.
    summary.scalar("fraction_of_%d_full" % capacity,
                   math_ops.cast(queue.size(), dtypes.float32) *
                   (1. / capacity))
    # dequeue_up_to permits a final partial batch when the queue closes.
    if allow_smaller_final_batch:
      dequeued = queue.dequeue_up_to(batch_size, name=name)
    else:
      dequeued = queue.dequeue_many(batch_size, name=name)
    dequeued = _restore_sparse_tensors(dequeued, sparse_info)
    # tensors_list was validated to not be empty.
    return _as_original_type(tensors_list[0], dequeued)
def _shuffle_batch(tensors, batch_size, capacity, min_after_dequeue,
                   keep_input, num_threads=1, seed=None, enqueue_many=False,
                   shapes=None, allow_smaller_final_batch=False,
                   shared_name=None, name=None):
  """Helper function for `shuffle_batch` and `maybe_shuffle_batch`."""
  tensor_list = _as_tensor_list(tensors)
  with ops.name_scope(name, "shuffle_batch",
                      list(tensor_list) + [keep_input]) as name:
    tensor_list = _validate(tensor_list)
    keep_input = _validate_keep_input(keep_input, enqueue_many)
    # Sparse tensors are stored as handles so they can pass through the
    # dense queue; restored after dequeue below.
    tensor_list, sparse_info = _store_sparse_tensors(
        tensor_list, enqueue_many, keep_input)
    types = _dtypes([tensor_list])
    shapes = _shapes([tensor_list], shapes, enqueue_many)
    queue = data_flow_ops.RandomShuffleQueue(
        capacity=capacity, min_after_dequeue=min_after_dequeue, seed=seed,
        dtypes=types, shapes=shapes, shared_name=shared_name)
    _enqueue(queue, tensor_list, num_threads, enqueue_many, keep_input)
    # Fraction of the usable headroom (capacity above the shuffling floor
    # min_after_dequeue) currently occupied, clamped at 0 from below.
    full = (math_ops.cast(math_ops.maximum(0, queue.size() - min_after_dequeue),
                          dtypes.float32) *
            (1. / (capacity - min_after_dequeue)))
    # Note that name contains a '/' at the end so we intentionally do not place
    # a '/' after %s below.
    summary_name = (
        "fraction_over_%d_of_%d_full" %
        (min_after_dequeue, capacity - min_after_dequeue))
    summary.scalar(summary_name, full)
    # dequeue_up_to permits a final partial batch when the queue closes.
    if allow_smaller_final_batch:
      dequeued = queue.dequeue_up_to(batch_size, name=name)
    else:
      dequeued = queue.dequeue_many(batch_size, name=name)
    dequeued = _restore_sparse_tensors(dequeued, sparse_info)
    return _as_original_type(tensors, dequeued)
def _shuffle_batch_join(tensors_list, batch_size, capacity,
                        min_after_dequeue, keep_input, seed=None,
                        enqueue_many=False, shapes=None,
                        allow_smaller_final_batch=False, shared_name=None,
                        name=None):
  """Helper function for `shuffle_batch_join` and `maybe_shuffle_batch_join`."""
  tensor_list_list = _as_tensor_list_list(tensors_list)
  with ops.name_scope(name, "shuffle_batch_join",
                      _flatten(tensor_list_list) + [keep_input]) as name:
    tensor_list_list = _validate_join(tensor_list_list)
    keep_input = _validate_keep_input(keep_input, enqueue_many)
    # Sparse tensors are stored as handles into a shared SparseTensorsMap so
    # all enqueue threads and the dequeue side use a single map.
    tensor_list_list, sparse_info = _store_sparse_tensors_join(
        tensor_list_list, enqueue_many, keep_input)
    types = _dtypes(tensor_list_list)
    shapes = _shapes(tensor_list_list, shapes, enqueue_many)
    queue = data_flow_ops.RandomShuffleQueue(
        capacity=capacity, min_after_dequeue=min_after_dequeue, seed=seed,
        dtypes=types, shapes=shapes, shared_name=shared_name)
    # One enqueue op (hence one runner thread) per inner tensor list.
    _enqueue_join(queue, tensor_list_list, enqueue_many, keep_input)
    # Fraction of the usable headroom (capacity above the shuffling floor
    # min_after_dequeue) currently occupied, clamped at 0 from below.
    full = (math_ops.cast(math_ops.maximum(0, queue.size() - min_after_dequeue),
                          dtypes.float32) *
            (1. / (capacity - min_after_dequeue)))
    # Note that name contains a '/' at the end so we intentionally do not place
    # a '/' after %s below.
    summary_name = (
        "fraction_over_%d_of_%d_full" %
        (min_after_dequeue, capacity - min_after_dequeue))
    summary.scalar(summary_name, full)
    # dequeue_up_to permits a final partial batch when the queue closes.
    if allow_smaller_final_batch:
      dequeued = queue.dequeue_up_to(batch_size, name=name)
    else:
      dequeued = queue.dequeue_many(batch_size, name=name)
    dequeued = _restore_sparse_tensors(dequeued, sparse_info)
    # tensors_list was validated to not be empty.
    return _as_original_type(tensors_list[0], dequeued)
# Batching functions ----------------------------------------------------------
def batch(tensors, batch_size, num_threads=1, capacity=32,
          enqueue_many=False, shapes=None, dynamic_pad=False,
          allow_smaller_final_batch=False, shared_name=None, name=None):
  """Creates batches of tensors in `tensors`.

  `tensors` may be a list or a dictionary of tensors; the return value has
  the same type.  Batches are assembled through a queue, and a `QueueRunner`
  for that queue is added to the current `Graph`'s `QUEUE_RUNNER` collection.

  With `enqueue_many=False`, `tensors` represents a single example: an input
  of shape `[x, y, z]` is emitted with shape `[batch_size, x, y, z]`.  With
  `enqueue_many=True`, `tensors` represents a batch indexed along its first
  dimension (all members must agree on that dimension's size): an input of
  shape `[*, x, y, z]` is emitted with shape `[batch_size, x, y, z]`.
  `capacity` bounds how far prefetching may grow the queue.

  The returned operation is a dequeue and raises
  `tf.errors.OutOfRangeError` once the input queue is exhausted; if it feeds
  another input queue its queue runner catches this, otherwise the caller's
  main thread must handle it.

  *N.B.:* With `dynamic_pad=False`, either pass `shapes` or ensure every
  tensor in `tensors` has a fully-defined shape, else `ValueError` is
  raised.  With `dynamic_pad=True` only the rank must be known; dimensions
  of value `None` may vary per enqueue and are right-padded on dequeue to
  the minibatch maximum (0 for numbers, empty string for strings — see
  `PaddingFIFOQueue`).

  With `allow_smaller_final_batch=True`, a batch smaller than `batch_size`
  is returned when the queue closes without enough pending elements
  (otherwise those elements are discarded), and each output's static first
  `Dimension` is `None`, so ops requiring a fixed batch_size would fail.

  Note: if `num_epochs` is not `None`, this function creates local counter
  `epochs`. Use `local_variables_initializer()` to initialize local
  variables.

  Args:
    tensors: The list or dictionary of tensors to enqueue.
    batch_size: The new batch size pulled from the queue.
    num_threads: The number of threads enqueuing `tensors`.
    capacity: An integer. The maximum number of elements in the queue.
    enqueue_many: Whether each tensor in `tensors` is a single example.
    shapes: (Optional) The shapes for each example. Defaults to the
      inferred shapes for `tensors`.
    dynamic_pad: Boolean. Allow variable dimensions in input shapes.
      The given dimensions are padded upon dequeue so that tensors within
      a batch have the same shapes.
    allow_smaller_final_batch: (Optional) Boolean. If `True`, allow the
      final batch to be smaller if there are insufficient items left in the
      queue.
    shared_name: (Optional). If set, this queue will be shared under the
      given name across multiple sessions.
    name: (Optional) A name for the operations.

  Returns:
    A list or dictionary of tensors with the same types as `tensors` (except
    if the input is a list of one element, then it returns a tensor, not a
    list).

  Raises:
    ValueError: If the `shapes` are not specified, and cannot be
      inferred from the elements of `tensors`.
  """
  # Unconditional batching: keep every input example.
  return _batch(
      tensors,
      batch_size,
      keep_input=True,
      num_threads=num_threads,
      capacity=capacity,
      enqueue_many=enqueue_many,
      shapes=shapes,
      dynamic_pad=dynamic_pad,
      allow_smaller_final_batch=allow_smaller_final_batch,
      shared_name=shared_name,
      name=name)
def maybe_batch(tensors, keep_input, batch_size, num_threads=1, capacity=32,
                enqueue_many=False, shapes=None, dynamic_pad=False,
                allow_smaller_final_batch=False, shared_name=None, name=None):
  """Conditionally creates batches of tensors based on `keep_input`.

  Behaves like `batch` (see its docstring for details), except that
  `keep_input` filters what gets enqueued.

  Args:
    tensors: The list or dictionary of tensors to enqueue.
    keep_input: A `bool` Tensor acting as a filter: a scalar `True` enqueues
      all of `tensors`; a vector (valid only with `enqueue_many=True`)
      enqueues exactly the examples whose corresponding entry is `True`.
    batch_size: The new batch size pulled from the queue.
    num_threads: The number of threads enqueuing `tensors`.
    capacity: An integer. The maximum number of elements in the queue.
    enqueue_many: Whether each tensor in `tensors` is a single example.
    shapes: (Optional) The shapes for each example. Defaults to the
      inferred shapes for `tensors`.
    dynamic_pad: Boolean. Allow variable dimensions in input shapes.
      The given dimensions are padded upon dequeue so that tensors within
      a batch have the same shapes.
    allow_smaller_final_batch: (Optional) Boolean. If `True`, allow the
      final batch to be smaller if there are insufficient items left in the
      queue.
    shared_name: (Optional). If set, this queue will be shared under the
      given name across multiple sessions.
    name: (Optional) A name for the operations.

  Returns:
    A list or dictionary of tensors with the same types as `tensors`.

  Raises:
    ValueError: If the `shapes` are not specified, and cannot be
      inferred from the elements of `tensors`.
  """
  return _batch(
      tensors,
      batch_size,
      keep_input,
      num_threads=num_threads,
      capacity=capacity,
      enqueue_many=enqueue_many,
      shapes=shapes,
      dynamic_pad=dynamic_pad,
      allow_smaller_final_batch=allow_smaller_final_batch,
      shared_name=shared_name,
      name=name)
def batch_join(tensors_list, batch_size, capacity=32, enqueue_many=False,
               shapes=None, dynamic_pad=False, allow_smaller_final_batch=False,
               shared_name=None, name=None):
  """Runs a list of tensors to fill a queue to create batches of examples.

  `tensors_list` is a list of tuples or dictionaries of tensors; each
  element is treated like the `tensors` argument of `tf.train.batch()`.
  A different thread enqueues each element, so `len(tensors_list)` threads
  are started, with thread `i` enqueuing the tensors from
  `tensors_list[i]`.  `tensors_list[i1][j]` must match
  `tensors_list[i2][j]` in type and shape, except in the first dimension
  if `enqueue_many` is true.

  Implemented using a queue -- a `QueueRunner` for the queue is added to
  the current `Graph`'s `QUEUE_RUNNER` collection.

  If `enqueue_many` is `False`, each `tensors_list[i]` is assumed to
  represent a single example, and an input tensor `x` is output with
  shape `[batch_size] + x.shape`.  If `enqueue_many` is `True`,
  `tensors_list[i]` is assumed to represent a batch of examples indexed
  along the first dimension (all members must agree on its size); the
  slices of any input tensor `x` are treated as examples, giving output
  tensors of shape `[batch_size] + x.shape[1:]`.

  The `capacity` argument controls how long the prefetching is allowed to
  grow the queues.

  The returned operation is a dequeue operation and will throw
  `tf.errors.OutOfRangeError` if the input queue is exhausted.  If this
  operation is feeding another input queue, its queue runner will catch
  this exception; however, if this operation is used in your main thread
  you are responsible for catching it yourself.

  *N.B.:* If `dynamic_pad` is `False`, you must ensure that either
  (i) the `shapes` argument is passed, or (ii) all of the tensors in
  `tensors_list` have fully-defined shapes.  `ValueError` is raised if
  neither of these conditions holds.

  If `dynamic_pad` is `True`, it is sufficient that the *rank* of the
  tensors is known, but individual dimensions may have value `None`.
  Such dimensions may vary per enqueue; upon dequeue the output tensors
  are padded on the right to the maximum shape of the tensors in the
  current minibatch (0 for numbers, the empty string for strings; see
  `PaddingFIFOQueue` for more info).

  If `allow_smaller_final_batch` is `True`, a smaller batch value than
  `batch_size` is returned when the queue is closed and there are not
  enough elements to fill the batch; otherwise the pending elements are
  discarded.  In that case all output tensors' static shapes, as seen by
  `get_shape`, have a first `Dimension` value of `None`, and operations
  that depend on a fixed batch_size would fail.

  Args:
    tensors_list: A list of tuples or dictionaries of tensors to enqueue.
    batch_size: An integer.  The new batch size pulled from the queue.
    capacity: An integer.  The maximum number of elements in the queue.
    enqueue_many: Whether each tensor in `tensor_list_list` is a single
      example.
    shapes: (Optional) The shapes for each example.  Defaults to the
      inferred shapes for `tensor_list_list[i]`.
    dynamic_pad: Boolean.  Allow variable dimensions in input shapes,
      padded upon dequeue so that tensors within a batch have the same
      shapes.
    allow_smaller_final_batch: (Optional) Boolean.  If `True`, allow the
      final batch to be smaller if there are insufficient items left in
      the queue.
    shared_name: (Optional) If set, this queue will be shared under the
      given name across multiple sessions.
    name: (Optional) A name for the operations.

  Returns:
    A list or dictionary of tensors with the same number and types as
    `tensors_list[i]`.

  Raises:
    ValueError: If the `shapes` are not specified, and cannot be
      inferred from the elements of `tensor_list_list`.
  """
  # `keep_input=True` enqueues everything unconditionally; the filtered
  # variant is exposed as `maybe_batch_join`.
  return _batch_join(tensors_list, batch_size, keep_input=True,
                     capacity=capacity,
                     enqueue_many=enqueue_many,
                     shapes=shapes,
                     dynamic_pad=dynamic_pad,
                     allow_smaller_final_batch=allow_smaller_final_batch,
                     shared_name=shared_name,
                     name=name)
def maybe_batch_join(tensors_list, keep_input, batch_size, capacity=32,
                     enqueue_many=False, shapes=None, dynamic_pad=False,
                     allow_smaller_final_batch=False, shared_name=None,
                     name=None):
  """Runs a list of tensors to conditionally fill a queue to create batches.

  See docstring in `batch_join` for more details.

  Args:
    tensors_list: A list of tuples or dictionaries of tensors to enqueue.
    keep_input: A `bool` Tensor acting as a filter: a scalar `True`
      enqueues all of `tensors`; a vector (with `enqueue_many=True`)
      enqueues only the examples whose corresponding entry is `True`.
    batch_size: An integer.  The new batch size pulled from the queue.
    capacity: An integer.  The maximum number of elements in the queue.
    enqueue_many: Whether each tensor in `tensor_list_list` is a single
      example.
    shapes: (Optional) The shapes for each example.  Defaults to the
      inferred shapes for `tensor_list_list[i]`.
    dynamic_pad: Boolean.  Allow variable dimensions in input shapes,
      padded upon dequeue so that tensors within a batch have the same
      shapes.
    allow_smaller_final_batch: (Optional) Boolean.  If `True`, allow the
      final batch to be smaller if there are insufficient items left in
      the queue.
    shared_name: (Optional) If set, this queue will be shared under the
      given name across multiple sessions.
    name: (Optional) A name for the operations.

  Returns:
    A list or dictionary of tensors with the same number and types as
    `tensors_list[i]`.

  Raises:
    ValueError: If the `shapes` are not specified, and cannot be
      inferred from the elements of `tensor_list_list`.
  """
  return _batch_join(tensors_list, batch_size, keep_input,
                     capacity=capacity,
                     enqueue_many=enqueue_many,
                     shapes=shapes,
                     dynamic_pad=dynamic_pad,
                     allow_smaller_final_batch=allow_smaller_final_batch,
                     shared_name=shared_name,
                     name=name)
def shuffle_batch(tensors, batch_size, capacity, min_after_dequeue,
                  num_threads=1, seed=None, enqueue_many=False, shapes=None,
                  allow_smaller_final_batch=False, shared_name=None, name=None):
  """Creates batches by randomly shuffling tensors.

  This function adds the following to the current `Graph`:

  * A shuffling queue into which tensors from `tensors` are enqueued.
  * A `dequeue_many` operation to create batches from the queue.
  * A `QueueRunner` to `QUEUE_RUNNER` collection, to enqueue the tensors
    from `tensors`.

  If `enqueue_many` is `False`, `tensors` is assumed to represent a
  single example, and an input tensor with shape `[x, y, z]` is output
  as a tensor with shape `[batch_size, x, y, z]`.  If `enqueue_many` is
  `True`, `tensors` is assumed to represent a batch of examples indexed
  along the first dimension (all members must agree on its size); an
  input tensor of shape `[*, x, y, z]` yields an output of shape
  `[batch_size, x, y, z]`.

  The `capacity` argument controls how long the prefetching is allowed to
  grow the queues.

  The returned operation is a dequeue operation and will throw
  `tf.errors.OutOfRangeError` if the input queue is exhausted.  If this
  operation is feeding another input queue, its queue runner will catch
  this exception; however, if this operation is used in your main thread
  you are responsible for catching it yourself.

  For example:

  ```python
  # Creates batches of 32 images and 32 labels.
  image_batch, label_batch = tf.train.shuffle_batch(
        [single_image, single_label],
        batch_size=32,
        num_threads=4,
        capacity=50000,
        min_after_dequeue=10000)
  ```

  *N.B.:* You must ensure that either (i) the `shapes` argument is
  passed, or (ii) all of the tensors in `tensors` have fully-defined
  shapes.  `ValueError` is raised if neither of these conditions holds.

  If `allow_smaller_final_batch` is `True`, a smaller batch value than
  `batch_size` is returned when the queue is closed and there are not
  enough elements to fill the batch; otherwise the pending elements are
  discarded.  In that case all output tensors' static shapes, as seen by
  `get_shape`, have a first `Dimension` value of `None`, and operations
  that depend on a fixed batch_size would fail.

  Note: if `num_epochs` is not `None`, this function creates local counter
  `epochs`.  Use `local_variables_initializer()` to initialize local
  variables.

  Args:
    tensors: The list or dictionary of tensors to enqueue.
    batch_size: The new batch size pulled from the queue.
    capacity: An integer.  The maximum number of elements in the queue.
    min_after_dequeue: Minimum number elements in the queue after a
      dequeue, used to ensure a level of mixing of elements.
    num_threads: The number of threads enqueuing `tensor_list`.
    seed: Seed for the random shuffling within the queue.
    enqueue_many: Whether each tensor in `tensor_list` is a single example.
    shapes: (Optional) The shapes for each example.  Defaults to the
      inferred shapes for `tensor_list`.
    allow_smaller_final_batch: (Optional) Boolean.  If `True`, allow the
      final batch to be smaller if there are insufficient items left in
      the queue.
    shared_name: (Optional) If set, this queue will be shared under the
      given name across multiple sessions.
    name: (Optional) A name for the operations.

  Returns:
    A list or dictionary of tensors with the types as `tensors`.

  Raises:
    ValueError: If the `shapes` are not specified, and cannot be
      inferred from the elements of `tensors`.
  """
  # Unconditional enqueue; `maybe_shuffle_batch` is the filtered variant.
  return _shuffle_batch(tensors, batch_size, capacity, min_after_dequeue,
                        keep_input=True,
                        num_threads=num_threads,
                        seed=seed,
                        enqueue_many=enqueue_many,
                        shapes=shapes,
                        allow_smaller_final_batch=allow_smaller_final_batch,
                        shared_name=shared_name,
                        name=name)
def maybe_shuffle_batch(tensors, batch_size, capacity, min_after_dequeue,
                        keep_input, num_threads=1, seed=None,
                        enqueue_many=False, shapes=None,
                        allow_smaller_final_batch=False, shared_name=None,
                        name=None):
  """Creates batches by randomly shuffling conditionally-enqueued tensors.

  See docstring in `shuffle_batch` for more details.

  Args:
    tensors: The list or dictionary of tensors to enqueue.
    batch_size: The new batch size pulled from the queue.
    capacity: An integer.  The maximum number of elements in the queue.
    min_after_dequeue: Minimum number elements in the queue after a
      dequeue, used to ensure a level of mixing of elements.
    keep_input: A `bool` Tensor acting as a filter: a scalar `True`
      enqueues all of `tensors`; a vector (with `enqueue_many=True`)
      enqueues only the examples whose corresponding entry is `True`.
    num_threads: The number of threads enqueuing `tensor_list`.
    seed: Seed for the random shuffling within the queue.
    enqueue_many: Whether each tensor in `tensor_list` is a single example.
    shapes: (Optional) The shapes for each example.  Defaults to the
      inferred shapes for `tensor_list`.
    allow_smaller_final_batch: (Optional) Boolean.  If `True`, allow the
      final batch to be smaller if there are insufficient items left in
      the queue.
    shared_name: (Optional) If set, this queue will be shared under the
      given name across multiple sessions.
    name: (Optional) A name for the operations.

  Returns:
    A list or dictionary of tensors with the types as `tensors`.

  Raises:
    ValueError: If the `shapes` are not specified, and cannot be
      inferred from the elements of `tensors`.
  """
  return _shuffle_batch(tensors, batch_size, capacity, min_after_dequeue,
                        keep_input,
                        num_threads=num_threads,
                        seed=seed,
                        enqueue_many=enqueue_many,
                        shapes=shapes,
                        allow_smaller_final_batch=allow_smaller_final_batch,
                        shared_name=shared_name,
                        name=name)
def shuffle_batch_join(tensors_list, batch_size, capacity,
                       min_after_dequeue, seed=None, enqueue_many=False,
                       shapes=None, allow_smaller_final_batch=False,
                       shared_name=None, name=None):
  """Create batches by randomly shuffling tensors.

  `tensors_list` is a list of tuples or dictionaries of tensors; each
  element is treated like the `tensors` argument of
  `tf.train.shuffle_batch()`.  This version enqueues a different list of
  tensors in different threads: `len(tensors_list)` threads are started,
  with thread `i` enqueuing the tensors from `tensors_list[i]`.
  `tensors_list[i1][j]` must match `tensors_list[i2][j]` in type and
  shape, except in the first dimension if `enqueue_many` is true.

  It adds the following to the current `Graph`:

  * A shuffling queue into which tensors from `tensors_list` are enqueued.
  * A `dequeue_many` operation to create batches from the queue.
  * A `QueueRunner` to `QUEUE_RUNNER` collection, to enqueue the tensors
    from `tensors_list`.

  If `enqueue_many` is `False`, each `tensors_list[i]` is assumed to
  represent a single example, and an input tensor with shape `[x, y, z]`
  is output as a tensor with shape `[batch_size, x, y, z]`.  If
  `enqueue_many` is `True`, `tensors_list[i]` is assumed to represent a
  batch of examples indexed along the first dimension (all members must
  agree on its size); an input tensor of shape `[*, x, y, z]` yields an
  output of shape `[batch_size, x, y, z]`.

  The `capacity` argument controls how long the prefetching is allowed to
  grow the queues.

  The returned operation is a dequeue operation and will throw
  `tf.errors.OutOfRangeError` if the input queue is exhausted.  If this
  operation is feeding another input queue, its queue runner will catch
  this exception; however, if this operation is used in your main thread
  you are responsible for catching it yourself.

  If `allow_smaller_final_batch` is `True`, a smaller batch value than
  `batch_size` is returned when the queue is closed and there are not
  enough elements to fill the batch; otherwise the pending elements are
  discarded.  In that case all output tensors' static shapes, as seen by
  `get_shape`, have a first `Dimension` value of `None`, and operations
  that depend on a fixed batch_size would fail.

  Args:
    tensors_list: A list of tuples or dictionaries of tensors to enqueue.
    batch_size: An integer.  The new batch size pulled from the queue.
    capacity: An integer.  The maximum number of elements in the queue.
    min_after_dequeue: Minimum number elements in the queue after a
      dequeue, used to ensure a level of mixing of elements.
    seed: Seed for the random shuffling within the queue.
    enqueue_many: Whether each tensor in `tensor_list_list` is a single
      example.
    shapes: (Optional) The shapes for each example.  Defaults to the
      inferred shapes for `tensors_list[i]`.
    allow_smaller_final_batch: (Optional) Boolean.  If `True`, allow the
      final batch to be smaller if there are insufficient items left in
      the queue.
    shared_name: (optional).  If set, this queue will be shared under the
      given name across multiple sessions.
    name: (Optional) A name for the operations.

  Returns:
    A list or dictionary of tensors with the same number and types as
    `tensors_list[i]`.

  Raises:
    ValueError: If the `shapes` are not specified, and cannot be
      inferred from the elements of `tensors_list`.
  """
  # Unconditional enqueue; `maybe_shuffle_batch_join` is the filtered
  # variant.
  return _shuffle_batch_join(tensors_list, batch_size, capacity,
                             min_after_dequeue, keep_input=True,
                             seed=seed,
                             enqueue_many=enqueue_many,
                             shapes=shapes,
                             allow_smaller_final_batch=allow_smaller_final_batch,
                             shared_name=shared_name,
                             name=name)
def maybe_shuffle_batch_join(tensors_list, batch_size, capacity,
                             min_after_dequeue, keep_input, seed=None,
                             enqueue_many=False, shapes=None,
                             allow_smaller_final_batch=False, shared_name=None,
                             name=None):
  """Create batches by randomly shuffling conditionally-enqueued tensors.

  See docstring in `shuffle_batch_join` for more details.

  Args:
    tensors_list: A list of tuples or dictionaries of tensors to enqueue.
    batch_size: An integer.  The new batch size pulled from the queue.
    capacity: An integer.  The maximum number of elements in the queue.
    min_after_dequeue: Minimum number elements in the queue after a
      dequeue, used to ensure a level of mixing of elements.
    keep_input: A `bool` Tensor acting as a filter: a scalar `True`
      enqueues all of `tensors`; a vector (with `enqueue_many=True`)
      enqueues only the examples whose corresponding entry is `True`.
    seed: Seed for the random shuffling within the queue.
    enqueue_many: Whether each tensor in `tensor_list_list` is a single
      example.
    shapes: (Optional) The shapes for each example.  Defaults to the
      inferred shapes for `tensors_list[i]`.
    allow_smaller_final_batch: (Optional) Boolean.  If `True`, allow the
      final batch to be smaller if there are insufficient items left in
      the queue.
    shared_name: (optional).  If set, this queue will be shared under the
      given name across multiple sessions.
    name: (Optional) A name for the operations.

  Returns:
    A list or dictionary of tensors with the same number and types as
    `tensors_list[i]`.

  Raises:
    ValueError: If the `shapes` are not specified, and cannot be
      inferred from the elements of `tensors_list`.
  """
  return _shuffle_batch_join(tensors_list, batch_size, capacity,
                             min_after_dequeue, keep_input,
                             seed=seed,
                             enqueue_many=enqueue_many,
                             shapes=shapes,
                             allow_smaller_final_batch=allow_smaller_final_batch,
                             shared_name=shared_name,
                             name=name)
| eerwitt/tensorflow | tensorflow/python/training/input.py | Python | apache-2.0 | 60,007 |
import calendar
import datetime
import platform
import time
import urllib
import urlparse
import warnings
import stripe
from stripe import error, http_client, version, util
from stripe.multipart_data_generator import MultipartDataGenerator
def _encode_datetime(dttime):
if dttime.tzinfo and dttime.tzinfo.utcoffset(dttime) is not None:
utc_timestamp = calendar.timegm(dttime.utctimetuple())
else:
utc_timestamp = time.mktime(dttime.timetuple())
return int(utc_timestamp)
def _api_encode(data):
    """Yield (key, value) pairs that encode *data* for a form body.

    Stripe objects are reduced to their `stripe_id`, lists/tuples become
    repeated `key[]` entries, nested dicts are flattened to `key[subkey]`,
    datetimes become Unix timestamps, and `None` values are omitted.
    """
    for key, value in data.iteritems():
        key = util.utf8(key)
        if value is None:
            # Unset values are simply left out of the request.
            continue
        if hasattr(value, 'stripe_id'):
            yield (key, value.stripe_id)
        elif isinstance(value, (list, tuple)):
            for item in value:
                yield ("%s[]" % (key,), util.utf8(item))
        elif isinstance(value, dict):
            flattened = dict(('%s[%s]' % (key, subkey), subvalue)
                             for subkey, subvalue in value.iteritems())
            for pair in _api_encode(flattened):
                yield pair
        elif isinstance(value, datetime.datetime):
            yield (key, _encode_datetime(value))
        else:
            yield (key, util.utf8(value))
def _build_api_url(url, query):
scheme, netloc, path, base_query, fragment = urlparse.urlsplit(url)
if base_query:
query = '%s&%s' % (base_query, query)
return urlparse.urlunsplit((scheme, netloc, path, query, fragment))
class APIRequestor(object):
    """Issues HTTP requests to the Stripe API and interprets responses.

    Responsibilities: resolving the API key/base URL, encoding request
    parameters, attaching authentication and user-agent headers, and
    translating non-2xx responses into the exception hierarchy in
    `stripe.error`.  Also hosts a number of deprecated class methods and
    transport-specific helpers kept only for backwards compatibility.
    """

    def __init__(self, key=None, client=None, api_base=None, account=None):
        # Any argument not supplied explicitly falls back to the
        # module-level stripe configuration.
        if api_base:
            self.api_base = api_base
        else:
            self.api_base = stripe.api_base
        self.api_key = key
        self.stripe_account = account
        # Imported late so the module-level flag reflects its value at
        # construction time.
        from stripe import verify_ssl_certs
        self._client = client or http_client.new_default_http_client(
            verify_ssl_certs=verify_ssl_certs)

    @classmethod
    def api_url(cls, url=''):
        # Deprecated: prepends the configured API base to a path.
        warnings.warn(
            'The `api_url` class method of APIRequestor is '
            'deprecated and will be removed in version 2.0.'
            'If you need public access to this function, please email us '
            'at support@stripe.com.',
            DeprecationWarning)
        return '%s%s' % (stripe.api_base, url)

    @classmethod
    def _deprecated_encode(cls, stk, key, value):
        # Shared implementation behind the deprecated encode_* methods:
        # extends `stk` with the flattened (key, value) pairs.
        warnings.warn(
            'The encode_* class methods of APIRequestor are deprecated and '
            'will be removed in version 2.0. '
            'If you need public access to this function, please email us '
            'at support@stripe.com.',
            DeprecationWarning, stacklevel=2)
        stk.extend(_api_encode({key: value}))

    @classmethod
    def encode_dict(cls, stk, key, value):
        # Deprecated alias; see _deprecated_encode.
        cls._deprecated_encode(stk, key, value)

    @classmethod
    def encode_list(cls, stk, key, value):
        # Deprecated alias; see _deprecated_encode.
        cls._deprecated_encode(stk, key, value)

    @classmethod
    def encode_datetime(cls, stk, key, value):
        # Deprecated alias; see _deprecated_encode.
        cls._deprecated_encode(stk, key, value)

    @classmethod
    def encode_none(cls, stk, key, value):
        # Deprecated alias; see _deprecated_encode.
        cls._deprecated_encode(stk, key, value)

    @classmethod
    def encode(cls, d):
        """
        Internal: encode a string for url representation
        """
        warnings.warn(
            'The `encode` class method of APIRequestor is deprecated and '
            'will be removed in version 2.0.'
            'If you need public access to this function, please email us '
            'at support@stripe.com.',
            DeprecationWarning)
        return urllib.urlencode(list(_api_encode(d)))

    @classmethod
    def build_url(cls, url, params):
        # Deprecated: encode `params` and append them to `url`.
        warnings.warn(
            'The `build_url` class method of APIRequestor is deprecated and '
            'will be removed in version 2.0.'
            'If you need public access to this function, please email us '
            'at support@stripe.com.',
            DeprecationWarning)
        return _build_api_url(url, cls.encode(params))

    def request(self, method, url, params=None, headers=None):
        """Issue an API call and return (parsed_response, api_key_used)."""
        rbody, rcode, my_api_key = self.request_raw(
            method.lower(), url, params, headers)
        resp = self.interpret_response(rbody, rcode)
        return resp, my_api_key

    def handle_api_error(self, rbody, rcode, resp):
        """Raise the stripe.error exception matching an error response."""
        try:
            err = resp['error']
        except (KeyError, TypeError):
            # The body parsed, but does not have the expected error shape.
            raise error.APIError(
                "Invalid response object from API: %r (HTTP response code "
                "was %d)" % (rbody, rcode),
                rbody, rcode, resp)
        # Map HTTP status codes to the specific exception types callers
        # are expected to catch.
        if rcode in [400, 404]:
            raise error.InvalidRequestError(
                err.get('message'), err.get('param'), rbody, rcode, resp)
        elif rcode == 401:
            raise error.AuthenticationError(
                err.get('message'), rbody, rcode, resp)
        elif rcode == 402:
            raise error.CardError(err.get('message'), err.get('param'),
                                  err.get('code'), rbody, rcode, resp)
        else:
            raise error.APIError(err.get('message'), rbody, rcode, resp)

    def request_raw(self, method, url, params=None, supplied_headers=None):
        """
        Mechanism for issuing an API call

        Returns (raw_body, status_code, api_key_used).  Raises
        AuthenticationError when no API key is configured and
        APIConnectionError for unsupported HTTP methods.
        """
        from stripe import api_version
        if self.api_key:
            my_api_key = self.api_key
        else:
            # Fall back to the globally configured key.
            from stripe import api_key
            my_api_key = api_key
        if my_api_key is None:
            raise error.AuthenticationError(
                'No API key provided. (HINT: set your API key using '
                '"stripe.api_key = <API-KEY>"). You can generate API keys '
                'from the Stripe web interface. See https://stripe.com/api '
                'for details, or email support@stripe.com if you have any '
                'questions.')
        abs_url = '%s%s' % (self.api_base, url)
        encoded_params = urllib.urlencode(list(_api_encode(params or {})))
        if method == 'get' or method == 'delete':
            # GET/DELETE carry parameters in the query string.
            if params:
                abs_url = _build_api_url(abs_url, encoded_params)
            post_data = None
        elif method == 'post':
            # Multipart is used for file uploads; everything else is
            # form-urlencoded in the request body.
            if supplied_headers is not None and \
                    supplied_headers.get("Content-Type") == \
                    "multipart/form-data":
                generator = MultipartDataGenerator()
                generator.add_params(params or {})
                post_data = generator.get_post_data()
                supplied_headers["Content-Type"] = \
                    "multipart/form-data; boundary=%s" % (generator.boundary,)
            else:
                post_data = encoded_params
        else:
            raise error.APIConnectionError(
                'Unrecognized HTTP method %r. This may indicate a bug in the '
                'Stripe bindings. Please contact support@stripe.com for '
                'assistance.' % (method,))
        # Diagnostic client information sent alongside every request.
        ua = {
            'bindings_version': version.VERSION,
            'lang': 'python',
            'publisher': 'stripe',
            'httplib': self._client.name,
        }
        for attr, func in [['lang_version', platform.python_version],
                           ['platform', platform.platform],
                           ['uname', lambda: ' '.join(platform.uname())]]:
            try:
                val = func()
            except Exception, e:
                # Platform introspection is best-effort only; record the
                # failure instead of aborting the request.
                val = "!! %s" % (e,)
            ua[attr] = val
        headers = {
            'X-Stripe-Client-User-Agent': util.json.dumps(ua),
            'User-Agent': 'Stripe/v1 PythonBindings/%s' % (version.VERSION,),
            'Authorization': 'Bearer %s' % (my_api_key,)
        }
        if self.stripe_account:
            headers['Stripe-Account'] = self.stripe_account
        if method == 'post':
            headers['Content-Type'] = 'application/x-www-form-urlencoded'
        if api_version is not None:
            headers['Stripe-Version'] = api_version
        # Caller-supplied headers take precedence over the defaults.
        if supplied_headers is not None:
            for key, value in supplied_headers.items():
                headers[key] = value
        rbody, rcode = self._client.request(
            method, abs_url, headers, post_data)
        util.logger.info(
            'API request to %s returned (response code, response body) of '
            '(%d, %r)',
            abs_url, rcode, rbody)
        return rbody, rcode, my_api_key

    def interpret_response(self, rbody, rcode):
        """Parse a raw response body; raise on non-2xx status codes."""
        try:
            if hasattr(rbody, 'decode'):
                rbody = rbody.decode('utf-8')
            resp = util.json.loads(rbody)
        except Exception:
            raise error.APIError(
                "Invalid response body from API: %s "
                "(HTTP response code was %d)" % (rbody, rcode),
                rbody, rcode)
        if not (200 <= rcode < 300):
            self.handle_api_error(rbody, rcode, resp)
        return resp

    # Deprecated request handling. Will all be removed in 2.0

    def _deprecated_request(self, impl, method, url, headers, params):
        # Shared body of the deprecated per-transport *_request helpers.
        warnings.warn(
            'The *_request functions of APIRequestor are deprecated and '
            'will be removed in version 2.0. Please use the client classes '
            ' in `stripe.http_client` instead',
            DeprecationWarning, stacklevel=2)
        method = method.lower()
        if method == 'get' or method == 'delete':
            if params:
                url = self.build_url(url, params)
            post_data = None
        elif method == 'post':
            post_data = self.encode(params)
        else:
            raise error.APIConnectionError(
                'Unrecognized HTTP method %r. This may indicate a bug in the '
                'Stripe bindings. Please contact support@stripe.com for '
                'assistance.' % (method,))
        client = impl(verify_ssl_certs=self._client._verify_ssl_certs)
        return client.request(method, url, headers, post_data)

    def _deprecated_handle_error(self, impl, *args):
        # Shared body of the deprecated per-transport handle_*_error
        # helpers.
        warnings.warn(
            'The handle_*_error functions of APIRequestor are deprecated and '
            'will be removed in version 2.0. Please use the client classes '
            ' in `stripe.http_client` instead',
            DeprecationWarning, stacklevel=2)
        client = impl(verify_ssl_certs=self._client._verify_ssl_certs)
        return client._handle_request_error(*args)

    def requests_request(self, meth, abs_url, headers, params):
        # Deprecated: route through the `requests` transport.
        from stripe.http_client import RequestsClient
        return self._deprecated_request(RequestsClient, meth, abs_url,
                                        headers, params)

    def handle_requests_error(self, err):
        # Deprecated: translate a `requests` transport error.
        from stripe.http_client import RequestsClient
        return self._deprecated_handle_error(RequestsClient, err)

    def pycurl_request(self, meth, abs_url, headers, params):
        # Deprecated: route through the pycurl transport.
        from stripe.http_client import PycurlClient
        return self._deprecated_request(PycurlClient, meth, abs_url,
                                        headers, params)

    def handle_pycurl_error(self, err):
        # Deprecated: translate a pycurl transport error.
        from stripe.http_client import PycurlClient
        return self._deprecated_handle_error(PycurlClient, err)

    def urlfetch_request(self, meth, abs_url, headers, params):
        # Deprecated: route through the App Engine urlfetch transport.
        from stripe.http_client import UrlFetchClient
        return self._deprecated_request(UrlFetchClient, meth, abs_url,
                                        headers, params)

    def handle_urlfetch_error(self, err, abs_url):
        # Deprecated: translate a urlfetch transport error.
        from stripe.http_client import UrlFetchClient
        return self._deprecated_handle_error(UrlFetchClient, err, abs_url)

    def urllib2_request(self, meth, abs_url, headers, params):
        # Deprecated: route through the urllib2 transport.
        from stripe.http_client import Urllib2Client
        return self._deprecated_request(Urllib2Client, meth, abs_url,
                                        headers, params)

    def handle_urllib2_error(self, err, abs_url):
        # Deprecated: translate a urllib2 transport error.  NOTE(review):
        # `abs_url` is accepted but not forwarded here, unlike
        # handle_urlfetch_error -- presumably intentional; confirm.
        from stripe.http_client import Urllib2Client
        return self._deprecated_handle_error(Urllib2Client, err)
| boneil3/hyperAdmit | stripe/api_requestor.py | Python | mit | 12,207 |
"""
This is your project's main settings file that can be committed to your
repo. If you need to override a setting locally, use local.py
"""
import os
import logging
# Normally you should not import ANYTHING from Django directly
# into your settings, but ImproperlyConfigured is an exception.
from django.core.exceptions import ImproperlyConfigured
def get_env_setting(setting):
    """Return the named environment variable or raise ImproperlyConfigured."""
    value = os.environ.get(setting)
    if value is None:
        raise ImproperlyConfigured("Set the %s env variable" % setting)
    return value
# Your project root
PROJECT_ROOT = os.path.abspath(os.path.dirname(__file__) + "../../../")
# URL path prefixes that should bypass locale handling.
SUPPORTED_NONLOCALES = ['media', 'admin', 'static']
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
# Defines the views served for root URLs.
ROOT_URLCONF = 'django_project.urls'
# Application definition
INSTALLED_APPS = (
    # Django contrib apps
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.admin',
    'django.contrib.humanize',
    'django.contrib.syndication',
    'django.contrib.staticfiles',
    # Third-party apps, patches, fixes
    'djcelery',
    'debug_toolbar',
    'compressor',
    # Database migrations
    'south',
    # Application base, containing global templates.
    'base',
    # Local apps, referenced via appname
)
# Place bcrypt first in the list, so it will be the default password hashing
# mechanism
PASSWORD_HASHERS = (
    'django.contrib.auth.hashers.BCryptSHA256PasswordHasher',
    'django.contrib.auth.hashers.BCryptPasswordHasher',
    'django.contrib.auth.hashers.PBKDF2PasswordHasher',
    'django.contrib.auth.hashers.PBKDF2SHA1PasswordHasher',
    'django.contrib.auth.hashers.SHA1PasswordHasher',
    'django.contrib.auth.hashers.MD5PasswordHasher',
    'django.contrib.auth.hashers.CryptPasswordHasher',
)
# Sessions
#
# By default, be at least somewhat secure with our session cookies.
SESSION_COOKIE_HTTPONLY = True
# Set this to true if you are using https
SESSION_COOKIE_SECURE = False
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.example.com/media/"
MEDIA_ROOT = os.path.join(PROJECT_ROOT, 'media')
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://media.example.com/media/", "http://example.com/media/"
MEDIA_URL = '/media/'
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.example.com/static/"
STATIC_ROOT = os.path.join(PROJECT_ROOT, 'static')
# URL prefix for static files.
# Example: "http://media.example.com/static/"
STATIC_URL = '/static/'
# Additional locations of static files
STATICFILES_DIRS = (
    # Put strings here, like "/home/html/static" or "C:/www/django/static".
    # Always use forward slashes, even on Windows.
    # Don't forget to use absolute paths, not relative paths.
)
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'America/Chicago'
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
    'django.contrib.staticfiles.finders.FileSystemFinder',
    'django.contrib.staticfiles.finders.AppDirectoriesFinder',
    # django-compressor's finder, so compressed assets are collected too.
    'compressor.finders.CompressorFinder',
)
# Request/response middleware, applied in order for requests and in
# reverse order for responses.
MIDDLEWARE_CLASSES = [
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.locale.LocaleMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
    'debug_toolbar.middleware.DebugToolbarMiddleware',
]
# Callables that inject variables into every RequestContext.
TEMPLATE_CONTEXT_PROCESSORS = [
    'django.contrib.auth.context_processors.auth',
    'django.core.context_processors.debug',
    'django.core.context_processors.media',
    'django.core.context_processors.request',
    'django.core.context_processors.i18n',
    'django.core.context_processors.static',
    'django.core.context_processors.csrf',
    'django.core.context_processors.tz',
    'django.contrib.messages.context_processors.messages',
]
TEMPLATE_DIRS = (
    # Put strings here, like "/home/html/django_templates" or
    # "C:/www/django/templates".
    # Always use forward slashes, even on Windows.
    # Don't forget to use absolute paths, not relative paths.
    os.path.join(PROJECT_ROOT, 'templates'),
)
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
    'django.template.loaders.filesystem.Loader',
    'django.template.loaders.app_directories.Loader',
)
def custom_show_toolbar(request):
    """Show the debug toolbar only for users with the superuser flag."""
    user_is_superuser = request.user.is_superuser
    return user_is_superuser
# Debug toolbar options; the toolbar itself is only shown to superusers
# via custom_show_toolbar above.
DEBUG_TOOLBAR_CONFIG = {
    'INTERCEPT_REDIRECTS': False,
    'SHOW_TOOLBAR_CALLBACK': 'django_project.settings.base.custom_show_toolbar',
    'HIDE_DJANGO_SQL': True,
    'TAG': 'body',
    'SHOW_TEMPLATE_CONTEXT': True,
    'ENABLE_STACKTRACES': True,
}
# DEBUG_TOOLBAR_PANELS = (
#     #'debug_toolbar_user_panel.panels.UserPanel',
#     'debug_toolbar.panels.version.VersionDebugPanel',
#     'debug_toolbar.panels.timer.TimerDebugPanel',
#     'debug_toolbar.panels.settings_vars.SettingsVarsDebugPanel',
#     'debug_toolbar.panels.headers.HeaderDebugPanel',
#     'debug_toolbar.panels.request_vars.RequestVarsDebugPanel',
#     'debug_toolbar.panels.template.TemplateDebugPanel',
#     'debug_toolbar.panels.sql.SQLDebugPanel',
#     'debug_toolbar.panels.signals.SignalDebugPanel',
#     'debug_toolbar.panels.logger.LoggingPanel',
# )
# Specify a custom user model to use
#AUTH_USER_MODEL = 'accounts.MyUser'
FILE_UPLOAD_PERMISSIONS = 0o0664
# The WSGI Application to use for runserver
WSGI_APPLICATION = 'django_project.wsgi.application'
# Define your database connections
# NOTE(review): ENGINE/NAME below are placeholders and must be filled per
# deployment (typically in a settings override module).
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.',
        'NAME': '',
        'USER': '',
        'PASSWORD': '',
        'HOST': '',
        'PORT': '',
        #'OPTIONS': {
        #    'init_command': 'SET storage_engine=InnoDB',
        #    'charset' : 'utf8',
        #    'use_unicode' : True,
        #},
        #'TEST_CHARSET': 'utf8',
        #'TEST_COLLATION': 'utf8_general_ci',
    },
    # 'slave': {
    #     ...
    # },
}
# Uncomment this and set to all slave DBs in use on the site.
# SLAVE_DATABASES = ['slave']
# Recipients of traceback emails and other notifications.
ADMINS = (
    # ('Your Name', 'your_email@domain.com'),
)
MANAGERS = ADMINS
# SECURITY WARNING: don't run with debug turned on in production!
# Debugging displays nice error messages, but leaks memory. Set this to False
# on all server instances and True only for development.
DEBUG = TEMPLATE_DEBUG = False
# Is this a development instance? Set this to True on development/master
# instances and False on stage/prod.
DEV = False
# Hosts/domain names that are valid for this site; required if DEBUG is False
# See https://docs.djangoproject.com/en/1.5/ref/settings/#allowed-hosts
ALLOWED_HOSTS = []
# SECURITY WARNING: keep the secret key used in production secret!
# Hardcoded values can leak through source control.
# This is an example method of getting the value from an environment setting.
# Uncomment to use, and then make sure you set the SECRET_KEY environment variable.
# This is good to use in production, and on services that support it such as Heroku.
#SECRET_KEY = get_env_setting('SECRET_KEY')
# Uncomment these to activate and customize Celery:
# CELERY_ALWAYS_EAGER = False # required to activate celeryd
# BROKER_HOST = 'localhost'
# BROKER_PORT = 5672
# BROKER_USER = 'django'
# BROKER_PASSWORD = 'django'
# BROKER_VHOST = 'django'
# CELERY_RESULT_BACKEND = 'amqp'
# Addresses treated as "internal" (e.g. allowed to see the debug toolbar).
# NOTE: a one-element tuple requires the trailing comma -- without it this
# setting was the plain string '127.0.0.1', so membership tests like
# `ip in INTERNAL_IPS` matched any substring of that string.
INTERNAL_IPS = ('127.0.0.1',)
# Enable this option for memcached
#CACHE_BACKEND= "memcached://127.0.0.1:11211/"
# Set this to true if you use a proxy that sets X-Forwarded-Host
#USE_X_FORWARDED_HOST = False
# Addresses used for error notifications and as the default From: header.
SERVER_EMAIL = "webmaster@example.com"
DEFAULT_FROM_EMAIL = "webmaster@example.com"
SYSTEM_EMAIL_PREFIX = "[django_project]"
## Log settings
LOG_LEVEL = logging.INFO
HAS_SYSLOG = True
SYSLOG_TAG = "http_app_django_project"  # Make this unique to your project.
# Remove this configuration variable to use your custom logging configuration
# (None tells Django not to run its own logging setup).
LOGGING_CONFIG = None
LOGGING = {
    'version': 1,
    'loggers': {
        'django_project': {
            'level': "DEBUG"
        }
    }
}
LOGIN_REDIRECT_URL = '/'
# Common Event Format logging parameters
#CEF_PRODUCT = 'django_project'
#CEF_VENDOR = 'Your Company'
#CEF_VERSION = '0'
#CEF_DEVICE_VERSION = '0'
| TomWerner/sms_finance_tracker | django_project/settings/base.py | Python | bsd-3-clause | 9,651 |
#!-*- coding=utf8 -*-
from graphviz import Digraph
import queue
import random
class Node(object):
    """One AVL-tree node: a value, child/parent links and a balance
    factor (right subtree height minus left subtree height)."""
    def __init__(self, value):
        self.left = None
        self.right = None
        self.factor = 0
        self.parent = None
        self.value = value
    def get_parent(self):
        """Return the parent node, or None for the root."""
        return self.parent
    def get_sibling(self):
        """Return the other child of this node's parent, or None."""
        if self.parent is None:
            return None
        if self is self.parent.left:
            return self.parent.right
        return self.parent.left
    def get_grandparent(self):
        """Return the parent's parent, or None."""
        return self.parent.parent if self.parent else None
    def get_uncle(self):
        """Return the parent's sibling, or None."""
        grand = self.get_grandparent()
        if grand is None:
            return None
        if self.parent is grand.left:
            return grand.right
        return grand.left
    def __str__(self):
        parent_value = self.parent.value if self.parent else None
        return "parent: %s self:%s factor:%s"%(parent_value, self.value, self.factor)
    def __repr__(self):
        return str(self)
class AVLTree(object):
    """A self-balancing binary search tree (AVL).

    Each node carries a balance factor (right subtree height minus left
    subtree height) that is maintained incrementally during insertion;
    single and double rotations keep every factor within [-1, 1].
    Nodes must expose: value, left, right, parent, factor, get_parent()
    and get_sibling() (see Node above).
    """
    def __init__(self):
        self.root = None
    def rotate_left(self, n):
        """Rotate n down-left; n.right becomes the root of this subtree."""
        p = n.get_parent()
        r = n.right
        assert r is not None
        n.right = r.left
        if r.left:
            r.left.parent = n
        n.parent = r
        r.left = n
        r.parent = p
        if not p:
            self.root = r
        elif p.left is n:
            p.left = r
        else:
            p.right = r
    def rotate_right(self, n):
        """Rotate n down-right; n.left becomes the root of this subtree."""
        p = n.get_parent()
        l = n.left
        assert l is not None
        n.left = l.right
        if l.right:
            l.right.parent = n
        n.parent = l
        l.right = n
        l.parent = p
        if not p:
            self.root = l
        elif p.left is n:
            p.left = l
        else:
            p.right = l
    #---------------------------------------------------------
    def _push_node(self, node):
        """Plain BST insert: values <= current go left, others go right."""
        n = self.root
        if not n:
            self.root = node
            return
        while True:
            if node.value <= n.value:
                if n.left is None:
                    n.left = node
                    node.parent = n
                    break
                n = n.left
            else:
                if n.right is None:
                    n.right = node
                    node.parent = n
                    break
                n = n.right
    def _balance_tree(self, node):
        """Walk up from the freshly inserted leaf, updating balance
        factors and rotating where a factor leaves [-1, 1]."""
        n = node
        # If the new leaf has a sibling, the parent's subtree height is
        # unchanged: its factor returns to 0 and no propagation is needed.
        if n.get_sibling():
            n.parent.factor = 0
            return
        while n.parent:
            p = n.parent
            if p.left is n:
                p.factor -= 1
            else:
                p.factor += 1
            if not p.factor:
                # Height of p's subtree unchanged -- stop propagating.
                break
            if abs(p.factor) <= 1:
                n = p
                continue
            # Left-left: single right rotation.
            if p.factor < -1 and n.factor < 0:
                n.factor = 0
                p.factor = 0
                self.rotate_right(p)
                break
            # Right-right: single left rotation.
            if p.factor > 1 and n.factor > 0:
                n.factor = 0
                p.factor = 0
                self.rotate_left(p)
                break
            # Left-right: rotate n left, then p right.
            if p.factor < -1 and n.factor > 0:
                r = n.right
                factor = r.factor
                n.factor = 0 if factor <= 0 else -1
                self.rotate_left(n)
                r.factor = 0
                p.factor = 0 if factor >= 0 else 1
                self.rotate_right(p)
                break
            # Right-left: rotate n right, then p left.
            if p.factor > 1 and n.factor < 0:
                l = n.left
                factor = l.factor
                n.factor = 0 if factor >= 0 else 1
                self.rotate_right(n)
                l.factor = 0
                p.factor = 0 if factor <= 0 else -1
                self.rotate_left(p)
                break
            # Unreachable if the invariants hold. The original code said
            # `raise Erro`, an undefined name that produced a NameError.
            raise RuntimeError("AVL balance invariant violated")
    def add(self, node):
        """Insert `node` and rebalance the tree."""
        self._push_node(node)
        self._balance_tree(node)
if __name__ == "__main__":
    def print_tree(tree, dot, index):
        """Render `tree` breadth-first into the graphviz digraph `dot`.

        `index` namespaces the node ids so several snapshots of the tree
        can live in one parent graph without id collisions.
        """
        def get_sign(index, node):
            # Unique graphviz id: snapshot index + object identity.
            return str(index) + str(id(node))
        q = queue.Queue()
        q.put(tree.root)
        while not q.empty():
            node = q.get()
            sign = get_sign(index, node)
            dot.node(sign, "%s(%s)" % (node.value, node.factor))
            if node.left:
                dot.edge(sign, get_sign(index, node.left))
                q.put(node.left)
            if node.right:
                dot.edge(sign, get_sign(index, node.right))
                q.put(node.right)
        return dot
    dots = Digraph()
    tree = AVLTree()
    # BUG FIX: xrange() does not exist on Python 3 (and this file imports
    # the Python-3-only `queue` module); use range(). The loop variable is
    # only a snapshot index -- the inserted value itself is random.
    for i, value in enumerate(range(24)):
        value = random.randint(0, 10000)
        tree.add(Node(value))
        dot = Digraph()
        print_tree(tree, dot, i)
        dots.subgraph(graph=dot)
    dots.render(view=True)
| pihou/wheels | avltree.py | Python | gpl-3.0 | 4,947 |
# The clearinghouse testlib must be imported first.
from clearinghouse.tests import testlib
from clearinghouse.tests import mocklib
from clearinghouse.common.api import maindb
from clearinghouse.common.exceptions import *
from clearinghouse.common.util import validations
from clearinghouse.website.control import interface
from clearinghouse.website.tests import testutil
import unittest
# Replace all lockserver communication with in-process mocks before any
# test in this module runs.
mocklib.mock_lockserver_calls()
class SeattleGeniTestCase(unittest.TestCase):
  """Tests that changing a user's keys marks only the vessels the user
  still holds as needing a user-key sync."""

  def setUp(self):
    # Setup a fresh database for each test.
    testlib.setup_test_db()

  def tearDown(self):
    # Cleanup the test database.
    testlib.teardown_test_db()

  def _create_test_user(self):
    """Create and return the user all tests in this case operate on."""
    return maindb.create_user("testuser", "password", "example@example.com",
                              "affiliation", "1 2", "3 4 5", "6 7")

  def _acquire_and_partially_release(self, user, vesselcount=4):
    """Acquire `vesselcount` vessels for `user` and release the first two.

    Also asserts that every vessel starts out with its user keys in sync.
    Returns (kept_vessels_list, released_vessels_list).
    """
    # Have every vessel acquisition to the backend request succeed.
    mocklib.mock_backend_acquire_vessel([True] * vesselcount)
    testutil.create_nodes_on_different_subnets(vesselcount,
                                               [user.usable_vessel_port])
    # Acquire all vessels on behalf of this user, then release 2 of them.
    all_vessels_list = interface.acquire_vessels(user, vesselcount, 'rand')
    released_vessels_list = all_vessels_list[:2]
    kept_vessels_list = all_vessels_list[2:]
    interface.release_vessels(user, released_vessels_list)
    # Ensure all of the vessels are marked as having user keys in sync.
    for vessel in all_vessels_list:
      # Get a fresh vessel from the db.
      vessel = maindb.get_vessel(vessel.node.node_identifier, vessel.name)
      self.assertTrue(vessel.user_keys_in_sync)
    return kept_vessels_list, released_vessels_list

  def _assert_sync_flags(self, kept_vessels_list, released_vessels_list):
    """Vessels still held must be out of sync; released ones untouched."""
    for vessel in kept_vessels_list:
      vessel = maindb.get_vessel(vessel.node.node_identifier, vessel.name)
      self.assertFalse(vessel.user_keys_in_sync)
    for vessel in released_vessels_list:
      vessel = maindb.get_vessel(vessel.node.node_identifier, vessel.name)
      self.assertTrue(vessel.user_keys_in_sync)

  def test_regenerate_user_key(self):
    user = self._create_test_user()
    kept, released = self._acquire_and_partially_release(user)
    # We expect a single key to be generated through the keygen api (the
    # new user public key).
    mocklib.mock_keygen_generate_keypair([("55 66", "77 88 99")])
    interface.change_user_keys(user, pubkey=None)
    # Get a new user object from the database and verify the key change.
    user = maindb.get_user(user.username)
    self.assertEqual(user.user_pubkey, "55 66")
    self.assertEqual(user.user_privkey, "77 88 99")
    self._assert_sync_flags(kept, released)

  def test_set_user_key(self):
    user = self._create_test_user()
    kept, released = self._acquire_and_partially_release(user)
    # We expect no keys to be generated through the keygen api: the user
    # supplies the public key and no private key is stored.
    mocklib.mock_keygen_generate_keypair([])
    interface.change_user_keys(user, pubkey="55 66")
    # Get a new user object from the database and verify the key change.
    user = maindb.get_user(user.username)
    self.assertEqual(user.user_pubkey, "55 66")
    self.assertEqual(user.user_privkey, None)
    self._assert_sync_flags(kept, released)

  def test_set_invalid_user_key(self):
    user = self._create_test_user()
    # A malformed key string must be rejected with a ValidationError.
    self.assertRaises(ValidationError, interface.change_user_keys, user, "abc")
def run_test():
  # Delegate to unittest's CLI runner so this module can be executed
  # directly as a test script.
  unittest.main()
if __name__ == "__main__":
  run_test()
| SensibilityTestbed/clearinghouse | website/tests/ut_interface_changeuserkey.py | Python | mit | 5,700 |
from base_head_app import app, db
| jstacoder/flask-manage | flask_mrbob/templates/project/+project.name+/__init__.py | Python | bsd-3-clause | 34 |
#!/usr/bin/env python
#
# A library that provides a Python interface to the Telegram Bot API
# Copyright (C) 2015-2016
# Leandro Toledo de Souza <devs@python-telegram-bot.org>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser Public License for more details.
#
# You should have received a copy of the GNU Lesser Public License
# along with this program. If not, see [http://www.gnu.org/licenses/].
"""This module contains the class Updater, which tries to make creating
Telegram bots intuitive."""
import logging
import os
import ssl
from threading import Thread, Lock, current_thread, Event
from time import sleep
import subprocess
from signal import signal, SIGINT, SIGTERM, SIGABRT
from queue import Queue
from telegram import Bot, TelegramError, NullHandler
from telegram.ext import dispatcher, Dispatcher, JobQueue
from telegram.error import Unauthorized, InvalidToken
from telegram.utils.webhookhandler import (WebhookServer, WebhookHandler)
logging.getLogger(__name__).addHandler(NullHandler())
class Updater(object):
"""
This class, which employs the Dispatcher class, provides a frontend to
telegram.Bot to the programmer, so they can focus on coding the bot. Its
purpose is to receive the updates from Telegram and to deliver them to said
dispatcher. It also runs in a separate thread, so the user can interact
with the bot, for example on the command line. The dispatcher supports
handlers for different kinds of data: Updates from Telegram, basic text
commands and even arbitrary types.
The updater can be started as a polling service or, for production, use a
webhook to receive updates. This is achieved using the WebhookServer and
WebhookHandler classes.
Attributes:
Args:
token (Optional[str]): The bot's token given by the @BotFather
base_url (Optional[str]):
workers (Optional[int]): Amount of threads in the thread pool for
functions decorated with @run_async
bot (Optional[Bot]):
job_queue_tick_interval(Optional[float]): The interval the queue should
be checked for new tasks. Defaults to 1.0
Raises:
ValueError: If both `token` and `bot` are passed or none of them.
"""
    def __init__(self, token=None, base_url=None, workers=4, bot=None):
        # Exactly one of `token` / `bot` must be supplied.
        if (token is None) and (bot is None):
            raise ValueError('`token` or `bot` must be passed')
        if (token is not None) and (bot is not None):
            raise ValueError('`token` and `bot` are mutually exclusive')
        if bot is not None:
            self.bot = bot
        else:
            self.bot = Bot(token, base_url)
        # Queue that the polling/webhook thread feeds and the Dispatcher drains.
        self.update_queue = Queue()
        self.job_queue = JobQueue(self.bot)
        # Set by _thread_wrapper when any worker thread dies with an
        # unhandled exception.
        self.__exception_event = Event()
        self.dispatcher = Dispatcher(self.bot,
                                     self.update_queue,
                                     job_queue=self.job_queue,
                                     workers=workers,
                                     exception_event=self.__exception_event)
        # Offset for getUpdates: id of the next update we expect to receive.
        self.last_update_id = 0
        self.logger = logging.getLogger(__name__)
        self.running = False
        self.is_idle = False
        self.httpd = None
        # Guards start/stop so they are idempotent across threads.
        self.__lock = Lock()
        # All threads spawned by this Updater; joined again in stop().
        self.__threads = []
        """:type: list[Thread]"""
    def _init_thread(self, target, name, *args, **kwargs):
        # Spawn a named thread running `target` via _thread_wrapper and
        # remember it so stop() can join it later.
        thr = Thread(target=self._thread_wrapper, name=name, args=(target,) + args, kwargs=kwargs)
        thr.start()
        self.__threads.append(thr)
    def _thread_wrapper(self, target, *args, **kwargs):
        # Run `target`, logging start/end and flagging unhandled exceptions
        # on the shared exception event before re-raising.
        thr_name = current_thread().name
        self.logger.debug('{0} - started'.format(thr_name))
        try:
            target(*args, **kwargs)
        except Exception:
            self.__exception_event.set()
            self.logger.exception('unhandled exception')
            raise
        self.logger.debug('{0} - ended'.format(thr_name))
    def start_polling(self,
                      poll_interval=0.0,
                      timeout=10,
                      network_delay=5.,
                      clean=False,
                      bootstrap_retries=0):
        """
        Starts polling updates from Telegram.
        Args:
            poll_interval (Optional[float]): Time to wait between polling updates from Telegram in
                seconds. Default is 0.0
            timeout (Optional[float]): Passed to Bot.getUpdates
            network_delay (Optional[float]): Passed to Bot.getUpdates
            clean (Optional[bool]): Whether to clean any pending updates on Telegram servers before
                actually starting to poll. Default is False.
            bootstrap_retries (Optional[int]): Whether the bootstrapping phase of the `Updater`
                will retry on failures on the Telegram server.
                | < 0 - retry indefinitely
                | 0 - no retries (default)
                | > 0 - retry up to X times
        Returns:
            Queue: The update queue that can be filled from the main thread
        """
        with self.__lock:
            # Idempotent: a second call just returns the same queue without
            # spawning new threads.
            if not self.running:
                self.running = True
                # Create & start threads
                self._init_thread(self.dispatcher.start, "dispatcher")
                self._init_thread(self._start_polling, "updater", poll_interval, timeout,
                                  network_delay, bootstrap_retries, clean)
            # Return the update queue so the main thread can insert updates
            return self.update_queue
    def start_webhook(self,
                      listen='127.0.0.1',
                      port=80,
                      url_path='',
                      cert=None,
                      key=None,
                      clean=False,
                      bootstrap_retries=0,
                      webhook_url=None):
        """
        Starts a small http server to listen for updates via webhook. If cert
        and key are not provided, the webhook will be started directly on
        http://listen:port/url_path, so SSL can be handled by another
        application. Else, the webhook will be started on
        https://listen:port/url_path
        Args:
            listen (Optional[str]): IP-Address to listen on
            port (Optional[int]): Port the bot should be listening on
            url_path (Optional[str]): Path inside url
            cert (Optional[str]): Path to the SSL certificate file
            key (Optional[str]): Path to the SSL key file
            clean (Optional[bool]): Whether to clean any pending updates on
                Telegram servers before actually starting the webhook. Default
                is False.
            bootstrap_retries (Optional[int[): Whether the bootstrapping phase
                of the `Updater` will retry on failures on the Telegram server.
                | < 0 - retry indefinitely
                | 0 - no retries (default)
                | > 0 - retry up to X times
            webhook_url (Optional[str]): Explicitly specifiy the webhook url.
                Useful behind NAT, reverse proxy, etc. Default is derived from
                `listen`, `port` & `url_path`.
        Returns:
            Queue: The update queue that can be filled from the main thread
        """
        with self.__lock:
            if not self.running:
                self.running = True
                # Create & start threads
                # NOTE(review): the trailing comma below makes this a 1-tuple
                # expression; harmless but accidental.
                self._init_thread(self.dispatcher.start, "dispatcher"),
                self._init_thread(self._start_webhook, "updater", listen, port, url_path, cert,
                                  key, bootstrap_retries, clean, webhook_url)
            # Return the update queue so the main thread can insert updates
            return self.update_queue
    def _start_polling(self, poll_interval, timeout, network_delay, bootstrap_retries, clean):
        """
        Thread target of thread 'updater'. Runs in background, pulls
        updates from Telegram and inserts them in the update queue of the
        Dispatcher.
        """
        cur_interval = poll_interval
        self.logger.debug('Updater thread started')
        # Clear any registered webhook first; polling and webhooks are
        # mutually exclusive on the Telegram side.
        self._bootstrap(bootstrap_retries, clean=clean, webhook_url='')
        while self.running:
            try:
                updates = self.bot.getUpdates(self.last_update_id,
                                              timeout=timeout,
                                              network_delay=network_delay)
            except TelegramError as te:
                self.logger.error("Error while getting Updates: {0}".format(te))
                # Put the error into the update queue and let the Dispatcher
                # broadcast it
                self.update_queue.put(te)
                # Back off before retrying after an error.
                cur_interval = self._increase_poll_interval(cur_interval)
            else:
                if not self.running:
                    if len(updates) > 0:
                        self.logger.debug('Updates ignored and will be pulled '
                                          'again on restart.')
                    break
                if updates:
                    for update in updates:
                        self.update_queue.put(update)
                    # Acknowledge processed updates: next poll starts after
                    # the newest update id we have seen.
                    self.last_update_id = updates[-1].update_id + 1
                # Successful poll: reset the backoff to the base interval.
                cur_interval = poll_interval
            sleep(cur_interval)
@staticmethod
def _increase_poll_interval(current_interval):
# increase waiting times on subsequent errors up to 30secs
if current_interval == 0:
current_interval = 1
elif current_interval < 30:
current_interval += current_interval / 2
elif current_interval > 30:
current_interval = 30
return current_interval
def _start_webhook(self, listen, port, url_path, cert, key, bootstrap_retries, clean,
webhook_url):
self.logger.debug('Updater thread started')
use_ssl = cert is not None and key is not None
if not url_path.startswith('/'):
url_path = '/{0}'.format(url_path)
# Create and start server
self.httpd = WebhookServer((listen, port), WebhookHandler, self.update_queue, url_path)
if use_ssl:
self._check_ssl_cert(cert, key)
# DO NOT CHANGE: Only set webhook if SSL is handled by library
if not webhook_url:
webhook_url = self._gen_webhook_url(listen, port, url_path)
self._bootstrap(max_retries=bootstrap_retries,
clean=clean,
webhook_url=webhook_url,
cert=open(cert, 'rb'))
elif clean:
self.logger.warning("cleaning updates is not supported if "
"SSL-termination happens elsewhere; skipping")
self.httpd.serve_forever(poll_interval=1)
def _check_ssl_cert(self, cert, key):
# Check SSL-Certificate with openssl, if possible
try:
exit_code = subprocess.call(
["openssl", "x509", "-text", "-noout", "-in", cert],
stdout=open(os.devnull, 'wb'),
stderr=subprocess.STDOUT)
except OSError:
exit_code = 0
if exit_code is 0:
try:
self.httpd.socket = ssl.wrap_socket(self.httpd.socket,
certfile=cert,
keyfile=key,
server_side=True)
except ssl.SSLError as error:
self.logger.exception('Failed to init SSL socket')
raise TelegramError(str(error))
else:
raise TelegramError('SSL Certificate invalid')
@staticmethod
def _gen_webhook_url(listen, port, url_path):
return 'https://{listen}:{port}{path}'.format(listen=listen, port=port, path=url_path)
    def _bootstrap(self, max_retries, clean, webhook_url, cert=None):
        """Register (or clear, for webhook_url='') the webhook with Telegram,
        retrying transient errors up to `max_retries` times (forever when
        negative). Auth failures are never retried."""
        retries = 0
        while 1:
            try:
                if clean:
                    # Disable webhook for cleaning
                    self.bot.setWebhook(webhook_url='')
                    self._clean_updates()
                self.bot.setWebhook(webhook_url=webhook_url, certificate=cert)
            except (Unauthorized, InvalidToken):
                # Bad credentials cannot be fixed by retrying.
                raise
            except TelegramError:
                msg = 'error in bootstrap phase; try={0} max_retries={1}'.format(retries,
                                                                                 max_retries)
                if max_retries < 0 or retries < max_retries:
                    self.logger.warning(msg)
                    retries += 1
                else:
                    self.logger.exception(msg)
                    raise
            else:
                break
            sleep(1)
    def _clean_updates(self):
        # Fetch and discard pending updates until none remain, advancing
        # the offset past each batch.
        self.logger.debug('Cleaning updates from Telegram server')
        updates = self.bot.getUpdates()
        while updates:
            updates = self.bot.getUpdates(updates[-1].update_id + 1)
    def stop(self):
        """
        Stops the polling/webhook thread, the dispatcher and the job queue
        """
        self.job_queue.stop()
        with self.__lock:
            if self.running or dispatcher.ASYNC_THREADS:
                self.logger.debug('Stopping Updater and Dispatcher...')
                self.running = False
                self._stop_httpd()
                self._stop_dispatcher()
                self._join_threads()
                # async threads must be join()ed only after the dispatcher thread was joined,
                # otherwise we can still have new async threads dispatched
                self._join_async_threads()
    def _stop_httpd(self):
        # Shut down the webhook server if one is running; shutdown() blocks
        # until the current request (if any) completes.
        if self.httpd:
            self.logger.debug('Waiting for current webhook connection to be '
                              'closed... Send a Telegram message to the bot to exit '
                              'immediately.')
            self.httpd.shutdown()
            self.httpd = None
    def _stop_dispatcher(self):
        self.logger.debug('Requesting Dispatcher to stop...')
        self.dispatcher.stop()
    def _join_async_threads(self):
        with dispatcher.ASYNC_LOCK:
            threads = list(dispatcher.ASYNC_THREADS)
        total = len(threads)
        # Stop all threads in the thread pool by put()ting one non-tuple per thread
        for i in range(total):
            dispatcher.ASYNC_QUEUE.put(None)
        for i, thr in enumerate(threads):
            self.logger.debug('Waiting for async thread {0}/{1} to end'.format(i + 1, total))
            thr.join()
            dispatcher.ASYNC_THREADS.remove(thr)
            self.logger.debug('async thread {0}/{1} has ended'.format(i + 1, total))
    def _join_threads(self):
        # Join every thread spawned via _init_thread, then forget them.
        for thr in self.__threads:
            self.logger.debug('Waiting for {0} thread to end'.format(thr.name))
            thr.join()
            self.logger.debug('{0} thread has ended'.format(thr.name))
        self.__threads = []
def signal_handler(self, signum, frame):
self.is_idle = False
if self.running:
self.stop()
else:
self.logger.warning('Exiting immediately!')
import os
os._exit(1)
    def idle(self, stop_signals=(SIGINT, SIGTERM, SIGABRT)):
        """
        Blocks until one of the signals are received and stops the updater
        Args:
            stop_signals: Iterable containing signals from the signal module
                that should be subscribed to. Updater.stop() will be called on
                receiving one of those signals. Defaults to (SIGINT, SIGTERM,
                SIGABRT)
        """
        for sig in stop_signals:
            signal(sig, self.signal_handler)
        self.is_idle = True
        # Park the main thread in 1s steps; signal_handler flips is_idle
        # back to False to break the loop.
        while self.is_idle:
            sleep(1)
| franciscod/python-telegram-bot | telegram/ext/updater.py | Python | gpl-2.0 | 16,495 |
# -*- encoding: utf-8 -*-
###############################################################################
# #
# Copyright (C) 2015 Trustcode - www.trustcode.com.br #
# Danimar Ribeiro <danimaribeiro@gmail.com> #
# #
# This program is free software: you can redistribute it and/or modify #
# it under the terms of the GNU Affero General Public License as published by #
# the Free Software Foundation, either version 3 of the License, or #
# (at your option) any later version. #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
# You should have received a copy of the GNU General Public License #
# along with this program. If not, see <http://www.gnu.org/licenses/>. #
# #
###############################################################################
from openerp import models, fields
class GithubIntegration(models.Model):
    # Odoo/OpenERP model holding GitHub credentials and the repository to
    # integrate with; records display as the repository name.
    # (Field labels are user-facing Portuguese strings and must stay as-is.)
    _name = 'github.integration'
    _rec_name = 'repo_name'
    # GitHub account credentials (labels: user / password).
    username = fields.Char(u'Usuário', size=100, required=True)
    password = fields.Char(u'Senha', size=100, required=True)
    # Repository owner and name (labels: repository owner / repository).
    owner_name = fields.Char(u'Dono Repositório')
    repo_name = fields.Char(u'Repositório', size=100, required=True)
    # Whether branch changes should be tracked (label: track changes).
    track_branch = fields.Boolean(u'Rastrear mudanças')
| Trust-Code/trust-addons | trust_tasks_github/models/github_integration.py | Python | agpl-3.0 | 1,993 |
import os
# Read the SBML model file that ships alongside this module into memory.
path = os.path.dirname(os.path.realpath(__file__))
sbmlFilePath = os.path.join(path, 'BIOMD0000000462.xml')
with open(sbmlFilePath,'r') as f:
    sbmlString = f.read()
def module_exists(module_name):
    """Return True when `module_name` is importable, False otherwise."""
    try:
        __import__(module_name)
    except ImportError:
        return False
    return True
# Parse the model eagerly only when libsbml is installed; when it is not,
# `sbml` is simply never defined and importing this module still succeeds.
if module_exists('libsbml'):
    import libsbml
sbml = libsbml.readSBMLFromString(sbmlString) | biomodels/BIOMD0000000462 | BIOMD0000000462/model.py | Python | cc0-1.0 | 427 |
# Copyright (c) by it's authors.
# Some rights reserved. See LICENSE, AUTHORS.
from peer import *
class Ping(Peer):
    # Pillow declaring both an inbound and an outbound "Ping" message.
    Ping = Pillow.InOut
    def __init__(self, *args):
        Peer.__init__(self, *args)
        # Route incoming Ping pillows to self.message.
        self._catch(Ping.In.Ping, self.message)
    def message(self, pillow, feathers):
        # Reply with the received counter incremented by one; `feathers` is
        # expected to be an int-convertible payload.
        self._throw(Ping.Out.Ping, int(feathers)+1);
| FreshXOpenSource/wallaby-base | wallaby/pf/peer/ping.py | Python | bsd-2-clause | 353 |
import math
import random
import itertools
import sys
import thread
import time
from globalvals import *
from pointstream import PointStream
import dac
from entities.asteroid import Asteroid
# Single shared point stream: consumed by the DAC thread, mutated by the
# simulation thread (DRAW is just an alias used by the game logic).
ps = PointStream()
DRAW = ps
def dac_thread():
    # Stream the shared point list to the first laser DAC found.
    # NOTE(review): the bare `raise` below propagates any error, so the
    # `while True` loop currently never retries; the commented-out handler
    # suggests log-and-retry was once intended -- confirm desired behavior.
    global DRAW
    while True:
        try:
            d = dac.DAC(dac.find_first_dac())
            d.play_stream(ps)
        except Exception as e:
            # import sys, traceback
            # print '\n---------------------'
            # print 'Exception: %s' % e
            # print '- - - - - - - - - - -'
            # traceback.print_tb(sys.exc_info()[2])
            # print "\n"
            raise
def spawn(x, y, xVel, yVel):
    """Create an asteroid at (x, y) with the given velocity, a random
    radius and a random spin direction/rate."""
    size = random.randint(ASTEROID_MIN_RADIUS, ASTEROID_MAX_RADIUS)
    asteroid = Asteroid(x, y, r=CMAX, g=CMAX, b=0, radius=size)
    asteroid.xVel = xVel
    asteroid.yVel = yVel
    asteroid.thetaRate = random.uniform(ASTEROID_SPIN_VEL_MAG_MIN,
                                        ASTEROID_SPIN_VEL_MAG_MAX)
    # Flip the spin direction on a coin toss (same RNG call order as before).
    if not random.randint(0, 1):
        asteroid.thetaRate = -asteroid.thetaRate
    return asteroid
def spawn_enemy():
    """Spawn an asteroid just off one of the 4 corners or 4 edges, with a
    velocity that carries it onto the playfield.

    NOTE(review): the corner labels below pair MIN_X with "RIGHT", which
    suggests a mirrored x-axis (projector coordinates) -- the velocity
    signs are consistent with that assumption, but confirm against the
    projection setup.
    """
    x, y, xVel, yVel = (0, 0, 0, 0)
    spawnType = random.randint(0, 7)
    """
    SPAWN LOCATION -- corners and edges
    """
    if spawnType == 0:
        # TOP RIGHT
        x = MIN_X
        y = MAX_Y
        xVel = random.randint(ASTEROID_VEL_MAG_MIN, ASTEROID_VEL_MAG_MAX)
        yVel = -random.randint(ASTEROID_VEL_MAG_MIN, ASTEROID_VEL_MAG_MAX)
    elif spawnType == 1:
        # BOTTOM RIGHT
        x = MIN_X
        y = MIN_Y
        xVel = random.randint(ASTEROID_VEL_MAG_MIN, ASTEROID_VEL_MAG_MAX)
        yVel = random.randint(ASTEROID_VEL_MAG_MIN, ASTEROID_VEL_MAG_MAX)
    elif spawnType == 2:
        # BOTTOM LEFT
        x = MAX_X
        y = MIN_Y
        xVel = -random.randint(ASTEROID_VEL_MAG_MIN, ASTEROID_VEL_MAG_MAX)
        yVel = random.randint(ASTEROID_VEL_MAG_MIN, ASTEROID_VEL_MAG_MAX)
    elif spawnType == 3:
        # TOP LEFT
        x = MAX_X
        y = MAX_Y
        xVel = -random.randint(ASTEROID_VEL_MAG_MIN, ASTEROID_VEL_MAG_MAX)
        yVel = -random.randint(ASTEROID_VEL_MAG_MIN, ASTEROID_VEL_MAG_MAX)
    elif spawnType == 4:
        # TOP EDGE: random x, random horizontal direction, moving down.
        x = random.randint(MIN_X, MAX_X)
        y = MAX_Y
        xVel = random.randint(ASTEROID_VEL_MAG_MIN, ASTEROID_VEL_MAG_MAX)
        xVel *= 1 if random.randint(0, 1) else -1
        yVel = -random.randint(ASTEROID_VEL_MAG_MIN, ASTEROID_VEL_MAG_MAX)
    elif spawnType == 5:
        # RIGHT EDGE: random y, random vertical direction.
        x = MIN_X
        y = random.randint(MIN_Y, MAX_Y)
        xVel = random.randint(ASTEROID_VEL_MAG_MIN, ASTEROID_VEL_MAG_MAX)
        yVel = random.randint(ASTEROID_VEL_MAG_MIN, ASTEROID_VEL_MAG_MAX)
        yVel *= 1 if random.randint(0, 1) else -1
    elif spawnType == 6:
        # BOTTOM EDGE: random x, random horizontal direction, moving up.
        x = random.randint(MIN_X, MAX_X)
        y = MIN_Y
        xVel = random.randint(ASTEROID_VEL_MAG_MIN, ASTEROID_VEL_MAG_MAX)
        xVel *= 1 if random.randint(0, 1) else -1
        yVel = random.randint(ASTEROID_VEL_MAG_MIN, ASTEROID_VEL_MAG_MAX)
    elif spawnType == 7:
        # LEFT EDGE: random y, random vertical direction.
        x = MAX_X
        y = random.randint(MIN_Y, MAX_Y)
        xVel = -random.randint(ASTEROID_VEL_MAG_MIN, ASTEROID_VEL_MAG_MAX)
        yVel = random.randint(ASTEROID_VEL_MAG_MIN, ASTEROID_VEL_MAG_MAX)
        yVel *= 1 if random.randint(0, 1) else -1
    e = spawn(x,y,xVel,yVel)
    return e
def run_thread():
    """Simulation loop: advance every active object each tick and flag the
    ones that leave the playfield for removal."""
    DRAW.objects.append(spawn(0, 0, 0, 0))
    while True:
        # Iterate over a snapshot: the point-stream/DAC thread may mutate
        # DRAW.objects concurrently, and iterating a list that another
        # thread modifies can skip entries or raise.
        for obj in list(DRAW.objects):
            x = obj.x + obj.xVel
            y = obj.y + obj.yVel
            # Off-screen objects are only flagged here; the drawing side
            # owns the actual removal from the list.
            if x < MIN_X or x > MAX_X or y < MIN_Y or y > MAX_Y:
                obj.destroy = True
                continue
            obj.x = x
            obj.y = y
            obj.theta += obj.thetaRate
def main():
    # Start the DAC output and the simulation as background threads, then
    # park the main thread (thread.start_new_thread offers no join).
    thread.start_new_thread(dac_thread, ())
    thread.start_new_thread(run_thread, ())
    while True:
        time.sleep(20000000)
if __name__ == '__main__':
main() | topher515/laser-fingers | main.py | Python | mit | 4,031 |
import uuid
from datetime import datetime
from unittest import TestCase
import pytz
import colander
from unicore.comments.service.models import (
COMMENT_MAX_LENGTH, COMMENT_CONTENT_TYPES, COMMENT_MODERATION_STATES,
COMMENT_STREAM_STATES)
from unicore.comments.service.schema import (
Comment, Flag, BannedUser, StreamMetadata)
from unicore.comments.service.tests.test_models import (
comment_data as comment_model_data,
flag_data as flag_model_data,
banneduser_data as banneduser_model_data,
streammetadata_data as streammetadata_model_data)
def simple_serialize(data):
    """In-place stringification of `data`'s values, mirroring how the
    service serializes model fields.

    bools -> 'true'/'false', ints -> str, datetimes -> ISO 8601 strings,
    UUIDs -> 32-char hex, dicts -> shallow copies, everything else -> its
    text representation.
    """
    for key in data.keys():
        value = data[key]
        # bool must be tested before int: bool is an int subclass.
        if isinstance(value, bool):
            data[key] = 'true' if value else 'false'
        elif isinstance(value, int):
            data[key] = str(value)
        elif isinstance(value, datetime):
            data[key] = value.isoformat()
        elif isinstance(value, uuid.UUID):
            data[key] = value.hex
        elif isinstance(value, dict):
            data[key] = value.copy()
        else:
            # `unicode()` exists only on Python 2; a u'' format keeps the
            # same text-type result there while also working on Python 3.
            data[key] = u'%s' % (value,)
# Pre-serialized copies of the model fixtures: the schemas work on flat
# string cstructs, so every fixture value is stringified once at import
# time (the *_model_data originals stay typed for comparison).
comment_data = comment_model_data.copy()
flag_data = flag_model_data.copy()
banneduser_data = banneduser_model_data.copy()
streammetadata_data = streammetadata_model_data.copy()
for data in (comment_data, flag_data, banneduser_data, streammetadata_data):
    simple_serialize(data)
class CommentTestCase(TestCase):
    def test_deserialize(self):
        """Deserializing a flat cstruct restores typed fields, drops
        DB-managed fields and enforces required fields."""
        schema = Comment().bind()
        clean = schema.deserialize(comment_data)
        # must remove flag_count so that it doesn't get updated directly
        self.assertNotIn('flag_count', clean)
        # check typed fields
        self.assertIsInstance(clean.pop('submit_datetime'), datetime)
        self.assertEqual(clean.pop('is_removed'), False)
        self.assertEqual(len(clean), len(comment_model_data) - 3)
        self.assertDictContainsSubset(clean, comment_model_data)
        # check that missing required fields raise an exception
        incomplete_data = comment_data.copy()
        required_fields = (
            'app_uuid', 'content_uuid', 'user_uuid', 'comment', 'user_name',
            'submit_datetime', 'content_type', 'content_title', 'content_url',
            'locale')
        for field in required_fields:
            del incomplete_data[field]
        try:
            schema.deserialize(incomplete_data)
            self.fail('Expected colander.Invalid to be raised')
        except colander.Invalid as e:
            # One child error per missing required field.
            self.assertEqual(len(e.children), len(required_fields))
        # check that missing fields with model defaults are dropped
        missing_data = comment_data.copy()
        fields_with_model_default = (
            'uuid', 'flag_count', 'is_removed', 'moderation_state',
            'ip_address')
        for field in fields_with_model_default:
            del missing_data[field]
        clean = schema.deserialize(missing_data)
        for field in fields_with_model_default:
            self.assertNotIn(field, clean)
    def test_serialize(self):
        """Serializing model data yields the flat string appstruct; absent
        or None values serialize to the string 'None'."""
        schema = Comment(include_all=True).bind()
        clean = schema.serialize(comment_model_data)
        self.assertEqual(clean, comment_data)
        # check that flag_count got serialized
        self.assertIn('flag_count', clean)
        # check that missing/None fields are 'None'
        missing_and_none_data = comment_model_data.copy()
        del missing_and_none_data['ip_address']
        clean = schema.serialize(missing_and_none_data)
        self.assertEqual(clean['ip_address'], 'None')
        missing_and_none_data['ip_address'] = None
        clean = schema.serialize(missing_and_none_data)
        self.assertEqual(clean['ip_address'], 'None')
class FlagTestCase(TestCase):
    """Round-trip tests for the Flag schema against the model fixtures."""
    def test_deserialize(self):
        """Deserializing attaches UTC to the datetime; every Flag field
        is required."""
        schema = Flag().bind()
        clean = schema.deserialize(flag_data)
        self.assertEqual(
            clean.pop('submit_datetime'),
            flag_model_data['submit_datetime'].replace(tzinfo=pytz.UTC))
        self.assertEqual(len(clean), len(flag_model_data) - 1)
        self.assertDictContainsSubset(clean, flag_model_data)
        # check that missing required fields raise an exception
        # all flag fields are required
        incomplete_data = {}
        try:
            schema.deserialize(incomplete_data)
            self.fail('Expected colander.Invalid to be raised')
        except colander.Invalid as e:
            # one child error per (required) field
            self.assertEqual(len(e.children), len(flag_data))
    def test_serialize(self):
        """Serializing a model dict reproduces the all-string fixture."""
        schema = Flag().bind()
        clean = schema.serialize(flag_model_data)
        self.assertEqual(clean, flag_data)
class BannedUserTestCase(TestCase):
    """Round-trip tests for the BannedUser schema against the fixtures."""
    def test_deserialize(self):
        """Deserializing attaches UTC to 'created'; a missing 'created'
        is simply dropped (model supplies the default)."""
        schema = BannedUser().bind()
        clean = schema.deserialize(banneduser_data)
        self.assertEqual(
            clean.pop('created'),
            banneduser_model_data['created'].replace(tzinfo=pytz.UTC))
        self.assertEqual(len(clean), len(banneduser_model_data) - 1)
        self.assertDictContainsSubset(clean, banneduser_model_data)
        copy = banneduser_data.copy()
        del copy['created']
        clean = schema.deserialize(copy)
        self.assertNotIn('created', clean)
    def test_serialize(self):
        """Serializing a model dict reproduces the all-string fixture."""
        schema = BannedUser().bind()
        clean = schema.serialize(banneduser_model_data)
        self.assertEqual(clean, banneduser_data)
class StreamMetadataTestCase(TestCase):
    """Round-trip tests for the StreamMetadata schema."""
    def test_deserialize(self):
        """A missing metadata dict becomes {}; unknown keys without an
        'X-' prefix are silently dropped."""
        schema = StreamMetadata().bind()
        clean = schema.deserialize(streammetadata_data)
        self.assertEqual(clean, streammetadata_model_data)
        copy = streammetadata_data.copy()
        del copy['metadata']
        clean = schema.deserialize(copy)
        self.assertEqual(clean.get('metadata', None), {})
        # dropped because unknown and no X- prefix
        copy['metadata'] = {'unknown': 'value'}
        clean = schema.deserialize(copy)
        self.assertEqual(clean.get('metadata', None), {})
    def test_serialize(self):
        """Serializing a model dict reproduces the fixture."""
        schema = StreamMetadata().bind()
        clean = schema.serialize(streammetadata_model_data)
        self.assertEqual(clean, streammetadata_data)
class ValidatorTestCase(TestCase):
    """Exercises each field validator on the Comment/Flag/StreamMetadata
    schemas with deliberately invalid input."""
    # Schemas are bound once at class level; setUp provides fresh data
    # copies so each test can mutate them freely.
    schema_flag = Flag().bind()
    schema_comment = Comment().bind()
    schema_streammetadata = StreamMetadata().bind()
    def setUp(self):
        self.data_flag = flag_data.copy()
        self.data_comment = comment_data.copy()
    def test_uuid_validator(self):
        self.data_flag['app_uuid'] = 'notauuid'
        self.assertRaisesRegexp(
            colander.Invalid, "'app_uuid'",
            self.schema_flag.deserialize, self.data_flag)
    def test_comment_uuid_validator(self):
        """Binding a comment_uuid restricts the accepted value to exactly
        that UUID; any other value is rejected."""
        comment_uuid = self.data_flag['comment_uuid']
        schema = Flag().bind(comment_uuid=comment_uuid)
        self.assertEqual(
            schema.deserialize(self.data_flag)['comment_uuid'],
            uuid.UUID(comment_uuid))
        other_uuid = uuid.uuid4().hex
        schema = Flag().bind(comment_uuid=other_uuid)
        self.assertRaisesRegexp(
            colander.Invalid, "is not one of %s" % uuid.UUID(other_uuid),
            schema.deserialize, self.data_flag)
    def test_ip_address_validator(self):
        self.data_comment['ip_address'] = 'notanipaddress'
        self.assertRaisesRegexp(
            colander.Invalid, "'ip_address'",
            self.schema_comment.deserialize, self.data_comment)
    def test_locale_validator(self):
        self.data_comment['locale'] = 'notalocale'
        self.assertRaisesRegexp(
            colander.Invalid, "'locale'",
            self.schema_comment.deserialize, self.data_comment)
    def test_comment_validator(self):
        # Both the empty comment and one over the maximum length must fail.
        for val in ('', 'a' * (COMMENT_MAX_LENGTH + 1)):
            self.data_comment['comment'] = val
            self.assertRaisesRegexp(
                colander.Invalid, "'comment'",
                self.schema_comment.deserialize, self.data_comment)
    def test_content_type_validator(self):
        self.data_comment['content_type'] = 'notacontenttype'
        types = ', '.join(COMMENT_CONTENT_TYPES)
        self.assertRaisesRegexp(
            colander.Invalid, 'is not one of %s' % types,
            self.schema_comment.deserialize, self.data_comment)
    def test_content_url_validator(self):
        self.data_comment['content_url'] = 'notacontenturl'
        self.assertRaisesRegexp(
            colander.Invalid, "'content_url'",
            self.schema_comment.deserialize, self.data_comment)
    def test_moderation_state_validator(self):
        self.data_comment['moderation_state'] = 'notamoderationstate'
        states = ', '.join(map(lambda t: t[0], COMMENT_MODERATION_STATES))
        self.assertRaisesRegexp(
            colander.Invalid, 'is not one of %s' % states,
            self.schema_comment.deserialize, self.data_comment)
    def test_stream_state_validator(self):
        # Deep-copy the nested metadata dict before mutating it so the
        # module-level fixture is left untouched.
        smd_data = streammetadata_data.copy()
        smd_data['metadata'] = smd_data['metadata'].copy()
        smd_data['metadata']['state'] = 'invalid'
        states = ', '.join(COMMENT_STREAM_STATES)
        self.assertRaisesRegexp(
            colander.Invalid, 'is not one of %s' % states,
            self.schema_streammetadata.deserialize, smd_data)
| universalcore/unicore.comments | unicore/comments/service/tests/test_schema.py | Python | bsd-2-clause | 9,303 |
"""Batch Normalization for TensorFlow.
Parag K. Mital, Jan 2016.
"""
import tensorflow as tf
def batch_norm(x, phase_train, scope='bn', affine=True):
    """
    Batch normalization on convolutional maps.
    from: https://stackoverflow.com/questions/33949786/how-could-i-
    use-batch-normalization-in-tensorflow
    Only modified to infer shape from input tensor x.
    Parameters
    ----------
    x
        Tensor, 4D BHWD input maps
    phase_train
        boolean tf.Variable, true indicates training phase
    scope
        string, variable scope
    affine
        whether to affine-transform outputs
    Return
    ------
    normed
        batch-normalized maps
    """
    # Uses the pre-1.x TensorFlow graph API (variable_scope, tf.cond,
    # batch_norm_with_global_normalization).
    with tf.variable_scope(scope):
        shape = x.get_shape().as_list()
        # beta/gamma are the learned shift/scale over the channel axis;
        # gamma is frozen unless `affine` is set.
        beta = tf.Variable(tf.constant(0.0, shape=[shape[-1]]),
                           name='beta', trainable=True)
        gamma = tf.Variable(tf.constant(1.0, shape=[shape[-1]]),
                            name='gamma', trainable=affine)
        # Per-channel statistics over batch, height and width.
        batch_mean, batch_var = tf.nn.moments(x, [0, 1, 2], name='moments')
        ema = tf.train.ExponentialMovingAverage(decay=0.9)
        # NOTE(review): ema.average() is consulted before ema.apply() has
        # ever run for these tensors; in older TF releases the shadow
        # variable does not exist yet at this point -- confirm this works
        # on the TF version this repo targets.
        ema_mean, ema_var = ema.average(batch_mean), ema.average(batch_var)
        def mean_var_with_update():
            """Update the moving averages, then return the batch statistics.
            Returns
            -------
            (mean, var) : tuple of Tensors
                The batch mean/variance, with the EMA update as a
                control dependency so it runs every training step.
            """
            ema_apply_op = ema.apply([batch_mean, batch_var])
            with tf.control_dependencies([ema_apply_op]):
                return tf.identity(batch_mean), tf.identity(batch_var)
        # Training: use batch statistics (and refresh the EMA);
        # inference: use the accumulated moving averages.
        mean, var = tf.cond(phase_train,
                            mean_var_with_update,
                            lambda: (ema_mean, ema_var))
        # 1e-3 is the variance epsilon guarding against division by zero.
        normed = tf.nn.batch_norm_with_global_normalization(
            x, mean, var, beta, gamma, 1e-3, affine)
        return normed
| apoorva-sharma/deep-frame-interpolation | tensorflow_tutorials-master/python/libs/batch_norm.py | Python | mit | 1,891 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import pytest
from django.test import RequestFactory
from django.urls import reverse
from mock import MagicMock
from nav.models.profiles import Account
from nav.web.info.room.views import create_csv
from nav.web.info.searchproviders import SearchProvider
#########
# Tests #
#########
def test_search_for_nonascii_characters_should_not_crash(client):
    """A query containing non-ASCII characters must render a normal page."""
    response = client.get(reverse('info-search') + '?query=æøå')
    assert response.status_code == 200
def test_failing_searchprovider_should_not_crash_search_page(
        client, failing_searchprovider):
    """A provider that raises must not take down the search view."""
    response = client.get(reverse('info-search') + '?query=Da%20Truf')
    assert response.status_code == 200
def test_failures_should_be_mentioned_in_search_page(client,
                                                     failing_searchprovider):
    """The failing provider's dotted path must appear in the rendered page."""
    response = client.get(reverse('info-search') + '?query=Da%20Truf')
    page_text = response.content.decode('utf-8')
    assert failing_searchprovider in page_text
def test_room_csv_download_should_not_produce_bytestring_representations():
    """Regression test: the CSV export body must not start with a Python
    bytes repr prefix (``b'``), which would indicate bytes being str()-ed
    somewhere in the view."""
    factory = RequestFactory()
    request = factory.post(
        reverse("room-csv"), data={"roomid": "myroom", "rows": "one;two;three\n"}
    )
    # The view expects an authenticated account and a session on the request.
    request.account = Account.objects.get(pk=Account.ADMIN_ACCOUNT)
    request.session = MagicMock()
    response = create_csv(request)  # type: django.http.response.HttpResponse
    assert not response.content.startswith(b"b'")
############
# Fixtures #
############
@pytest.fixture
def failing_searchprovider():
    """Temporarily register a search provider that always raises.

    Yields the provider's dotted path and de-registers it from
    ``settings.SEARCHPROVIDERS`` again on teardown.
    """
    from django.conf import settings
    provider = '{module}.{klass}'.format(module=__name__,
                                         klass=FailingSearchProvider.__name__)
    already_present = provider in settings.SEARCHPROVIDERS
    if not already_present:
        settings.SEARCHPROVIDERS.append(provider)
    yield provider
    # Teardown: drop the (first) registration again if it is still there.
    if provider in settings.SEARCHPROVIDERS:
        settings.SEARCHPROVIDERS.remove(provider)
class FailingSearchProvider(SearchProvider):
    """A search provider that only raises exceptions"""
    def fetch_results(self):
        # Always blow up, so tests can verify the search view survives a
        # broken provider.
        raise Exception("Riddikulus")
| UNINETT/nav | tests/integration/web/info_test.py | Python | gpl-2.0 | 2,334 |
from django.conf import settings
from django.utils import timezone
from ..compat import is_authenticated
from ..models import SessionProfile
from .base import Base
class SessionProfileStore(Base):
    """
    Backend that saves the link between session_key and user in the database.
    """
    def save_session(self, request):
        """Record (or clear) the user that owns the request's session key."""
        if not hasattr(request, 'user'):
            return
        store = self.get_session_store(request)
        if store is not None and store.session_key is not None:
            sp, _ = SessionProfile.objects.get_or_create(session_key=store.session_key)
            if is_authenticated(request.user):
                # Only write when ownership actually changed, to avoid a
                # needless UPDATE on every request.
                if sp.user != request.user:
                    sp.user = request.user
                    sp.save()
            elif sp.user is not None:
                # Anonymous request on a session previously linked to a
                # user: detach the user.
                sp.user = None
                sp.save()
    def purge_for_user(self, user):
        """Delete every session/user link recorded for *user*."""
        SessionProfile.objects.filter(user=user).delete()
    def clear_expired(self):
        """Drop links whose sessions have expired or no longer exist.

        Only supported for the db/cached_db session engines, where the
        Session model is available to compare against.
        """
        if settings.SESSION_ENGINE in [
                'django.contrib.sessions.backends.db', 'django.contrib.sessions.backends.cached_db']:
            from django.contrib.sessions.models import Session
            all_sessions = Session.objects.values_list('session_key')
            expired = all_sessions.filter(expire_date__lte=timezone.now())
            SessionProfile.objects.filter(session_key__in=expired).delete()
            # Also remove links whose session row has vanished entirely.
            SessionProfile.objects.exclude(session_key__in=all_sessions).delete()
        else:
            raise NotImplementedError('The session engine %s is not supported' % settings.SESSION_ENGINE)
| modelbrouwers/django-sessionprofile | sessionprofile/backends/db.py | Python | mit | 1,604 |
#!/bin/python
import xml.etree.ElementTree as et
import sys
import os
def usage():
    # Print command-line usage help (Python 2 print-statement syntax).
    print "Usage: "
    print "p4.py <file path of PublishProfile file>"
def parse(path):
path = os.path.abspath(sys.argv[1])
print "Opening " + path + "\n"
doc = et.parse(path)
root = doc.getroot()
element = root.findall("publishProfile[@publishMethod='FTP']")[0]
print "Server " + element.attrib["publishUrl"]
print "User " + element.attrib["userName"]
print "Password " + element.attrib["userPWD"]
def main():
    # Require the publish-profile path as the first CLI argument;
    # otherwise show the usage help.
    if len(sys.argv)> 1:
        parse(sys.argv[1])
    else:
        usage()
# Only run when executed as a script; the bare main() call previously
# triggered argument parsing as a side effect of merely importing p4.
if __name__ == '__main__':
    main()
| rbnswartz/p4 | p4.py | Python | mit | 617 |
# Ant
#
# Copyright (c) 2015, Gustav Tiger <gustav@tiger.name>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
# Public submodules re-exported by the antfs_cli package.
__all__ = ["program", "scripting", "utilities"]
| Tigge/antfs-cli | antfs_cli/__init__.py | Python | mit | 1,169 |
#
# Copyright (c) 2013-2018 Balabit
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#
import re
import os.path
import nose
import typesafety
# Monkey-patching the _exc_info_to_string is the only simple way to influence
# how the exception gets formatted. We can't really replace the object with a
# subclass (we use this in a nose plugin, which is one of the many classes
# modifying the TestResult class).
class ExceptionStringConverter: # pylint: disable=W0212
    """Filtering proxy around a TestResult's ``_exc_info_to_string``.

    Drops traceback frames (the ``File ...`` header line plus the source
    line that follows it) that originate from the typesafety package
    itself, so reported failures show only the user's frames.
    """
    TRACE_LINE_PATTERN = re.compile(r'^ File "([^"]*)"')
    @classmethod
    def wrap_results_object(cls, result):
        # Swap the bound formatter for a filtering wrapper around it.
        result._exc_info_to_string = cls(result._exc_info_to_string)
    def __init__(self, original_converter):
        self.__original_converter = original_converter
    def __call__(self, err, test):
        formatted = self.__original_converter(err, test)
        kept = []
        drop_next = False
        for line in formatted.split('\n'):
            if drop_next:
                # Source line following a filtered "File ..." header.
                drop_next = False
            else:
                match = self.TRACE_LINE_PATTERN.match(line)
                if match is not None and self.__should_skip(match.group(1)):
                    drop_next = True
                else:
                    kept.append(line)
        return '\n'.join(kept)
    def __should_skip(self, filename):
        # A path inside the typesafety package yields a relative path
        # that does not climb out of the package with '..'.
        relative = os.path.relpath(filename, typesafety.__path__[0])
        return not relative.startswith('..')
class TypesafetyPlugin(nose.plugins.Plugin):
    """Nose plugin that activates typesafety checking for selected modules."""
    name = 'typesafety'
    # Tuple of module paths (each pre-split on '.') selected with -T.
    __enabled_for = ()
    keep_typesafety_trace = False
    enabled = False
    def __init__(self, *args, activate=None, **kwargs):
        # `activate` is injectable for testing; defaults to the real
        # typesafety.activate.
        super().__init__(*args, **kwargs)
        self.__activate = activate or typesafety.activate
    def options(self, parser, env):
        """Register the plugin's command-line options (nose plugin API)."""
        parser.add_option(
            '-T', '--enable-typesafety', action='append', metavar='MODULE',
            help='Enable typesafety for the given modules'
        )
        parser.add_option(
            '--keep-typesafety-trace', action='store_true',
            help='Do not hide typesafety traceback frames ' +
                 '(useful when debugging typesafety)'
        )
    def configure(self, options, conf):
        """Enable the plugin and activate type checking (nose plugin API)."""
        if options.enable_typesafety:
            self.enabled = True
            self.__enabled_for = tuple(
                mod.split('.') for mod in options.enable_typesafety
            )
        try:
            self.__activate(filter_func=self.__check_need_activate)
        except RuntimeError:
            # Nose plugin was already enabled in a different thread
            return
        self.keep_typesafety_trace = options.keep_typesafety_trace
    # This is an interface function that cannot be removed (see the nose
    # plugin documentation for the meaning of this function).
    def prepareTestResult(self, result): # pylint: disable=C0103
        if not self.keep_typesafety_trace:
            ExceptionStringConverter.wrap_results_object(result)
    def __check_need_activate(self, module_name):
        # True when module_name equals, or lives underneath, any of the
        # module paths selected on the command line.
        module_name = module_name.split('.')
        return any(
            module_name[:len(name)] == name for name in self.__enabled_for
        )
| herczy/typesafety | typesafety/noseplugin.py | Python | lgpl-2.1 | 3,825 |
from monthly_budget.models.income import Income
from monthly_budget.models.tax_bracket import TaxBracket
from monthly_budget.config import standard_deductions
from monthly_budget.config import tax_brackets
# todo: COL (cost-of-living) deductions module?
# todo: apply tax bracket to Income
# Step 1: Create an Income object
# Step 2: Pass income to Budget to give back spendable money
# Step 3: Add each liability to your liabilities
# Step 4: Apply the burndown
# Demo: gross salary for an hourly rate of 16 at 50 hours.
income = Income(rate=16, frequency='hourly', hours=50)
salary = income.get_salary()
print(salary)
# Estimate take-home pay using the single-filer bracket and deduction.
tax_bracket = TaxBracket(bracket=tax_brackets['single'], deduction=standard_deductions['single'])
net_income = tax_bracket.estimate_net_income(gross_income=salary)
print(net_income)
| cwiki/monthly_budget | monthly_budget/app.py | Python | mit | 722 |
# -*- encoding: utf-8 -*-
from __future__ import unicode_literals
import unittest
from django.utils import six
from django.utils.functional import cached_property, lazy, lazy_property
class FunctionalTestCase(unittest.TestCase):
    """Tests for django.utils.functional: lazy, lazy_property and
    cached_property."""
    def test_lazy(self):
        """A lazy proxy yields the same items as the wrapped callable."""
        t = lazy(lambda: tuple(range(3)), list, tuple)
        for a, b in zip(t(), range(3)):
            self.assertEqual(a, b)
    def test_lazy_base_class(self):
        """lazy also finds base class methods in the proxy object"""
        class Base(object):
            def base_method(self):
                pass
        class Klazz(Base):
            pass
        t = lazy(lambda: Klazz(), Klazz)()
        self.assertIn('base_method', dir(t))
    def test_lazy_base_class_override(self):
        """lazy finds the correct (overridden) method implementation"""
        class Base(object):
            def method(self):
                return 'Base'
        class Klazz(Base):
            def method(self):
                return 'Klazz'
        t = lazy(lambda: Klazz(), Base)()
        self.assertEqual(t.method(), 'Klazz')
    def test_lazy_property(self):
        """lazy_property resolves the getter on the subclass at access time."""
        class A(object):
            def _get_do(self):
                raise NotImplementedError
            def _set_do(self, value):
                raise NotImplementedError
            do = lazy_property(_get_do, _set_do)
        class B(A):
            def _get_do(self):
                return "DO IT"
        with self.assertRaises(NotImplementedError):
            A().do
        self.assertEqual(B().do, 'DO IT')
    def test_lazy_object_to_string(self):
        """Text and bytes conversion pass through the lazy proxy."""
        class Klazz(object):
            if six.PY3:
                def __str__(self):
                    return "Î am ā Ǩlâzz."
                def __bytes__(self):
                    return b"\xc3\x8e am \xc4\x81 binary \xc7\xa8l\xc3\xa2zz."
            else:
                def __unicode__(self):
                    return "Î am ā Ǩlâzz."
                def __str__(self):
                    return b"\xc3\x8e am \xc4\x81 binary \xc7\xa8l\xc3\xa2zz."
        t = lazy(lambda: Klazz(), Klazz)()
        self.assertEqual(six.text_type(t), "Î am ā Ǩlâzz.")
        self.assertEqual(six.binary_type(t), b"\xc3\x8e am \xc4\x81 binary \xc7\xa8l\xc3\xa2zz.")
    def test_cached_property(self):
        """
        cached_property caches its value and that it behaves like a property
        """
        class A(object):
            @cached_property
            def value(self):
                """Here is the docstring..."""
                return 1, object()
            def other_value(self):
                return 1
            other = cached_property(other_value, name='other')
        # docstring should be preserved
        self.assertEqual(A.value.__doc__, "Here is the docstring...")
        a = A()
        # check that it is cached
        self.assertEqual(a.value, a.value)
        # check that it returns the right thing
        self.assertEqual(a.value[0], 1)
        # check that state isn't shared between instances
        a2 = A()
        self.assertNotEqual(a.value, a2.value)
        # check that it behaves like a property when there's no instance
        self.assertIsInstance(A.value, cached_property)
        # check that overriding name works
        self.assertEqual(a.other, 1)
        self.assertTrue(callable(a.other_value))
    def test_lazy_equality(self):
        """
        == and != work correctly for Promises.
        """
        lazy_a = lazy(lambda: 4, int)
        lazy_b = lazy(lambda: 4, int)
        lazy_c = lazy(lambda: 5, int)
        self.assertEqual(lazy_a(), lazy_b())
        self.assertNotEqual(lazy_b(), lazy_c())
    def test_lazy_repr_text(self):
        """repr() of an evaluated lazy text equals repr() of the original."""
        original_object = 'Lazy translation text'
        lazy_obj = lazy(lambda: original_object, six.text_type)
        self.assertEqual(repr(original_object), repr(lazy_obj()))
    def test_lazy_repr_int(self):
        """repr() of an evaluated lazy int equals repr() of the original."""
        original_object = 15
        lazy_obj = lazy(lambda: original_object, int)
        self.assertEqual(repr(original_object), repr(lazy_obj()))
    def test_lazy_repr_bytes(self):
        """repr() of evaluated lazy bytes equals repr() of the original."""
        original_object = b'J\xc3\xbcst a str\xc3\xadng'
        lazy_obj = lazy(lambda: original_object, bytes)
        self.assertEqual(repr(original_object), repr(lazy_obj()))
| frishberg/django | tests/utils_tests/test_functional.py | Python | bsd-3-clause | 4,317 |
from django.db import models
from instruments.base import BaseTable
class Kiswahili_WS(BaseTable):
item_1_choices = [('produces', 'produces')]
item_1 = models.CharField(max_length=8, choices=item_1_choices, null=True)
item_2_choices = [('produces', 'produces')]
item_2 = models.CharField(max_length=8, choices=item_2_choices, null=True)
item_3_choices = [('produces', 'produces')]
item_3 = models.CharField(max_length=8, choices=item_3_choices, null=True)
item_4_choices = [('produces', 'produces')]
item_4 = models.CharField(max_length=8, choices=item_4_choices, null=True)
item_5_choices = [('produces', 'produces')]
item_5 = models.CharField(max_length=8, choices=item_5_choices, null=True)
item_6_choices = [('produces', 'produces')]
item_6 = models.CharField(max_length=8, choices=item_6_choices, null=True)
item_7_choices = [('produces', 'produces')]
item_7 = models.CharField(max_length=8, choices=item_7_choices, null=True)
item_8_choices = [('produces', 'produces')]
item_8 = models.CharField(max_length=8, choices=item_8_choices, null=True)
item_9_choices = [('produces', 'produces')]
item_9 = models.CharField(max_length=8, choices=item_9_choices, null=True)
item_10_choices = [('produces', 'produces')]
item_10 = models.CharField(max_length=8, choices=item_10_choices, null=True)
item_11_choices = [('produces', 'produces')]
item_11 = models.CharField(max_length=8, choices=item_11_choices, null=True)
item_12_choices = [('produces', 'produces')]
item_12 = models.CharField(max_length=8, choices=item_12_choices, null=True)
item_13_choices = [('produces', 'produces')]
item_13 = models.CharField(max_length=8, choices=item_13_choices, null=True)
item_14_choices = [('produces', 'produces')]
item_14 = models.CharField(max_length=8, choices=item_14_choices, null=True)
item_15_choices = [('produces', 'produces')]
item_15 = models.CharField(max_length=8, choices=item_15_choices, null=True)
item_16_choices = [('produces', 'produces')]
item_16 = models.CharField(max_length=8, choices=item_16_choices, null=True)
item_17_choices = [('produces', 'produces')]
item_17 = models.CharField(max_length=8, choices=item_17_choices, null=True)
item_18_choices = [('produces', 'produces')]
item_18 = models.CharField(max_length=8, choices=item_18_choices, null=True)
item_19_choices = [('produces', 'produces')]
item_19 = models.CharField(max_length=8, choices=item_19_choices, null=True)
item_20_choices = [('produces', 'produces')]
item_20 = models.CharField(max_length=8, choices=item_20_choices, null=True)
item_21_choices = [('produces', 'produces')]
item_21 = models.CharField(max_length=8, choices=item_21_choices, null=True)
item_22_choices = [('produces', 'produces')]
item_22 = models.CharField(max_length=8, choices=item_22_choices, null=True)
item_23_choices = [('produces', 'produces')]
item_23 = models.CharField(max_length=8, choices=item_23_choices, null=True)
item_24_choices = [('produces', 'produces')]
item_24 = models.CharField(max_length=8, choices=item_24_choices, null=True)
item_25_choices = [('produces', 'produces')]
item_25 = models.CharField(max_length=8, choices=item_25_choices, null=True)
item_26_choices = [('produces', 'produces')]
item_26 = models.CharField(max_length=8, choices=item_26_choices, null=True)
item_27_choices = [('produces', 'produces')]
item_27 = models.CharField(max_length=8, choices=item_27_choices, null=True)
item_28_choices = [('produces', 'produces')]
item_28 = models.CharField(max_length=8, choices=item_28_choices, null=True)
item_29_choices = [('produces', 'produces')]
item_29 = models.CharField(max_length=8, choices=item_29_choices, null=True)
item_30_choices = [('produces', 'produces')]
item_30 = models.CharField(max_length=8, choices=item_30_choices, null=True)
item_31_choices = [('produces', 'produces')]
item_31 = models.CharField(max_length=8, choices=item_31_choices, null=True)
item_32_choices = [('produces', 'produces')]
item_32 = models.CharField(max_length=8, choices=item_32_choices, null=True)
item_33_choices = [('produces', 'produces')]
item_33 = models.CharField(max_length=8, choices=item_33_choices, null=True)
item_34_choices = [('produces', 'produces')]
item_34 = models.CharField(max_length=8, choices=item_34_choices, null=True)
item_35_choices = [('produces', 'produces')]
item_35 = models.CharField(max_length=8, choices=item_35_choices, null=True)
item_36_choices = [('produces', 'produces')]
item_36 = models.CharField(max_length=8, choices=item_36_choices, null=True)
item_37_choices = [('produces', 'produces')]
item_37 = models.CharField(max_length=8, choices=item_37_choices, null=True)
item_38_choices = [('produces', 'produces')]
item_38 = models.CharField(max_length=8, choices=item_38_choices, null=True)
item_39_choices = [('produces', 'produces')]
item_39 = models.CharField(max_length=8, choices=item_39_choices, null=True)
item_40_choices = [('produces', 'produces')]
item_40 = models.CharField(max_length=8, choices=item_40_choices, null=True)
item_41_choices = [('produces', 'produces')]
item_41 = models.CharField(max_length=8, choices=item_41_choices, null=True)
item_42_choices = [('produces', 'produces')]
item_42 = models.CharField(max_length=8, choices=item_42_choices, null=True)
item_43_choices = [('produces', 'produces')]
item_43 = models.CharField(max_length=8, choices=item_43_choices, null=True)
item_44_choices = [('produces', 'produces')]
item_44 = models.CharField(max_length=8, choices=item_44_choices, null=True)
item_45_choices = [('produces', 'produces')]
item_45 = models.CharField(max_length=8, choices=item_45_choices, null=True)
item_46_choices = [('produces', 'produces')]
item_46 = models.CharField(max_length=8, choices=item_46_choices, null=True)
item_47_choices = [('produces', 'produces')]
item_47 = models.CharField(max_length=8, choices=item_47_choices, null=True)
item_48_choices = [('produces', 'produces')]
item_48 = models.CharField(max_length=8, choices=item_48_choices, null=True)
item_49_choices = [('produces', 'produces')]
item_49 = models.CharField(max_length=8, choices=item_49_choices, null=True)
item_50_choices = [('produces', 'produces')]
item_50 = models.CharField(max_length=8, choices=item_50_choices, null=True)
item_51_choices = [('produces', 'produces')]
item_51 = models.CharField(max_length=8, choices=item_51_choices, null=True)
item_52_choices = [('produces', 'produces')]
item_52 = models.CharField(max_length=8, choices=item_52_choices, null=True)
item_53_choices = [('produces', 'produces')]
item_53 = models.CharField(max_length=8, choices=item_53_choices, null=True)
item_54_choices = [('produces', 'produces')]
item_54 = models.CharField(max_length=8, choices=item_54_choices, null=True)
item_55_choices = [('produces', 'produces')]
item_55 = models.CharField(max_length=8, choices=item_55_choices, null=True)
item_56_choices = [('produces', 'produces')]
item_56 = models.CharField(max_length=8, choices=item_56_choices, null=True)
item_57_choices = [('produces', 'produces')]
item_57 = models.CharField(max_length=8, choices=item_57_choices, null=True)
item_58_choices = [('produces', 'produces')]
item_58 = models.CharField(max_length=8, choices=item_58_choices, null=True)
item_59_choices = [('produces', 'produces')]
item_59 = models.CharField(max_length=8, choices=item_59_choices, null=True)
item_60_choices = [('produces', 'produces')]
item_60 = models.CharField(max_length=8, choices=item_60_choices, null=True)
item_61_choices = [('produces', 'produces')]
item_61 = models.CharField(max_length=8, choices=item_61_choices, null=True)
item_62_choices = [('produces', 'produces')]
item_62 = models.CharField(max_length=8, choices=item_62_choices, null=True)
item_63_choices = [('produces', 'produces')]
item_63 = models.CharField(max_length=8, choices=item_63_choices, null=True)
item_64_choices = [('produces', 'produces')]
item_64 = models.CharField(max_length=8, choices=item_64_choices, null=True)
item_65_choices = [('produces', 'produces')]
item_65 = models.CharField(max_length=8, choices=item_65_choices, null=True)
item_66_choices = [('produces', 'produces')]
item_66 = models.CharField(max_length=8, choices=item_66_choices, null=True)
item_67_choices = [('produces', 'produces')]
item_67 = models.CharField(max_length=8, choices=item_67_choices, null=True)
item_68_choices = [('produces', 'produces')]
item_68 = models.CharField(max_length=8, choices=item_68_choices, null=True)
item_69_choices = [('produces', 'produces')]
item_69 = models.CharField(max_length=8, choices=item_69_choices, null=True)
item_70_choices = [('produces', 'produces')]
item_70 = models.CharField(max_length=8, choices=item_70_choices, null=True)
item_71_choices = [('produces', 'produces')]
item_71 = models.CharField(max_length=8, choices=item_71_choices, null=True)
item_72_choices = [('produces', 'produces')]
item_72 = models.CharField(max_length=8, choices=item_72_choices, null=True)
item_73_choices = [('produces', 'produces')]
item_73 = models.CharField(max_length=8, choices=item_73_choices, null=True)
item_74_choices = [('produces', 'produces')]
item_74 = models.CharField(max_length=8, choices=item_74_choices, null=True)
# --- Generated boilerplate: item_75 … item_362 -------------------------------
# Every item_N below is a pair of statements: a choices list holding the single
# option ('produces', 'produces'), and an 8-character CharField that uses it.
# All 288 pairs are structurally identical; presumably emitted by a code
# generator — verify against the generator before hand-editing any one field.
# NOTE(review): null=True on a CharField stores SQL NULL as well as the empty
# string; Django convention prefers blank=True with '' — changing it would
# alter the DB schema (requires a migration), so it is only flagged here.
# NOTE(review): a one-entry choices list makes each field effectively a
# constant marker ('produces') or NULL — confirm that is intentional.
item_75_choices = [('produces', 'produces')]
item_75 = models.CharField(max_length=8, choices=item_75_choices, null=True)
item_76_choices = [('produces', 'produces')]
item_76 = models.CharField(max_length=8, choices=item_76_choices, null=True)
item_77_choices = [('produces', 'produces')]
item_77 = models.CharField(max_length=8, choices=item_77_choices, null=True)
item_78_choices = [('produces', 'produces')]
item_78 = models.CharField(max_length=8, choices=item_78_choices, null=True)
item_79_choices = [('produces', 'produces')]
item_79 = models.CharField(max_length=8, choices=item_79_choices, null=True)
item_80_choices = [('produces', 'produces')]
item_80 = models.CharField(max_length=8, choices=item_80_choices, null=True)
item_81_choices = [('produces', 'produces')]
item_81 = models.CharField(max_length=8, choices=item_81_choices, null=True)
item_82_choices = [('produces', 'produces')]
item_82 = models.CharField(max_length=8, choices=item_82_choices, null=True)
item_83_choices = [('produces', 'produces')]
item_83 = models.CharField(max_length=8, choices=item_83_choices, null=True)
item_84_choices = [('produces', 'produces')]
item_84 = models.CharField(max_length=8, choices=item_84_choices, null=True)
item_85_choices = [('produces', 'produces')]
item_85 = models.CharField(max_length=8, choices=item_85_choices, null=True)
item_86_choices = [('produces', 'produces')]
item_86 = models.CharField(max_length=8, choices=item_86_choices, null=True)
item_87_choices = [('produces', 'produces')]
item_87 = models.CharField(max_length=8, choices=item_87_choices, null=True)
item_88_choices = [('produces', 'produces')]
item_88 = models.CharField(max_length=8, choices=item_88_choices, null=True)
item_89_choices = [('produces', 'produces')]
item_89 = models.CharField(max_length=8, choices=item_89_choices, null=True)
item_90_choices = [('produces', 'produces')]
item_90 = models.CharField(max_length=8, choices=item_90_choices, null=True)
item_91_choices = [('produces', 'produces')]
item_91 = models.CharField(max_length=8, choices=item_91_choices, null=True)
item_92_choices = [('produces', 'produces')]
item_92 = models.CharField(max_length=8, choices=item_92_choices, null=True)
item_93_choices = [('produces', 'produces')]
item_93 = models.CharField(max_length=8, choices=item_93_choices, null=True)
item_94_choices = [('produces', 'produces')]
item_94 = models.CharField(max_length=8, choices=item_94_choices, null=True)
item_95_choices = [('produces', 'produces')]
item_95 = models.CharField(max_length=8, choices=item_95_choices, null=True)
item_96_choices = [('produces', 'produces')]
item_96 = models.CharField(max_length=8, choices=item_96_choices, null=True)
item_97_choices = [('produces', 'produces')]
item_97 = models.CharField(max_length=8, choices=item_97_choices, null=True)
item_98_choices = [('produces', 'produces')]
item_98 = models.CharField(max_length=8, choices=item_98_choices, null=True)
item_99_choices = [('produces', 'produces')]
item_99 = models.CharField(max_length=8, choices=item_99_choices, null=True)
item_100_choices = [('produces', 'produces')]
item_100 = models.CharField(max_length=8, choices=item_100_choices, null=True)
item_101_choices = [('produces', 'produces')]
item_101 = models.CharField(max_length=8, choices=item_101_choices, null=True)
item_102_choices = [('produces', 'produces')]
item_102 = models.CharField(max_length=8, choices=item_102_choices, null=True)
item_103_choices = [('produces', 'produces')]
item_103 = models.CharField(max_length=8, choices=item_103_choices, null=True)
item_104_choices = [('produces', 'produces')]
item_104 = models.CharField(max_length=8, choices=item_104_choices, null=True)
item_105_choices = [('produces', 'produces')]
item_105 = models.CharField(max_length=8, choices=item_105_choices, null=True)
item_106_choices = [('produces', 'produces')]
item_106 = models.CharField(max_length=8, choices=item_106_choices, null=True)
item_107_choices = [('produces', 'produces')]
item_107 = models.CharField(max_length=8, choices=item_107_choices, null=True)
item_108_choices = [('produces', 'produces')]
item_108 = models.CharField(max_length=8, choices=item_108_choices, null=True)
item_109_choices = [('produces', 'produces')]
item_109 = models.CharField(max_length=8, choices=item_109_choices, null=True)
item_110_choices = [('produces', 'produces')]
item_110 = models.CharField(max_length=8, choices=item_110_choices, null=True)
item_111_choices = [('produces', 'produces')]
item_111 = models.CharField(max_length=8, choices=item_111_choices, null=True)
item_112_choices = [('produces', 'produces')]
item_112 = models.CharField(max_length=8, choices=item_112_choices, null=True)
item_113_choices = [('produces', 'produces')]
item_113 = models.CharField(max_length=8, choices=item_113_choices, null=True)
item_114_choices = [('produces', 'produces')]
item_114 = models.CharField(max_length=8, choices=item_114_choices, null=True)
item_115_choices = [('produces', 'produces')]
item_115 = models.CharField(max_length=8, choices=item_115_choices, null=True)
item_116_choices = [('produces', 'produces')]
item_116 = models.CharField(max_length=8, choices=item_116_choices, null=True)
item_117_choices = [('produces', 'produces')]
item_117 = models.CharField(max_length=8, choices=item_117_choices, null=True)
item_118_choices = [('produces', 'produces')]
item_118 = models.CharField(max_length=8, choices=item_118_choices, null=True)
item_119_choices = [('produces', 'produces')]
item_119 = models.CharField(max_length=8, choices=item_119_choices, null=True)
item_120_choices = [('produces', 'produces')]
item_120 = models.CharField(max_length=8, choices=item_120_choices, null=True)
item_121_choices = [('produces', 'produces')]
item_121 = models.CharField(max_length=8, choices=item_121_choices, null=True)
item_122_choices = [('produces', 'produces')]
item_122 = models.CharField(max_length=8, choices=item_122_choices, null=True)
item_123_choices = [('produces', 'produces')]
item_123 = models.CharField(max_length=8, choices=item_123_choices, null=True)
item_124_choices = [('produces', 'produces')]
item_124 = models.CharField(max_length=8, choices=item_124_choices, null=True)
item_125_choices = [('produces', 'produces')]
item_125 = models.CharField(max_length=8, choices=item_125_choices, null=True)
item_126_choices = [('produces', 'produces')]
item_126 = models.CharField(max_length=8, choices=item_126_choices, null=True)
item_127_choices = [('produces', 'produces')]
item_127 = models.CharField(max_length=8, choices=item_127_choices, null=True)
item_128_choices = [('produces', 'produces')]
item_128 = models.CharField(max_length=8, choices=item_128_choices, null=True)
item_129_choices = [('produces', 'produces')]
item_129 = models.CharField(max_length=8, choices=item_129_choices, null=True)
item_130_choices = [('produces', 'produces')]
item_130 = models.CharField(max_length=8, choices=item_130_choices, null=True)
item_131_choices = [('produces', 'produces')]
item_131 = models.CharField(max_length=8, choices=item_131_choices, null=True)
item_132_choices = [('produces', 'produces')]
item_132 = models.CharField(max_length=8, choices=item_132_choices, null=True)
item_133_choices = [('produces', 'produces')]
item_133 = models.CharField(max_length=8, choices=item_133_choices, null=True)
item_134_choices = [('produces', 'produces')]
item_134 = models.CharField(max_length=8, choices=item_134_choices, null=True)
item_135_choices = [('produces', 'produces')]
item_135 = models.CharField(max_length=8, choices=item_135_choices, null=True)
item_136_choices = [('produces', 'produces')]
item_136 = models.CharField(max_length=8, choices=item_136_choices, null=True)
item_137_choices = [('produces', 'produces')]
item_137 = models.CharField(max_length=8, choices=item_137_choices, null=True)
item_138_choices = [('produces', 'produces')]
item_138 = models.CharField(max_length=8, choices=item_138_choices, null=True)
item_139_choices = [('produces', 'produces')]
item_139 = models.CharField(max_length=8, choices=item_139_choices, null=True)
item_140_choices = [('produces', 'produces')]
item_140 = models.CharField(max_length=8, choices=item_140_choices, null=True)
item_141_choices = [('produces', 'produces')]
item_141 = models.CharField(max_length=8, choices=item_141_choices, null=True)
item_142_choices = [('produces', 'produces')]
item_142 = models.CharField(max_length=8, choices=item_142_choices, null=True)
item_143_choices = [('produces', 'produces')]
item_143 = models.CharField(max_length=8, choices=item_143_choices, null=True)
item_144_choices = [('produces', 'produces')]
item_144 = models.CharField(max_length=8, choices=item_144_choices, null=True)
item_145_choices = [('produces', 'produces')]
item_145 = models.CharField(max_length=8, choices=item_145_choices, null=True)
item_146_choices = [('produces', 'produces')]
item_146 = models.CharField(max_length=8, choices=item_146_choices, null=True)
item_147_choices = [('produces', 'produces')]
item_147 = models.CharField(max_length=8, choices=item_147_choices, null=True)
item_148_choices = [('produces', 'produces')]
item_148 = models.CharField(max_length=8, choices=item_148_choices, null=True)
item_149_choices = [('produces', 'produces')]
item_149 = models.CharField(max_length=8, choices=item_149_choices, null=True)
item_150_choices = [('produces', 'produces')]
item_150 = models.CharField(max_length=8, choices=item_150_choices, null=True)
item_151_choices = [('produces', 'produces')]
item_151 = models.CharField(max_length=8, choices=item_151_choices, null=True)
item_152_choices = [('produces', 'produces')]
item_152 = models.CharField(max_length=8, choices=item_152_choices, null=True)
item_153_choices = [('produces', 'produces')]
item_153 = models.CharField(max_length=8, choices=item_153_choices, null=True)
item_154_choices = [('produces', 'produces')]
item_154 = models.CharField(max_length=8, choices=item_154_choices, null=True)
item_155_choices = [('produces', 'produces')]
item_155 = models.CharField(max_length=8, choices=item_155_choices, null=True)
item_156_choices = [('produces', 'produces')]
item_156 = models.CharField(max_length=8, choices=item_156_choices, null=True)
item_157_choices = [('produces', 'produces')]
item_157 = models.CharField(max_length=8, choices=item_157_choices, null=True)
item_158_choices = [('produces', 'produces')]
item_158 = models.CharField(max_length=8, choices=item_158_choices, null=True)
item_159_choices = [('produces', 'produces')]
item_159 = models.CharField(max_length=8, choices=item_159_choices, null=True)
item_160_choices = [('produces', 'produces')]
item_160 = models.CharField(max_length=8, choices=item_160_choices, null=True)
item_161_choices = [('produces', 'produces')]
item_161 = models.CharField(max_length=8, choices=item_161_choices, null=True)
item_162_choices = [('produces', 'produces')]
item_162 = models.CharField(max_length=8, choices=item_162_choices, null=True)
item_163_choices = [('produces', 'produces')]
item_163 = models.CharField(max_length=8, choices=item_163_choices, null=True)
item_164_choices = [('produces', 'produces')]
item_164 = models.CharField(max_length=8, choices=item_164_choices, null=True)
item_165_choices = [('produces', 'produces')]
item_165 = models.CharField(max_length=8, choices=item_165_choices, null=True)
item_166_choices = [('produces', 'produces')]
item_166 = models.CharField(max_length=8, choices=item_166_choices, null=True)
item_167_choices = [('produces', 'produces')]
item_167 = models.CharField(max_length=8, choices=item_167_choices, null=True)
item_168_choices = [('produces', 'produces')]
item_168 = models.CharField(max_length=8, choices=item_168_choices, null=True)
item_169_choices = [('produces', 'produces')]
item_169 = models.CharField(max_length=8, choices=item_169_choices, null=True)
item_170_choices = [('produces', 'produces')]
item_170 = models.CharField(max_length=8, choices=item_170_choices, null=True)
item_171_choices = [('produces', 'produces')]
item_171 = models.CharField(max_length=8, choices=item_171_choices, null=True)
item_172_choices = [('produces', 'produces')]
item_172 = models.CharField(max_length=8, choices=item_172_choices, null=True)
item_173_choices = [('produces', 'produces')]
item_173 = models.CharField(max_length=8, choices=item_173_choices, null=True)
item_174_choices = [('produces', 'produces')]
item_174 = models.CharField(max_length=8, choices=item_174_choices, null=True)
item_175_choices = [('produces', 'produces')]
item_175 = models.CharField(max_length=8, choices=item_175_choices, null=True)
item_176_choices = [('produces', 'produces')]
item_176 = models.CharField(max_length=8, choices=item_176_choices, null=True)
item_177_choices = [('produces', 'produces')]
item_177 = models.CharField(max_length=8, choices=item_177_choices, null=True)
item_178_choices = [('produces', 'produces')]
item_178 = models.CharField(max_length=8, choices=item_178_choices, null=True)
item_179_choices = [('produces', 'produces')]
item_179 = models.CharField(max_length=8, choices=item_179_choices, null=True)
item_180_choices = [('produces', 'produces')]
item_180 = models.CharField(max_length=8, choices=item_180_choices, null=True)
item_181_choices = [('produces', 'produces')]
item_181 = models.CharField(max_length=8, choices=item_181_choices, null=True)
item_182_choices = [('produces', 'produces')]
item_182 = models.CharField(max_length=8, choices=item_182_choices, null=True)
item_183_choices = [('produces', 'produces')]
item_183 = models.CharField(max_length=8, choices=item_183_choices, null=True)
item_184_choices = [('produces', 'produces')]
item_184 = models.CharField(max_length=8, choices=item_184_choices, null=True)
item_185_choices = [('produces', 'produces')]
item_185 = models.CharField(max_length=8, choices=item_185_choices, null=True)
item_186_choices = [('produces', 'produces')]
item_186 = models.CharField(max_length=8, choices=item_186_choices, null=True)
item_187_choices = [('produces', 'produces')]
item_187 = models.CharField(max_length=8, choices=item_187_choices, null=True)
item_188_choices = [('produces', 'produces')]
item_188 = models.CharField(max_length=8, choices=item_188_choices, null=True)
item_189_choices = [('produces', 'produces')]
item_189 = models.CharField(max_length=8, choices=item_189_choices, null=True)
item_190_choices = [('produces', 'produces')]
item_190 = models.CharField(max_length=8, choices=item_190_choices, null=True)
item_191_choices = [('produces', 'produces')]
item_191 = models.CharField(max_length=8, choices=item_191_choices, null=True)
item_192_choices = [('produces', 'produces')]
item_192 = models.CharField(max_length=8, choices=item_192_choices, null=True)
item_193_choices = [('produces', 'produces')]
item_193 = models.CharField(max_length=8, choices=item_193_choices, null=True)
item_194_choices = [('produces', 'produces')]
item_194 = models.CharField(max_length=8, choices=item_194_choices, null=True)
item_195_choices = [('produces', 'produces')]
item_195 = models.CharField(max_length=8, choices=item_195_choices, null=True)
item_196_choices = [('produces', 'produces')]
item_196 = models.CharField(max_length=8, choices=item_196_choices, null=True)
item_197_choices = [('produces', 'produces')]
item_197 = models.CharField(max_length=8, choices=item_197_choices, null=True)
item_198_choices = [('produces', 'produces')]
item_198 = models.CharField(max_length=8, choices=item_198_choices, null=True)
item_199_choices = [('produces', 'produces')]
item_199 = models.CharField(max_length=8, choices=item_199_choices, null=True)
item_200_choices = [('produces', 'produces')]
item_200 = models.CharField(max_length=8, choices=item_200_choices, null=True)
item_201_choices = [('produces', 'produces')]
item_201 = models.CharField(max_length=8, choices=item_201_choices, null=True)
item_202_choices = [('produces', 'produces')]
item_202 = models.CharField(max_length=8, choices=item_202_choices, null=True)
item_203_choices = [('produces', 'produces')]
item_203 = models.CharField(max_length=8, choices=item_203_choices, null=True)
item_204_choices = [('produces', 'produces')]
item_204 = models.CharField(max_length=8, choices=item_204_choices, null=True)
item_205_choices = [('produces', 'produces')]
item_205 = models.CharField(max_length=8, choices=item_205_choices, null=True)
item_206_choices = [('produces', 'produces')]
item_206 = models.CharField(max_length=8, choices=item_206_choices, null=True)
item_207_choices = [('produces', 'produces')]
item_207 = models.CharField(max_length=8, choices=item_207_choices, null=True)
item_208_choices = [('produces', 'produces')]
item_208 = models.CharField(max_length=8, choices=item_208_choices, null=True)
item_209_choices = [('produces', 'produces')]
item_209 = models.CharField(max_length=8, choices=item_209_choices, null=True)
item_210_choices = [('produces', 'produces')]
item_210 = models.CharField(max_length=8, choices=item_210_choices, null=True)
item_211_choices = [('produces', 'produces')]
item_211 = models.CharField(max_length=8, choices=item_211_choices, null=True)
item_212_choices = [('produces', 'produces')]
item_212 = models.CharField(max_length=8, choices=item_212_choices, null=True)
item_213_choices = [('produces', 'produces')]
item_213 = models.CharField(max_length=8, choices=item_213_choices, null=True)
item_214_choices = [('produces', 'produces')]
item_214 = models.CharField(max_length=8, choices=item_214_choices, null=True)
item_215_choices = [('produces', 'produces')]
item_215 = models.CharField(max_length=8, choices=item_215_choices, null=True)
item_216_choices = [('produces', 'produces')]
item_216 = models.CharField(max_length=8, choices=item_216_choices, null=True)
item_217_choices = [('produces', 'produces')]
item_217 = models.CharField(max_length=8, choices=item_217_choices, null=True)
item_218_choices = [('produces', 'produces')]
item_218 = models.CharField(max_length=8, choices=item_218_choices, null=True)
item_219_choices = [('produces', 'produces')]
item_219 = models.CharField(max_length=8, choices=item_219_choices, null=True)
item_220_choices = [('produces', 'produces')]
item_220 = models.CharField(max_length=8, choices=item_220_choices, null=True)
item_221_choices = [('produces', 'produces')]
item_221 = models.CharField(max_length=8, choices=item_221_choices, null=True)
item_222_choices = [('produces', 'produces')]
item_222 = models.CharField(max_length=8, choices=item_222_choices, null=True)
item_223_choices = [('produces', 'produces')]
item_223 = models.CharField(max_length=8, choices=item_223_choices, null=True)
item_224_choices = [('produces', 'produces')]
item_224 = models.CharField(max_length=8, choices=item_224_choices, null=True)
item_225_choices = [('produces', 'produces')]
item_225 = models.CharField(max_length=8, choices=item_225_choices, null=True)
item_226_choices = [('produces', 'produces')]
item_226 = models.CharField(max_length=8, choices=item_226_choices, null=True)
item_227_choices = [('produces', 'produces')]
item_227 = models.CharField(max_length=8, choices=item_227_choices, null=True)
item_228_choices = [('produces', 'produces')]
item_228 = models.CharField(max_length=8, choices=item_228_choices, null=True)
item_229_choices = [('produces', 'produces')]
item_229 = models.CharField(max_length=8, choices=item_229_choices, null=True)
item_230_choices = [('produces', 'produces')]
item_230 = models.CharField(max_length=8, choices=item_230_choices, null=True)
item_231_choices = [('produces', 'produces')]
item_231 = models.CharField(max_length=8, choices=item_231_choices, null=True)
item_232_choices = [('produces', 'produces')]
item_232 = models.CharField(max_length=8, choices=item_232_choices, null=True)
item_233_choices = [('produces', 'produces')]
item_233 = models.CharField(max_length=8, choices=item_233_choices, null=True)
item_234_choices = [('produces', 'produces')]
item_234 = models.CharField(max_length=8, choices=item_234_choices, null=True)
item_235_choices = [('produces', 'produces')]
item_235 = models.CharField(max_length=8, choices=item_235_choices, null=True)
item_236_choices = [('produces', 'produces')]
item_236 = models.CharField(max_length=8, choices=item_236_choices, null=True)
item_237_choices = [('produces', 'produces')]
item_237 = models.CharField(max_length=8, choices=item_237_choices, null=True)
item_238_choices = [('produces', 'produces')]
item_238 = models.CharField(max_length=8, choices=item_238_choices, null=True)
item_239_choices = [('produces', 'produces')]
item_239 = models.CharField(max_length=8, choices=item_239_choices, null=True)
item_240_choices = [('produces', 'produces')]
item_240 = models.CharField(max_length=8, choices=item_240_choices, null=True)
item_241_choices = [('produces', 'produces')]
item_241 = models.CharField(max_length=8, choices=item_241_choices, null=True)
item_242_choices = [('produces', 'produces')]
item_242 = models.CharField(max_length=8, choices=item_242_choices, null=True)
item_243_choices = [('produces', 'produces')]
item_243 = models.CharField(max_length=8, choices=item_243_choices, null=True)
item_244_choices = [('produces', 'produces')]
item_244 = models.CharField(max_length=8, choices=item_244_choices, null=True)
item_245_choices = [('produces', 'produces')]
item_245 = models.CharField(max_length=8, choices=item_245_choices, null=True)
item_246_choices = [('produces', 'produces')]
item_246 = models.CharField(max_length=8, choices=item_246_choices, null=True)
item_247_choices = [('produces', 'produces')]
item_247 = models.CharField(max_length=8, choices=item_247_choices, null=True)
item_248_choices = [('produces', 'produces')]
item_248 = models.CharField(max_length=8, choices=item_248_choices, null=True)
item_249_choices = [('produces', 'produces')]
item_249 = models.CharField(max_length=8, choices=item_249_choices, null=True)
item_250_choices = [('produces', 'produces')]
item_250 = models.CharField(max_length=8, choices=item_250_choices, null=True)
item_251_choices = [('produces', 'produces')]
item_251 = models.CharField(max_length=8, choices=item_251_choices, null=True)
item_252_choices = [('produces', 'produces')]
item_252 = models.CharField(max_length=8, choices=item_252_choices, null=True)
item_253_choices = [('produces', 'produces')]
item_253 = models.CharField(max_length=8, choices=item_253_choices, null=True)
item_254_choices = [('produces', 'produces')]
item_254 = models.CharField(max_length=8, choices=item_254_choices, null=True)
item_255_choices = [('produces', 'produces')]
item_255 = models.CharField(max_length=8, choices=item_255_choices, null=True)
item_256_choices = [('produces', 'produces')]
item_256 = models.CharField(max_length=8, choices=item_256_choices, null=True)
item_257_choices = [('produces', 'produces')]
item_257 = models.CharField(max_length=8, choices=item_257_choices, null=True)
item_258_choices = [('produces', 'produces')]
item_258 = models.CharField(max_length=8, choices=item_258_choices, null=True)
item_259_choices = [('produces', 'produces')]
item_259 = models.CharField(max_length=8, choices=item_259_choices, null=True)
item_260_choices = [('produces', 'produces')]
item_260 = models.CharField(max_length=8, choices=item_260_choices, null=True)
item_261_choices = [('produces', 'produces')]
item_261 = models.CharField(max_length=8, choices=item_261_choices, null=True)
item_262_choices = [('produces', 'produces')]
item_262 = models.CharField(max_length=8, choices=item_262_choices, null=True)
item_263_choices = [('produces', 'produces')]
item_263 = models.CharField(max_length=8, choices=item_263_choices, null=True)
item_264_choices = [('produces', 'produces')]
item_264 = models.CharField(max_length=8, choices=item_264_choices, null=True)
item_265_choices = [('produces', 'produces')]
item_265 = models.CharField(max_length=8, choices=item_265_choices, null=True)
item_266_choices = [('produces', 'produces')]
item_266 = models.CharField(max_length=8, choices=item_266_choices, null=True)
item_267_choices = [('produces', 'produces')]
item_267 = models.CharField(max_length=8, choices=item_267_choices, null=True)
item_268_choices = [('produces', 'produces')]
item_268 = models.CharField(max_length=8, choices=item_268_choices, null=True)
item_269_choices = [('produces', 'produces')]
item_269 = models.CharField(max_length=8, choices=item_269_choices, null=True)
item_270_choices = [('produces', 'produces')]
item_270 = models.CharField(max_length=8, choices=item_270_choices, null=True)
item_271_choices = [('produces', 'produces')]
item_271 = models.CharField(max_length=8, choices=item_271_choices, null=True)
item_272_choices = [('produces', 'produces')]
item_272 = models.CharField(max_length=8, choices=item_272_choices, null=True)
item_273_choices = [('produces', 'produces')]
item_273 = models.CharField(max_length=8, choices=item_273_choices, null=True)
item_274_choices = [('produces', 'produces')]
item_274 = models.CharField(max_length=8, choices=item_274_choices, null=True)
item_275_choices = [('produces', 'produces')]
item_275 = models.CharField(max_length=8, choices=item_275_choices, null=True)
item_276_choices = [('produces', 'produces')]
item_276 = models.CharField(max_length=8, choices=item_276_choices, null=True)
item_277_choices = [('produces', 'produces')]
item_277 = models.CharField(max_length=8, choices=item_277_choices, null=True)
item_278_choices = [('produces', 'produces')]
item_278 = models.CharField(max_length=8, choices=item_278_choices, null=True)
item_279_choices = [('produces', 'produces')]
item_279 = models.CharField(max_length=8, choices=item_279_choices, null=True)
item_280_choices = [('produces', 'produces')]
item_280 = models.CharField(max_length=8, choices=item_280_choices, null=True)
item_281_choices = [('produces', 'produces')]
item_281 = models.CharField(max_length=8, choices=item_281_choices, null=True)
item_282_choices = [('produces', 'produces')]
item_282 = models.CharField(max_length=8, choices=item_282_choices, null=True)
item_283_choices = [('produces', 'produces')]
item_283 = models.CharField(max_length=8, choices=item_283_choices, null=True)
item_284_choices = [('produces', 'produces')]
item_284 = models.CharField(max_length=8, choices=item_284_choices, null=True)
item_285_choices = [('produces', 'produces')]
item_285 = models.CharField(max_length=8, choices=item_285_choices, null=True)
item_286_choices = [('produces', 'produces')]
item_286 = models.CharField(max_length=8, choices=item_286_choices, null=True)
item_287_choices = [('produces', 'produces')]
item_287 = models.CharField(max_length=8, choices=item_287_choices, null=True)
item_288_choices = [('produces', 'produces')]
item_288 = models.CharField(max_length=8, choices=item_288_choices, null=True)
item_289_choices = [('produces', 'produces')]
item_289 = models.CharField(max_length=8, choices=item_289_choices, null=True)
item_290_choices = [('produces', 'produces')]
item_290 = models.CharField(max_length=8, choices=item_290_choices, null=True)
item_291_choices = [('produces', 'produces')]
item_291 = models.CharField(max_length=8, choices=item_291_choices, null=True)
item_292_choices = [('produces', 'produces')]
item_292 = models.CharField(max_length=8, choices=item_292_choices, null=True)
item_293_choices = [('produces', 'produces')]
item_293 = models.CharField(max_length=8, choices=item_293_choices, null=True)
item_294_choices = [('produces', 'produces')]
item_294 = models.CharField(max_length=8, choices=item_294_choices, null=True)
item_295_choices = [('produces', 'produces')]
item_295 = models.CharField(max_length=8, choices=item_295_choices, null=True)
item_296_choices = [('produces', 'produces')]
item_296 = models.CharField(max_length=8, choices=item_296_choices, null=True)
item_297_choices = [('produces', 'produces')]
item_297 = models.CharField(max_length=8, choices=item_297_choices, null=True)
item_298_choices = [('produces', 'produces')]
item_298 = models.CharField(max_length=8, choices=item_298_choices, null=True)
item_299_choices = [('produces', 'produces')]
item_299 = models.CharField(max_length=8, choices=item_299_choices, null=True)
item_300_choices = [('produces', 'produces')]
item_300 = models.CharField(max_length=8, choices=item_300_choices, null=True)
item_301_choices = [('produces', 'produces')]
item_301 = models.CharField(max_length=8, choices=item_301_choices, null=True)
item_302_choices = [('produces', 'produces')]
item_302 = models.CharField(max_length=8, choices=item_302_choices, null=True)
item_303_choices = [('produces', 'produces')]
item_303 = models.CharField(max_length=8, choices=item_303_choices, null=True)
item_304_choices = [('produces', 'produces')]
item_304 = models.CharField(max_length=8, choices=item_304_choices, null=True)
item_305_choices = [('produces', 'produces')]
item_305 = models.CharField(max_length=8, choices=item_305_choices, null=True)
item_306_choices = [('produces', 'produces')]
item_306 = models.CharField(max_length=8, choices=item_306_choices, null=True)
item_307_choices = [('produces', 'produces')]
item_307 = models.CharField(max_length=8, choices=item_307_choices, null=True)
item_308_choices = [('produces', 'produces')]
item_308 = models.CharField(max_length=8, choices=item_308_choices, null=True)
item_309_choices = [('produces', 'produces')]
item_309 = models.CharField(max_length=8, choices=item_309_choices, null=True)
item_310_choices = [('produces', 'produces')]
item_310 = models.CharField(max_length=8, choices=item_310_choices, null=True)
item_311_choices = [('produces', 'produces')]
item_311 = models.CharField(max_length=8, choices=item_311_choices, null=True)
item_312_choices = [('produces', 'produces')]
item_312 = models.CharField(max_length=8, choices=item_312_choices, null=True)
item_313_choices = [('produces', 'produces')]
item_313 = models.CharField(max_length=8, choices=item_313_choices, null=True)
item_314_choices = [('produces', 'produces')]
item_314 = models.CharField(max_length=8, choices=item_314_choices, null=True)
item_315_choices = [('produces', 'produces')]
item_315 = models.CharField(max_length=8, choices=item_315_choices, null=True)
item_316_choices = [('produces', 'produces')]
item_316 = models.CharField(max_length=8, choices=item_316_choices, null=True)
item_317_choices = [('produces', 'produces')]
item_317 = models.CharField(max_length=8, choices=item_317_choices, null=True)
item_318_choices = [('produces', 'produces')]
item_318 = models.CharField(max_length=8, choices=item_318_choices, null=True)
item_319_choices = [('produces', 'produces')]
item_319 = models.CharField(max_length=8, choices=item_319_choices, null=True)
item_320_choices = [('produces', 'produces')]
item_320 = models.CharField(max_length=8, choices=item_320_choices, null=True)
item_321_choices = [('produces', 'produces')]
item_321 = models.CharField(max_length=8, choices=item_321_choices, null=True)
item_322_choices = [('produces', 'produces')]
item_322 = models.CharField(max_length=8, choices=item_322_choices, null=True)
item_323_choices = [('produces', 'produces')]
item_323 = models.CharField(max_length=8, choices=item_323_choices, null=True)
item_324_choices = [('produces', 'produces')]
item_324 = models.CharField(max_length=8, choices=item_324_choices, null=True)
item_325_choices = [('produces', 'produces')]
item_325 = models.CharField(max_length=8, choices=item_325_choices, null=True)
item_326_choices = [('produces', 'produces')]
item_326 = models.CharField(max_length=8, choices=item_326_choices, null=True)
item_327_choices = [('produces', 'produces')]
item_327 = models.CharField(max_length=8, choices=item_327_choices, null=True)
item_328_choices = [('produces', 'produces')]
item_328 = models.CharField(max_length=8, choices=item_328_choices, null=True)
item_329_choices = [('produces', 'produces')]
item_329 = models.CharField(max_length=8, choices=item_329_choices, null=True)
item_330_choices = [('produces', 'produces')]
item_330 = models.CharField(max_length=8, choices=item_330_choices, null=True)
item_331_choices = [('produces', 'produces')]
item_331 = models.CharField(max_length=8, choices=item_331_choices, null=True)
item_332_choices = [('produces', 'produces')]
item_332 = models.CharField(max_length=8, choices=item_332_choices, null=True)
item_333_choices = [('produces', 'produces')]
item_333 = models.CharField(max_length=8, choices=item_333_choices, null=True)
item_334_choices = [('produces', 'produces')]
item_334 = models.CharField(max_length=8, choices=item_334_choices, null=True)
item_335_choices = [('produces', 'produces')]
item_335 = models.CharField(max_length=8, choices=item_335_choices, null=True)
item_336_choices = [('produces', 'produces')]
item_336 = models.CharField(max_length=8, choices=item_336_choices, null=True)
item_337_choices = [('produces', 'produces')]
item_337 = models.CharField(max_length=8, choices=item_337_choices, null=True)
item_338_choices = [('produces', 'produces')]
item_338 = models.CharField(max_length=8, choices=item_338_choices, null=True)
item_339_choices = [('produces', 'produces')]
item_339 = models.CharField(max_length=8, choices=item_339_choices, null=True)
item_340_choices = [('produces', 'produces')]
item_340 = models.CharField(max_length=8, choices=item_340_choices, null=True)
item_341_choices = [('produces', 'produces')]
item_341 = models.CharField(max_length=8, choices=item_341_choices, null=True)
item_342_choices = [('produces', 'produces')]
item_342 = models.CharField(max_length=8, choices=item_342_choices, null=True)
item_343_choices = [('produces', 'produces')]
item_343 = models.CharField(max_length=8, choices=item_343_choices, null=True)
item_344_choices = [('produces', 'produces')]
item_344 = models.CharField(max_length=8, choices=item_344_choices, null=True)
item_345_choices = [('produces', 'produces')]
item_345 = models.CharField(max_length=8, choices=item_345_choices, null=True)
item_346_choices = [('produces', 'produces')]
item_346 = models.CharField(max_length=8, choices=item_346_choices, null=True)
item_347_choices = [('produces', 'produces')]
item_347 = models.CharField(max_length=8, choices=item_347_choices, null=True)
item_348_choices = [('produces', 'produces')]
item_348 = models.CharField(max_length=8, choices=item_348_choices, null=True)
item_349_choices = [('produces', 'produces')]
item_349 = models.CharField(max_length=8, choices=item_349_choices, null=True)
item_350_choices = [('produces', 'produces')]
item_350 = models.CharField(max_length=8, choices=item_350_choices, null=True)
item_351_choices = [('produces', 'produces')]
item_351 = models.CharField(max_length=8, choices=item_351_choices, null=True)
item_352_choices = [('produces', 'produces')]
item_352 = models.CharField(max_length=8, choices=item_352_choices, null=True)
item_353_choices = [('produces', 'produces')]
item_353 = models.CharField(max_length=8, choices=item_353_choices, null=True)
item_354_choices = [('produces', 'produces')]
item_354 = models.CharField(max_length=8, choices=item_354_choices, null=True)
item_355_choices = [('produces', 'produces')]
item_355 = models.CharField(max_length=8, choices=item_355_choices, null=True)
item_356_choices = [('produces', 'produces')]
item_356 = models.CharField(max_length=8, choices=item_356_choices, null=True)
item_357_choices = [('produces', 'produces')]
item_357 = models.CharField(max_length=8, choices=item_357_choices, null=True)
item_358_choices = [('produces', 'produces')]
item_358 = models.CharField(max_length=8, choices=item_358_choices, null=True)
item_359_choices = [('produces', 'produces')]
item_359 = models.CharField(max_length=8, choices=item_359_choices, null=True)
item_360_choices = [('produces', 'produces')]
item_360 = models.CharField(max_length=8, choices=item_360_choices, null=True)
item_361_choices = [('produces', 'produces')]
item_361 = models.CharField(max_length=8, choices=item_361_choices, null=True)
item_362_choices = [('produces', 'produces')]
item_362 = models.CharField(max_length=8, choices=item_362_choices, null=True)
item_363_choices = [('produces', 'produces')]
item_363 = models.CharField(max_length=8, choices=item_363_choices, null=True)
item_364_choices = [('produces', 'produces')]
item_364 = models.CharField(max_length=8, choices=item_364_choices, null=True)
item_365_choices = [('produces', 'produces')]
item_365 = models.CharField(max_length=8, choices=item_365_choices, null=True)
item_366_choices = [('produces', 'produces')]
item_366 = models.CharField(max_length=8, choices=item_366_choices, null=True)
item_367_choices = [('produces', 'produces')]
item_367 = models.CharField(max_length=8, choices=item_367_choices, null=True)
item_368_choices = [('produces', 'produces')]
item_368 = models.CharField(max_length=8, choices=item_368_choices, null=True)
item_369_choices = [('produces', 'produces')]
item_369 = models.CharField(max_length=8, choices=item_369_choices, null=True)
item_370_choices = [('produces', 'produces')]
item_370 = models.CharField(max_length=8, choices=item_370_choices, null=True)
item_371_choices = [('produces', 'produces')]
item_371 = models.CharField(max_length=8, choices=item_371_choices, null=True)
item_372_choices = [('produces', 'produces')]
item_372 = models.CharField(max_length=8, choices=item_372_choices, null=True)
item_373_choices = [('produces', 'produces')]
item_373 = models.CharField(max_length=8, choices=item_373_choices, null=True)
item_374_choices = [('produces', 'produces')]
item_374 = models.CharField(max_length=8, choices=item_374_choices, null=True)
item_375_choices = [('produces', 'produces')]
item_375 = models.CharField(max_length=8, choices=item_375_choices, null=True)
item_376_choices = [('produces', 'produces')]
item_376 = models.CharField(max_length=8, choices=item_376_choices, null=True)
item_377_choices = [('produces', 'produces')]
item_377 = models.CharField(max_length=8, choices=item_377_choices, null=True)
item_378_choices = [('produces', 'produces')]
item_378 = models.CharField(max_length=8, choices=item_378_choices, null=True)
item_379_choices = [('produces', 'produces')]
item_379 = models.CharField(max_length=8, choices=item_379_choices, null=True)
item_380_choices = [('produces', 'produces')]
item_380 = models.CharField(max_length=8, choices=item_380_choices, null=True)
item_381_choices = [('produces', 'produces')]
item_381 = models.CharField(max_length=8, choices=item_381_choices, null=True)
item_382_choices = [('produces', 'produces')]
item_382 = models.CharField(max_length=8, choices=item_382_choices, null=True)
item_383_choices = [('produces', 'produces')]
item_383 = models.CharField(max_length=8, choices=item_383_choices, null=True)
item_384_choices = [('produces', 'produces')]
item_384 = models.CharField(max_length=8, choices=item_384_choices, null=True)
item_385_choices = [('produces', 'produces')]
item_385 = models.CharField(max_length=8, choices=item_385_choices, null=True)
item_386_choices = [('produces', 'produces')]
item_386 = models.CharField(max_length=8, choices=item_386_choices, null=True)
item_387_choices = [('produces', 'produces')]
item_387 = models.CharField(max_length=8, choices=item_387_choices, null=True)
item_388_choices = [('produces', 'produces')]
item_388 = models.CharField(max_length=8, choices=item_388_choices, null=True)
item_389_choices = [('produces', 'produces')]
item_389 = models.CharField(max_length=8, choices=item_389_choices, null=True)
item_390_choices = [('produces', 'produces')]
item_390 = models.CharField(max_length=8, choices=item_390_choices, null=True)
item_391_choices = [('produces', 'produces')]
item_391 = models.CharField(max_length=8, choices=item_391_choices, null=True)
item_392_choices = [('produces', 'produces')]
item_392 = models.CharField(max_length=8, choices=item_392_choices, null=True)
item_393_choices = [('produces', 'produces')]
item_393 = models.CharField(max_length=8, choices=item_393_choices, null=True)
item_394_choices = [('produces', 'produces')]
item_394 = models.CharField(max_length=8, choices=item_394_choices, null=True)
item_395_choices = [('produces', 'produces')]
item_395 = models.CharField(max_length=8, choices=item_395_choices, null=True)
item_396_choices = [('produces', 'produces')]
item_396 = models.CharField(max_length=8, choices=item_396_choices, null=True)
item_397_choices = [('produces', 'produces')]
item_397 = models.CharField(max_length=8, choices=item_397_choices, null=True)
item_398_choices = [('produces', 'produces')]
item_398 = models.CharField(max_length=8, choices=item_398_choices, null=True)
item_399_choices = [('produces', 'produces')]
item_399 = models.CharField(max_length=8, choices=item_399_choices, null=True)
item_400_choices = [('produces', 'produces')]
item_400 = models.CharField(max_length=8, choices=item_400_choices, null=True)
item_401_choices = [('produces', 'produces')]
item_401 = models.CharField(max_length=8, choices=item_401_choices, null=True)
item_402_choices = [('produces', 'produces')]
item_402 = models.CharField(max_length=8, choices=item_402_choices, null=True)
item_403_choices = [('produces', 'produces')]
item_403 = models.CharField(max_length=8, choices=item_403_choices, null=True)
item_404_choices = [('produces', 'produces')]
item_404 = models.CharField(max_length=8, choices=item_404_choices, null=True)
item_405_choices = [('produces', 'produces')]
item_405 = models.CharField(max_length=8, choices=item_405_choices, null=True)
item_406_choices = [('produces', 'produces')]
item_406 = models.CharField(max_length=8, choices=item_406_choices, null=True)
item_407_choices = [('produces', 'produces')]
item_407 = models.CharField(max_length=8, choices=item_407_choices, null=True)
item_408_choices = [('produces', 'produces')]
item_408 = models.CharField(max_length=8, choices=item_408_choices, null=True)
item_409_choices = [('produces', 'produces')]
item_409 = models.CharField(max_length=8, choices=item_409_choices, null=True)
item_410_choices = [('produces', 'produces')]
item_410 = models.CharField(max_length=8, choices=item_410_choices, null=True)
item_411_choices = [('produces', 'produces')]
item_411 = models.CharField(max_length=8, choices=item_411_choices, null=True)
item_412_choices = [('produces', 'produces')]
item_412 = models.CharField(max_length=8, choices=item_412_choices, null=True)
item_413_choices = [('produces', 'produces')]
item_413 = models.CharField(max_length=8, choices=item_413_choices, null=True)
item_414_choices = [('produces', 'produces')]
item_414 = models.CharField(max_length=8, choices=item_414_choices, null=True)
item_415_choices = [('produces', 'produces')]
item_415 = models.CharField(max_length=8, choices=item_415_choices, null=True)
item_416_choices = [('produces', 'produces')]
item_416 = models.CharField(max_length=8, choices=item_416_choices, null=True)
item_417_choices = [('produces', 'produces')]
item_417 = models.CharField(max_length=8, choices=item_417_choices, null=True)
item_418_choices = [('produces', 'produces')]
item_418 = models.CharField(max_length=8, choices=item_418_choices, null=True)
item_419_choices = [('produces', 'produces')]
item_419 = models.CharField(max_length=8, choices=item_419_choices, null=True)
item_420_choices = [('produces', 'produces')]
item_420 = models.CharField(max_length=8, choices=item_420_choices, null=True)
item_421_choices = [('produces', 'produces')]
item_421 = models.CharField(max_length=8, choices=item_421_choices, null=True)
item_422_choices = [('produces', 'produces')]
item_422 = models.CharField(max_length=8, choices=item_422_choices, null=True)
item_423_choices = [('produces', 'produces')]
item_423 = models.CharField(max_length=8, choices=item_423_choices, null=True)
item_424_choices = [('produces', 'produces')]
item_424 = models.CharField(max_length=8, choices=item_424_choices, null=True)
item_425_choices = [('produces', 'produces')]
item_425 = models.CharField(max_length=8, choices=item_425_choices, null=True)
item_426_choices = [('produces', 'produces')]
item_426 = models.CharField(max_length=8, choices=item_426_choices, null=True)
item_427_choices = [('produces', 'produces')]
item_427 = models.CharField(max_length=8, choices=item_427_choices, null=True)
item_428_choices = [('produces', 'produces')]
item_428 = models.CharField(max_length=8, choices=item_428_choices, null=True)
item_429_choices = [('produces', 'produces')]
item_429 = models.CharField(max_length=8, choices=item_429_choices, null=True)
item_430_choices = [('produces', 'produces')]
item_430 = models.CharField(max_length=8, choices=item_430_choices, null=True)
item_431_choices = [('produces', 'produces')]
item_431 = models.CharField(max_length=8, choices=item_431_choices, null=True)
item_432_choices = [('produces', 'produces')]
item_432 = models.CharField(max_length=8, choices=item_432_choices, null=True)
item_433_choices = [('produces', 'produces')]
item_433 = models.CharField(max_length=8, choices=item_433_choices, null=True)
item_434_choices = [('produces', 'produces')]
item_434 = models.CharField(max_length=8, choices=item_434_choices, null=True)
item_435_choices = [('produces', 'produces')]
item_435 = models.CharField(max_length=8, choices=item_435_choices, null=True)
item_436_choices = [('produces', 'produces')]
item_436 = models.CharField(max_length=8, choices=item_436_choices, null=True)
item_437_choices = [('produces', 'produces')]
item_437 = models.CharField(max_length=8, choices=item_437_choices, null=True)
item_438_choices = [('produces', 'produces')]
item_438 = models.CharField(max_length=8, choices=item_438_choices, null=True)
item_439_choices = [('produces', 'produces')]
item_439 = models.CharField(max_length=8, choices=item_439_choices, null=True)
item_440_choices = [('produces', 'produces')]
item_440 = models.CharField(max_length=8, choices=item_440_choices, null=True)
item_441_choices = [('produces', 'produces')]
item_441 = models.CharField(max_length=8, choices=item_441_choices, null=True)
item_442_choices = [('produces', 'produces')]
item_442 = models.CharField(max_length=8, choices=item_442_choices, null=True)
item_443_choices = [('produces', 'produces')]
item_443 = models.CharField(max_length=8, choices=item_443_choices, null=True)
item_444_choices = [('produces', 'produces')]
item_444 = models.CharField(max_length=8, choices=item_444_choices, null=True)
item_445_choices = [('produces', 'produces')]
item_445 = models.CharField(max_length=8, choices=item_445_choices, null=True)
item_446_choices = [('produces', 'produces')]
item_446 = models.CharField(max_length=8, choices=item_446_choices, null=True)
item_447_choices = [('produces', 'produces')]
item_447 = models.CharField(max_length=8, choices=item_447_choices, null=True)
item_448_choices = [('produces', 'produces')]
item_448 = models.CharField(max_length=8, choices=item_448_choices, null=True)
item_449_choices = [('produces', 'produces')]
item_449 = models.CharField(max_length=8, choices=item_449_choices, null=True)
item_450_choices = [('produces', 'produces')]
item_450 = models.CharField(max_length=8, choices=item_450_choices, null=True)
item_451_choices = [('produces', 'produces')]
item_451 = models.CharField(max_length=8, choices=item_451_choices, null=True)
item_452_choices = [('produces', 'produces')]
item_452 = models.CharField(max_length=8, choices=item_452_choices, null=True)
item_453_choices = [('produces', 'produces')]
item_453 = models.CharField(max_length=8, choices=item_453_choices, null=True)
item_454_choices = [('produces', 'produces')]
item_454 = models.CharField(max_length=8, choices=item_454_choices, null=True)
item_455_choices = [('produces', 'produces')]
item_455 = models.CharField(max_length=8, choices=item_455_choices, null=True)
item_456_choices = [('produces', 'produces')]
item_456 = models.CharField(max_length=8, choices=item_456_choices, null=True)
item_457_choices = [('produces', 'produces')]
item_457 = models.CharField(max_length=8, choices=item_457_choices, null=True)
item_458_choices = [('produces', 'produces')]
item_458 = models.CharField(max_length=8, choices=item_458_choices, null=True)
item_459_choices = [('produces', 'produces')]
item_459 = models.CharField(max_length=8, choices=item_459_choices, null=True)
item_460_choices = [('produces', 'produces')]
item_460 = models.CharField(max_length=8, choices=item_460_choices, null=True)
item_461_choices = [('produces', 'produces')]
item_461 = models.CharField(max_length=8, choices=item_461_choices, null=True)
item_462_choices = [('produces', 'produces')]
item_462 = models.CharField(max_length=8, choices=item_462_choices, null=True)
item_463_choices = [('produces', 'produces')]
item_463 = models.CharField(max_length=8, choices=item_463_choices, null=True)
item_464_choices = [('produces', 'produces')]
item_464 = models.CharField(max_length=8, choices=item_464_choices, null=True)
item_465_choices = [('produces', 'produces')]
item_465 = models.CharField(max_length=8, choices=item_465_choices, null=True)
item_466_choices = [('produces', 'produces')]
item_466 = models.CharField(max_length=8, choices=item_466_choices, null=True)
item_467_choices = [('produces', 'produces')]
item_467 = models.CharField(max_length=8, choices=item_467_choices, null=True)
item_468_choices = [('produces', 'produces')]
item_468 = models.CharField(max_length=8, choices=item_468_choices, null=True)
item_469_choices = [('produces', 'produces')]
item_469 = models.CharField(max_length=8, choices=item_469_choices, null=True)
item_470_choices = [('produces', 'produces')]
item_470 = models.CharField(max_length=8, choices=item_470_choices, null=True)
item_471_choices = [('produces', 'produces')]
item_471 = models.CharField(max_length=8, choices=item_471_choices, null=True)
item_472_choices = [('produces', 'produces')]
item_472 = models.CharField(max_length=8, choices=item_472_choices, null=True)
item_473_choices = [('produces', 'produces')]
item_473 = models.CharField(max_length=8, choices=item_473_choices, null=True)
item_474_choices = [('produces', 'produces')]
item_474 = models.CharField(max_length=8, choices=item_474_choices, null=True)
item_475_choices = [('produces', 'produces')]
item_475 = models.CharField(max_length=8, choices=item_475_choices, null=True)
item_476_choices = [('produces', 'produces')]
item_476 = models.CharField(max_length=8, choices=item_476_choices, null=True)
item_477_choices = [('produces', 'produces')]
item_477 = models.CharField(max_length=8, choices=item_477_choices, null=True)
item_478_choices = [('produces', 'produces')]
item_478 = models.CharField(max_length=8, choices=item_478_choices, null=True)
item_479_choices = [('produces', 'produces')]
item_479 = models.CharField(max_length=8, choices=item_479_choices, null=True)
item_480_choices = [('produces', 'produces')]
item_480 = models.CharField(max_length=8, choices=item_480_choices, null=True)
item_481_choices = [('produces', 'produces')]
item_481 = models.CharField(max_length=8, choices=item_481_choices, null=True)
item_482_choices = [('produces', 'produces')]
item_482 = models.CharField(max_length=8, choices=item_482_choices, null=True)
item_483_choices = [('produces', 'produces')]
item_483 = models.CharField(max_length=8, choices=item_483_choices, null=True)
item_484_choices = [('produces', 'produces')]
item_484 = models.CharField(max_length=8, choices=item_484_choices, null=True)
item_485_choices = [('produces', 'produces')]
item_485 = models.CharField(max_length=8, choices=item_485_choices, null=True)
item_486_choices = [('produces', 'produces')]
item_486 = models.CharField(max_length=8, choices=item_486_choices, null=True)
item_487_choices = [('produces', 'produces')]
item_487 = models.CharField(max_length=8, choices=item_487_choices, null=True)
item_488_choices = [('produces', 'produces')]
item_488 = models.CharField(max_length=8, choices=item_488_choices, null=True)
item_489_choices = [('produces', 'produces')]
item_489 = models.CharField(max_length=8, choices=item_489_choices, null=True)
item_490_choices = [('produces', 'produces')]
item_490 = models.CharField(max_length=8, choices=item_490_choices, null=True)
item_491_choices = [('produces', 'produces')]
item_491 = models.CharField(max_length=8, choices=item_491_choices, null=True)
item_492_choices = [('produces', 'produces')]
item_492 = models.CharField(max_length=8, choices=item_492_choices, null=True)
item_493_choices = [('produces', 'produces')]
item_493 = models.CharField(max_length=8, choices=item_493_choices, null=True)
item_494_choices = [('produces', 'produces')]
item_494 = models.CharField(max_length=8, choices=item_494_choices, null=True)
item_495_choices = [('produces', 'produces')]
item_495 = models.CharField(max_length=8, choices=item_495_choices, null=True)
item_496_choices = [('produces', 'produces')]
item_496 = models.CharField(max_length=8, choices=item_496_choices, null=True)
item_497_choices = [('produces', 'produces')]
item_497 = models.CharField(max_length=8, choices=item_497_choices, null=True)
item_498_choices = [('produces', 'produces')]
item_498 = models.CharField(max_length=8, choices=item_498_choices, null=True)
item_499_choices = [('produces', 'produces')]
item_499 = models.CharField(max_length=8, choices=item_499_choices, null=True)
item_500_choices = [('produces', 'produces')]
item_500 = models.CharField(max_length=8, choices=item_500_choices, null=True)
item_501_choices = [('produces', 'produces')]
item_501 = models.CharField(max_length=8, choices=item_501_choices, null=True)
item_502_choices = [('produces', 'produces')]
item_502 = models.CharField(max_length=8, choices=item_502_choices, null=True)
item_503_choices = [('produces', 'produces')]
item_503 = models.CharField(max_length=8, choices=item_503_choices, null=True)
item_504_choices = [('produces', 'produces')]
item_504 = models.CharField(max_length=8, choices=item_504_choices, null=True)
item_505_choices = [('produces', 'produces')]
item_505 = models.CharField(max_length=8, choices=item_505_choices, null=True)
item_506_choices = [('produces', 'produces')]
item_506 = models.CharField(max_length=8, choices=item_506_choices, null=True)
item_507_choices = [('produces', 'produces')]
item_507 = models.CharField(max_length=8, choices=item_507_choices, null=True)
item_508_choices = [('produces', 'produces')]
item_508 = models.CharField(max_length=8, choices=item_508_choices, null=True)
item_509_choices = [('produces', 'produces')]
item_509 = models.CharField(max_length=8, choices=item_509_choices, null=True)
item_510_choices = [('produces', 'produces')]
item_510 = models.CharField(max_length=8, choices=item_510_choices, null=True)
item_511_choices = [('produces', 'produces')]
item_511 = models.CharField(max_length=8, choices=item_511_choices, null=True)
item_512_choices = [('produces', 'produces')]
item_512 = models.CharField(max_length=8, choices=item_512_choices, null=True)
item_513_choices = [('produces', 'produces')]
item_513 = models.CharField(max_length=8, choices=item_513_choices, null=True)
item_514_choices = [('produces', 'produces')]
item_514 = models.CharField(max_length=8, choices=item_514_choices, null=True)
item_515_choices = [('produces', 'produces')]
item_515 = models.CharField(max_length=8, choices=item_515_choices, null=True)
item_516_choices = [('produces', 'produces')]
item_516 = models.CharField(max_length=8, choices=item_516_choices, null=True)
item_517_choices = [('produces', 'produces')]
item_517 = models.CharField(max_length=8, choices=item_517_choices, null=True)
item_518_choices = [('produces', 'produces')]
item_518 = models.CharField(max_length=8, choices=item_518_choices, null=True)
item_519_choices = [('produces', 'produces')]
item_519 = models.CharField(max_length=8, choices=item_519_choices, null=True)
item_520_choices = [('produces', 'produces')]
item_520 = models.CharField(max_length=8, choices=item_520_choices, null=True)
item_521_choices = [('produces', 'produces')]
item_521 = models.CharField(max_length=8, choices=item_521_choices, null=True)
item_522_choices = [('produces', 'produces')]
item_522 = models.CharField(max_length=8, choices=item_522_choices, null=True)
item_523_choices = [('produces', 'produces')]
item_523 = models.CharField(max_length=8, choices=item_523_choices, null=True)
item_524_choices = [('produces', 'produces')]
item_524 = models.CharField(max_length=8, choices=item_524_choices, null=True)
item_525_choices = [('produces', 'produces')]
item_525 = models.CharField(max_length=8, choices=item_525_choices, null=True)
item_526_choices = [('produces', 'produces')]
item_526 = models.CharField(max_length=8, choices=item_526_choices, null=True)
item_527_choices = [('produces', 'produces')]
item_527 = models.CharField(max_length=8, choices=item_527_choices, null=True)
item_528_choices = [('produces', 'produces')]
item_528 = models.CharField(max_length=8, choices=item_528_choices, null=True)
item_529_choices = [('produces', 'produces')]
item_529 = models.CharField(max_length=8, choices=item_529_choices, null=True)
item_530_choices = [('produces', 'produces')]
item_530 = models.CharField(max_length=8, choices=item_530_choices, null=True)
item_531_choices = [('produces', 'produces')]
item_531 = models.CharField(max_length=8, choices=item_531_choices, null=True)
item_532_choices = [('produces', 'produces')]
item_532 = models.CharField(max_length=8, choices=item_532_choices, null=True)
item_533_choices = [('produces', 'produces')]
item_533 = models.CharField(max_length=8, choices=item_533_choices, null=True)
item_534_choices = [('produces', 'produces')]
item_534 = models.CharField(max_length=8, choices=item_534_choices, null=True)
item_535_choices = [('produces', 'produces')]
item_535 = models.CharField(max_length=8, choices=item_535_choices, null=True)
item_536_choices = [('produces', 'produces')]
item_536 = models.CharField(max_length=8, choices=item_536_choices, null=True)
item_537_choices = [('produces', 'produces')]
item_537 = models.CharField(max_length=8, choices=item_537_choices, null=True)
item_538_choices = [('produces', 'produces')]
item_538 = models.CharField(max_length=8, choices=item_538_choices, null=True)
item_539_choices = [('produces', 'produces')]
item_539 = models.CharField(max_length=8, choices=item_539_choices, null=True)
item_540_choices = [('produces', 'produces')]
item_540 = models.CharField(max_length=8, choices=item_540_choices, null=True)
item_541_choices = [('produces', 'produces')]
item_541 = models.CharField(max_length=8, choices=item_541_choices, null=True)
item_542_choices = [('produces', 'produces')]
item_542 = models.CharField(max_length=8, choices=item_542_choices, null=True)
item_543_choices = [('produces', 'produces')]
item_543 = models.CharField(max_length=8, choices=item_543_choices, null=True)
item_544_choices = [('produces', 'produces')]
item_544 = models.CharField(max_length=8, choices=item_544_choices, null=True)
item_545_choices = [('produces', 'produces')]
item_545 = models.CharField(max_length=8, choices=item_545_choices, null=True)
item_546_choices = [('produces', 'produces')]
item_546 = models.CharField(max_length=8, choices=item_546_choices, null=True)
item_547_choices = [('produces', 'produces')]
item_547 = models.CharField(max_length=8, choices=item_547_choices, null=True)
item_548_choices = [('produces', 'produces')]
item_548 = models.CharField(max_length=8, choices=item_548_choices, null=True)
item_549_choices = [('produces', 'produces')]
item_549 = models.CharField(max_length=8, choices=item_549_choices, null=True)
item_550_choices = [('produces', 'produces')]
item_550 = models.CharField(max_length=8, choices=item_550_choices, null=True)
item_551_choices = [('produces', 'produces')]
item_551 = models.CharField(max_length=8, choices=item_551_choices, null=True)
item_552_choices = [('produces', 'produces')]
item_552 = models.CharField(max_length=8, choices=item_552_choices, null=True)
item_553_choices = [('produces', 'produces')]
item_553 = models.CharField(max_length=8, choices=item_553_choices, null=True)
item_554_choices = [('produces', 'produces')]
item_554 = models.CharField(max_length=8, choices=item_554_choices, null=True)
item_555_choices = [('produces', 'produces')]
item_555 = models.CharField(max_length=8, choices=item_555_choices, null=True)
item_556_choices = [('produces', 'produces')]
item_556 = models.CharField(max_length=8, choices=item_556_choices, null=True)
item_557_choices = [('produces', 'produces')]
item_557 = models.CharField(max_length=8, choices=item_557_choices, null=True)
item_558_choices = [('produces', 'produces')]
item_558 = models.CharField(max_length=8, choices=item_558_choices, null=True)
item_559_choices = [('produces', 'produces')]
item_559 = models.CharField(max_length=8, choices=item_559_choices, null=True)
item_560_choices = [('produces', 'produces')]
item_560 = models.CharField(max_length=8, choices=item_560_choices, null=True)
item_561_choices = [('produces', 'produces')]
item_561 = models.CharField(max_length=8, choices=item_561_choices, null=True)
item_562_choices = [('produces', 'produces')]
item_562 = models.CharField(max_length=8, choices=item_562_choices, null=True)
item_563_choices = [('produces', 'produces')]
item_563 = models.CharField(max_length=8, choices=item_563_choices, null=True)
item_564_choices = [('produces', 'produces')]
item_564 = models.CharField(max_length=8, choices=item_564_choices, null=True)
item_565_choices = [('produces', 'produces')]
item_565 = models.CharField(max_length=8, choices=item_565_choices, null=True)
item_566_choices = [('produces', 'produces')]
item_566 = models.CharField(max_length=8, choices=item_566_choices, null=True)
item_567_choices = [('produces', 'produces')]
item_567 = models.CharField(max_length=8, choices=item_567_choices, null=True)
item_568_choices = [('produces', 'produces')]
item_568 = models.CharField(max_length=8, choices=item_568_choices, null=True)
item_569_choices = [('produces', 'produces')]
item_569 = models.CharField(max_length=8, choices=item_569_choices, null=True)
item_570_choices = [('produces', 'produces')]
item_570 = models.CharField(max_length=8, choices=item_570_choices, null=True)
item_571_choices = [('produces', 'produces')]
item_571 = models.CharField(max_length=8, choices=item_571_choices, null=True)
item_572_choices = [('produces', 'produces')]
item_572 = models.CharField(max_length=8, choices=item_572_choices, null=True)
item_573_choices = [('produces', 'produces')]
item_573 = models.CharField(max_length=8, choices=item_573_choices, null=True)
item_574_choices = [('produces', 'produces')]
item_574 = models.CharField(max_length=8, choices=item_574_choices, null=True)
item_575_choices = [('produces', 'produces')]
item_575 = models.CharField(max_length=8, choices=item_575_choices, null=True)
item_576_choices = [('produces', 'produces')]
item_576 = models.CharField(max_length=8, choices=item_576_choices, null=True)
item_577_choices = [('produces', 'produces')]
item_577 = models.CharField(max_length=8, choices=item_577_choices, null=True)
item_578_choices = [('produces', 'produces')]
item_578 = models.CharField(max_length=8, choices=item_578_choices, null=True)
item_579_choices = [('produces', 'produces')]
item_579 = models.CharField(max_length=8, choices=item_579_choices, null=True)
item_580_choices = [('produces', 'produces')]
item_580 = models.CharField(max_length=8, choices=item_580_choices, null=True)
item_581_choices = [('produces', 'produces')]
item_581 = models.CharField(max_length=8, choices=item_581_choices, null=True)
item_582_choices = [('produces', 'produces')]
item_582 = models.CharField(max_length=8, choices=item_582_choices, null=True)
item_583_choices = [('produces', 'produces')]
item_583 = models.CharField(max_length=8, choices=item_583_choices, null=True)
item_584_choices = [('produces', 'produces')]
item_584 = models.CharField(max_length=8, choices=item_584_choices, null=True)
item_585_choices = [('produces', 'produces')]
item_585 = models.CharField(max_length=8, choices=item_585_choices, null=True)
item_586_choices = [('produces', 'produces')]
item_586 = models.CharField(max_length=8, choices=item_586_choices, null=True)
item_587_choices = [('produces', 'produces')]
item_587 = models.CharField(max_length=8, choices=item_587_choices, null=True)
# NOTE(review): auto-generated-looking run of paired class attributes.
# Each item_N gets its own single-option choice list
# [('produces', 'produces')] followed by the nullable CharField that uses
# it (max_length=8 exactly fits 'produces').  Presumably one field per
# checklist word of the instrument form -- confirm against the generator.
item_588_choices = [('produces', 'produces')]
item_588 = models.CharField(max_length=8, choices=item_588_choices, null=True)
item_589_choices = [('produces', 'produces')]
item_589 = models.CharField(max_length=8, choices=item_589_choices, null=True)
item_590_choices = [('produces', 'produces')]
item_590 = models.CharField(max_length=8, choices=item_590_choices, null=True)
item_591_choices = [('produces', 'produces')]
item_591 = models.CharField(max_length=8, choices=item_591_choices, null=True)
item_592_choices = [('produces', 'produces')]
item_592 = models.CharField(max_length=8, choices=item_592_choices, null=True)
item_593_choices = [('produces', 'produces')]
item_593 = models.CharField(max_length=8, choices=item_593_choices, null=True)
item_594_choices = [('produces', 'produces')]
item_594 = models.CharField(max_length=8, choices=item_594_choices, null=True)
item_595_choices = [('produces', 'produces')]
item_595 = models.CharField(max_length=8, choices=item_595_choices, null=True)
item_596_choices = [('produces', 'produces')]
item_596 = models.CharField(max_length=8, choices=item_596_choices, null=True)
item_597_choices = [('produces', 'produces')]
item_597 = models.CharField(max_length=8, choices=item_597_choices, null=True)
item_598_choices = [('produces', 'produces')]
item_598 = models.CharField(max_length=8, choices=item_598_choices, null=True)
item_599_choices = [('produces', 'produces')]
item_599 = models.CharField(max_length=8, choices=item_599_choices, null=True)
item_600_choices = [('produces', 'produces')]
item_600 = models.CharField(max_length=8, choices=item_600_choices, null=True)
item_601_choices = [('produces', 'produces')]
item_601 = models.CharField(max_length=8, choices=item_601_choices, null=True)
item_602_choices = [('produces', 'produces')]
item_602 = models.CharField(max_length=8, choices=item_602_choices, null=True)
item_603_choices = [('produces', 'produces')]
item_603 = models.CharField(max_length=8, choices=item_603_choices, null=True)
item_604_choices = [('produces', 'produces')]
item_604 = models.CharField(max_length=8, choices=item_604_choices, null=True)
item_605_choices = [('produces', 'produces')]
item_605 = models.CharField(max_length=8, choices=item_605_choices, null=True)
item_606_choices = [('produces', 'produces')]
item_606 = models.CharField(max_length=8, choices=item_606_choices, null=True)
item_607_choices = [('produces', 'produces')]
item_607 = models.CharField(max_length=8, choices=item_607_choices, null=True)
item_608_choices = [('produces', 'produces')]
item_608 = models.CharField(max_length=8, choices=item_608_choices, null=True)
item_609_choices = [('produces', 'produces')]
item_609 = models.CharField(max_length=8, choices=item_609_choices, null=True)
item_610_choices = [('produces', 'produces')]
item_610 = models.CharField(max_length=8, choices=item_610_choices, null=True)
item_611_choices = [('produces', 'produces')]
item_611 = models.CharField(max_length=8, choices=item_611_choices, null=True)
item_612_choices = [('produces', 'produces')]
item_612 = models.CharField(max_length=8, choices=item_612_choices, null=True)
item_613_choices = [('produces', 'produces')]
item_613 = models.CharField(max_length=8, choices=item_613_choices, null=True)
item_614_choices = [('produces', 'produces')]
item_614 = models.CharField(max_length=8, choices=item_614_choices, null=True)
item_615_choices = [('produces', 'produces')]
item_615 = models.CharField(max_length=8, choices=item_615_choices, null=True)
item_616_choices = [('produces', 'produces')]
item_616 = models.CharField(max_length=8, choices=item_616_choices, null=True)
item_617_choices = [('produces', 'produces')]
item_617 = models.CharField(max_length=8, choices=item_617_choices, null=True)
item_618_choices = [('produces', 'produces')]
item_618 = models.CharField(max_length=8, choices=item_618_choices, null=True)
item_619_choices = [('produces', 'produces')]
item_619 = models.CharField(max_length=8, choices=item_619_choices, null=True)
item_620_choices = [('produces', 'produces')]
item_620 = models.CharField(max_length=8, choices=item_620_choices, null=True)
item_621_choices = [('produces', 'produces')]
item_621 = models.CharField(max_length=8, choices=item_621_choices, null=True)
item_622_choices = [('produces', 'produces')]
item_622 = models.CharField(max_length=8, choices=item_622_choices, null=True)
item_623_choices = [('produces', 'produces')]
item_623 = models.CharField(max_length=8, choices=item_623_choices, null=True)
item_624_choices = [('produces', 'produces')]
item_624 = models.CharField(max_length=8, choices=item_624_choices, null=True)
item_625_choices = [('produces', 'produces')]
item_625 = models.CharField(max_length=8, choices=item_625_choices, null=True)
item_626_choices = [('produces', 'produces')]
item_626 = models.CharField(max_length=8, choices=item_626_choices, null=True)
item_627_choices = [('produces', 'produces')]
item_627 = models.CharField(max_length=8, choices=item_627_choices, null=True)
item_628_choices = [('produces', 'produces')]
item_628 = models.CharField(max_length=8, choices=item_628_choices, null=True)
item_629_choices = [('produces', 'produces')]
item_629 = models.CharField(max_length=8, choices=item_629_choices, null=True)
item_630_choices = [('produces', 'produces')]
item_630 = models.CharField(max_length=8, choices=item_630_choices, null=True)
item_631_choices = [('produces', 'produces')]
item_631 = models.CharField(max_length=8, choices=item_631_choices, null=True)
item_632_choices = [('produces', 'produces')]
item_632 = models.CharField(max_length=8, choices=item_632_choices, null=True)
item_633_choices = [('produces', 'produces')]
item_633 = models.CharField(max_length=8, choices=item_633_choices, null=True)
item_634_choices = [('produces', 'produces')]
item_634 = models.CharField(max_length=8, choices=item_634_choices, null=True)
item_635_choices = [('produces', 'produces')]
item_635 = models.CharField(max_length=8, choices=item_635_choices, null=True)
item_636_choices = [('produces', 'produces')]
item_636 = models.CharField(max_length=8, choices=item_636_choices, null=True)
item_637_choices = [('produces', 'produces')]
item_637 = models.CharField(max_length=8, choices=item_637_choices, null=True)
item_638_choices = [('produces', 'produces')]
item_638 = models.CharField(max_length=8, choices=item_638_choices, null=True)
item_639_choices = [('produces', 'produces')]
item_639 = models.CharField(max_length=8, choices=item_639_choices, null=True)
item_640_choices = [('produces', 'produces')]
item_640 = models.CharField(max_length=8, choices=item_640_choices, null=True)
item_641_choices = [('produces', 'produces')]
item_641 = models.CharField(max_length=8, choices=item_641_choices, null=True)
item_642_choices = [('produces', 'produces')]
item_642 = models.CharField(max_length=8, choices=item_642_choices, null=True)
item_643_choices = [('produces', 'produces')]
item_643 = models.CharField(max_length=8, choices=item_643_choices, null=True)
item_644_choices = [('produces', 'produces')]
item_644 = models.CharField(max_length=8, choices=item_644_choices, null=True)
item_645_choices = [('produces', 'produces')]
item_645 = models.CharField(max_length=8, choices=item_645_choices, null=True)
item_646_choices = [('produces', 'produces')]
item_646 = models.CharField(max_length=8, choices=item_646_choices, null=True)
item_647_choices = [('produces', 'produces')]
item_647 = models.CharField(max_length=8, choices=item_647_choices, null=True)
item_648_choices = [('produces', 'produces')]
item_648 = models.CharField(max_length=8, choices=item_648_choices, null=True)
item_649_choices = [('produces', 'produces')]
item_649 = models.CharField(max_length=8, choices=item_649_choices, null=True)
item_650_choices = [('produces', 'produces')]
item_650 = models.CharField(max_length=8, choices=item_650_choices, null=True)
item_651_choices = [('produces', 'produces')]
item_651 = models.CharField(max_length=8, choices=item_651_choices, null=True)
item_652_choices = [('produces', 'produces')]
item_652 = models.CharField(max_length=8, choices=item_652_choices, null=True)
item_653_choices = [('produces', 'produces')]
item_653 = models.CharField(max_length=8, choices=item_653_choices, null=True)
item_654_choices = [('produces', 'produces')]
item_654 = models.CharField(max_length=8, choices=item_654_choices, null=True)
item_655_choices = [('produces', 'produces')]
item_655 = models.CharField(max_length=8, choices=item_655_choices, null=True)
item_656_choices = [('produces', 'produces')]
item_656 = models.CharField(max_length=8, choices=item_656_choices, null=True)
item_657_choices = [('produces', 'produces')]
item_657 = models.CharField(max_length=8, choices=item_657_choices, null=True)
item_658_choices = [('produces', 'produces')]
item_658 = models.CharField(max_length=8, choices=item_658_choices, null=True)
item_659_choices = [('produces', 'produces')]
item_659 = models.CharField(max_length=8, choices=item_659_choices, null=True)
item_660_choices = [('produces', 'produces')]
item_660 = models.CharField(max_length=8, choices=item_660_choices, null=True)
item_661_choices = [('produces', 'produces')]
item_661 = models.CharField(max_length=8, choices=item_661_choices, null=True)
item_662_choices = [('produces', 'produces')]
item_662 = models.CharField(max_length=8, choices=item_662_choices, null=True)
item_663_choices = [('produces', 'produces')]
item_663 = models.CharField(max_length=8, choices=item_663_choices, null=True)
item_664_choices = [('produces', 'produces')]
item_664 = models.CharField(max_length=8, choices=item_664_choices, null=True)
item_665_choices = [('produces', 'produces')]
item_665 = models.CharField(max_length=8, choices=item_665_choices, null=True)
item_666_choices = [('produces', 'produces')]
item_666 = models.CharField(max_length=8, choices=item_666_choices, null=True)
item_667_choices = [('produces', 'produces')]
item_667 = models.CharField(max_length=8, choices=item_667_choices, null=True)
item_668_choices = [('produces', 'produces')]
item_668 = models.CharField(max_length=8, choices=item_668_choices, null=True)
item_669_choices = [('produces', 'produces')]
item_669 = models.CharField(max_length=8, choices=item_669_choices, null=True)
item_670_choices = [('produces', 'produces')]
item_670 = models.CharField(max_length=8, choices=item_670_choices, null=True)
item_671_choices = [('produces', 'produces')]
item_671 = models.CharField(max_length=8, choices=item_671_choices, null=True)
item_672_choices = [('produces', 'produces')]
item_672 = models.CharField(max_length=8, choices=item_672_choices, null=True)
item_673_choices = [('produces', 'produces')]
item_673 = models.CharField(max_length=8, choices=item_673_choices, null=True)
item_674_choices = [('produces', 'produces')]
item_674 = models.CharField(max_length=8, choices=item_674_choices, null=True)
item_675_choices = [('produces', 'produces')]
item_675 = models.CharField(max_length=8, choices=item_675_choices, null=True)
item_676_choices = [('produces', 'produces')]
item_676 = models.CharField(max_length=8, choices=item_676_choices, null=True)
item_677_choices = [('produces', 'produces')]
item_677 = models.CharField(max_length=8, choices=item_677_choices, null=True)
item_678_choices = [('produces', 'produces')]
item_678 = models.CharField(max_length=8, choices=item_678_choices, null=True)
item_679_choices = [('produces', 'produces')]
item_679 = models.CharField(max_length=8, choices=item_679_choices, null=True)
item_680_choices = [('produces', 'produces')]
item_680 = models.CharField(max_length=8, choices=item_680_choices, null=True)
item_681_choices = [('produces', 'produces')]
item_681 = models.CharField(max_length=8, choices=item_681_choices, null=True)
item_682_choices = [('produces', 'produces')]
item_682 = models.CharField(max_length=8, choices=item_682_choices, null=True)
item_683_choices = [('produces', 'produces')]
item_683 = models.CharField(max_length=8, choices=item_683_choices, null=True)
item_684_choices = [('produces', 'produces')]
item_684 = models.CharField(max_length=8, choices=item_684_choices, null=True)
item_685_choices = [('produces', 'produces')]
item_685 = models.CharField(max_length=8, choices=item_685_choices, null=True)
item_686_choices = [('produces', 'produces')]
item_686 = models.CharField(max_length=8, choices=item_686_choices, null=True)
item_687_choices = [('produces', 'produces')]
item_687 = models.CharField(max_length=8, choices=item_687_choices, null=True)
item_688_choices = [('produces', 'produces')]
item_688 = models.CharField(max_length=8, choices=item_688_choices, null=True)
item_689_choices = [('produces', 'produces')]
item_689 = models.CharField(max_length=8, choices=item_689_choices, null=True)
item_690_choices = [('produces', 'produces')]
item_690 = models.CharField(max_length=8, choices=item_690_choices, null=True)
item_691_choices = [('produces', 'produces')]
item_691 = models.CharField(max_length=8, choices=item_691_choices, null=True)
item_692_choices = [('produces', 'produces')]
item_692 = models.CharField(max_length=8, choices=item_692_choices, null=True)
item_693_choices = [('produces', 'produces')]
item_693 = models.CharField(max_length=8, choices=item_693_choices, null=True)
item_694_choices = [('produces', 'produces')]
item_694 = models.CharField(max_length=8, choices=item_694_choices, null=True)
item_695_choices = [('produces', 'produces')]
item_695 = models.CharField(max_length=8, choices=item_695_choices, null=True)
item_696_choices = [('produces', 'produces')]
item_696 = models.CharField(max_length=8, choices=item_696_choices, null=True)
item_697_choices = [('produces', 'produces')]
item_697 = models.CharField(max_length=8, choices=item_697_choices, null=True)
item_698_choices = [('produces', 'produces')]
item_698 = models.CharField(max_length=8, choices=item_698_choices, null=True)
item_699_choices = [('produces', 'produces')]
item_699 = models.CharField(max_length=8, choices=item_699_choices, null=True)
item_700_choices = [('produces', 'produces')]
item_700 = models.CharField(max_length=8, choices=item_700_choices, null=True)
item_701_choices = [('produces', 'produces')]
item_701 = models.CharField(max_length=8, choices=item_701_choices, null=True)
item_702_choices = [('produces', 'produces')]
item_702 = models.CharField(max_length=8, choices=item_702_choices, null=True)
item_703_choices = [('produces', 'produces')]
item_703 = models.CharField(max_length=8, choices=item_703_choices, null=True)
item_704_choices = [('produces', 'produces')]
item_704 = models.CharField(max_length=8, choices=item_704_choices, null=True)
item_705_choices = [('produces', 'produces')]
item_705 = models.CharField(max_length=8, choices=item_705_choices, null=True)
# Items 706-710 use a three-level frequency scale instead of the single
# 'produces' option; max_length=9 exactly fits the longest value,
# 'sometimes'.
item_706_choices = [('not yet', 'not yet'), ('sometimes', 'sometimes'), ('often', 'often')]
item_706 = models.CharField(max_length=9, choices=item_706_choices, null=True)
item_707_choices = [('not yet', 'not yet'), ('sometimes', 'sometimes'), ('often', 'often')]
item_707 = models.CharField(max_length=9, choices=item_707_choices, null=True)
item_708_choices = [('not yet', 'not yet'), ('sometimes', 'sometimes'), ('often', 'often')]
item_708 = models.CharField(max_length=9, choices=item_708_choices, null=True)
item_709_choices = [('not yet', 'not yet'), ('sometimes', 'sometimes'), ('often', 'often')]
item_709 = models.CharField(max_length=9, choices=item_709_choices, null=True)
item_710_choices = [('not yet', 'not yet'), ('sometimes', 'sometimes'), ('often', 'often')]
item_710 = models.CharField(max_length=9, choices=item_710_choices, null=True)
# Items 711-737 use a two-level scale; max_length=7 exactly fits 'complex'.
item_711_choices = [('simple', 'simple'), ('complex', 'complex')]
item_711 = models.CharField(max_length=7, choices=item_711_choices, null=True)
item_712_choices = [('simple', 'simple'), ('complex', 'complex')]
item_712 = models.CharField(max_length=7, choices=item_712_choices, null=True)
item_713_choices = [('simple', 'simple'), ('complex', 'complex')]
item_713 = models.CharField(max_length=7, choices=item_713_choices, null=True)
item_714_choices = [('simple', 'simple'), ('complex', 'complex')]
item_714 = models.CharField(max_length=7, choices=item_714_choices, null=True)
item_715_choices = [('simple', 'simple'), ('complex', 'complex')]
item_715 = models.CharField(max_length=7, choices=item_715_choices, null=True)
item_716_choices = [('simple', 'simple'), ('complex', 'complex')]
item_716 = models.CharField(max_length=7, choices=item_716_choices, null=True)
item_717_choices = [('simple', 'simple'), ('complex', 'complex')]
item_717 = models.CharField(max_length=7, choices=item_717_choices, null=True)
item_718_choices = [('simple', 'simple'), ('complex', 'complex')]
item_718 = models.CharField(max_length=7, choices=item_718_choices, null=True)
item_719_choices = [('simple', 'simple'), ('complex', 'complex')]
item_719 = models.CharField(max_length=7, choices=item_719_choices, null=True)
item_720_choices = [('simple', 'simple'), ('complex', 'complex')]
item_720 = models.CharField(max_length=7, choices=item_720_choices, null=True)
item_721_choices = [('simple', 'simple'), ('complex', 'complex')]
item_721 = models.CharField(max_length=7, choices=item_721_choices, null=True)
item_722_choices = [('simple', 'simple'), ('complex', 'complex')]
item_722 = models.CharField(max_length=7, choices=item_722_choices, null=True)
item_723_choices = [('simple', 'simple'), ('complex', 'complex')]
item_723 = models.CharField(max_length=7, choices=item_723_choices, null=True)
item_724_choices = [('simple', 'simple'), ('complex', 'complex')]
item_724 = models.CharField(max_length=7, choices=item_724_choices, null=True)
item_725_choices = [('simple', 'simple'), ('complex', 'complex')]
item_725 = models.CharField(max_length=7, choices=item_725_choices, null=True)
item_726_choices = [('simple', 'simple'), ('complex', 'complex')]
item_726 = models.CharField(max_length=7, choices=item_726_choices, null=True)
item_727_choices = [('simple', 'simple'), ('complex', 'complex')]
item_727 = models.CharField(max_length=7, choices=item_727_choices, null=True)
item_728_choices = [('simple', 'simple'), ('complex', 'complex')]
item_728 = models.CharField(max_length=7, choices=item_728_choices, null=True)
item_729_choices = [('simple', 'simple'), ('complex', 'complex')]
item_729 = models.CharField(max_length=7, choices=item_729_choices, null=True)
item_730_choices = [('simple', 'simple'), ('complex', 'complex')]
item_730 = models.CharField(max_length=7, choices=item_730_choices, null=True)
item_731_choices = [('simple', 'simple'), ('complex', 'complex')]
item_731 = models.CharField(max_length=7, choices=item_731_choices, null=True)
item_732_choices = [('simple', 'simple'), ('complex', 'complex')]
item_732 = models.CharField(max_length=7, choices=item_732_choices, null=True)
item_733_choices = [('simple', 'simple'), ('complex', 'complex')]
item_733 = models.CharField(max_length=7, choices=item_733_choices, null=True)
item_734_choices = [('simple', 'simple'), ('complex', 'complex')]
item_734 = models.CharField(max_length=7, choices=item_734_choices, null=True)
item_735_choices = [('simple', 'simple'), ('complex', 'complex')]
item_735 = models.CharField(max_length=7, choices=item_735_choices, null=True)
item_736_choices = [('simple', 'simple'), ('complex', 'complex')]
item_736 = models.CharField(max_length=7, choices=item_736_choices, null=True)
item_737_choices = [('simple', 'simple'), ('complex', 'complex')]
item_737 = models.CharField(max_length=7, choices=item_737_choices, null=True)
# Item 738 is a single frequency-scale question interleaved between the
# two simple/complex runs -- presumably intentional ordering from the
# source form; confirm against the generator input.
item_738_choices = [('not yet', 'not yet'), ('sometimes', 'sometimes'), ('often', 'often')]
item_738 = models.CharField(max_length=9, choices=item_738_choices, null=True)
# Items 739-760 resume the two-level simple/complex scale (max_length=7).
item_739_choices = [('simple', 'simple'), ('complex', 'complex')]
item_739 = models.CharField(max_length=7, choices=item_739_choices, null=True)
item_740_choices = [('simple', 'simple'), ('complex', 'complex')]
item_740 = models.CharField(max_length=7, choices=item_740_choices, null=True)
item_741_choices = [('simple', 'simple'), ('complex', 'complex')]
item_741 = models.CharField(max_length=7, choices=item_741_choices, null=True)
item_742_choices = [('simple', 'simple'), ('complex', 'complex')]
item_742 = models.CharField(max_length=7, choices=item_742_choices, null=True)
item_743_choices = [('simple', 'simple'), ('complex', 'complex')]
item_743 = models.CharField(max_length=7, choices=item_743_choices, null=True)
item_744_choices = [('simple', 'simple'), ('complex', 'complex')]
item_744 = models.CharField(max_length=7, choices=item_744_choices, null=True)
item_745_choices = [('simple', 'simple'), ('complex', 'complex')]
item_745 = models.CharField(max_length=7, choices=item_745_choices, null=True)
item_746_choices = [('simple', 'simple'), ('complex', 'complex')]
item_746 = models.CharField(max_length=7, choices=item_746_choices, null=True)
item_747_choices = [('simple', 'simple'), ('complex', 'complex')]
item_747 = models.CharField(max_length=7, choices=item_747_choices, null=True)
item_748_choices = [('simple', 'simple'), ('complex', 'complex')]
item_748 = models.CharField(max_length=7, choices=item_748_choices, null=True)
item_749_choices = [('simple', 'simple'), ('complex', 'complex')]
item_749 = models.CharField(max_length=7, choices=item_749_choices, null=True)
item_750_choices = [('simple', 'simple'), ('complex', 'complex')]
item_750 = models.CharField(max_length=7, choices=item_750_choices, null=True)
item_751_choices = [('simple', 'simple'), ('complex', 'complex')]
item_751 = models.CharField(max_length=7, choices=item_751_choices, null=True)
item_752_choices = [('simple', 'simple'), ('complex', 'complex')]
item_752 = models.CharField(max_length=7, choices=item_752_choices, null=True)
item_753_choices = [('simple', 'simple'), ('complex', 'complex')]
item_753 = models.CharField(max_length=7, choices=item_753_choices, null=True)
item_754_choices = [('simple', 'simple'), ('complex', 'complex')]
item_754 = models.CharField(max_length=7, choices=item_754_choices, null=True)
item_755_choices = [('simple', 'simple'), ('complex', 'complex')]
item_755 = models.CharField(max_length=7, choices=item_755_choices, null=True)
item_756_choices = [('simple', 'simple'), ('complex', 'complex')]
item_756 = models.CharField(max_length=7, choices=item_756_choices, null=True)
item_757_choices = [('simple', 'simple'), ('complex', 'complex')]
item_757 = models.CharField(max_length=7, choices=item_757_choices, null=True)
item_758_choices = [('simple', 'simple'), ('complex', 'complex')]
item_758 = models.CharField(max_length=7, choices=item_758_choices, null=True)
item_759_choices = [('simple', 'simple'), ('complex', 'complex')]
item_759 = models.CharField(max_length=7, choices=item_759_choices, null=True)
item_760_choices = [('simple', 'simple'), ('complex', 'complex')]
item_760 = models.CharField(max_length=7, choices=item_760_choices, null=True)
| langcog/wordbank | instruments/schemas/Kiswahili_WS.py | Python | gpl-2.0 | 102,113 |
"""Support to enter a value into a text box."""
import logging
import voluptuous as vol
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.config_validation import ENTITY_SERVICE_SCHEMA
from homeassistant.const import (
ATTR_UNIT_OF_MEASUREMENT,
ATTR_MODE,
CONF_ICON,
CONF_NAME,
CONF_MODE,
)
from homeassistant.helpers.entity_component import EntityComponent
from homeassistant.helpers.restore_state import RestoreEntity
# Module-level constants for the input_text component.
_LOGGER = logging.getLogger(__name__)
DOMAIN = "input_text"
ENTITY_ID_FORMAT = DOMAIN + ".{}"
# Configuration keys.
CONF_INITIAL = "initial"
CONF_MIN = "min"
CONF_MAX = "max"
# Display modes for the frontend (plain text vs. masked input).
MODE_TEXT = "text"
MODE_PASSWORD = "password"
# State attribute keys exposed by InputText.state_attributes.
ATTR_VALUE = "value"
ATTR_MIN = "min"
ATTR_MAX = "max"
ATTR_PATTERN = "pattern"
# Service that sets a new value on one or more input_text entities.
SERVICE_SET_VALUE = "set_value"
SERVICE_SET_VALUE_SCHEMA = ENTITY_SERVICE_SCHEMA.extend(
    {vol.Required(ATTR_VALUE): cv.string}
)
def _cv_input_text(cfg):
    """Validate an input_text config block (voluptuous custom validator).

    Ensures min <= max and that any configured initial value has a length
    within that range.  Returns cfg unchanged on success and raises
    vol.Invalid otherwise.
    """
    minimum = cfg.get(CONF_MIN)
    maximum = cfg.get(CONF_MAX)
    if minimum > maximum:
        # BUG FIX: the max and min values were swapped in the message,
        # reporting the min as "Max len" and vice versa.
        raise vol.Invalid(
            f"Max len ({maximum}) is not greater than min len ({minimum})"
        )
    state = cfg.get(CONF_INITIAL)
    if state is not None and (len(state) < minimum or len(state) > maximum):
        raise vol.Invalid(
            f"Initial value {state} length not in range {minimum}-{maximum}"
        )
    return cfg
# Schema for the `input_text:` YAML block.  Each slug key maps either to a
# settings dict (validated below plus _cv_input_text) or to None (bare key).
CONFIG_SCHEMA = vol.Schema(
    {
        DOMAIN: cv.schema_with_slug_keys(
            vol.Any(
                vol.All(
                    {
                        vol.Optional(CONF_NAME): cv.string,
                        vol.Optional(CONF_MIN, default=0): vol.Coerce(int),
                        vol.Optional(CONF_MAX, default=100): vol.Coerce(int),
                        # NOTE(review): vol.Optional's second positional
                        # argument is `msg`, not `default`, so this passes
                        # "" as the error message and CONF_INITIAL gets no
                        # default.  Confirm intent before changing -- a
                        # default of "" would alter restore-state behavior
                        # in async_added_to_hass.
                        vol.Optional(CONF_INITIAL, ""): cv.string,
                        vol.Optional(CONF_ICON): cv.icon,
                        vol.Optional(ATTR_UNIT_OF_MEASUREMENT): cv.string,
                        vol.Optional(ATTR_PATTERN): cv.string,
                        vol.Optional(CONF_MODE, default=MODE_TEXT): vol.In(
                            [MODE_TEXT, MODE_PASSWORD]
                        ),
                    },
                    _cv_input_text,
                ),
                None,
            )
        )
    },
    required=True,
    extra=vol.ALLOW_EXTRA,
)
async def async_setup(hass, config):
    """Set up the input_text component from YAML configuration."""
    component = EntityComponent(_LOGGER, DOMAIN, hass)

    entities = []
    for object_id, cfg in config[DOMAIN].items():
        cfg = cfg or {}  # a bare slug key validates to None
        entities.append(
            InputText(
                object_id,
                cfg.get(CONF_NAME),
                cfg.get(CONF_INITIAL),
                cfg.get(CONF_MIN),
                cfg.get(CONF_MAX),
                cfg.get(CONF_ICON),
                cfg.get(ATTR_UNIT_OF_MEASUREMENT),
                cfg.get(ATTR_PATTERN),
                cfg.get(CONF_MODE),
            )
        )

    if not entities:
        return False

    component.async_register_entity_service(
        SERVICE_SET_VALUE, SERVICE_SET_VALUE_SCHEMA, "async_set_value"
    )

    await component.async_add_entities(entities)
    return True
class InputText(RestoreEntity):
    """A text box entity whose value can be set via service call or UI."""

    def __init__(
        self, object_id, name, initial, minimum, maximum, icon, unit, pattern, mode
    ):
        """Initialize the text input entity."""
        self.entity_id = ENTITY_ID_FORMAT.format(object_id)
        self._name = name
        self._value = initial
        self._min_len = minimum
        self._max_len = maximum
        self._icon = icon
        self._unit = unit
        self._pattern = pattern
        self._mode = mode

    @property
    def should_poll(self):
        """No polling needed; state changes are pushed."""
        return False

    @property
    def name(self):
        """Return the friendly name of the entity."""
        return self._name

    @property
    def icon(self):
        """Return the configured icon."""
        return self._icon

    @property
    def state(self):
        """Return the current text value."""
        return self._value

    @property
    def unit_of_measurement(self):
        """Return the configured unit of measurement."""
        return self._unit

    @property
    def state_attributes(self):
        """Expose length limits, pattern and display mode as attributes."""
        return {
            ATTR_MIN: self._min_len,
            ATTR_MAX: self._max_len,
            ATTR_PATTERN: self._pattern,
            ATTR_MODE: self._mode,
        }

    async def async_added_to_hass(self):
        """Restore the previous value unless an initial one was configured."""
        await super().async_added_to_hass()
        if self._value is not None:
            # An explicit `initial` from the config wins over restoration.
            return
        state = await self.async_get_last_state()
        restored = state and state.state
        # Compare against None explicitly: "" is a legitimate stored value.
        if restored is not None and self._min_len <= len(restored) <= self._max_len:
            self._value = restored

    async def async_set_value(self, value):
        """Set a new value after validating its length."""
        if not self._min_len <= len(value) <= self._max_len:
            _LOGGER.warning(
                "Invalid value: %s (length range %s - %s)",
                value,
                self._min_len,
                self._max_len,
            )
            return
        self._value = value
        await self.async_update_ha_state()
| joopert/home-assistant | homeassistant/components/input_text/__init__.py | Python | apache-2.0 | 5,598 |
"""
Contains application form definitions.
"""
from django import forms
from wagtailplus.wagtaillinks.models import Link
class EmailLinkForm(forms.models.ModelForm):
    """ModelForm for creating and editing email-type links."""

    class Meta(object):
        model = Link
        fields = ('link_type', 'title', 'email', 'tags')
        widgets = {'link_type': forms.HiddenInput}

    def __init__(self, *args, **kwargs):
        """Pre-select the email link type and make the address mandatory."""
        super(EmailLinkForm, self).__init__(*args, **kwargs)
        self.fields['email'].required = True
        self.fields['link_type'].initial = Link.LINK_TYPE_EMAIL
class ExternalLinkForm(forms.models.ModelForm):
    """ModelForm for creating and editing external-URL links."""

    class Meta(object):
        model = Link
        fields = ('link_type', 'title', 'external_url', 'tags')
        widgets = {'link_type': forms.HiddenInput}

    def __init__(self, *args, **kwargs):
        """Pre-select the external link type and make the URL mandatory."""
        super(ExternalLinkForm, self).__init__(*args, **kwargs)
        self.fields['external_url'].required = True
        self.fields['link_type'].initial = Link.LINK_TYPE_EXTERNAL
#!/usr/bin/python
# removes any files in <targetdir> older than <purgeperiod> days
import os, sys, time, getopt
def get_filepaths(directory):
    """Return the full path of every file found under directory, recursively."""
    return [os.path.join(root, name)
            for root, _dirs, filenames in os.walk(directory)
            for name in filenames]
def usage():
    """Print command-line usage to stdout (Python 2 print statements)."""
    print 'purgebackup.py -d -p <purgeperiod> -t <targetdir>'
    print ''
    print ' -d, --dryrun do not purge anything'
    print ' -p, --purgeperiod purge files older than this period (of days)'
    print ' -t, --targetdir directory under which files are purged'
def main(argv):
targetdir = ''
purgeperiod = ''
dryrun = False
try:
opts, args = getopt.getopt(argv,"hdp:t:",["purgeperiod=","targetdir="])
except getopt.GetoptError:
usage()
sys.exit(2)
for opt, arg in opts:
if opt == '-h':
usage()
sys.exit()
elif opt in ("-t", "--targetdir"):
targetdir = arg
elif opt in ("-p", "--purgeperiod"):
purgeperiod = int(arg)
elif opt in ("-d", "--dryrun"):
dryrun = True
now = time.time()
cutoff = now - (purgeperiod * 86400)
file_paths = get_filepaths(targetdir)
for targetfile in file_paths:
if os.path.isfile(targetfile):
t = os.stat(targetfile)
c = t.st_mtime
if c < cutoff:
if dryrun:
print "File would be purged: " + targetfile
else:
os.remove(targetfile)
print "Removed: " + targetfile
# Entry point: run only when executed as a script, not when imported.
if __name__ == "__main__":
    main(sys.argv[1:])
| forgeservicelab/ansible.backup-server | scripts/purge.py | Python | mit | 1,741 |
# -*- coding: utf-8 -*-
########################## Copyrights and license ############################
# #
# Copyright 2011-2015 Christian Lupien <christian.lupien@usherbrooke.ca> #
# #
# This file is part of pyHegel. http://github.com/lupien/pyHegel #
# #
# pyHegel is free software: you can redistribute it and/or modify it under #
# the terms of the GNU Lesser General Public License as published by the #
# Free Software Foundation, either version 3 of the License, or (at your #
# option) any later version. #
# #
# pyHegel is distributed in the hope that it will be useful, but WITHOUT #
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or #
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public #
# License for more details. #
# #
# You should have received a copy of the GNU Lesser General Public License #
# along with pyHegel. If not, see <http://www.gnu.org/licenses/>. #
# #
##############################################################################
from __future__ import absolute_import
import socket
import threading
import time
import weakref
from ..instruments_base import BaseInstrument, MemoryDevice,\
dict_improved, locked_calling
from ..instruments_registry import register_instrument
# the server has a timeout (initially 30s), so the connection is lost
# when no commands are sent after that interval.
# keepalive stuff does not seem to work
# import socket
# bf = blueforsValves.bf_valves()
# bf._socket.ioctl(socket.SIO_KEEPALIVE_VALS, (1, 15*1000, 1*1000)) # windows
# bf._socket.getsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE)
# bf._socket.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)
# on linux
# bf._socket.setsockopt(socket.SOL_TCP, socket.TCP_KEEPIDLE, 15) # default is 7200 (2 hours)
# bf._socket.setsockopt(socket.SOL_TCP, socket.TCP_KEEPINTVL, 1) # default is 75
# bf._socket.setsockopt(socket.SOL_TCP, socket.TCP_KEEPCNT, 9) # default is 9
# instead of keepalive, just sent '\n' every 20 s
class keep_alive(threading.Thread):
    """Background thread that periodically pings the server.

    The server closes idle connections (see module notes), so a bare
    '\\n' is sent every `interval` seconds.  The socket is held through a
    weakref proxy so this thread does not keep it alive on its own, and
    `lck` serializes access with the instrument's own traffic.
    """
    def __init__(self, interval, sckt, lck):
        super(keep_alive, self).__init__()
        self.sckt = weakref.proxy(sckt)
        self.interval = interval
        self.lck = lck
        self.update_time()
        self.stop = False

    def send_keep_alive(self):
        # Hold the lock so the ping does not interleave with a command.
        with self.lck:
            self.sckt.send('\n')
            self.update_time()

    def run(self):
        while True:
            with self.lck:
                stop_requested = self.stop
                elapsed = time.time() - self.last
            if stop_requested:
                break
            if elapsed >= self.interval:
                self.send_keep_alive()
            else:
                # Wake at least every 5 s so cancel() is noticed promptly.
                time.sleep(min(self.interval - elapsed, 5))

    def cancel(self):
        with self.lck:
            self.stop = True

    def update_time(self):
        # Caller must hold self.lck (or be in __init__ before start()).
        self.last = time.time()
def makedict(input_str, t=float):
    """Parse a 'k1=v1,k2=v2' reply into a dict_improved, converting values with t."""
    pairs = []
    for entry in input_str.split(','):
        # lstrip because some replies (e.g. mgstatus) pad after the comma
        key, val = entry.lstrip().split('=')
        pairs.append((key, t(val)))
    return dict_improved(pairs)
def booltype(s):
    """Interpret a numeric string as a boolean (nonzero -> True)."""
    return int(s) != 0
@register_instrument('BlueFors', 'BF-LD400', '3.5')
class bf_valves(BaseInstrument):
"""
This instruments communicates with the BlueFors ValveControl program.
That program needs to be running and to have the remote control server running.
Useful devices:
gage
all_gages
all_status
Useful query methods:
status
flow
gage_val
gage_status
Controlling methods:
turn_on
turn_off
switch
Note that the control methods will only work if remote_en is True
and this connection is in control. So use the methods
control
remote_en
in that order, and before using any of the controlling methods.
When the connection is lost (by del or disconnect)
This instrument does not have separate read and write methods. Only use ask.
"""
    def __init__(self, addr=('localhost', 1234), timeout=1., keep_interval=20):
        """
        Open the TCP connection to the ValveControl remote server and start
        the keep-alive thread.

        addr is a tuple (ip name, port number)
        timeout is the time in s to wait for the completion of network connect/send/recv
         to prevent lockups
        keep_interval is the time in s between pings to the server to keep the connection open
         it should be smaller than the server timeout
        """
        # Set to None first so cleanup code can tell whether the connection
        # attempt below succeeded.
        self._socket = None
        # timeout in s. Can be None which means blocking. None is the default timeout after importing
        #s = socket.socket()
        #s.connect(addr)
        #s.settimeout(timeout)
        s = socket.create_connection(addr, timeout=timeout)
        # The server is expected to greet a new connection with a single
        # form-feed byte ('\x0c'); anything else aborts the setup.
        foo = s.recv(1024)
        if foo != '\x0c':
            raise RuntimeError, 'Did not receive expected signal'
        self._socket = s
        super(bf_valves, self).__init__()
        # Ping the server every keep_interval seconds so it does not close
        # the connection; shares the instrument lock with normal traffic.
        self._keep_alive = keep_alive(keep_interval, s, self._lock_instrument)
        self._keep_alive.start()
def _current_config(self, dev_obj=None, options={}):
return self._conf_helper('idn', 'all_gages', 'all_status', options)
@locked_calling
def ask(self, command, expect=None):
"""
expect is to strip some known string at the start of the answer.
It can be a string, or a list of possible strings
"""
command += '\n'
n = self._socket.send(command)
self._keep_alive.update_time()
# check length or use sendall
if n != len(command):
raise RuntimeError, 'Data was not completely sent: %i out of %i bytes'%(n, len(command))
answer = ''
while len(answer) == 0 or answer[-1] != '\n':
answer += self._socket.recv(1024)
answer = answer[:-2] # remove trailing CR LF
if answer[0] == 'E':
raise RuntimeError, 'Error: %s'%answer
if expect:
if isinstance(expect, basestring):
expect = [expect]
for e in expect:
e += ': '
if answer.startswith(e):
answer = answer[len(e):]
break
else: # not found
raise RuntimeError, 'Unexpected reply: %s'%answer
return answer
def avail_names(self):
"""
returns a list of available names for status, turn_on, turn_off and switch
"""
return self.ask('names', 'S04').split(',')
def _names_helper(self, val):
if isinstance(val, basestring) or val == None:
return val
return ','.join(val)
def status(self, valves=None):
"""
valves can be a string of comma separated name
or a list of strings of name of object to receive the status.
If not given, they are all returned in the order of avail_names.
The return is a dictionnary.
"""
# the answer is name1=value1,name2=value2 ...
# when valves=None the answer is in the order of avail_names
# otherwise it is the order given
cmd = 'status'
valves = self._names_helper(valves)
if valves:
return makedict(self.ask(cmd+' '+valves, 'S02'), booltype)
else:
return makedict(self.ask(cmd, 'S03'), booltype)
def remote_en(self, val=None):
"""
val is True or False to change, or None to read
Remote can only be set if user is in control.
Remote needs to be enable to be able to change settings.
It locks out the Hardware interface.
The remote enabled is deactivated if the connection is lost.
"""
if val == None:
return bool(int(self.ask('remote', 'S06')))
else:
self.ask('remote %s'%int(val), 'S06')
def control(self, val=None):
"""
val is True or False to change, or None to read
When a connection is in control, another one cannot become in control
until the first one releases it, Otherwise you get E10: permission denied.
"""
if val == None:
return self.ask('control', ['S07', 'S08'])
else:
self.ask('control %s'%int(val), ['S07', 'S08'])
def turn_on(self, valves):
"""
valves is either a string of comma separated names or a list of names
of objects to turn on.
No valves is changed if there is an error in the list
"""
valves = self._names_helper(valves)
self.ask('on '+valves, 'S00') # S00: Ok
def turn_off(self, valves):
"""
valves is either a string of comma separated names or a list of names
of objects to turn off.
No valves is changed if there is an error in the list
"""
valves = self._names_helper(valves)
self.ask('off '+valves, 'S00') # S00: Ok
def switch(self, valves):
"""
valves is either a string of comma separated names or a list of names
of objects to toggle.
No valves is changed if there is an error in the list
"""
valves = self._names_helper(valves)
self.ask('switch '+valves, 'S00') # S00: Ok
def flow(self):
"""
returns the flow in mmol/s
"""
return float(self.ask('fmstatus', 'S09'))
def gage_val(self, gage_num=None):
"""
gage_num can be None or 1-6 for P1 to P6
Multiple values are return in a dictionnary
"""
if gage_num == None:
return makedict(self.ask('mgstatus', 'S11')) # a list of p1=val, p2=val, ...
else:
return float(self.ask('mgstatus %s'%int(gage_num), 'S05'))
_gage_status_d = {0:'Measurement data okay', 1:'Underrange', 2:'Overrange', 3:'Sensor error',
4:'Sensor off', 5:'No sensor', 6:'Identification error'}
def _gage_status_helper(self, in_str):
v = int(in_str)
return v, self._gage_status_d[v]
def gage_status(self, gage_num=None):
"""
gage_num can be None or 1-6 for P1 to P6
returns the pressures in mBar.
Multiple values are return in a dictionnary
"""
if gage_num == None:
return makedict(self.ask('mgstatuscode', 'S12'), self._gage_status_helper) # a list of p1=val, p2=val, ...
else:
s = self.ask('mgstatuscode %s'%int(gage_num), 'S10')
return self._gage_status_helper(s)
def __del__(self):
self._keep_alive.cancel()
if self._socket:
self.disconnect()
#print 'bf_valves deleted!'
super(bf_valves, self).__del__()
def disconnect(self):
self._keep_alive.cancel()
self.ask('exit', 'S01') # S01: bye
self._socket.shutdown(socket.SHUT_RDWR)
self._socket.close()
self._socket = None
def idn(self):
serialn = get_bluefors_sn() # see definition below
# 3.5 is the ValveControl server version number used when this code was first written.
return "BlueFors,BF-LD400,%s,3.5"%serialn
_gages_names = ['p%i'%i for i in range(1,7)]
def _all_gages_getdev(self):
"""
Returns the values of the flow meter followed by all 6 pressure gages.
"""
vals = self.gage_val()
if vals.keys() != self._gages_names:
raise RuntimeError('The keys is gages_vals are not in the expected format.')
flow = self.flow()
return [flow] + vals.values()
def _gage_getdev(self, p=None):
"""
when p=0 returns the flow meter
"""
if p is not None:
self.current_p.set(p)
p = self.current_p.getcache()
if p == 0:
return self.flow()
else:
return self.gage_val(p)
def _status_sort_key_func(self, x):
# from (k,v) where k can be 'valve1'
# return ('valve', 1)
# could also use regular expressions: ks, ke = re.match(r'(\D+)(\d*)', k).groups()
k = x[0]
ks = k.rstrip('0123456789')
ke = k[len(ks):]
if ke != '':
return (ks, int(ke))
else:
return (k, )
def _all_status_getdev(self):
st = self.status().items()
st = sorted(st, key=self._status_sort_key_func)
return dict_improved(st)
def _create_devs(self):
self.current_p = MemoryDevice(1, min=0, max=6)
self._devwrap('all_gages', multi=['flow']+self._gages_names)
self.alias = self.all_gages
self.all_gages
self._devwrap('gage')
self._devwrap('all_status')
# This needs to be last to complete creation
super(type(self),self)._create_devs()
###########################################################
# Code to find usb devices
###########################################################
# another option is to use wmi
# https://github.com/todbot/usbSearch/
# pyusb or pywinusb (uses SetupDi...)
# https://pypi.python.org/pypi/pywinusb/
# Here we use SetupDi...
# http://stackoverflow.com/questions/13927475/windows-how-to-enumerate-all-connected-usb-devices-device-path
# http://samscode.blogspot.ca/2009/08/setupdi-how-to-enumerate-devices-using.html
# http://samscode.blogspot.ca/2009/09/function-discovery-intro.html
import os
# The bluefors dilution fridges have a National Instrument DAQ card.
# We can use that to identify which one it is.
# Maps the DAQ card USB serial number to the fridge serial number.
bluefors_serial = {'0158748E':'BF0312-03',
                   '015873C4':'BF0312-02'}
def get_bluefors_sn():
    """Identify the attached fridge from its NI DAQ card USB serial number."""
    for vid, pid, serial in get_all_usb():  # defined below
        # National Instruments vendor id 0x3923, DAQ card product id 0x717a
        if (vid, pid) == (0x3923, 0x717a):
            return bluefors_serial.get(serial, 'Unknown serial #')
    return 'No fridge found'
if os.name == 'nt':
import ctypes
from ctypes import POINTER, Structure, byref, c_void_p, create_string_buffer, string_at,\
sizeof, get_last_error, c_char, GetLastError, WinError, FormatError,\
cast, pointer, resize
from ctypes.wintypes import HANDLE, LPCSTR, LPSTR, DWORD, WORD, BYTE, BOOL
# Load the SetupAPI
#setup_api = ctypes.windll.setupapi # can use ctypes.GetLastError, WinError, FormatError
setup_api = ctypes.WinDLL('setupapi', use_last_error=True) # use get_last_error
def format_err(err=None):
if err == None:
err = get_last_error()
#return str(WinError(err))
return "[Error %i] %s"%(err, FormatError(err))
    # For packing we need to find out if we are 32 or 64 bits:
    # (SetupAPI structures are packed to 1 byte on 32-bit, 8 bytes on 64-bit)
    if sizeof(c_void_p) == 4: # 32 bits
        setup_api_pack = 1
    else: # 64 bits
        setup_api_pack = 8
# define necessary structures
class GUID(Structure):
_fields_ = [("data1", DWORD), ("data2", WORD), ("data3", WORD), ("data4", BYTE*8)]
def __init__(self, *args, **kwarg):
if len(args) == 1 and isinstance(args[0], basestring):
s = args[0].lstrip('{').rstrip('}').replace('-','')
args = (int(s[:8],16), int(s[8:12],16), int(s[12:16],16), tuple([int(s[16+i*2:16+i*2+2], 16) for i in range(8)]))
super(GUID, self).__init__(*args, **kwarg)
# Some GUIDs
GUID_CLASS_DAQDevice = GUID("{7c797140-f6d8-11cf-9fd6-00a024178a17}")
GUID_INTERFACE_USB_DEVICE = GUID("{A5DCBF10-6530-11D2-901F-00C04FB951ED}")
GUID_INTERFACE_COMPORT = GUID('{86E0D1E0-8089-11D0-9CE4-08003E301F73}')
    class SP_DEVINFO_DATA(Structure):
        # ctypes mirror of the SetupAPI SP_DEVINFO_DATA structure.
        _pack_ = setup_api_pack
        _fields_ = [('cbSize', DWORD), ('ClassGuid', GUID), ('DevInst', DWORD), ('Reserved', c_void_p)]
    class SP_DEVICE_INTERFACE_DATA(Structure):
        # ctypes mirror of the SetupAPI SP_DEVICE_INTERFACE_DATA structure.
        _pack_ = setup_api_pack
        _fields_ = [('cbSize', DWORD), ('InterfaceClassGuid', GUID), ('Flags', DWORD), ('Reserved', c_void_p)]
    class SP_DEVICE_INTERFACE_DETAIL_DATA(Structure):
        # Variable-length Windows structure: DevicePath is declared with
        # length 1 here and the real buffer is obtained by resizing an
        # instance (see get_string) or by building a sized class (detail_gen).
        _pack_ = setup_api_pack
        _fields_ = [('cbSize', DWORD), ('DevicePath', c_char*1) ]
        def get_string(self):
            """
            instead of using detail_gen, to create a properly sized object,
            you can use ctypes.resize(obj, newsize)
            Returns the NUL-terminated DevicePath from the (resized) buffer.
            """
            return string_at(byref(self, self.__class__.DevicePath.offset))
def detail_gen(size):
#length = size - sizeof(DWORD)
length = size - SP_DEVICE_INTERFACE_DETAIL_DATA.DevicePath.offset
class foo(Structure):
_pack_ = setup_api_pack
_fields_ = [('cbSize', DWORD), ('DevicePath', c_char*length) ]
return foo()
def detail_gen_subclass(size):
# This adds fields to the base class
# You cannot overwrite one (giving the same name just removes access
# to the old one and adds a new field)
# Also the new fields are placed after the packing bytes possibly
# added for the base struct. So the result is not like a concatenation
# of the _fields_ entries (there can be extra space in between
# the groups of _fields_)
length = size - sizeof(SP_DEVICE_INTERFACE_DETAIL_DATA)
if length <= 0:
length =1
class foo(SP_DEVICE_INTERFACE_DETAIL_DATA):
#_pack_ = setup_api_pack
_fields_ = [('DevicePathM', c_char*length)]
return foo()
# declare setupAPI functions
# SetupDiGetClassDevsA
GetClassDevs = setup_api.SetupDiGetClassDevsA
GetClassDevs.restype = HANDLE
GetClassDevs.argtypes = [POINTER(GUID), LPCSTR, HANDLE, DWORD]
DIGCF_DEFAULT = 0x01
DIGCF_PRESENT = 0x02
DIGCF_ALLCLASSES = 0x04
DIGCF_PROFILE = 0x08
DIGCF_DEVICEINTERFACE = 0x10
# SetupDiDestroyDeviceInfoList
DestroyDeviceInfoList = setup_api.SetupDiDestroyDeviceInfoList
DestroyDeviceInfoList.restype = BOOL
DestroyDeviceInfoList.argtypes = [HANDLE]
# SetupDiEnumDeviceInterfaces
EnumDeviceInterfaces = setup_api.SetupDiEnumDeviceInterfaces
EnumDeviceInterfaces.restype = BOOL
EnumDeviceInterfaces.argtypes = [HANDLE, c_void_p, POINTER(GUID), DWORD, POINTER(SP_DEVICE_INTERFACE_DATA)]
# SetupDiEnumDeviceInfo
EnumDeviceInfo = setup_api.SetupDiEnumDeviceInfo
EnumDeviceInfo.restype = BOOL
EnumDeviceInfo.argtypes = [HANDLE, DWORD, POINTER(SP_DEVINFO_DATA)]
# SetupDiGetDeviceInstanceIdA
GetDeviceInstanceId = setup_api.SetupDiGetDeviceInstanceIdA
GetDeviceInstanceId.restype = BOOL
GetDeviceInstanceId.argtypes = [HANDLE, POINTER(SP_DEVINFO_DATA), LPSTR, DWORD, POINTER(DWORD)]
# SetupDiGetDeviceInterfaceDetailA
GetDeviceInterfaceDetail = setup_api.SetupDiGetDeviceInterfaceDetailA
GetDeviceInterfaceDetail.restype = BOOL
#GetDeviceInterfaceDetail.argtypes = [HANDLE, POINTER(SP_DEVICE_INTERFACE_DATA), c_void_p, DWORD, POINTER(DWORD), POINTER(SP_DEVINFO_DATA)]
GetDeviceInterfaceDetail.argtypes = [HANDLE, POINTER(SP_DEVICE_INTERFACE_DATA), POINTER(SP_DEVICE_INTERFACE_DETAIL_DATA), DWORD, POINTER(DWORD), POINTER(SP_DEVINFO_DATA)]
# error when Enum are finished
ERROR_NO_MORE_ITEMS = 259
# Error return value for GetClassDevs
INVALID_HANDLE_VALUE = HANDLE(-1).value
# The returned values look like:
# 'USB\\VID_3923&PID_717A\\0158748E'
# which is the same as seen in the Gestionnaire de peripherique entry:
# Chemin d'access a l'instance du peripherique
# suggested calls
# get_all_dev_instanceID(None, None, DIGCF_PRESENT | DIGCF_ALLCLASSES)
# get_all_dev_instanceID(None, 'PCI', DIGCF_PRESENT | DIGCF_ALLCLASSES)
# get_all_dev_instanceID(None, 'USB', DIGCF_PRESENT | DIGCF_ALLCLASSES)
# get_all_dev_instanceID(GUID_CLASS_DAQDevice, 'USB', DIGCF_PRESENT)
# get_all_dev_instanceID(GUID_INTERFACE_USB_DEVICE, 'USB', DIGCF_PRESENT | DIGCF_DEVICEINTERFACE)
    def get_all_dev_instanceID(ClassGuid, Enumerator, Flags):
        """
        Enumerate devices matching ClassGuid/Enumerator/Flags and return the
        list of their device instance ID strings
        (e.g. 'USB\\VID_3923&PID_717A\\0158748E').
        Raises RuntimeError on SetupAPI failures.
        """
        devinfo = GetClassDevs(ClassGuid, Enumerator, 0, Flags)
        if devinfo == INVALID_HANDLE_VALUE:
            raise RuntimeError, format_err()
        m=0
        dinfo = SP_DEVINFO_DATA()
        dinfo.cbSize = sizeof(SP_DEVINFO_DATA)
        bufsize = DWORD()
        res = []
        while True:
            if not EnumDeviceInfo(devinfo, m, dinfo):
                err = get_last_error()
                # ERROR_NO_MORE_ITEMS is the normal end-of-enumeration signal.
                if err != ERROR_NO_MORE_ITEMS:
                    DestroyDeviceInfoList(devinfo)
                    raise RuntimeError, 'EnumDeviceInfo '+format_err(err)
                break
            # Find required bufsize
            GetDeviceInstanceId(devinfo, dinfo, None, 0, bufsize)
            buf = create_string_buffer(bufsize.value)
            if not GetDeviceInstanceId(devinfo, dinfo, buf, bufsize, None):
                DestroyDeviceInfoList(devinfo)
                raise RuntimeError, 'GetDeviceInstanceId '+format_err()
            res.append(buf.value)
            #print "m:%i instanceID:%r"%(m, buf.value)
            m += 1
        DestroyDeviceInfoList(devinfo)
        return res
# suggested calls:
# get_all_dev_interface(None, None, DIGCF_PRESENT | DIGCF_ALLCLASSES | DIGCF_DEVICEINTERFACE)
# get_all_dev_interface(GUID_INTERFACE_USB_DEVICE, None, DIGCF_PRESENT|DIGCF_DEVICEINTERFACE)
# get_all_dev_interface(None, None, DIGCF_PRESENT | DIGCF_ALLCLASSES | DIGCF_DEVICEINTERFACE, search_interface=GUID_INTERFACE_COMPORT)
# The returned values look like:
# '\\\\?\\usb#vid_3923&pid_717a#0158748e#{a5dcbf10-6530-11d2-901f-00c04fb951ed}'
    USE_RESIZE = True  # True: size the detail struct with ctypes.resize; False: use detail_gen
    def get_all_dev_interface(ClassGuid, Enumerator, Flags, search_interface=GUID_INTERFACE_USB_DEVICE):
        """
        Enumerate device interfaces of class search_interface and return the
        list of their device path strings
        (e.g. '\\\\?\\usb#vid_3923&pid_717a#0158748e#{a5dcbf10-...}').
        Flags must include DIGCF_DEVICEINTERFACE.
        """
        if not Flags & DIGCF_DEVICEINTERFACE:
            raise ValueError, "The DIGCF_DEVICEINTERFACE flag is required here."
        devinfo = GetClassDevs(ClassGuid, Enumerator, 0, Flags)
        if devinfo == INVALID_HANDLE_VALUE:
            raise RuntimeError, format_err()
        m=0
        dinter = SP_DEVICE_INTERFACE_DATA()
        dinter.cbSize = sizeof(SP_DEVICE_INTERFACE_DATA)
        bufsize = DWORD()
        res = []
        while True:
            if not EnumDeviceInterfaces(devinfo, None, search_interface, m, dinter):
                err = get_last_error()
                # ERROR_NO_MORE_ITEMS is the normal end-of-enumeration signal.
                if err != ERROR_NO_MORE_ITEMS:
                    DestroyDeviceInfoList(devinfo)
                    raise RuntimeError, 'EnumDeviceInterface '+format_err(err)
                break
            # Find required bufsize
            GetDeviceInterfaceDetail(devinfo, dinter, None, 0, bufsize, None)
            if USE_RESIZE:
                detail = SP_DEVICE_INTERFACE_DETAIL_DATA()
                resize(detail, bufsize.value)
                detailp = byref(detail)
            else:
                detail = detail_gen(bufsize.value)
                # cast is needed because GetDeviceInterfaceDetail is defined to require
                # POINTER(SP_DEVICE_INTERFACE_DETAIL_DATA)
                # Instead of a cast the object could also be a subclass
                detailp = cast(pointer(detail), POINTER(SP_DEVICE_INTERFACE_DETAIL_DATA))
            detail.cbSize = sizeof(SP_DEVICE_INTERFACE_DETAIL_DATA)
            # Note that the last argument could be used to have the SP_DEVINFO_DATA
            # reference of this entry that can be used with GetDeviceInstanceId
            if not GetDeviceInterfaceDetail(devinfo, dinter, detailp, sizeof(detail), None, None):
                DestroyDeviceInfoList(devinfo)
                raise RuntimeError, 'GetDeviceInterfaceDetail '+format_err()
            if USE_RESIZE:
                res.append(detail.get_string())
            else:
                res.append(detail.DevicePath)
            m += 1
        DestroyDeviceInfoList(devinfo)
        return res
def get_all_usb():
all_usb = get_all_dev_instanceID(GUID_INTERFACE_USB_DEVICE, None, DIGCF_PRESENT|DIGCF_DEVICEINTERFACE)
res = []
for one_usb in all_usb:
lst = one_usb.split('\\')
# When serialn contains &, it is a serial number invented by windows.
# http://rtshiva.com/2009/05/19/usb-specification-and-windows-limitation-on-serial-numbers/
serialn = lst[2]
ids = lst[1].split('&')
vid = ids[0][4:]
pid = ids[1][4:]
res.append((int(vid, 16), int(pid, 16), serialn))
return res
else: # Not windows
#pyusb requires special permissions to read the serial number
def get_all_usb_pyusb():
import usb
def getstr(dev, index):
try:
return usb.util.get_string(dev, 1024, index)
except usb.USBError:
return "Not available: Wrong Permissions"
lst = usb.core.find(find_all=True)
return [(i.idVendor, i.idProduct, getstr(i, i.iSerialNumber)) for i in lst]
# This is linux only
def _read_file(filename):
with open(filename, 'r') as f:
line = f.read()
return line
def get_all_usb_sysfs():
import glob
# the linux usb sysfs path looks like
# bus-port.port.port ...
# and :config.interface
dl=glob.glob('/sys/bus/usb/devices/[0-9]*-[0-9.]*')
res = []
for d in dl:
if ':' in d:
break
try:
vid = _read_file(d+'/idVendor')
except IOError:
break
try:
pid = _read_file(d+'/idProduct')
except IOError:
break
try:
serialn = _read_file(d+'/serial')
except IOError:
serialn = 'Unknown'
res.append((int(vid, 16), int(pid, 16), serialn))
return res
get_all_usb = get_all_usb_sysfs
| JeanOlivier/pyHegel | pyHegel/instruments/blueforsValves.py | Python | gpl-3.0 | 26,380 |
import numpy as np
import math
import scipy.integrate as integrate
from matplotlib import pyplot as plt
from matplotlib import mlab
from matplotlib import colors
from matplotlib import animation
from matplotlib import rc
X_MIN = -39.9
X_MAX = 39.9
X_POINTS = 500
Y_MIN = -19.9
Y_MAX = 24.5
Y_POINTS = 200
WELL_DEPTH = 42.8 # in MeV
NEUTRON_MASS = 938.5 # in MeV/c^2
NEUTRON_ENERGY = 14 # in MeV
PLANCK_CONSTANT = 1239.842 # in MeV*(fm/c)
NUMBER_OF_WAVEFRONTS = 10
INITIAL_DISPLACEMENT = -15
NUCLEUS_MASS = 100
R_0 = 1.4 # nuclear radius constant, in fm
def wavelength(energy):
return(PLANCK_CONSTANT*(2*NEUTRON_MASS*energy)**(-0.5)*((NUCLEUS_MASS+1)/NUCLEUS_MASS)) # in fm
def speed(energy):
return((2*energy/NEUTRON_MASS)**0.5) # in units of c
def WoodsSaxon(x,y):
U_0 = WELL_DEPTH
R = R_0*NUCLEUS_MASS**(1/3.0)
a = 0.5
distance = math.sqrt(x**2 + y**2)
U = U_0/(1+math.exp((distance-R)/a))
return U
def indexOfRefraction(x, y, energy):
return ((energy+WoodsSaxon(x,y))/energy)**(0.5) # (unitless)
class Wavefront:
def __init__(self,
init_state = -3):
self.init_state = init_state # in fm
self.xValues = np.linspace(init_state, init_state, Y_POINTS)
self.yValues = np.linspace(Y_MIN+2.5,Y_MAX-7.5,Y_POINTS)
#self.intensity = 1
def position(self):
return(self.xValues, self.yValues) # in fm
def phaseShift_dt(self, x, y):
return (speed(NEUTRON_ENERGY)/indexOfRefraction(x,y,
NEUTRON_ENERGY))*(indexOfRefraction(x,y, NEUTRON_ENERGY)-1)\
/(wavelength(NEUTRON_ENERGY)/(2*math.pi)) # in units of C
def dstate_dt(self, xValues, t):
dx = np.zeros_like(xValues);
#di = np.zeros_like(xValues);
for j, k in enumerate(self.yValues):
dx[j] = speed(NEUTRON_ENERGY)-self.phaseShift_dt(xValues[j], self.yValues[j])
#di[j] = self.imaginaryPhaseShift_dt(xValues[j], self.yValues[j])
return dx
time_elapsed = 0
phase_difference = 0
def step(waves, dt):
global time_elapsed, phase_difference
for wave in waves:
wave.xValues = integrate.odeint(wave.dstate_dt, wave.xValues, [0,dt])[1]
time_elapsed += dt
phase_difference = (waves[0].xValues[len(waves[0].xValues)/2]\
- waves[0].xValues[0])/(wavelength(NEUTRON_ENERGY)/(2*math.pi))
rc('text', usetex=True)
rc('font', family='serif')
fig = plt.figure(figsize=(8*(X_MAX-X_MIN)/(Y_MAX-Y_MIN),8))
axes = plt.axes(xlim=(X_MIN,X_MAX), ylim=(Y_MIN,Y_MAX))
axes.tick_params(width=4, length=10)
for axis in ['top', 'bottom', 'left', 'right']:
axes.spines[axis].set_linewidth(5)
xRange = np.linspace(X_MIN, X_MAX, X_POINTS)
yRange = np.linspace(Y_MIN, Y_MAX, Y_POINTS)
plt.xticks(fontsize=48, weight='bold')
plt.yticks(fontsize=48, weight='bold')
plt.xlabel('Distance [fm]', fontsize=48)
plt.ylabel('Distance [fm]', fontsize=48)
potentialGrid = [[WoodsSaxon(x,y) for x in xRange] for y in
yRange]
cmap = colors.LinearSegmentedColormap.from_list('custom red', ['#FFFFFF', '#FF0000'], N=256)
norm = colors.Normalize(vmax=WELL_DEPTH, vmin=0)
potential = plt.contourf(xRange, yRange, potentialGrid, 30,
norm=norm, cmap=cmap)
#plt.colorbar(potential)
waves = []
lines = []
for i in range(NUMBER_OF_WAVEFRONTS):
waves.append(Wavefront(INITIAL_DISPLACEMENT-i*wavelength(NEUTRON_ENERGY)))
lines.append(axes.plot([], [], "b-", lw=4)[0])
time_text = axes.text(0.05, 0.9, '', transform=axes.transAxes)
phase_text = axes.text(0.72, 0.9, '', transform=axes.transAxes)
index_text = axes.text(0.25, 0.76, '', transform=axes.transAxes)
energy_text = axes.text(0.02, 0.88, '', transform=axes.transAxes)
nucleus_text = axes.text(0.02, 0.76, '', transform=axes.transAxes)
dt = 0.1/speed(NEUTRON_ENERGY)
def init():
for line in lines:
line.set_data([], [])
time_text.set_text('')
time_text.set_fontsize(36)
phase_text.set_text('')
phase_text.set_fontsize(36)
#index_text.set_text('$\\textit{n}_{core}$ = %.2f' % (indexOfRefraction(0,0,NEUTRON_ENERGY)))
#index_text.set_fontsize(36)
#nucleus_text.set_text('A = %.0f' % NUCLEUS_MASS)
#nucleus_text.set_fontsize(36)
#energy_text.set_text('$\\textrm{E_{n}}$ = %.1f MeV' % NEUTRON_ENERGY)
#energy_text.set_fontsize(36)
initObjects = tuple(lines) + (time_text, phase_text,)\
#index_text, nucleus_text, energy_text)
return initObjects
frameNumber = 0
def animate(i):
global waves, dt, frameNumber
step(waves, dt)
for index, wave in enumerate(waves):
lines[index].set_data(*wave.position())
#lines[index].set_alpha(wave.intensity)
time_text.set_text('t = %.1f fm/C' % time_elapsed)
time_text.set_fontsize(36)
phase_text.set_text('$\Delta$ = %.2f rad' % phase_difference)
phase_text.set_fontsize(36)
wave = waves[0]
animateObjects = tuple(lines) + (time_text, phase_text)
if(frameNumber%50==0):
plt.savefig('Frame%d.png' % (frameNumber))#, bbox_inches='tight')
frameNumber += 1
return animateObjects
from time import time
t0 = time()
animate(0)
t1 = time()
interval = 1 * dt - (t1-t0)
fig.subplots_adjust(left=0.15, bottom=0., right=0.998, top=0.85)
anim = animation.FuncAnimation(fig, animate, init_func=init,
frames=500, interval=interval, repeat=False, blit=True)
plt.show()
#anim.save('WoodsSaxon.mp4', fps=30, extra_args=['-vcodec', 'libx264'])
| cdpruitt/total-neutron-cross-sections | theory/OMAnimation/WoodsSaxon.py | Python | gpl-3.0 | 5,498 |
#!/usr/bin/env python
'Set Glyph names to standard PostScript names based on values in the gsi.xml file.'
__url__ = 'http://github.com/silnrsi/pysilfont'
__copyright__ = 'Copyright (c) 2015, SIL International (http://www.sil.org)'
__license__ = 'Released under the MIT License (http://opensource.org/licenses/MIT)'
__author__ = 'David Raymond'
__version__ = '0.0.1'
import xml.sax
#from silfont.fontforge import XmlFF # Needs updating - this was based on old XmlFL
from silfont.genlib import execute
argspec = [
('ifont',{'help': 'Input font file'}, {'type': 'infont'}),
('ofont',{'help': 'Output font file','nargs': '?' }, {'type': 'outfont', 'def': 'new'}),
('-i','--input',{'help': 'Input gsi.xml file'}, {'type': 'filen', 'def': 'gsi.xml'}),
('-l','--log',{'help': 'Log file'}, {'type': 'outfile', 'def': 'setPSnames.log'})]
def doit(args) :
font=args.ifont
logf = args.log
# Parse the glyph supplemental info file
parser = xml.sax.make_parser()
handler = XmlFF.CollectXmlInfo()
parser.setContentHandler(handler)
print 'Parsing XML file: ',args.input
try :
parser.parse(args.input)
except Exception as e :
print e
sys.exit()
parser.parse(args.input)
gsi_dict = handler.get_data_dict()
# Rename the glyphs
for glyph in font:
g = font[glyph]
sil_name = g.glyphname
ps_nm = None
try:
if gsi_dict[sil_name].glyph_active == u"0": #skip inactive glyphs
continue
ps_nm = gsi_dict[sil_name].ps_name_value.encode() #encode() converts from Unicode string to std string
g.glyphname = ps_nm
logf.write("Glyph renamed - SIL Name: %s PS Name: %s\n" % (sil_name, ps_nm))
except:
print "Glyph not renamed - SIL Name: %s" % sil_name
logf.write("** Glyph not renamed - SIL Name: %s PS Name: %s\n" % (sil_name, ps_nm))
logf.close()
return font
execute("FF",doit, argspec)
| bitforks/pysilfont | scripts/FFsetPSNames.py | Python | mit | 1,999 |
from __future__ import print_function
import warnings
import os.path as op
import copy as cp
from nose.tools import assert_true, assert_raises, assert_equal
import numpy as np
from numpy.testing import assert_array_equal, assert_array_almost_equal
import mne
from mne.datasets import testing
from mne.beamformer import dics, dics_epochs, dics_source_power, tf_dics
from mne.time_frequency import csd_epochs
from mne.externals.six import advance_iterator
from mne.utils import run_tests_if_main
# Note that this is the first test file, this will apply to all subsequent
# tests in a full nosetest:
warnings.simplefilter("always") # ensure we can verify expected warnings
data_path = testing.data_path(download=False)
fname_raw = op.join(data_path, 'MEG', 'sample', 'sample_audvis_trunc_raw.fif')
fname_fwd = op.join(data_path, 'MEG', 'sample',
'sample_audvis_trunc-meg-eeg-oct-4-fwd.fif')
fname_fwd_vol = op.join(data_path, 'MEG', 'sample',
'sample_audvis_trunc-meg-vol-7-fwd.fif')
fname_event = op.join(data_path, 'MEG', 'sample',
'sample_audvis_trunc_raw-eve.fif')
fname_label = op.join(data_path, 'MEG', 'sample', 'labels', 'Aud-lh.label')
def _read_forward_solution_meg(*args, **kwargs):
fwd = mne.read_forward_solution(*args, **kwargs)
return mne.pick_types_forward(fwd, meg=True, eeg=False)
def _get_data(tmin=-0.11, tmax=0.15, read_all_forward=True, compute_csds=True):
"""Read in data used in tests."""
label = mne.read_label(fname_label)
events = mne.read_events(fname_event)[:10]
raw = mne.io.read_raw_fif(fname_raw, preload=False)
raw.add_proj([], remove_existing=True) # we'll subselect so remove proj
forward = mne.read_forward_solution(fname_fwd)
if read_all_forward:
forward_surf_ori = _read_forward_solution_meg(fname_fwd, surf_ori=True)
forward_fixed = _read_forward_solution_meg(fname_fwd, force_fixed=True,
surf_ori=True)
forward_vol = mne.read_forward_solution(fname_fwd_vol, surf_ori=True)
else:
forward_surf_ori = None
forward_fixed = None
forward_vol = None
event_id, tmin, tmax = 1, tmin, tmax
# Setup for reading the raw data
raw.info['bads'] = ['MEG 2443', 'EEG 053'] # 2 bads channels
# Set up pick list: MEG - bad channels
left_temporal_channels = mne.read_selection('Left-temporal')
picks = mne.pick_types(raw.info, meg=True, eeg=False,
stim=True, eog=True, exclude='bads',
selection=left_temporal_channels)
# Read epochs
epochs = mne.Epochs(raw, events, event_id, tmin, tmax, proj=True,
picks=picks, baseline=(None, 0), preload=True,
reject=dict(grad=4000e-13, mag=4e-12, eog=150e-6))
epochs.resample(200, npad=0, n_jobs=2)
evoked = epochs.average().crop(0, None)
# Computing the data and noise cross-spectral density matrices
if compute_csds:
data_csd = csd_epochs(epochs, mode='multitaper', tmin=0.045,
tmax=None, fmin=8, fmax=12,
mt_bandwidth=72.72)
noise_csd = csd_epochs(epochs, mode='multitaper', tmin=None,
tmax=0.0, fmin=8, fmax=12,
mt_bandwidth=72.72)
else:
data_csd, noise_csd = None, None
return raw, epochs, evoked, data_csd, noise_csd, label, forward,\
forward_surf_ori, forward_fixed, forward_vol
@testing.requires_testing_data
def test_dics():
"""Test DICS with evoked data and single trials."""
raw, epochs, evoked, data_csd, noise_csd, label, forward,\
forward_surf_ori, forward_fixed, forward_vol = _get_data()
epochs.crop(0, None)
reg = 0.5 # Heavily regularize due to low SNR
for real_filter in (True, False):
stc = dics(evoked, forward, noise_csd=noise_csd, data_csd=data_csd,
label=label, real_filter=real_filter, reg=reg)
stc_pow = np.sum(stc.data, axis=1)
idx = np.argmax(stc_pow)
max_stc = stc.data[idx]
tmax = stc.times[np.argmax(max_stc)]
# Incorrect due to limited number of epochs
assert_true(0.04 < tmax < 0.06, msg=tmax)
assert_true(3. < np.max(max_stc) < 6., msg=np.max(max_stc))
# Test picking normal orientation
stc_normal = dics(evoked, forward_surf_ori, noise_csd, data_csd,
pick_ori="normal", label=label, real_filter=True,
reg=reg)
assert_true(stc_normal.data.min() < 0) # this doesn't take abs
stc_normal = dics(evoked, forward_surf_ori, noise_csd, data_csd,
pick_ori="normal", label=label, reg=reg)
assert_true(stc_normal.data.min() >= 0) # this does take abs
# The amplitude of normal orientation results should always be smaller than
# free orientation results
assert_true((np.abs(stc_normal.data) <= stc.data).all())
# Test if fixed forward operator is detected when picking normal
# orientation
assert_raises(ValueError, dics_epochs, epochs, forward_fixed, noise_csd,
data_csd, pick_ori="normal")
# Test if non-surface oriented forward operator is detected when picking
# normal orientation
assert_raises(ValueError, dics_epochs, epochs, forward, noise_csd,
data_csd, pick_ori="normal")
# Test if volume forward operator is detected when picking normal
# orientation
assert_raises(ValueError, dics_epochs, epochs, forward_vol, noise_csd,
data_csd, pick_ori="normal")
# Now test single trial using fixed orientation forward solution
# so we can compare it to the evoked solution
stcs = dics_epochs(epochs, forward_fixed, noise_csd, data_csd, label=label)
# Testing returning of generator
stcs_ = dics_epochs(epochs, forward_fixed, noise_csd, data_csd,
return_generator=True, label=label)
assert_array_equal(stcs[0].data, advance_iterator(stcs_).data)
# Test whether correct number of trials was returned
epochs.drop_bad()
assert_true(len(epochs.events) == len(stcs))
# Average the single trial estimates
stc_avg = np.zeros_like(stc.data)
for this_stc in stcs:
stc_avg += this_stc.data
stc_avg /= len(stcs)
idx = np.argmax(np.max(stc_avg, axis=1))
max_stc = stc_avg[idx]
tmax = stc.times[np.argmax(max_stc)]
assert_true(0.120 < tmax < 0.150, msg=tmax) # incorrect due to limited #
assert_true(12 < np.max(max_stc) < 18.5)
@testing.requires_testing_data
def test_dics_source_power():
"""Test DICS source power computation."""
raw, epochs, evoked, data_csd, noise_csd, label, forward,\
forward_surf_ori, forward_fixed, forward_vol = _get_data()
epochs.crop(0, None)
reg = 0.05
stc_source_power = dics_source_power(epochs.info, forward, noise_csd,
data_csd, label=label, reg=reg)
max_source_idx = np.argmax(stc_source_power.data)
max_source_power = np.max(stc_source_power.data)
# TODO: Maybe these could be more directly compared to dics() results?
assert_true(max_source_idx == 1)
assert_true(0.004 < max_source_power < 0.005, msg=max_source_power)
# Test picking normal orientation and using a list of CSD matrices
stc_normal = dics_source_power(epochs.info, forward_surf_ori,
[noise_csd] * 2, [data_csd] * 2,
pick_ori="normal", label=label, reg=reg)
assert_true(stc_normal.data.shape == (stc_source_power.data.shape[0], 2))
# The normal orientation results should always be smaller than free
# orientation results
assert_true((np.abs(stc_normal.data[:, 0]) <=
stc_source_power.data[:, 0]).all())
# Test if fixed forward operator is detected when picking normal
# orientation
assert_raises(ValueError, dics_source_power, raw.info, forward_fixed,
noise_csd, data_csd, pick_ori="normal")
# Test if non-surface oriented forward operator is detected when picking
# normal orientation
assert_raises(ValueError, dics_source_power, raw.info, forward, noise_csd,
data_csd, pick_ori="normal")
# Test if volume forward operator is detected when picking normal
# orientation
assert_raises(ValueError, dics_source_power, epochs.info, forward_vol,
noise_csd, data_csd, pick_ori="normal")
# Test detection of different number of CSD matrices provided
assert_raises(ValueError, dics_source_power, epochs.info, forward,
[noise_csd] * 2, [data_csd] * 3)
# Test detection of different frequencies in noise and data CSD objects
noise_csd.frequencies = [1, 2]
data_csd.frequencies = [1, 2, 3]
assert_raises(ValueError, dics_source_power, epochs.info, forward,
noise_csd, data_csd)
# Test detection of uneven frequency spacing
data_csds = [cp.deepcopy(data_csd) for i in range(3)]
frequencies = [1, 3, 4]
for freq, data_csd in zip(frequencies, data_csds):
data_csd.frequencies = [freq]
noise_csds = data_csds
with warnings.catch_warnings(record=True) as w:
dics_source_power(epochs.info, forward, noise_csds, data_csds)
assert_equal(len(w), 1)
@testing.requires_testing_data
def test_tf_dics():
    """Test TF beamforming based on DICS.

    Runs tf_dics over two frequency bins, cross-checks the first bin
    against manually computed dics_source_power results in overlapping
    time windows, then exercises the input-validation error paths.
    """
    tmin, tmax, tstep = -0.2, 0.2, 0.1
    raw, epochs, _, _, _, label, forward, _, _, _ =\
        _get_data(tmin, tmax, read_all_forward=False, compute_csds=False)
    freq_bins = [(4, 20), (30, 55)]
    win_lengths = [0.2, 0.2]
    reg = 0.05
    # One noise CSD per frequency bin, estimated over a window of matching
    # length starting at tmin.
    noise_csds = []
    for freq_bin, win_length in zip(freq_bins, win_lengths):
        noise_csd = csd_epochs(epochs, mode='fourier',
                               fmin=freq_bin[0], fmax=freq_bin[1],
                               fsum=True, tmin=tmin,
                               tmax=tmin + win_length)
        noise_csds.append(noise_csd)
    stcs = tf_dics(epochs, forward, noise_csds, tmin, tmax, tstep, win_lengths,
                   freq_bins, reg=reg, label=label)
    # One source estimate per frequency bin, with one column per time step.
    assert_true(len(stcs) == len(freq_bins))
    assert_true(stcs[0].shape[1] == 4)
    # Regression bounds specific to the testing dataset.
    assert_true(2.2 < stcs[0].data.max() < 2.3)
    assert_true(0.94 < stcs[0].data.min() < 0.95)
    # Manually calculating source power in several time windows to compare
    # results and test overlapping
    source_power = []
    time_windows = [(-0.1, 0.1), (0.0, 0.2)]
    for time_window in time_windows:
        data_csd = csd_epochs(epochs, mode='fourier',
                              fmin=freq_bins[0][0],
                              fmax=freq_bins[0][1], fsum=True,
                              tmin=time_window[0], tmax=time_window[1])
        noise_csd = csd_epochs(epochs, mode='fourier',
                               fmin=freq_bins[0][0],
                               fmax=freq_bins[0][1], fsum=True,
                               tmin=-0.2, tmax=0.0)
        # Scale by n_fft — presumably to match tf_dics' internal
        # normalization; TODO confirm against tf_dics implementation.
        data_csd.data /= data_csd.n_fft
        noise_csd.data /= noise_csd.n_fft
        stc_source_power = dics_source_power(epochs.info, forward, noise_csd,
                                             data_csd, reg=reg, label=label)
        source_power.append(stc_source_power.data)
    # Averaging all time windows that overlap the time period 0 to 100 ms
    source_power = np.mean(source_power, axis=0)
    # Selecting the first frequency bin in tf_dics results
    stc = stcs[0]
    # Comparing tf_dics results with dics_source_power results
    assert_array_almost_equal(stc.data[:, 2], source_power[:, 0])
    # Test if using unsupported max-power orientation is detected
    assert_raises(ValueError, tf_dics, epochs, forward, noise_csds, tmin, tmax,
                  tstep, win_lengths, freq_bins=freq_bins,
                  pick_ori='max-power')
    # Test if incorrect number of noise CSDs is detected
    assert_raises(ValueError, tf_dics, epochs, forward, [noise_csds[0]], tmin,
                  tmax, tstep, win_lengths, freq_bins=freq_bins)
    # Test if freq_bins and win_lengths incompatibility is detected
    assert_raises(ValueError, tf_dics, epochs, forward, noise_csds, tmin, tmax,
                  tstep, win_lengths=[0, 1, 2], freq_bins=freq_bins)
    # Test if time step exceeding window lengths is detected
    assert_raises(ValueError, tf_dics, epochs, forward, noise_csds, tmin, tmax,
                  tstep=0.15, win_lengths=[0.2, 0.1], freq_bins=freq_bins)
    # Test if incorrect number of mt_bandwidths is detected
    assert_raises(ValueError, tf_dics, epochs, forward, noise_csds, tmin, tmax,
                  tstep, win_lengths, freq_bins, mode='multitaper',
                  mt_bandwidths=[20])
    # Pass only one epoch to test if subtracting evoked responses yields zeros
    stcs = tf_dics(epochs[0], forward, noise_csds, tmin, tmax, tstep,
                   win_lengths, freq_bins, subtract_evoked=True, reg=reg,
                   label=label)
    assert_array_almost_equal(stcs[0].data, np.zeros_like(stcs[0].data))
run_tests_if_main()  # mne test-runner helper: executes this module's tests when run as a script.
| jaeilepp/mne-python | mne/beamformer/tests/test_dics.py | Python | bsd-3-clause | 13,250 |
import django.forms
from ..utils import get_active_plugins_choices
class ChoosePostTypeForm(django.forms.Form):
    """Form for choosing a post type from the currently active plugins.

    ``choices`` is passed as a callable (not called) so the active-plugin
    list is re-evaluated each time the field is used, instead of being
    frozen once at import time.
    """
    post_types = django.forms.ChoiceField(
        choices=get_active_plugins_choices)

    class Meta:
        # NOTE(review): ``Meta.fields`` only affects ``ModelForm``; on a
        # plain ``forms.Form`` it is inert.  Kept for compatibility.
        fields = ['post_types']
| AASHE/django-bulletin | bulletin/tools/plugins/forms/plugin.py | Python | mit | 254 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>, and others
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
try:
import json
except ImportError:
import simplejson as json
import shlex
import os
import subprocess
import sys
import datetime
import traceback
import signal
import time
import syslog
def daemonize_self():
    """Detach from the parent session via the classic Unix double fork."""
    # daemonizing code: http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/66012
    # logger.info("cobblerd started")
    try:
        pid = os.fork()
        if pid > 0:
            # exit first parent; the child continues below
            sys.exit(0)
    except OSError, e:
        print >>sys.stderr, "fork #1 failed: %d (%s)" % (e.errno, e.strerror)
        sys.exit(1)
    # decouple from parent environment
    os.chdir("/")
    os.setsid()
    os.umask(022)
    # do second fork so the daemon can never reacquire a controlling terminal
    try:
        pid = os.fork()
        if pid > 0:
            # print "Daemon PID %d" % pid
            sys.exit(0)
    except OSError, e:
        print >>sys.stderr, "fork #2 failed: %d (%s)" % (e.errno, e.strerror)
        sys.exit(1)
    # redirect the standard streams to /dev/null
    dev_null = file('/dev/null','rw')
    os.dup2(dev_null.fileno(), sys.stdin.fileno())
    os.dup2(dev_null.fileno(), sys.stdout.fileno())
    os.dup2(dev_null.fileno(), sys.stderr.fileno())
if len(sys.argv) < 3:
print json.dumps({
"failed" : True,
"msg" : "usage: async_wrapper <jid> <time_limit> <modulescript> <argsfile>. Humans, do not call directly!"
})
sys.exit(1)
jid = "%s.%d" % (sys.argv[1], os.getpid())
time_limit = sys.argv[2]
wrapped_module = sys.argv[3]
argsfile = sys.argv[4]
cmd = "%s %s" % (wrapped_module, argsfile)
syslog.openlog('ansible-%s' % os.path.basename(__file__))
syslog.syslog(syslog.LOG_NOTICE, 'Invoked with %s' % " ".join(sys.argv[1:]))
# setup logging directory
logdir = os.path.expanduser("~/.ansible_async")
log_path = os.path.join(logdir, jid)
if not os.path.exists(logdir):
try:
os.makedirs(logdir)
except:
print json.dumps({
"failed" : 1,
"msg" : "could not create: %s" % logdir
})
def _run_command(wrapped_cmd, jid, log_path):
    """Run the wrapped module synchronously, capturing its output in log_path.

    The module is expected to write its own JSON result to stdout/stderr
    (both redirected into the log file); on failure this function writes a
    JSON error record to the same file instead.
    """
    # Write a "started" marker first so pollers see the job immediately,
    # then reopen the file ("w" truncates) for the module's real output.
    logfile = open(log_path, "w")
    logfile.write(json.dumps({ "started" : 1, "ansible_job_id" : jid }))
    logfile.close()
    logfile = open(log_path, "w")
    result = {}
    outdata = ''
    try:
        # shell=False with a shlex-split argv: no shell interpolation.
        cmd = shlex.split(wrapped_cmd)
        script = subprocess.Popen(cmd, shell=False,
            stdin=None, stdout=logfile, stderr=logfile)
        script.communicate()
        # Re-read what the module wrote and validate it parses as JSON; on
        # success the module's own output is left in the log file untouched.
        outdata = file(log_path).read()
        result = json.loads(outdata)
    except (OSError, IOError), e:
        # Could not launch the module or read its output at all.
        result = {
            "failed": 1,
            "cmd" : wrapped_cmd,
            "msg": str(e),
        }
        result['ansible_job_id'] = jid
        logfile.write(json.dumps(result))
    except:
        # Anything else — typically the module emitted invalid JSON.
        result = {
            "failed" : 1,
            "cmd" : wrapped_cmd,
            "data" : outdata, # temporary debug only
            "msg" : traceback.format_exc()
        }
        result['ansible_job_id'] = jid
        logfile.write(json.dumps(result))
    logfile.close()
# immediately exit this process, leaving an orphaned process
# running which immediately forks a supervisory timing process
#import logging
#import logging.handlers
#logger = logging.getLogger("ansible_async")
#logger.setLevel(logging.WARNING)
#logger.addHandler( logging.handlers.SysLogHandler("/dev/log") )
def debug(msg):
    """No-op debug hook.

    Uncomment the logger wiring above and the call here to route these
    messages to syslog while troubleshooting.
    """
    # logger.warning(msg)
    return None
# Process layout: the first fork lets the caller get an immediate
# "started" JSON response while the child daemonizes and forks again into
# a watcher (enforces time_limit) plus the process that runs the module.
try:
    pid = os.fork()
    if pid:
        # Notify the overlord that the async process started
        # we need to not return immmediately such that the launched command has an attempt
        # to initialize PRIOR to ansible trying to clean up the launch directory (and argsfile)
        # this probably could be done with some IPC later. Modules should always read
        # the argsfile at the very first start of their execution anyway
        time.sleep(1)
        debug("Return async_wrapper task started.")
        print json.dumps({ "started" : 1, "ansible_job_id" : jid, "results_file" : log_path })
        sys.stdout.flush()
        sys.exit(0)
    else:
        # The actual wrapper process
        # Daemonize, so we keep on running
        daemonize_self()
        # we are now daemonized, create a supervisory process
        debug("Starting module and watcher")
        sub_pid = os.fork()
        if sub_pid:
            # the parent stops the process after the time limit
            remaining = int(time_limit)
            # set the child process group id to kill all children
            os.setpgid(sub_pid, sub_pid)
            debug("Start watching %s (%s)"%(sub_pid, remaining))
            time.sleep(5)
            # Poll every 5s; waitpid with WNOHANG returns (0, 0) while the
            # module child is still running.
            while os.waitpid(sub_pid, os.WNOHANG) == (0, 0):
                debug("%s still running (%s)"%(sub_pid, remaining))
                time.sleep(5)
                remaining = remaining - 5
                if remaining <= 0:
                    # Time limit exceeded: SIGKILL the whole process group
                    # so any grandchildren die too.
                    debug("Now killing %s"%(sub_pid))
                    os.killpg(sub_pid, signal.SIGKILL)
                    debug("Sent kill to group %s"%sub_pid)
                    time.sleep(1)
                    sys.exit(0)
            debug("Done in kid B.")
            os._exit(0)
        else:
            # the child process runs the actual module
            debug("Start module (%s)"%os.getpid())
            _run_command(cmd, jid, log_path)
            debug("Module complete (%s)"%os.getpid())
            sys.exit(0)
except Exception, err:
    debug("error: %s"%(err))
    raise err
| 47lining/ansible-modules-core | utilities/logic/async_wrapper.py | Python | gpl-3.0 | 6,183 |
"""
WSGI config for teste project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Ensure Django knows which settings module to use before the application
# object is created; an externally set DJANGO_SETTINGS_MODULE wins.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "teste.settings")
# Module-level WSGI callable imported by servers (gunicorn, uWSGI, mod_wsgi).
application = get_wsgi_application()
| Dturati/projetoUFMT | teste/teste/wsgi.py | Python | mit | 388 |
"""
Define the SeriesGroupBy, DataFrameGroupBy, and PanelGroupBy
classes that hold the groupby interfaces (and some implementations).
These are user facing as the result of the ``df.groupby(...)`` operations,
which here returns a DataFrameGroupBy object.
"""
import collections
import copy
import warnings
from functools import partial
from textwrap import dedent
import numpy as np
import pandas.core.algorithms as algorithms
import pandas.core.common as com
import pandas.core.indexes.base as ibase
from pandas import compat
from pandas._libs import Timestamp, lib
from pandas.compat import lzip, map
from pandas.compat.numpy import _np_version_under1p13
from pandas.core.arrays import Categorical
from pandas.core.base import DataError, SpecificationError
from pandas.core.dtypes.cast import maybe_downcast_to_dtype
from pandas.core.dtypes.common import (
ensure_int64, ensure_platform_int, is_bool, is_datetimelike,
is_integer_dtype, is_interval_dtype, is_numeric_dtype, is_scalar
)
from pandas.core.dtypes.missing import isna, notna
from pandas.core.frame import DataFrame
from pandas.core.generic import NDFrame, _shared_docs
from pandas.core.groupby import base
from pandas.core.groupby.groupby import (
GroupBy, _apply_docs, _transform_template
)
from pandas.core.index import CategoricalIndex, Index, MultiIndex
from pandas.core.internals import BlockManager, make_block
from pandas.core.panel import Panel
from pandas.core.series import Series
from pandas.plotting._core import boxplot_frame_groupby
from pandas.util._decorators import Appender, Substitution
class NDFrameGroupBy(GroupBy):
    """GroupBy implementation shared by the n-dimensional NDFrame groupbys
    (DataFrameGroupBy, PanelGroupBy): slicing, block-wise cython
    aggregation, and the generic aggregate/transform/filter plumbing.
    """
    def _iterate_slices(self):
        # Yield (name, 1-D slice) pairs along the non-grouped axis,
        # skipping labels used as group keys (self.exclusions).
        if self.axis == 0:
            # kludge
            if self._selection is None:
                slice_axis = self.obj.columns
            else:
                slice_axis = self._selection_list
            slicer = lambda x: self.obj[x]
        else:
            slice_axis = self.obj.index
            slicer = self.obj.xs
        for val in slice_axis:
            if val in self.exclusions:
                continue
            yield val, slicer(val)
    def _cython_agg_general(self, how, alt=None, numeric_only=True,
                            min_count=-1):
        # Block-wise cython aggregation; ``alt`` is a python-level fallback
        # used for blocks the cython kernel cannot handle.
        new_items, new_blocks = self._cython_agg_blocks(
            how, alt=alt, numeric_only=numeric_only, min_count=min_count)
        return self._wrap_agged_blocks(new_items, new_blocks)
    def _wrap_agged_blocks(self, items, blocks):
        # Reassemble aggregated blocks into an object of the original type,
        # with the group result index on the aggregated axis.
        obj = self._obj_with_exclusions
        new_axes = list(obj._data.axes)
        # more kludge
        if self.axis == 0:
            new_axes[0], new_axes[1] = new_axes[1], self.grouper.result_index
        else:
            new_axes[self.axis] = self.grouper.result_index
        # Make sure block manager integrity check passes.
        assert new_axes[0].equals(items)
        new_axes[0] = items
        mgr = BlockManager(blocks, new_axes)
        new_obj = type(obj)(mgr)
        return self._post_process_cython_aggregate(new_obj)
    _block_agg_axis = 0
    def _cython_agg_blocks(self, how, alt=None, numeric_only=True,
                           min_count=-1):
        # TODO: the actual managing of mgr_locs is a PITA
        # here, it should happen via BlockManager.combine
        data, agg_axis = self._get_data_to_aggregate()
        if numeric_only:
            data = data.get_numeric_data(copy=False)
        new_blocks = []
        new_items = []
        deleted_items = []
        for block in data.blocks:
            locs = block.mgr_locs.as_array
            try:
                result, _ = self.grouper.aggregate(
                    block.values, how, axis=agg_axis, min_count=min_count)
            except NotImplementedError:
                # generally if we have numeric_only=False
                # and non-applicable functions
                # try to python agg
                if alt is None:
                    # we cannot perform the operation
                    # in an alternate way, exclude the block
                    deleted_items.append(locs)
                    continue
                # call our grouper again with only this block
                from pandas.core.groupby.groupby import groupby
                obj = self.obj[data.items[locs]]
                s = groupby(obj, self.grouper)
                result = s.aggregate(lambda x: alt(x, axis=self.axis))
            finally:
                # see if we can cast the block back to the original dtype
                result = block._try_coerce_and_cast_result(result)
            newb = block.make_block(result)
            new_items.append(locs)
            new_blocks.append(newb)
        if len(new_blocks) == 0:
            raise DataError('No numeric types to aggregate')
        # reset the locs in the blocks to correspond to our
        # current ordering
        indexer = np.concatenate(new_items)
        new_items = data.items.take(np.sort(indexer))
        if len(deleted_items):
            # we need to adjust the indexer to account for the
            # items we have removed
            # really should be done in internals :<
            deleted = np.concatenate(deleted_items)
            ai = np.arange(len(data))
            mask = np.zeros(len(data))
            mask[deleted] = 1
            indexer = (ai - mask.cumsum())[indexer]
        offset = 0
        for b in new_blocks:
            loc = len(b.mgr_locs)
            b.mgr_locs = indexer[offset:(offset + loc)]
            offset += loc
        return new_items, new_blocks
    def _get_data_to_aggregate(self):
        # Return (BlockManager, axis) to feed the cython aggregator; for
        # axis=0 groupings the object is transposed first so blocks line up.
        obj = self._obj_with_exclusions
        if self.axis == 0:
            return obj.swapaxes(0, 1)._data, 1
        else:
            return obj._data, self.axis
    def _post_process_cython_aggregate(self, obj):
        # undoing kludge from below
        if self.axis == 0:
            obj = obj.swapaxes(0, 1)
        return obj
    def aggregate(self, arg, *args, **kwargs):
        # Generic .agg entry point: structured aggregation first, then the
        # python fallbacks (multi-func list treatment, generic per-group).
        _level = kwargs.pop('_level', None)
        result, how = self._aggregate(arg, _level=_level, *args, **kwargs)
        if how is None:
            return result
        if result is None:
            # grouper specific aggregations
            if self.grouper.nkeys > 1:
                return self._python_agg_general(arg, *args, **kwargs)
            else:
                # try to treat as if we are passing a list
                try:
                    assert not args and not kwargs
                    result = self._aggregate_multiple_funcs(
                        [arg], _level=_level, _axis=self.axis)
                    result.columns = Index(
                        result.columns.levels[0],
                        name=self._selected_obj.columns.name)
                except Exception:
                    result = self._aggregate_generic(arg, *args, **kwargs)
        if not self.as_index:
            self._insert_inaxis_grouper_inplace(result)
            result.index = np.arange(len(result))
        return result._convert(datetime=True)
    agg = aggregate
    def _aggregate_generic(self, func, *args, **kwargs):
        # Apply ``func`` group by group; on failure fall back to
        # item-by-item aggregation or a row/column-wise apply.
        if self.grouper.nkeys != 1:
            raise AssertionError('Number of keys must be 1')
        axis = self.axis
        obj = self._obj_with_exclusions
        result = {}
        if axis != obj._info_axis_number:
            try:
                for name, data in self:
                    result[name] = self._try_cast(func(data, *args, **kwargs),
                                                  data)
            except Exception:
                return self._aggregate_item_by_item(func, *args, **kwargs)
        else:
            for name in self.indices:
                try:
                    data = self.get_group(name, obj=obj)
                    result[name] = self._try_cast(func(data, *args, **kwargs),
                                                  data)
                except Exception:
                    wrapper = lambda x: func(x, *args, **kwargs)
                    result[name] = data.apply(wrapper, axis=axis)
        return self._wrap_generic_output(result, obj)
    def _wrap_aggregated_output(self, output, names=None):
        # Subclass responsibility (DataFrameGroupBy / PanelGroupBy).
        raise com.AbstractMethodError(self)
    def _aggregate_item_by_item(self, func, *args, **kwargs):
        # only for axis==0
        obj = self._obj_with_exclusions
        result = {}
        cannot_agg = []
        errors = None
        for item in obj:
            try:
                data = obj[item]
                colg = SeriesGroupBy(data, selection=item,
                                     grouper=self.grouper)
                result[item] = self._try_cast(
                    colg.aggregate(func, *args, **kwargs), data)
            except ValueError:
                cannot_agg.append(item)
                continue
            except TypeError as e:
                cannot_agg.append(item)
                errors = e
                continue
        result_columns = obj.columns
        if cannot_agg:
            result_columns = result_columns.drop(cannot_agg)
            # GH6337
            if not len(result_columns) and errors is not None:
                raise errors
        return DataFrame(result, columns=result_columns)
    def _decide_output_index(self, output, labels):
        # Choose output columns: the original labels when every one
        # survived aggregation, otherwise the sorted surviving keys.
        if len(output) == len(labels):
            output_keys = labels
        else:
            output_keys = sorted(output)
            try:
                output_keys.sort()
            except Exception:  # pragma: no cover
                pass
            if isinstance(labels, MultiIndex):
                output_keys = MultiIndex.from_tuples(output_keys,
                                                     names=labels.names)
        return output_keys
    def _wrap_applied_output(self, keys, values, not_indexed_same=False):
        # Stitch the per-group results of ``apply`` back into a DataFrame
        # or Series, handling the many possible shapes of ``values``.
        from pandas.core.index import _all_indexes_same
        from pandas.core.tools.numeric import to_numeric
        if len(keys) == 0:
            return DataFrame(index=keys)
        key_names = self.grouper.names
        # GH12824.
        def first_not_none(values):
            try:
                return next(com._not_none(*values))
            except StopIteration:
                return None
        v = first_not_none(values)
        if v is None:
            # GH9684. If all values are None, then this will throw an error.
            # We'd prefer it return an empty dataframe.
            return DataFrame()
        elif isinstance(v, DataFrame):
            return self._concat_objects(keys, values,
                                        not_indexed_same=not_indexed_same)
        elif self.grouper.groupings is not None:
            if len(self.grouper.groupings) > 1:
                key_index = self.grouper.result_index
            else:
                ping = self.grouper.groupings[0]
                if len(keys) == ping.ngroups:
                    key_index = ping.group_index
                    key_index.name = key_names[0]
                    key_lookup = Index(keys)
                    indexer = key_lookup.get_indexer(key_index)
                    # reorder the values
                    values = [values[i] for i in indexer]
                else:
                    key_index = Index(keys, name=key_names[0])
                # don't use the key indexer
                if not self.as_index:
                    key_index = None
            # make Nones an empty object
            v = first_not_none(values)
            if v is None:
                return DataFrame()
            elif isinstance(v, NDFrame):
                values = [
                    x if x is not None else
                    v._constructor(**v._construct_axes_dict())
                    for x in values
                ]
            v = values[0]
            if isinstance(v, (np.ndarray, Index, Series)):
                if isinstance(v, Series):
                    applied_index = self._selected_obj._get_axis(self.axis)
                    all_indexed_same = _all_indexes_same([
                        x.index for x in values
                    ])
                    singular_series = (len(values) == 1 and
                                       applied_index.nlevels == 1)
                    # GH3596
                    # provide a reduction (Frame -> Series) if groups are
                    # unique
                    if self.squeeze:
                        # assign the name to this series
                        if singular_series:
                            values[0].name = keys[0]
                            # GH2893
                            # we have series in the values array, we want to
                            # produce a series:
                            # if any of the sub-series are not indexed the same
                            # OR we don't have a multi-index and we have only a
                            # single values
                            return self._concat_objects(
                                keys, values, not_indexed_same=not_indexed_same
                            )
                        # still a series
                        # path added as of GH 5545
                        elif all_indexed_same:
                            from pandas.core.reshape.concat import concat
                            return concat(values)
                    if not all_indexed_same:
                        # GH 8467
                        return self._concat_objects(
                            keys, values, not_indexed_same=True,
                        )
                try:
                    if self.axis == 0:
                        # GH6124 if the list of Series have a consistent name,
                        # then propagate that name to the result.
                        index = v.index.copy()
                        if index.name is None:
                            # Only propagate the series name to the result
                            # if all series have a consistent name.  If the
                            # series do not have a consistent name, do
                            # nothing.
                            names = {v.name for v in values}
                            if len(names) == 1:
                                index.name = list(names)[0]
                        # normally use vstack as its faster than concat
                        # and if we have mi-columns
                        if (isinstance(v.index, MultiIndex) or
                                key_index is None or
                                isinstance(key_index, MultiIndex)):
                            stacked_values = np.vstack(map(np.asarray, values))
                            result = DataFrame(stacked_values, index=key_index,
                                               columns=index)
                        else:
                            # GH5788 instead of stacking; concat gets the
                            # dtypes correct
                            from pandas.core.reshape.concat import concat
                            result = concat(values, keys=key_index,
                                            names=key_index.names,
                                            axis=self.axis).unstack()
                            result.columns = index
                    else:
                        stacked_values = np.vstack(map(np.asarray, values))
                        result = DataFrame(stacked_values.T, index=v.index,
                                           columns=key_index)
                except (ValueError, AttributeError):
                    # GH1738: values is list of arrays of unequal lengths fall
                    # through to the outer else caluse
                    return Series(values, index=key_index,
                                  name=self._selection_name)
                # if we have date/time like in the original, then coerce dates
                # as we are stacking can easily have object dtypes here
                so = self._selected_obj
                if (so.ndim == 2 and so.dtypes.apply(is_datetimelike).any()):
                    result = result.apply(
                        lambda x: to_numeric(x, errors='ignore'))
                    date_cols = self._selected_obj.select_dtypes(
                        include=['datetime', 'timedelta']).columns
                    date_cols = date_cols.intersection(result.columns)
                    result[date_cols] = (result[date_cols]
                                         ._convert(datetime=True,
                                                   coerce=True))
                else:
                    result = result._convert(datetime=True)
                return self._reindex_output(result)
            # values are not series or array-like but scalars
            else:
                # only coerce dates if we find at least 1 datetime
                coerce = any(isinstance(x, Timestamp) for x in values)
                # self._selection_name not passed through to Series as the
                # result should not take the name of original selection
                # of columns
                return (Series(values, index=key_index)
                        ._convert(datetime=True,
                                  coerce=coerce))
        else:
            # Handle cases like BinGrouper
            return self._concat_objects(keys, values,
                                        not_indexed_same=not_indexed_same)
    def _transform_general(self, func, *args, **kwargs):
        # Group-wise transform for arbitrary callables: apply per group
        # (choosing fast vs slow path once) and re-concatenate in the
        # original row order.
        from pandas.core.reshape.concat import concat
        applied = []
        obj = self._obj_with_exclusions
        gen = self.grouper.get_iterator(obj, axis=self.axis)
        fast_path, slow_path = self._define_paths(func, *args, **kwargs)
        path = None
        for name, group in gen:
            object.__setattr__(group, 'name', name)
            if path is None:
                # Try slow path and fast path.
                try:
                    path, res = self._choose_path(fast_path, slow_path, group)
                except TypeError:
                    return self._transform_item_by_item(obj, fast_path)
                except ValueError:
                    msg = 'transform must return a scalar value for each group'
                    raise ValueError(msg)
            else:
                res = path(group)
            if isinstance(res, Series):
                # we need to broadcast across the
                # other dimension; this will preserve dtypes
                # GH14457
                if not np.prod(group.shape):
                    continue
                elif res.index.is_(obj.index):
                    r = concat([res] * len(group.columns), axis=1)
                    r.columns = group.columns
                    r.index = group.index
                else:
                    r = DataFrame(
                        np.concatenate([res.values] * len(group.index)
                                       ).reshape(group.shape),
                        columns=group.columns, index=group.index)
                applied.append(r)
            else:
                applied.append(res)
        concat_index = obj.columns if self.axis == 0 else obj.index
        concatenated = concat(applied, join_axes=[concat_index],
                              axis=self.axis, verify_integrity=False)
        return self._set_result_index_ordered(concatenated)
    @Substitution(klass='DataFrame', selected='')
    @Appender(_transform_template)
    def transform(self, func, *args, **kwargs):
        # optimized transforms
        func = self._is_cython_func(func) or func
        if isinstance(func, compat.string_types):
            if func in base.cython_transforms:
                # cythonized transform
                return getattr(self, func)(*args, **kwargs)
            else:
                # cythonized aggregation and merge
                result = getattr(self, func)(*args, **kwargs)
        else:
            return self._transform_general(func, *args, **kwargs)
        # a reduction transform
        if not isinstance(result, DataFrame):
            return self._transform_general(func, *args, **kwargs)
        obj = self._obj_with_exclusions
        # nuiscance columns
        if not result.columns.equals(obj.columns):
            return self._transform_general(func, *args, **kwargs)
        return self._transform_fast(result, obj, func)
    def _transform_fast(self, result, obj, func_nm):
        """
        Fast transform path for aggregations
        """
        # if there were groups with no observations (Categorical only?)
        # try casting data to original dtype
        cast = self._transform_should_cast(func_nm)
        # for each col, reshape to to size of original frame
        # by take operation
        ids, _, ngroup = self.grouper.group_info
        output = []
        for i, _ in enumerate(result.columns):
            res = algorithms.take_1d(result.iloc[:, i].values, ids)
            if cast:
                res = self._try_cast(res, obj.iloc[:, i])
            output.append(res)
        return DataFrame._from_arrays(output, columns=result.columns,
                                      index=obj.index)
    def _define_paths(self, func, *args, **kwargs):
        # Build the two candidate transform callables: ``fast_path`` calls
        # ``func`` on the whole group; ``slow_path`` applies it
        # column/row-wise via DataFrame.apply.
        if isinstance(func, compat.string_types):
            fast_path = lambda group: getattr(group, func)(*args, **kwargs)
            slow_path = lambda group: group.apply(
                lambda x: getattr(x, func)(*args, **kwargs), axis=self.axis)
        else:
            fast_path = lambda group: func(group, *args, **kwargs)
            slow_path = lambda group: group.apply(
                lambda x: func(x, *args, **kwargs), axis=self.axis)
        return fast_path, slow_path
    def _choose_path(self, fast_path, slow_path, group):
        # Evaluate the slow path first (always valid); switch to the fast
        # path only when it demonstrably yields the same values.
        path = slow_path
        res = slow_path(group)
        # if we make it here, test if we can use the fast path
        try:
            res_fast = fast_path(group)
            # compare that we get the same results
            if res.shape == res_fast.shape:
                res_r = res.values.ravel()
                res_fast_r = res_fast.values.ravel()
                mask = notna(res_r)
                if (res_r[mask] == res_fast_r[mask]).all():
                    path = fast_path
        except Exception:
            pass
        return path, res
    def _transform_item_by_item(self, obj, wrapper):
        # iterate through columns, keeping only those the transform accepts
        output = {}
        inds = []
        for i, col in enumerate(obj):
            try:
                output[col] = self[col].transform(wrapper)
                inds.append(i)
            except Exception:
                pass
        if len(output) == 0:  # pragma: no cover
            raise TypeError('Transform function invalid for data types')
        columns = obj.columns
        if len(output) < len(obj.columns):
            columns = columns.take(inds)
        return DataFrame(output, index=obj.index, columns=columns)
    def filter(self, func, dropna=True, *args, **kwargs):  # noqa
        """
        Return a copy of a DataFrame excluding elements from groups that
        do not satisfy the boolean criterion specified by func.

        Parameters
        ----------
        f : function
            Function to apply to each subframe. Should return True or False.
        dropna : Drop groups that do not pass the filter. True by default;
            if False, groups that evaluate False are filled with NaNs.

        Notes
        -----
        Each subframe is endowed the attribute 'name' in case you need to know
        which group you are working on.

        Examples
        --------
        >>> df = pd.DataFrame({'A' : ['foo', 'bar', 'foo', 'bar',
        ...                           'foo', 'bar'],
        ...                    'B' : [1, 2, 3, 4, 5, 6],
        ...                    'C' : [2.0, 5., 8., 1., 2., 9.]})
        >>> grouped = df.groupby('A')
        >>> grouped.filter(lambda x: x['B'].mean() > 3.)
             A  B    C
        1  bar  2  5.0
        3  bar  4  1.0
        5  bar  6  9.0

        Returns
        -------
        filtered : DataFrame
        """
        indices = []
        obj = self._selected_obj
        gen = self.grouper.get_iterator(obj, axis=self.axis)
        for name, group in gen:
            object.__setattr__(group, 'name', name)
            res = func(group, *args, **kwargs)
            try:
                res = res.squeeze()
            except AttributeError:  # allow e.g., scalars and frames to pass
                pass
            # interpret the result of the filter
            if is_bool(res) or (is_scalar(res) and isna(res)):
                if res and notna(res):
                    indices.append(self._get_index(name))
            else:
                # non scalars aren't allowed
                raise TypeError("filter function returned a %s, "
                                "but expected a scalar bool" %
                                type(res).__name__)
        return self._apply_filter(indices, dropna)
class SeriesGroupBy(GroupBy):
#
# Make class defs of attributes on SeriesGroupBy whitelist
_apply_whitelist = base.series_apply_whitelist
for _def_str in base.whitelist_method_generator(
GroupBy, Series, _apply_whitelist):
exec(_def_str)
@property
def _selection_name(self):
"""
since we are a series, we by definition only have
a single name, but may be the result of a selection or
the name of our object
"""
if self._selection is None:
return self.obj.name
else:
return self._selection
_agg_doc = dedent("""
Examples
--------
>>> s = pd.Series([1, 2, 3, 4])
>>> s
0 1
1 2
2 3
3 4
dtype: int64
>>> s.groupby([1, 1, 2, 2]).min()
1 1
2 3
dtype: int64
>>> s.groupby([1, 1, 2, 2]).agg('min')
1 1
2 3
dtype: int64
>>> s.groupby([1, 1, 2, 2]).agg(['min', 'max'])
min max
1 1 2
2 3 4
See also
--------
pandas.Series.groupby.apply
pandas.Series.groupby.transform
pandas.Series.aggregate
""")
@Appender(_apply_docs['template']
.format(input='series',
examples=_apply_docs['series_examples']))
def apply(self, func, *args, **kwargs):
return super(SeriesGroupBy, self).apply(func, *args, **kwargs)
@Appender(_agg_doc)
@Appender(_shared_docs['aggregate'] % dict(
klass='Series',
versionadded='',
axis=''))
def aggregate(self, func_or_funcs, *args, **kwargs):
_level = kwargs.pop('_level', None)
if isinstance(func_or_funcs, compat.string_types):
return getattr(self, func_or_funcs)(*args, **kwargs)
if isinstance(func_or_funcs, compat.Iterable):
# Catch instances of lists / tuples
# but not the class list / tuple itself.
ret = self._aggregate_multiple_funcs(func_or_funcs,
(_level or 0) + 1)
else:
cyfunc = self._is_cython_func(func_or_funcs)
if cyfunc and not args and not kwargs:
return getattr(self, cyfunc)()
if self.grouper.nkeys > 1:
return self._python_agg_general(func_or_funcs, *args, **kwargs)
try:
return self._python_agg_general(func_or_funcs, *args, **kwargs)
except Exception:
result = self._aggregate_named(func_or_funcs, *args, **kwargs)
index = Index(sorted(result), name=self.grouper.names[0])
ret = Series(result, index=index)
if not self.as_index: # pragma: no cover
print('Warning, ignoring as_index=True')
# _level handled at higher
if not _level and isinstance(ret, dict):
from pandas import concat
ret = concat(ret, axis=1)
return ret
agg = aggregate
def _aggregate_multiple_funcs(self, arg, _level):
if isinstance(arg, dict):
# show the deprecation, but only if we
# have not shown a higher level one
# GH 15931
if isinstance(self._selected_obj, Series) and _level <= 1:
warnings.warn(
("using a dict on a Series for aggregation\n"
"is deprecated and will be removed in a future "
"version"),
FutureWarning, stacklevel=3)
columns = list(arg.keys())
arg = list(arg.items())
elif any(isinstance(x, (tuple, list)) for x in arg):
arg = [(x, x) if not isinstance(x, (tuple, list)) else x
for x in arg]
# indicated column order
columns = lzip(*arg)[0]
else:
# list of functions / function names
columns = []
for f in arg:
if isinstance(f, compat.string_types):
columns.append(f)
else:
# protect against callables without names
columns.append(com.get_callable_name(f))
arg = lzip(columns, arg)
results = {}
for name, func in arg:
obj = self
if name in results:
raise SpecificationError('Function names must be unique, '
'found multiple named %s' % name)
# reset the cache so that we
# only include the named selection
if name in self._selected_obj:
obj = copy.copy(obj)
obj._reset_cache()
obj._selection = name
results[name] = obj.aggregate(func)
if any(isinstance(x, DataFrame) for x in compat.itervalues(results)):
# let higher level handle
if _level:
return results
return DataFrame(results, columns=columns)
def _wrap_output(self, output, index, names=None):
""" common agg/transform wrapping logic """
output = output[self._selection_name]
if names is not None:
return DataFrame(output, index=index, columns=names)
else:
name = self._selection_name
if name is None:
name = self._selected_obj.name
return Series(output, index=index, name=name)
def _wrap_aggregated_output(self, output, names=None):
return self._wrap_output(output=output,
index=self.grouper.result_index,
names=names)
def _wrap_transformed_output(self, output, names=None):
return self._wrap_output(output=output,
index=self.obj.index,
names=names)
def _wrap_applied_output(self, keys, values, not_indexed_same=False):
if len(keys) == 0:
# GH #6265
return Series([], name=self._selection_name, index=keys)
def _get_index():
if self.grouper.nkeys > 1:
index = MultiIndex.from_tuples(keys, names=self.grouper.names)
else:
index = Index(keys, name=self.grouper.names[0])
return index
if isinstance(values[0], dict):
# GH #823
index = _get_index()
result = DataFrame(values, index=index).stack()
result.name = self._selection_name
return result
if isinstance(values[0], (Series, dict)):
return self._concat_objects(keys, values,
not_indexed_same=not_indexed_same)
elif isinstance(values[0], DataFrame):
# possible that Series -> DataFrame by applied function
return self._concat_objects(keys, values,
not_indexed_same=not_indexed_same)
else:
# GH #6265
return Series(values, index=_get_index(),
name=self._selection_name)
def _aggregate_named(self, func, *args, **kwargs):
result = {}
for name, group in self:
group.name = name
output = func(group, *args, **kwargs)
if isinstance(output, (Series, Index, np.ndarray)):
raise Exception('Must produce aggregated value')
result[name] = self._try_cast(output, group)
return result
    @Substitution(klass='Series', selected='A.')
    @Appender(_transform_template)
    def transform(self, func, *args, **kwargs):
        # Prefer the cythonized implementation when ``func`` names one.
        func = self._is_cython_func(func) or func
        # if string function
        if isinstance(func, compat.string_types):
            if func in base.cython_transforms:
                # cythonized transform
                return getattr(self, func)(*args, **kwargs)
            else:
                # cythonized aggregation and merge
                return self._transform_fast(
                    lambda: getattr(self, func)(*args, **kwargs), func)
        # reg transform
        klass = self._selected_obj.__class__
        results = []
        wrapper = lambda x: func(x, *args, **kwargs)
        for name, group in self:
            # __setattr__ via object bypasses pandas' attribute machinery.
            object.__setattr__(group, 'name', name)
            res = wrapper(group)
            if hasattr(res, 'values'):
                res = res.values
            # Broadcast the per-group result back over the group's rows.
            indexer = self._get_index(name)
            s = klass(res, indexer)
            results.append(s)
        from pandas.core.reshape.concat import concat
        result = concat(results).sort_index()
        # we will only try to coerce the result type if
        # we have a numeric dtype, as these are *always* udfs
        # the cython take a different path (and casting)
        dtype = self._selected_obj.dtype
        if is_numeric_dtype(dtype):
            result = maybe_downcast_to_dtype(result, dtype)
        result.name = self._selected_obj.name
        result.index = self._selected_obj.index
        return result
    def _transform_fast(self, func, func_nm):
        """
        fast version of transform, only applicable to
        builtin/cythonizable functions
        """
        if isinstance(func, compat.string_types):
            func = getattr(self, func)
        ids, _, ngroup = self.grouper.group_info
        cast = self._transform_should_cast(func_nm)
        # ``func()`` aggregates to one value per group; take_1d broadcasts
        # those values back onto the original row order via the group ids.
        out = algorithms.take_1d(func().values, ids)
        if cast:
            out = self._try_cast(out, self.obj)
        return Series(out, index=self.obj.index, name=self.obj.name)
def filter(self, func, dropna=True, *args, **kwargs): # noqa
"""
Return a copy of a Series excluding elements from groups that
do not satisfy the boolean criterion specified by func.
Parameters
----------
func : function
To apply to each group. Should return True or False.
dropna : Drop groups that do not pass the filter. True by default;
if False, groups that evaluate False are filled with NaNs.
Examples
--------
>>> df = pd.DataFrame({'A' : ['foo', 'bar', 'foo', 'bar',
... 'foo', 'bar'],
... 'B' : [1, 2, 3, 4, 5, 6],
... 'C' : [2.0, 5., 8., 1., 2., 9.]})
>>> grouped = df.groupby('A')
>>> df.groupby('A').B.filter(lambda x: x.mean() > 3.)
1 2
3 4
5 6
Name: B, dtype: int64
Returns
-------
filtered : Series
"""
if isinstance(func, compat.string_types):
wrapper = lambda x: getattr(x, func)(*args, **kwargs)
else:
wrapper = lambda x: func(x, *args, **kwargs)
# Interpret np.nan as False.
def true_and_notna(x, *args, **kwargs):
b = wrapper(x, *args, **kwargs)
return b and notna(b)
try:
indices = [self._get_index(name) for name, group in self
if true_and_notna(group)]
except ValueError:
raise TypeError("the filter must return a boolean result")
except TypeError:
raise TypeError("the filter must return a boolean result")
filtered = self._apply_filter(indices, dropna)
return filtered
    def nunique(self, dropna=True):
        """ Returns number of unique elements in the group """
        ids, _, _ = self.grouper.group_info
        val = self.obj.get_values()
        try:
            sorter = np.lexsort((val, ids))
        except TypeError:  # catches object dtypes
            msg = ('val.dtype must be object, got {dtype}'
                   .format(dtype=val.dtype))
            assert val.dtype == object, msg
            # Factorize mixed-object values so lexsort gets integers;
            # missing values map to the sentinel -1.
            val, _ = algorithms.factorize(val, sort=False)
            sorter = np.lexsort((val, ids))
            _isna = lambda a: a == -1
        else:
            _isna = isna
        ids, val = ids[sorter], val[sorter]
        # group boundaries are where group ids change
        # unique observations are where sorted values change
        idx = np.r_[0, 1 + np.nonzero(ids[1:] != ids[:-1])[0]]
        inc = np.r_[1, val[1:] != val[:-1]]
        # 1st item of each group is a new unique observation
        mask = _isna(val)
        if dropna:
            inc[idx] = 1
            inc[mask] = 0
        else:
            # Count NaN once per group: zero out repeated adjacent NaNs.
            inc[mask & np.r_[False, mask[:-1]]] = 0
            inc[idx] = 1
        # Sum the "new unique" flags within each group's boundary slice.
        out = np.add.reduceat(inc, idx).astype('int64', copy=False)
        if len(ids):
            # NaN/NaT group exists if the head of ids is -1,
            # so remove it from res and exclude its index from idx
            if ids[0] == -1:
                res = out[1:]
                idx = idx[np.flatnonzero(idx)]
            else:
                res = out
        else:
            res = out[1:]
        ri = self.grouper.result_index
        # we might have duplications among the bins
        if len(res) != len(ri):
            res, out = np.zeros(len(ri), dtype=out.dtype), res
            res[ids[idx]] = out
        return Series(res,
                      index=ri,
                      name=self._selection_name)
@Appender(Series.describe.__doc__)
def describe(self, **kwargs):
result = self.apply(lambda x: x.describe(**kwargs))
if self.axis == 1:
return result.T
return result.unstack()
    def value_counts(self, normalize=False, sort=True, ascending=False,
                     bins=None, dropna=True):
        """Count (optionally binned) value occurrences within each group."""
        from pandas.core.reshape.tile import cut
        from pandas.core.reshape.merge import _get_join_indexers
        if bins is not None and not np.iterable(bins):
            # scalar bins cannot be done at top level
            # in a backward compatible way
            return self.apply(Series.value_counts,
                              normalize=normalize,
                              sort=sort,
                              ascending=ascending,
                              bins=bins)
        ids, _, _ = self.grouper.group_info
        val = self.obj.get_values()
        # groupby removes null keys from groupings
        mask = ids != -1
        ids, val = ids[mask], val[mask]
        if bins is None:
            lab, lev = algorithms.factorize(val, sort=True)
            llab = lambda lab, inc: lab[inc]
        else:
            # lab is a Categorical with categories an IntervalIndex
            lab = cut(Series(val), bins, include_lowest=True)
            lev = lab.cat.categories
            lab = lev.take(lab.cat.codes)
            llab = lambda lab, inc: lab[inc]._multiindex.labels[-1]
        if is_interval_dtype(lab):
            # TODO: should we do this inside II?
            sorter = np.lexsort((lab.left, lab.right, ids))
        else:
            sorter = np.lexsort((lab, ids))
        ids, lab = ids[sorter], lab[sorter]
        # group boundaries are where group ids change
        idx = np.r_[0, 1 + np.nonzero(ids[1:] != ids[:-1])[0]]
        # new values are where sorted labels change
        lchanges = llab(lab, slice(1, None)) != llab(lab, slice(None, -1))
        inc = np.r_[True, lchanges]
        inc[idx] = True  # group boundaries are also new values
        out = np.diff(np.nonzero(np.r_[inc, True])[0])  # value counts
        # num. of times each group should be repeated
        rep = partial(np.repeat, repeats=np.add.reduceat(inc, idx))
        # multi-index components
        labels = list(map(rep, self.grouper.recons_labels)) + [llab(lab, inc)]
        levels = [ping.group_index for ping in self.grouper.groupings] + [lev]
        names = self.grouper.names + [self._selection_name]
        if dropna:
            mask = labels[-1] != -1
            if mask.all():
                dropna = False
            else:
                out, labels = out[mask], [label[mask] for label in labels]
        if normalize:
            # Divide each count by its group's (possibly NaN-adjusted) size.
            out = out.astype('float')
            d = np.diff(np.r_[idx, len(ids)])
            if dropna:
                m = ids[lab == -1]
                np.add.at(d, m, -1)
                acc = rep(d)[mask]
            else:
                acc = rep(d)
            out /= acc
        if sort and bins is None:
            cat = ids[inc][mask] if dropna else ids[inc]
            sorter = np.lexsort((out if ascending else -out, cat))
            out, labels[-1] = out[sorter], labels[-1][sorter]
        if bins is None:
            mi = MultiIndex(levels=levels, labels=labels, names=names,
                            verify_integrity=False)
            if is_integer_dtype(out):
                out = ensure_int64(out)
            return Series(out, index=mi, name=self._selection_name)
        # for compat. with libgroupby.value_counts need to ensure every
        # bin is present at every index level, null filled with zeros
        diff = np.zeros(len(out), dtype='bool')
        for lab in labels[:-1]:
            diff |= np.r_[True, lab[1:] != lab[:-1]]
        ncat, nbin = diff.sum(), len(levels[-1])
        # Left join the full (group x bin) cartesian grid against observed
        # counts; unmatched cells get a zero count.
        left = [np.repeat(np.arange(ncat), nbin),
                np.tile(np.arange(nbin), ncat)]
        right = [diff.cumsum() - 1, labels[-1]]
        _, idx = _get_join_indexers(left, right, sort=False, how='left')
        out = np.where(idx != -1, out[idx], 0)
        if sort:
            sorter = np.lexsort((out if ascending else -out, left[0]))
            out, left[-1] = out[sorter], left[-1][sorter]
        # build the multi-index w/ full levels
        labels = list(map(lambda lab: np.repeat(lab[diff], nbin), labels[:-1]))
        labels.append(left[-1])
        mi = MultiIndex(levels=levels, labels=labels, names=names,
                        verify_integrity=False)
        if is_integer_dtype(out):
            out = ensure_int64(out)
        return Series(out, index=mi, name=self._selection_name)
    def count(self):
        """ Compute count of group, excluding missing values """
        ids, _, ngroups = self.grouper.group_info
        val = self.obj.get_values()
        # Drop rows with a null group key (-1) or a missing value.
        mask = (ids != -1) & ~isna(val)
        ids = ensure_platform_int(ids)
        # Older numpy (<1.13) rejects minlength=0, hence the None fallback.
        minlength = ngroups or (None if _np_version_under1p13 else 0)
        out = np.bincount(ids[mask], minlength=minlength)
        return Series(out,
                      index=self.grouper.result_index,
                      name=self._selection_name,
                      dtype='int64')
    def _apply_to_column_groupbys(self, func):
        """ return a pass thru """
        # A SeriesGroupBy is its own single "column": apply func to self.
        return func(self)
def pct_change(self, periods=1, fill_method='pad', limit=None, freq=None):
"""Calculate percent change of each value to previous entry in group"""
filled = getattr(self, fill_method)(limit=limit)
shifted = filled.shift(periods=periods, freq=freq)
return (filled / shifted) - 1
class DataFrameGroupBy(NDFrameGroupBy):
    """Groupby implementation for DataFrame; aggregations apply column-wise."""
    _apply_whitelist = base.dataframe_apply_whitelist
    #
    # Make class defs of attributes on DataFrameGroupBy whitelist.
    for _def_str in base.whitelist_method_generator(
            GroupBy, DataFrame, _apply_whitelist):
        exec(_def_str)
    # Block-wise aggregation runs along axis 1 (columns) for frames.
    _block_agg_axis = 1
    _agg_doc = dedent("""
    Examples
    --------
    >>> df = pd.DataFrame({'A': [1, 1, 2, 2],
    ...                    'B': [1, 2, 3, 4],
    ...                    'C': np.random.randn(4)})
    >>> df
       A  B         C
    0  1  1  0.362838
    1  1  2  0.227877
    2  2  3  1.267767
    3  2  4 -0.562860
    The aggregation is for each column.
    >>> df.groupby('A').agg('min')
       B         C
    A
    1  1  0.227877
    2  3 -0.562860
    Multiple aggregations
    >>> df.groupby('A').agg(['min', 'max'])
        B             C
      min max       min       max
    A
    1   1   2  0.227877  0.362838
    2   3   4 -0.562860  1.267767
    Select a column for aggregation
    >>> df.groupby('A').B.agg(['min', 'max'])
       min  max
    A
    1    1    2
    2    3    4
    Different aggregations per column
    >>> df.groupby('A').agg({'B': ['min', 'max'], 'C': 'sum'})
        B             C
      min max       sum
    A
    1   1   2  0.590716
    2   3   4  0.704907
    See also
    --------
    pandas.DataFrame.groupby.apply
    pandas.DataFrame.groupby.transform
    pandas.DataFrame.aggregate
    """)
    @Appender(_agg_doc)
    @Appender(_shared_docs['aggregate'] % dict(
        klass='DataFrame',
        versionadded='',
        axis=''))
    def aggregate(self, arg, *args, **kwargs):
        return super(DataFrameGroupBy, self).aggregate(arg, *args, **kwargs)
    agg = aggregate
    def _gotitem(self, key, ndim, subset=None):
        """
        sub-classes to define
        return a sliced object
        Parameters
        ----------
        key : string / list of selections
        ndim : 1,2
            requested ndim of result
        subset : object, default None
            subset to act on
        """
        if ndim == 2:
            if subset is None:
                subset = self.obj
            return DataFrameGroupBy(subset, self.grouper, selection=key,
                                    grouper=self.grouper,
                                    exclusions=self.exclusions,
                                    as_index=self.as_index)
        elif ndim == 1:
            if subset is None:
                subset = self.obj[key]
            return SeriesGroupBy(subset, selection=key,
                                 grouper=self.grouper)
        raise AssertionError("invalid ndim for _gotitem")
    def _wrap_generic_output(self, result, obj):
        # Groups become columns first, then transposed for axis-0 groupbys.
        result_index = self.grouper.levels[0]
        if self.axis == 0:
            return DataFrame(result, index=obj.columns,
                             columns=result_index).T
        else:
            return DataFrame(result, index=obj.index,
                             columns=result_index)
    def _get_data_to_aggregate(self):
        # Returns (BlockManager, agg axis); axis-1 groupbys work on the
        # transposed manager.
        obj = self._obj_with_exclusions
        if self.axis == 1:
            return obj.T._data, 1
        else:
            return obj._data, 1
    def _insert_inaxis_grouper_inplace(self, result):
        # zip in reverse so we can always insert at loc 0
        izip = zip(* map(reversed, (
            self.grouper.names,
            self.grouper.get_group_levels(),
            [grp.in_axis for grp in self.grouper.groupings])))
        for name, lev, in_axis in izip:
            if in_axis:
                result.insert(0, name, lev)
    def _wrap_aggregated_output(self, output, names=None):
        agg_axis = 0 if self.axis == 1 else 1
        agg_labels = self._obj_with_exclusions._get_axis(agg_axis)
        output_keys = self._decide_output_index(output, agg_labels)
        if not self.as_index:
            # Group keys become ordinary leading columns instead of an index.
            result = DataFrame(output, columns=output_keys)
            self._insert_inaxis_grouper_inplace(result)
            result = result._consolidate()
        else:
            index = self.grouper.result_index
            result = DataFrame(output, index=index, columns=output_keys)
        if self.axis == 1:
            result = result.T
        return self._reindex_output(result)._convert(datetime=True)
    def _wrap_transformed_output(self, output, names=None):
        return DataFrame(output, index=self.obj.index)
    def _wrap_agged_blocks(self, items, blocks):
        # Rebuild a frame from per-block aggregation results.
        if not self.as_index:
            index = np.arange(blocks[0].values.shape[-1])
            mgr = BlockManager(blocks, [items, index])
            result = DataFrame(mgr)
            self._insert_inaxis_grouper_inplace(result)
            result = result._consolidate()
        else:
            index = self.grouper.result_index
            mgr = BlockManager(blocks, [items, index])
            result = DataFrame(mgr)
        if self.axis == 1:
            result = result.T
        return self._reindex_output(result)._convert(datetime=True)
    def _reindex_output(self, result):
        """
        If we have categorical groupers, then we want to make sure that
        we have a fully reindex-output to the levels. These may have not
        participated in the groupings (e.g. may have all been
        nan groups);
        This can re-expand the output space
        """
        # we need to re-expand the output space to accommodate all values
        # whether observed or not in the cartesian product of our groups
        groupings = self.grouper.groupings
        if groupings is None:
            return result
        elif len(groupings) == 1:
            return result
        # if we only care about the observed values
        # we are done
        elif self.observed:
            return result
        # reindexing only applies to a Categorical grouper
        elif not any(isinstance(ping.grouper, (Categorical, CategoricalIndex))
                     for ping in groupings):
            return result
        levels_list = [ping.group_index for ping in groupings]
        index, _ = MultiIndex.from_product(
            levels_list, names=self.grouper.names).sortlevel()
        if self.as_index:
            d = {self.obj._get_axis_name(self.axis): index, 'copy': False}
            return result.reindex(**d)
        # GH 13204
        # Here, the categorical in-axis groupers, which need to be fully
        # expanded, are columns in `result`. An idea is to do:
        # result = result.set_index(self.grouper.names)
        #                .reindex(index).reset_index()
        # but special care has to be taken because of possible not-in-axis
        # groupers.
        # So, we manually select and drop the in-axis grouper columns,
        # reindex `result`, and then reset the in-axis grouper columns.
        # Select in-axis groupers
        in_axis_grps = [(i, ping.name) for (i, ping)
                        in enumerate(groupings) if ping.in_axis]
        g_nums, g_names = zip(*in_axis_grps)
        result = result.drop(labels=list(g_names), axis=1)
        # Set a temp index and reindex (possibly expanding)
        result = result.set_index(self.grouper.result_index
                                  ).reindex(index, copy=False)
        # Reset in-axis grouper columns
        # (using level numbers `g_nums` because level names may not be unique)
        result = result.reset_index(level=g_nums)
        return result.reset_index(drop=True)
    def _iterate_column_groupbys(self):
        # Yield a (column name, SeriesGroupBy) pair per selected column.
        for i, colname in enumerate(self._selected_obj.columns):
            yield colname, SeriesGroupBy(self._selected_obj.iloc[:, i],
                                         selection=colname,
                                         grouper=self.grouper,
                                         exclusions=self.exclusions)
    def _apply_to_column_groupbys(self, func):
        from pandas.core.reshape.concat import concat
        return concat(
            (func(col_groupby) for _, col_groupby
             in self._iterate_column_groupbys()),
            keys=self._selected_obj.columns, axis=1)
    def _fill(self, direction, limit=None):
        """Overridden method to join grouped columns in output"""
        res = super(DataFrameGroupBy, self)._fill(direction, limit=limit)
        output = collections.OrderedDict(
            (grp.name, grp.grouper) for grp in self.grouper.groupings)
        from pandas import concat
        return concat((self._wrap_transformed_output(output), res), axis=1)
    def count(self):
        """ Compute count of group, excluding missing values """
        from pandas.core.dtypes.missing import _isna_ndarraylike as _isna
        data, _ = self._get_data_to_aggregate()
        ids, _, ngroups = self.grouper.group_info
        mask = ids != -1
        # Per block: flag cells that are non-null AND in a real group.
        val = ((mask & ~_isna(np.atleast_2d(blk.get_values())))
               for blk in data.blocks)
        loc = (blk.mgr_locs for blk in data.blocks)
        counter = partial(
            lib.count_level_2d, labels=ids, max_bin=ngroups, axis=1)
        blk = map(make_block, map(counter, val), loc)
        return self._wrap_agged_blocks(data.items, list(blk))
    def nunique(self, dropna=True):
        """
        Return DataFrame with number of distinct observations per group for
        each column.
        .. versionadded:: 0.20.0
        Parameters
        ----------
        dropna : boolean, default True
            Don't include NaN in the counts.
        Returns
        -------
        nunique: DataFrame
        Examples
        --------
        >>> df = pd.DataFrame({'id': ['spam', 'egg', 'egg', 'spam',
        ...                           'ham', 'ham'],
        ...                    'value1': [1, 5, 5, 2, 5, 5],
        ...                    'value2': list('abbaxy')})
        >>> df
             id  value1 value2
        0  spam       1      a
        1   egg       5      b
        2   egg       5      b
        3  spam       2      a
        4   ham       5      x
        5   ham       5      y
        >>> df.groupby('id').nunique()
              id  value1  value2
        id
        egg    1       1       1
        ham    1       1       2
        spam   1       2       1
        # check for rows with the same id but conflicting values
        >>> df.groupby('id').filter(lambda g: (g.nunique() > 1).any())
             id  value1 value2
        0  spam       1      a
        3  spam       2      a
        4   ham       5      x
        5   ham       5      y
        """
        obj = self._selected_obj
        def groupby_series(obj, col=None):
            # Delegate to the Series implementation, one column at a time.
            return SeriesGroupBy(obj,
                                 selection=col,
                                 grouper=self.grouper).nunique(dropna=dropna)
        if isinstance(obj, Series):
            results = groupby_series(obj)
        else:
            from pandas.core.reshape.concat import concat
            results = [groupby_series(obj[col], col) for col in obj.columns]
            results = concat(results, axis=1)
        if not self.as_index:
            results.index = ibase.default_index(len(results))
        return results
    boxplot = boxplot_frame_groupby
class PanelGroupBy(NDFrameGroupBy):
    """Groupby implementation for (long-deprecated) Panel objects."""
    def aggregate(self, arg, *args, **kwargs):
        return super(PanelGroupBy, self).aggregate(arg, *args, **kwargs)
    # NOTE(review): ``agg`` binds the delegating ``aggregate`` above, but
    # ``aggregate`` is re-defined later in this class body, so after class
    # creation ``agg`` and ``aggregate`` are *different* functions.
    agg = aggregate
    def _iterate_slices(self):
        # Yield (item name, item slice) pairs, skipping excluded items.
        if self.axis == 0:
            # kludge
            if self._selection is None:
                slice_axis = self._selected_obj.items
            else:
                slice_axis = self._selection_list
            slicer = lambda x: self._selected_obj[x]
        else:
            raise NotImplementedError("axis other than 0 is not supported")
        for val in slice_axis:
            if val in self.exclusions:
                continue
            yield val, slicer(val)
    def aggregate(self, arg, *args, **kwargs):
        """
        Aggregate using input function or dict of {column -> function}
        Parameters
        ----------
        arg : function or dict
            Function to use for aggregating groups. If a function, must either
            work when passed a Panel or when passed to Panel.apply. If
            pass a dict, the keys must be DataFrame column names
        Returns
        -------
        aggregated : Panel
        """
        if isinstance(arg, compat.string_types):
            return getattr(self, arg)(*args, **kwargs)
        return self._aggregate_generic(arg, *args, **kwargs)
    def _wrap_generic_output(self, result, obj):
        # Rebuild a Panel around the aggregated data, then restore the
        # original axis order for axis-1/axis-2 groupings.
        if self.axis == 0:
            new_axes = list(obj.axes)
            new_axes[0] = self.grouper.result_index
        elif self.axis == 1:
            x, y, z = obj.axes
            new_axes = [self.grouper.result_index, z, x]
        else:
            x, y, z = obj.axes
            new_axes = [self.grouper.result_index, y, x]
        result = Panel._from_axes(result, new_axes)
        if self.axis == 1:
            result = result.swapaxes(0, 1).swapaxes(0, 2)
        elif self.axis == 2:
            result = result.swapaxes(0, 2)
        return result
    def _aggregate_item_by_item(self, func, *args, **kwargs):
        # Aggregate each item's DataFrame independently, one axis down.
        obj = self._obj_with_exclusions
        result = {}
        if self.axis > 0:
            for item in obj:
                try:
                    itemg = DataFrameGroupBy(obj[item],
                                             axis=self.axis - 1,
                                             grouper=self.grouper)
                    result[item] = itemg.aggregate(func, *args, **kwargs)
                # NOTE(review): this handler only re-raises; the try/except
                # has no effect at runtime.
                except (ValueError, TypeError):
                    raise
            new_axes = list(obj.axes)
            new_axes[self.axis] = self.grouper.result_index
            return Panel._from_axes(result, new_axes)
        else:
            raise ValueError("axis value must be greater than 0")
    def _wrap_aggregated_output(self, output, names=None):
        # Not implemented for Panel groupbys.
        raise com.AbstractMethodError(self)
| amolkahat/pandas | pandas/core/groupby/generic.py | Python | bsd-3-clause | 58,252 |
from __future__ import absolute_import, division, print_function
from dynd import nd
from pandas import DataFrame
from blaze import compute, Table, by, into
from blaze.expr import Expr, TableSymbol, Field
from blaze.dispatch import dispatch
from blaze.server import Server
from blaze.server.index import parse_index, emit_index
from blaze.server.client import Client, discover, resource
# Two-row fixture served over HTTP by the test server below.
df = DataFrame([['Alice', 100], ['Bob', 200]],
               columns=['name', 'amount'])
server = Server(datasets={'accounts': df})
test = server.app.test_client()
from blaze.server import client
# Route the blaze client's HTTP traffic through Flask's test client so
# these tests never need a running server.
client.requests = test  # OMG monkey patching
def test_expr_client():
    """Discovery and basic computations round-trip through the client."""
    conn = Client('localhost:6363', 'accounts')
    assert discover(conn) == discover(df)
    t = TableSymbol('t', discover(conn))
    assert compute(t.amount.sum(), conn) == 300
    assert 'name' in t.fields
    assert isinstance(t.name, Field)
    assert compute(t.name, conn) == ['Alice', 'Bob']
def test_expr_client_interactive():
    """Interactive Table works against the client, including grouping."""
    conn = Client('localhost:6363', 'accounts')
    t = Table(conn)
    assert compute(t.name) == ['Alice', 'Bob']
    grouped = by(t.name, min=t.amount.min(), max=t.amount.max())
    assert (into(set, compute(grouped)) ==
            set([('Alice', 100, 100), ('Bob', 200, 200)]))
def test_resource():
    """`blaze://host:port` URIs resolve to a working client."""
    conn = resource('blaze://localhost:6363', 'accounts')
    assert discover(conn) == discover(df)
def test_resource_default_port():
    """Omitting the port falls back to the server default."""
    conn = resource('blaze://localhost', 'accounts')
    assert discover(conn) == discover(df)
def test_resource_all_in_one():
    """Dataset name may be embedded in the URI with '::'."""
    conn = resource('blaze://localhost:6363::accounts')
    assert discover(conn) == discover(df)
# Minimal single-child expression used to exercise user-defined dispatch.
class CustomExpr(Expr):
    __slots__ = '_child',
    @property
    def dshape(self):
        # The expression's datashape is just its child's, unchanged.
        return self._child.dshape
@dispatch(CustomExpr, DataFrame)
def compute_up(expr, data, **kwargs):
    # Identity computation: a CustomExpr evaluates to its input frame.
    return data
def test_custom_expressions():
    """The server round-trips expression types it has never seen."""
    conn = Client('localhost:6363', 'accounts')
    t = TableSymbol('t', discover(conn))
    computed = compute(CustomExpr(t), conn)
    assert list(map(tuple, computed)) == into(list, df)
| vitan/blaze | blaze/server/tests/test_client.py | Python | bsd-3-clause | 2,083 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.conf.urls import include, url
from shop.urls import rest_api
from shop.urls import auth
from shop.urls import payment
app_name = 'shop'
# Mount the shop's sub-routers: REST API, authentication and payment flows.
urlpatterns = [
    url(r'^api/', include(rest_api)),
    url(r'^auth/', include(auth)),
    url(r'^payment/', include(payment)),
]
| divio/django-shop | shop/urls/__init__.py | Python | bsd-3-clause | 348 |
# -*- Mode:Python; indent-tabs-mode:nil; tab-width:4 -*-
#
# Copyright 2002 Ben Escoto <ben@emerose.org>
# Copyright 2007 Kenneth Loafman <kenneth@loafman.com>
#
# This file is part of duplicity.
#
# Duplicity is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 2 of the License, or (at your
# option) any later version.
#
# Duplicity is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with duplicity; if not, write to the Free Software Foundation,
# Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
import helper
import sys, unittest, time, types
from copy import copy
from duplicity import globals
from duplicity import dup_time
helper.setup()
class TimeTest:
    """Shared assertions for duplicity's dup_time module.

    Python 2-era code (uses ``types.IntType`` etc.); mixed into the
    TestCase subclasses below, once per filename scheme.
    """
    def testConversion(self):
        """test timetostring and stringtotime"""
        dup_time.setcurtime()
        assert type(dup_time.curtime) in (types.IntType, types.LongType)
        assert type(dup_time.curtimestr) is types.StringType
        # curtimestr may have been captured up to a second before curtime.
        assert (dup_time.cmp(int(dup_time.curtime), dup_time.curtimestr) == 0 or
                dup_time.cmp(int(dup_time.curtime) + 1, dup_time.curtimestr) == 0)
        time.sleep(1.05)
        assert dup_time.cmp(time.time(), dup_time.curtime) == 1
        assert dup_time.cmp(dup_time.timetostring(time.time()), dup_time.curtimestr) == 1
    def testConversion_separator(self):
        """Same as testConversion, but change time Separator"""
        prev_sep = copy(globals.time_separator)
        try:
            globals.time_separator = "_"
            self.testConversion()
        finally:
            # Always restore the global separator for later tests.
            globals.time_separator = prev_sep
    def testCmp(self):
        """Test time comparisons"""
        cmp = dup_time.cmp
        assert cmp(1,2) == -1
        assert cmp(2,2) == 0
        assert cmp(5,1) == 1
        assert cmp("2001-09-01T21:49:04Z", "2001-08-01T21:49:04Z") == 1
        assert cmp("2001-09-01T04:49:04+03:23", "2001-09-01T21:49:04Z") == -1
        assert cmp("2001-09-01T12:00:00Z", "2001-09-01T04:00:00-08:00") == 0
        assert cmp("2001-09-01T12:00:00-08:00", "2001-09-01T12:00:00-07:00") == 1
        assert cmp("2001-09-01T11:00:00Z", "20010901T120000Z") == -1
        assert cmp("2001-09-01T12:00:00Z", "20010901T120000Z") == 0
        assert cmp("2001-09-01T13:00:00Z", "20010901T120000Z") == 1
    def testCmp_separator(self):
        """Like testCmp but with new separator"""
        prev_sep = copy(globals.time_separator)
        try:
            globals.time_separator = "_"
            cmp = dup_time.cmp
            assert cmp(1,2) == -1
            assert cmp(2,2) == 0
            assert cmp(5,1) == 1
            assert cmp("2001-09-01T21_49_04Z", "2001-08-01T21_49_04Z") == 1
            assert cmp("2001-09-01T04_49_04+03_23", "2001-09-01T21_49_04Z") == -1
            assert cmp("2001-09-01T12_00_00Z", "2001-09-01T04_00_00-08_00") == 0
            assert cmp("2001-09-01T12_00_00-08_00", "2001-09-01T12_00_00-07_00") == 1
        finally:
            globals.time_separator = prev_sep
    def testStringtotime(self):
        """Test converting string to time"""
        timesec = int(time.time())
        assert timesec == int(dup_time.stringtotime(dup_time.timetostring(timesec)))
        # Malformed strings come back falsy rather than raising.
        assert not dup_time.stringtotime("2001-18-83T03:03:03Z")
        assert not dup_time.stringtotime("2001-01-23L03:03:03L")
        assert not dup_time.stringtotime("2001_01_23T03:03:03Z")
    def testIntervals(self):
        """Test converting strings to intervals"""
        i2s = dup_time.intstringtoseconds
        for s in ["32", "", "d", "231I", "MM", "s", "-2h"]:
            try: i2s(s)
            except dup_time.TimeException: pass
            else: assert 0, s
        assert i2s("7D") == 7*86400
        assert i2s("232s") == 232
        assert i2s("2M") == 2*30*86400
        assert i2s("400m") == 400*60
        assert i2s("1Y") == 365*86400
        assert i2s("30h") == 30*60*60
        assert i2s("3W") == 3*7*86400
    def testIntervalsComposite(self):
        """Like above, but allow composite intervals"""
        i2s = dup_time.intstringtoseconds
        assert i2s("7D2h") == 7*86400 + 2*3600
        assert i2s("2Y3s") == 2*365*86400 + 3
        assert i2s("1M2W4D2h5m20s") == (30*86400 + 2*7*86400 + 4*86400 +
                                        2*3600 + 5*60 + 20)
    def testPrettyIntervals(self):
        """Test printable interval conversion"""
        assert dup_time.inttopretty(3600) == "1 hour"
        assert dup_time.inttopretty(7220) == "2 hours 20 seconds"
        assert dup_time.inttopretty(0) == "0 seconds"
        assert dup_time.inttopretty(353) == "5 minutes 53 seconds"
        assert dup_time.inttopretty(3661) == "1 hour 1 minute 1 second"
        assert dup_time.inttopretty(353.234234) == "5 minutes 53.23 seconds"
    def testGenericString(self):
        """Test genstrtotime, conversion of arbitrary string to time"""
        g2t = dup_time.genstrtotime
        assert g2t('now', 1000) == 1000
        assert g2t('2h3s', 10000) == 10000 - 2*3600 - 3
        assert g2t('2001-09-01T21:49:04Z') == \
               dup_time.stringtotime('2001-09-01T21:49:04Z')
        assert g2t('2002-04-26T04:22:01') == \
               dup_time.stringtotime('2002-04-26T04:22:01' + dup_time.gettzd(0))
        t = dup_time.stringtotime('2001-05-12T00:00:00' + dup_time.gettzd(0))
        assert g2t('2001-05-12') == t
        assert g2t('2001/05/12') == t
        assert g2t('5/12/2001') == t
        assert g2t('123456') == 123456
    def testGenericStringErrors(self):
        """Test genstrtotime on some bad strings"""
        g2t = dup_time.genstrtotime
        self.assertRaises(dup_time.TimeException, g2t, "hello")
        self.assertRaises(dup_time.TimeException, g2t, "")
        self.assertRaises(dup_time.TimeException, g2t, "3q")
    # NOTE(review): near-duplicate of testConversion's round-trip check;
    # the name looks like a typo ("Convertion").
    def testConvertion(self):
        t = int(time.time())
        assert dup_time.stringtotime(dup_time.timetostring(t)) == t
# Run the shared suite with the modern filename scheme.
class TimeTest1(TimeTest, unittest.TestCase):
    def setUp(self):
        globals.old_filenames = False
# Run the shared suite again with the legacy filename scheme.
class TimeTest2(TimeTest, unittest.TestCase):
    def setUp(self):
        globals.old_filenames = True
# Execute both filename-scheme variants when run directly.
if __name__ == '__main__':
    unittest.main()
| AZed/duplicity | testing/tests/dup_timetest.py | Python | gpl-2.0 | 6,530 |
from .models import Concept, Type
from conceptpower import Conceptpower
from urlparse import urlparse
from django.conf import settings
logger = settings.LOGGER
class AuthorityManager(object):
    """Marker base class for external concept-authority backends."""
    pass
class ConceptpowerAuthority(AuthorityManager, Conceptpower):
    """Authority backend for the ASU Conceptpower REST service."""
    __name__ = 'ConceptpowerAuthority'
    endpoint = 'http://chps.asu.edu/conceptpower/rest/'
    # Namespace is matched against URIs in get_by_namespace().
    namespace = '{http://www.digitalhps.org/}'
# Register AuthorityManagers here.
# searchall() and get_by_namespace() consult this tuple.
authority_managers = (
    ConceptpowerAuthority,
)
def searchall(query):
    """Search every registered authority, collecting a Concept per hit."""
    found = []
    for manager_class in authority_managers:
        manager = manager_class()
        for result in manager.search(query):
            found.append(concept_from_result(result, manager_class.__name__))
    return found
def concept_from_result(result, authority):
    """Get or create a :class:`.Concept` from an authority search result.

    Parameters
    ----------
    result : dict
        Raw record from an :class:`.AuthorityManager` search, with keys
        ``id``, ``lemma``, ``description`` and (optionally) ``type_uri``.
    authority : str
        ``__name__`` of the :class:`.AuthorityManager` that produced it.

    Returns
    -------
    :class:`.Concept`
    """
    logger.debug('Create Concept from result {0}.'.format(result['id']))
    type_instance = None
    # .get() collapses the original nested membership/None checks: a
    # missing key and an explicit None are treated identically.
    type_uri = result.get('type_uri')
    if type_uri is not None:
        logger.debug('Create Type from result {0}.'.format(type_uri))
        type_instance = Type.objects.get_or_create(uri=type_uri)[0]
    concept = Concept.objects.get_or_create(
        uri=result['id'],
        defaults = {
            'label': result['lemma'],
            'authority': authority,
            'description': result['description'],
            'resolved': True,
            'typed': (type_instance or None),
        })[0]
    return concept
def resolve(sender, instance):
    """
    Resolve :class:`.Concept`\s and :class:`.Type`\s using the registered
    :class:`.AuthorityManager`\s.
    Parameters
    ----------
    sender : class
    instance : :class:`.Type` or :class:`.Concept`
    """
    logger.debug(
        'Received post_save signal for Concept {0}.'.format(instance.id))
    if instance is not None:
        # Configure based on sender model class.
        instance_cast = instance.cast()
        if type(instance_cast) is Concept:
            get_method = 'get'
            label_field = 'lemma'
        elif type(instance_cast) is Type:
            get_method = 'get_type'
            label_field = 'type'
        # Skip any instance that has already been resolved, or that lacks a URI.
        if not instance.resolved and instance.uri is not None:
            logger.debug('Instance {0} not yet resolved.'.format(instance.id))
            # Get AuthorityManager classes by namespace.
            managers = get_by_namespace(get_namespace(instance.uri))
            logger.debug(
                'Found {0} managers for {1}'.format(len(managers),instance.uri))
            # Try each AuthorityManager...
            for manager_class in managers:
                if instance.resolved: break # ...until success.
                try:
                    manager = manager_class()
                    method = getattr(manager, get_method)
                    concept_data = method(instance.uri)
                    # NOTE(review): ``authority`` is assigned before the
                    # remaining lookups succeed, so a later KeyError still
                    # leaves this attribute mutated on the instance.
                    instance.authority = manager.__name__
                    logger.debug(
                        'Trying AuthorityManager {0}.'.format(manager.__name__))
                    # Update description, label, (and typed).
                    instance.description = concept_data['description']
                    instance.label = concept_data[label_field]
                    # For Types, this will create a cascade of post_save
                    # signals resulting in a crawl up the Type ontology
                    # based on the ``supertype`` property.
                    if type(instance_cast) is Concept:
                        type_uri = concept_data['type_uri']
                    elif type(instance_cast) is Type:
                        try:
                            type_uri = concept_data['supertype_uri']
                        except KeyError:
                            type_uri = None
                    if type_uri is not None:
                        type_instance = Type.objects.get_or_create(
                            uri=type_uri)[0]
                        instance.typed = type_instance
                        logger.debug(
                            'Added Type {0} to Concept {1}.'.format(
                                type_instance.uri, instance.uri))
                    instance.resolved = True
                    instance.save()
                except Exception as E:
                    # Broad catch: any failure is logged and the next
                    # registered manager is tried instead.
                    logger.error('Encountered Exception {0}.'.format(E))
                    continue
def get_namespace(uri):
    """
    Extract namespace from URI.
    """
    parsed = urlparse(uri)
    # Without both a scheme and a host there is no namespace to extract.
    if parsed.scheme == '' or parsed.netloc == '':
        raise ValueError("Could not determine namespace for {0}.".format(uri))
    namespace = parsed.scheme + "://" + parsed.netloc + "/"
    return "{" + namespace + "}"
def get_by_namespace(namespace):
    """
    Retrieve a registered :class:`AuthorityManager` by its namespace.
    """
    matches = []
    for manager in authority_managers:
        if manager.namespace == namespace:
            matches.append(manager)
    return matches
| diging/jars | concepts/authorities.py | Python | gpl-3.0 | 4,956 |
# Interface for STSADM.EXE
#
# Allows users to execute STSADM operations by calling the
# run method. The run method takes an operation argument
# and a list of name-value pairs for the operation arguments
#
# import references
import clr
clr.AddReference("System")
from System import Environment
from System.IO import Path
from System.Diagnostics import Process
# Get Path to stsadm.exe
common_dir = Environment.GetFolderPath(Environment.SpecialFolder.CommonProgramFiles)
# NOTE(review): the "12" hive pins this to the SharePoint 2007-era install
# layout -- confirm before using against newer farm versions.
STSADMPATH = Path.Combine(common_dir, "Microsoft Shared\\Web Server Extensions\\12\\Bin\\stsadm.exe")
def run(operation, **args):
    """Execute a STSADM operation"""
    # Build "-o <operation> <formatted name/value pairs>" and invoke it.
    command = "-o " + operation + " " + _format_args(args)
    _execute_command(command)
def _format_args(args):
"""Formats a series of arguments as an STSADM argument string"""
argstring = ""
for kw in args.keys():
if type(args[kw]) is bool:
if args[kw] == True:
argstring += "-" + kw
else:
argstring += "-" + kw + " \"" + args[kw] + "\""
argstring += " "
return argstring
def _execute_command(argstring, workingdir=None):
    """Execute an STSADM command and block until it exits."""
    proc = Process()
    # Shell execution is disabled so the argument string is passed verbatim.
    proc.StartInfo.UseShellExecute = False
    proc.StartInfo.FileName = "\"" + STSADMPATH + "\""
    proc.StartInfo.Arguments = argstring
    if workingdir != None:
        proc.StartInfo.WorkingDirectory = workingdir
    proc.Start()
    proc.WaitForExit()
| glenc/sp.py | src/sp/stsadm.py | Python | bsd-3-clause | 1,452 |
# -*- coding: utf-8 -*-
import pycurl
from module.network.HTTPRequest import BadHeader
from module.network.RequestFactory import getRequest as get_request
from module.plugins.internal.Hook import Hook, threaded
class BypassCaptchaException(Exception):
    """Raised when the BypassCaptcha.com service reports a failure."""

    def __init__(self, err):
        self.err = err

    def get_code(self):
        """Return the raw error value reported by the service."""
        return self.err

    def __str__(self):
        return "<BypassCaptchaException %s>" % self.err

    def __repr__(self):
        # repr intentionally mirrors str for log readability.
        return self.__str__()
class BypassCaptcha(Hook):
    """pyLoad hook that forwards unsolved captchas to BypassCaptcha.com."""
    __name__ = "BypassCaptcha"
    __type__ = "hook"
    __version__ = "0.08"
    __status__ = "testing"
    __config__ = [("passkey" , "password", "Access key" , "" ),
                  ("check_client", "bool" , "Don't use if client is connected", True)]
    __description__ = """Send captchas to BypassCaptcha.com"""
    __license__ = "GPLv3"
    __authors__ = [("RaNaN" , "RaNaN@pyload.org" ),
                   ("Godofdream", "soilfcition@gmail.com"),
                   ("zoidberg" , "zoidberg@mujmail.cz" )]
    # Vendor key identifying pyLoad to the service.
    PYLOAD_KEY = "4f771155b640970d5607f919a615bdefc67e7d32"
    # Service endpoints: upload a captcha, report a result, query credits.
    SUBMIT_URL = "http://bypasscaptcha.com/upload.php"
    RESPOND_URL = "http://bypasscaptcha.com/check_value.php"
    GETCREDITS_URL = "http://bypasscaptcha.com/ex_left.php"
    def get_credits(self):
        """Return the number of captcha credits left on the account."""
        res = self.load(self.GETCREDITS_URL, post={'key': self.get_config('passkey')})
        # Response is `Key Value` lines; `Left` holds the remaining credits.
        data = dict(x.split(' ', 1) for x in res.splitlines())
        return int(data['Left'])
    def submit(self, captcha, captchaType="file", match=None):
        """Upload a captcha image; return its (task id, solution) pair.

        Raises BypassCaptchaException if the response contains no solution.
        """
        req = get_request()
        #: Raise timeout threshold
        req.c.setopt(pycurl.LOW_SPEED_TIME, 80)
        try:
            res = self.load(self.SUBMIT_URL,
                             post={'vendor_key': self.PYLOAD_KEY,
                                   'key': self.get_config('passkey'),
                                   'gen_task_id': "1",
                                   'file': (pycurl.FORM_FILE, captcha)},
                             req=req)
        finally:
            req.close()
        # Response is `Key Value` lines, e.g. `Value <solution>` / `TaskId <id>`.
        data = dict(x.split(' ', 1) for x in res.splitlines())
        if not data or "Value" not in data:
            raise BypassCaptchaException(res)
        result = data['Value']
        ticket = data['TaskId']
        self.log_debug("Result %s : %s" % (ticket, result))
        return ticket, result
    def respond(self, ticket, success):
        """Report back to the service whether its solution worked."""
        try:
            res = self.load(self.RESPOND_URL, post={'task_id': ticket, 'key': self.get_config('passkey'),
                                                    'cv': 1 if success else 0})
        except BadHeader, e:
            self.log_error(_("Could not send response"), e)
    def captcha_task(self, task):
        """Claim a textual captcha task if the service is usable for it."""
        if "service" in task.data:
            return False
        if not task.isTextual():
            return False
        if not self.get_config('passkey'):
            return False
        # Skip if a GUI client is attached and configured to solve captchas.
        if self.pyload.isClientConnected() and self.get_config('check_client'):
            return False
        if self.get_credits() > 0:
            task.handler.append(self)
            task.data['service'] = self.__name__
            task.setWaiting(100)
            self._process_captcha(task)
        else:
            self.log_info(_("Your %s account has not enough credits") % self.__name__)
    def captcha_correct(self, task):
        """Notify the service that its solution was accepted."""
        if task.data['service'] is self.__name__ and "ticket" in task.data:
            self.respond(task.data['ticket'], True)
    def captcha_invalid(self, task):
        """Notify the service that its solution was rejected."""
        if task.data['service'] is self.__name__ and "ticket" in task.data:
            self.respond(task.data['ticket'], False)
    @threaded
    def _process_captcha(self, task):
        """Submit the captcha file and attach ticket/result to the task."""
        c = task.captchaFile
        try:
            ticket, result = self.submit(c)
        except BypassCaptchaException, e:
            task.error = e.get_code()
            return
        task.data['ticket'] = ticket
        task.setResult(result)
| fayf/pyload | module/plugins/hooks/BypassCaptcha.py | Python | gpl-3.0 | 4,068 |
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Generated code. DO NOT EDIT!
#
# Snippet for ImportTestCases
# NOTE: This snippet has been automatically generated for illustrative purposes only.
# It may require modifications to work in your environment.
# To install the latest published package dependency, execute the following:
# python3 -m pip install google-cloud-dialogflowcx
# [START dialogflow_v3beta1_generated_TestCases_ImportTestCases_async]
from google.cloud import dialogflowcx_v3beta1
async def sample_import_test_cases():
    """Import test cases into a Dialogflow CX agent and print the result.

    Demonstrates the long-running ImportTestCases operation using the
    asynchronous client.
    """
    # Create a client
    client = dialogflowcx_v3beta1.TestCasesAsyncClient()

    # Initialize request argument(s)
    request = dialogflowcx_v3beta1.ImportTestCasesRequest(
        gcs_uri="gcs_uri_value",
        parent="parent_value",
    )

    # Make the request. Async client methods are coroutines, so the call
    # itself must be awaited to obtain the operation object; without the
    # `await`, `operation` is a bare coroutine and `.result()` would fail.
    operation = await client.import_test_cases(request=request)

    print("Waiting for operation to complete...")

    response = await operation.result()

    # Handle the response
    print(response)
# [END dialogflow_v3beta1_generated_TestCases_ImportTestCases_async]
| googleapis/python-dialogflow-cx | samples/generated_samples/dialogflow_v3beta1_generated_test_cases_import_test_cases_async.py | Python | apache-2.0 | 1,636 |
# -*- coding: utf-8 -*-
# vim: sw=4:ts=4:expandtab
"""
ckanutils
~~~~~~~~~
Provides methods for interacting with a CKAN instance
Examples:
literal blocks::
python example_google.py
Attributes:
CKAN_KEYS (List[str]): available CKAN keyword arguments.
"""
from __future__ import (
absolute_import, division, print_function, with_statement,
unicode_literals)
import requests
import ckanapi
import itertools as it
from os import environ, path as p
from datetime import datetime as dt
from operator import itemgetter
from pprint import pprint
from ckanapi import NotFound, NotAuthorized, ValidationError
from tabutils import process as pr, io, fntools as ft, convert as cv
__version__ = '0.14.9'
__title__ = 'ckanutils'
__author__ = 'Reuben Cummings'
__description__ = 'Miscellaneous CKAN utility library'
__email__ = 'reubano@gmail.com'
__license__ = 'MIT'
__copyright__ = 'Copyright 2015 Reuben Cummings'
CKAN_KEYS = ['hash_table', 'remote', 'api_key', 'ua', 'force', 'quiet']
API_KEY_ENV = 'CKAN_API_KEY'
REMOTE_ENV = 'CKAN_REMOTE_URL'
UA_ENV = 'CKAN_USER_AGENT'
DEF_USER_AGENT = 'ckanutils/%s' % __version__
DEF_HASH_PACK = 'hash-table'
DEF_HASH_RES = 'hash-table.csv'
CHUNKSIZE_ROWS = 10 ** 3
CHUNKSIZE_BYTES = 2 ** 20
ENCODING = 'utf-8'
class CKAN(object):
"""Interacts with a CKAN instance.
Attributes:
force (bool): Force.
verbose (bool): Print debug statements.
quiet (bool): Suppress debug statements.
address (str): CKAN url.
hash_table (str): The hash table package id.
keys (List[str]):
"""
def __init__(self, **kwargs):
"""Initialization method.
Args:
**kwargs: Keyword arguments.
Kwargs:
hash_table (str): The hash table package id.
remote (str): The remote ckan url.
api_key (str): The ckan api key.
ua (str): The user agent.
force (bool): Force (default: True).
quiet (bool): Suppress debug statements (default: False).
Returns:
New instance of :class:`CKAN`
Examples:
>>> CKAN() #doctest: +ELLIPSIS
<ckanutils.CKAN object at 0x...>
"""
default_ua = environ.get(UA_ENV, DEF_USER_AGENT)
def_remote = environ.get(REMOTE_ENV)
def_api_key = environ.get(API_KEY_ENV)
remote = kwargs.get('remote', def_remote)
self.api_key = kwargs.get('api_key', def_api_key)
self.force = kwargs.get('force', True)
self.quiet = kwargs.get('quiet')
self.user_agent = kwargs.get('ua', default_ua)
self.verbose = not self.quiet
self.hash_table = kwargs.get('hash_table', DEF_HASH_PACK)
ckan_kwargs = {'apikey': self.api_key, 'user_agent': self.user_agent}
attr = 'RemoteCKAN' if remote else 'LocalCKAN'
ckan = getattr(ckanapi, attr)(remote, **ckan_kwargs)
self.address = ckan.address
self.package_show = ckan.action.package_show
try:
self.hash_table_pack = self.package_show(id=self.hash_table)
except NotFound:
self.hash_table_pack = None
except ValidationError as err:
if err.error_dict.get('resource_id') == ['Not found: Resource']:
self.hash_table_pack = None
else:
raise err
try:
self.hash_table_id = self.hash_table_pack['resources'][0]['id']
except (IndexError, TypeError):
self.hash_table_id = None
# shortcuts
self.datastore_search = ckan.action.datastore_search
self.datastore_create = ckan.action.datastore_create
self.datastore_delete = ckan.action.datastore_delete
self.datastore_upsert = ckan.action.datastore_upsert
self.datastore_search = ckan.action.datastore_search
self.resource_show = ckan.action.resource_show
self.resource_create = ckan.action.resource_create
self.package_create = ckan.action.package_create
self.package_update = ckan.action.package_update
self.package_privatize = ckan.action.bulk_update_private
self.revision_show = ckan.action.revision_show
self.organization_list = ckan.action.organization_list_for_user
self.organization_show = ckan.action.organization_show
self.license_list = ckan.action.license_list
self.group_list = ckan.action.group_list
self.user = ckan.action.get_site_user()
def create_table(self, resource_id, fields, **kwargs):
"""Creates a datastore table for an existing filestore resource.
Args:
resource_id (str): The filestore resource id.
fields (List[dict]): fields/columns and their extra metadata.
**kwargs: Keyword arguments that are passed to datastore_create.
Kwargs:
force (bool): Create resource even if read-only.
aliases (List[str]): name(s) for read only alias(es) of the
resource.
primary_key (List[str]): field(s) that represent a unique key.
indexes (List[str]): index(es) on table.
Returns:
dict: The newly created data object.
Raises:
ValidationError: If unable to validate user on ckan site.
NotFound: If unable to find resource.
Examples:
>>> CKAN(quiet=True).create_table('rid', fields=[{'id': 'field', \
'type': 'text'}])
Traceback (most recent call last):
NotFound: Resource `rid` was not found in filestore.
"""
kwargs.setdefault('force', self.force)
kwargs['resource_id'] = resource_id
kwargs['fields'] = fields
err_msg = 'Resource `%s` was not found in filestore.' % resource_id
if self.verbose:
print('Creating table `%s` in datastore...' % resource_id)
try:
return self.datastore_create(**kwargs)
except ValidationError as err:
if err.error_dict.get('resource_id') == ['Not found: Resource']:
raise NotFound(err_msg)
else:
raise
def delete_table(self, resource_id, **kwargs):
"""Deletes a datastore table.
Args:
resource_id (str): The datastore resource id.
**kwargs: Keyword arguments that are passed to datastore_create.
Kwargs:
force (bool): Delete resource even if read-only.
filters (dict): Filters to apply before deleting, e.g.,
{"name": "fred"}. If missing delete whole table and all
dependent views.
Returns:
dict: Original filters sent if table was found, `None` otherwise.
Raises:
ValidationError: If unable to validate user on ckan site.
Examples:
>>> CKAN(quiet=True).delete_table('rid')
Can't delete. Table `rid` was not found in datastore.
"""
kwargs.setdefault('force', self.force)
kwargs['resource_id'] = resource_id
init_msg = "Can't delete. Table `%s`" % resource_id
err_msg = '%s was not found in datastore.' % init_msg
read_msg = '%s is read only.' % init_msg
if self.verbose:
print('Deleting table `%s` from datastore...' % resource_id)
try:
result = self.datastore_delete(**kwargs)
except NotFound:
print(err_msg)
result = None
except ValidationError as err:
if 'read-only' in err.error_dict:
print(read_msg)
print("Set 'force' to True and try again.")
result = None
elif err.error_dict.get('resource_id') == ['Not found: Resource']:
print(err_msg)
result = None
else:
raise err
return result
def insert_records(self, resource_id, records, **kwargs):
"""Inserts records into a datastore table.
Args:
resource_id (str): The datastore resource id.
records (List[dict]): The records to insert.
**kwargs: Keyword arguments that are passed to datastore_create.
Kwargs:
method (str): Insert method. One of ['update, 'insert', 'upsert']
(default: 'insert').
force (bool): Create resource even if read-only.
start (int): Row number to start from (zero indexed).
stop (int): Row number to stop at (zero indexed).
chunksize (int): Number of rows to write at a time.
Returns:
int: Number of records inserted.
Raises:
NotFound: If unable to find the resource.
Examples:
>>> CKAN(quiet=True).insert_records('rid', [{'field': 'value'}])
Traceback (most recent call last):
NotFound: Resource `rid` was not found in filestore.
"""
recoded = pr.json_recode(records)
chunksize = kwargs.pop('chunksize', 0)
start = kwargs.pop('start', 0)
stop = kwargs.pop('stop', None)
kwargs.setdefault('force', self.force)
kwargs.setdefault('method', 'insert')
kwargs['resource_id'] = resource_id
count = 1
for chunk in ft.chunk(recoded, chunksize, start=start, stop=stop):
length = len(chunk)
if self.verbose:
print(
'Adding records %i - %i to resource %s...' % (
count, count + length - 1, resource_id))
kwargs['records'] = chunk
err_msg = 'Resource `%s` was not found in filestore.' % resource_id
try:
self.datastore_upsert(**kwargs)
except requests.exceptions.ConnectionError as err:
if 'Broken pipe' in err.message[1]:
print('Chunksize too large. Try using a smaller chunksize.')
return 0
else:
raise err
except NotFound:
# Keep exception message consistent with the others
raise NotFound(err_msg)
except ValidationError as err:
if err.error_dict.get('resource_id') == ['Not found: Resource']:
raise NotFound(err_msg)
else:
raise err
count += length
return count
def get_hash(self, resource_id):
"""Gets the hash of a datastore table.
Args:
resource_id (str): The datastore resource id.
Returns:
str: The datastore resource hash.
Raises:
NotFound: If `hash_table_id` isn't set or not in datastore.
NotAuthorized: If unable to authorize ckan user.
Examples:
>>> CKAN(hash_table='hash_jhb34rtj34t').get_hash('rid')
Traceback (most recent call last):
NotFound: {u'item': u'package', u'message': u'Package \
`hash_jhb34rtj34t` was not found!'}
"""
if not self.hash_table_pack:
message = 'Package `%s` was not found!' % self.hash_table
raise NotFound({'message': message, 'item': 'package'})
if not self.hash_table_id:
message = 'No resources found in package `%s`!' % self.hash_table
raise NotFound({'message': message, 'item': 'resource'})
kwargs = {
'resource_id': self.hash_table_id,
'filters': {'datastore_id': resource_id},
'fields': 'hash',
'limit': 1
}
err_msg = 'Resource `%s` was not found' % resource_id
alt_msg = 'Hash table `%s` was not found' % self.hash_table_id
try:
result = self.datastore_search(**kwargs)
resource_hash = result['records'][0]['hash']
except NotFound:
message = '%s in datastore!' % alt_msg
raise NotFound({'message': message, 'item': 'datastore'})
except ValidationError as err:
if err.error_dict.get('resource_id') == ['Not found: Resource']:
raise NotFound('%s in filestore.' % err_msg)
else:
raise err
except IndexError:
print('%s in hash table.' % err_msg)
resource_hash = None
if self.verbose:
print('Resource `%s` hash is `%s`.' % (resource_id, resource_hash))
return resource_hash
def fetch_resource(self, resource_id, user_agent=None, stream=True):
"""Fetches a single resource from filestore.
Args:
resource_id (str): The filestore resource id.
Kwargs:
user_agent (str): The user agent.
stream (bool): Stream content (default: True).
Returns:
obj: requests.Response object.
Raises:
NotFound: If unable to find the resource.
NotAuthorized: If access to fetch resource is denied.
Examples:
>>> CKAN(quiet=True).fetch_resource('rid')
Traceback (most recent call last):
NotFound: Resource `rid` was not found in filestore.
"""
user_agent = user_agent or self.user_agent
err_msg = 'Resource `%s` was not found in filestore.' % resource_id
try:
resource = self.resource_show(id=resource_id)
except NotFound:
raise NotFound(err_msg)
except ValidationError as err:
if err.error_dict.get('resource_id') == ['Not found: Resource']:
raise NotFound(err_msg)
else:
raise err
url = resource.get('perma_link') or resource.get('url')
if self.verbose:
print('Downloading url %s...' % url)
headers = {'User-Agent': user_agent}
r = requests.get(url, stream=stream, headers=headers)
err_msg = 'Access to fetch resource %s was denied.' % resource_id
if any('403' in h.headers.get('x-ckan-error', '') for h in r.history):
raise NotAuthorized(err_msg)
elif r.status_code == 401:
raise NotAuthorized(err_msg)
else:
return r
def get_filestore_update_func(self, resource, **kwargs):
"""Returns the function to create or update a single resource on
filestore. To create a resource, you must supply either `url`,
`filepath`, or `fileobj`.
Args:
resource (dict): The resource passed to resource_create.
**kwargs: Keyword arguments that are passed to resource_create.
Kwargs:
url (str): New file url (for file link, requires `format`).
format (str): New file format (for file link, requires `url`).
fileobj (obj): New file like object (for file upload).
filepath (str): New file path (for file upload).
post (bool): Post data using requests instead of ckanapi.
name (str): The resource name.
description (str): The resource description.
hash (str): The resource hash.
Returns:
tuple: (func, args, data)
where func is `requests.post` if `post` option is specified,
`self.resource_create` otherwise. `args` and `data` should be
passed as *args and **kwargs respectively.
See also:
ckanutils._update_filestore
Examples:
>>> ckan = CKAN(quiet=True)
>>> resource = {
... 'name': 'name', 'package_id': 'pid', 'resource_id': 'rid',
... 'description': 'description', 'hash': 'hash'}
>>> kwargs = {'url': 'http://example.com/file', 'format': 'csv'}
>>> res = ckan.get_filestore_update_func(resource, **kwargs)
>>> func, args, kwargs = res
>>> func(*args, **kwargs)
Traceback (most recent call last):
NotFound: Not found
"""
post = kwargs.pop('post', None)
filepath = kwargs.pop('filepath', None)
fileobj = kwargs.pop('fileobj', None)
f = open(filepath, 'rb') if filepath else fileobj
resource.update(kwargs)
if post:
args = ['%s/api/action/resource_create' % self.address]
hdrs = {
'X-CKAN-API-Key': self.api_key, 'User-Agent': self.user_agent}
data = {'data': resource, 'headers': hdrs}
data.update({'files': {'upload': f}}) if f else None
func = requests.post
else:
args = []
resource.update({'upload': f}) if f else None
data = {
k: v for k, v in resource.items() if not isinstance(v, dict)}
func = self.resource_create
return (func, args, data)
def _update_filestore(self, func, *args, **kwargs):
"""Helps create or update a single resource on filestore.
To create a resource, you must supply either `url`, `filepath`, or
`fileobj`.
Args:
func (func): The resource passed to resource_create.
*args: Postional arguments that are passed to `func`
**kwargs: Keyword arguments that are passed to `func`.
Kwargs:
url (str): New file url (for file link).
fileobj (obj): New file like object (for file upload).
filepath (str): New file path (for file upload).
name (str): The resource name.
description (str): The resource description.
hash (str): The resource hash.
Returns:
obj: requests.Response object if `post` option is specified,
ckan resource object otherwise.
See also:
ckanutils.get_filestore_update_func
Examples:
>>> ckan = CKAN(quiet=True)
>>> url = 'http://example.com/file'
>>> resource = {'package_id': 'pid'}
>>> kwargs = {'name': 'name', 'url': url, 'format': 'csv'}
>>> res = ckan.get_filestore_update_func(resource, **kwargs)
>>> ckan._update_filestore(res[0], *res[1], **res[2])
Package `pid` was not found.
>>> resource['resource_id'] = 'rid'
>>> res = ckan.get_filestore_update_func(resource, **kwargs)
>>> ckan._update_filestore(res[0], *res[1], **res[2])
Resource `rid` was not found in filestore.
"""
data = kwargs.get('data', {})
files = kwargs.get('files', {})
resource_id = kwargs.get('resource_id', data.get('resource_id'))
package_id = kwargs.get('package_id', data.get('package_id'))
f = kwargs.get('upload', files.get('upload'))
err_msg = 'Resource `%s` was not found in filestore.' % resource_id
try:
r = func(*args, **kwargs) or {'id': None}
except NotFound:
pck_msg = 'Package `%s` was not found.' % package_id
print(err_msg if resource_id else pck_msg)
except ValidationError as err:
if err.error_dict.get('resource_id') == ['Not found: Resource']:
print(err_msg)
r = None
else:
raise err
except requests.exceptions.ConnectionError as err:
if 'Broken pipe' in err.message[1]:
print('File size too large. Try uploading a smaller file.')
r = None
else:
raise err
else:
return r
finally:
f.close() if f else None
def create_resource(self, package_id, **kwargs):
"""Creates a single resource on filestore. You must supply either
`url`, `filepath`, or `fileobj`.
Args:
package_id (str): The filestore package id.
**kwargs: Keyword arguments that are passed to resource_create.
Kwargs:
url (str): New file url (for file link).
filepath (str): New file path (for file upload).
fileobj (obj): New file like object (for file upload).
post (bool): Post data using requests instead of ckanapi.
name (str): The resource name (defaults to the filename).
description (str): The resource description.
hash (str): The resource hash.
Returns:
obj: requests.Response object if `post` option is specified,
ckan resource object otherwise.
Raises:
TypeError: If neither `url`, `filepath`, nor `fileobj` are supplied.
Examples:
>>> ckan = CKAN(quiet=True)
>>> ckan.create_resource('pid')
Traceback (most recent call last):
TypeError: You must specify either a `url`, `filepath`, or `fileobj`
>>> ckan.create_resource('pid', url='http://example.com/file')
Package `pid` was not found.
"""
if not any(map(kwargs.get, ['url', 'filepath', 'fileobj'])):
raise TypeError(
'You must specify either a `url`, `filepath`, or `fileobj`')
path = filter(None, map(kwargs.get, ['url', 'filepath', 'fileobj']))[0]
try:
if 'docs.google.com' in path:
def_name = path.split('gid=')[1].split('&')[0]
else:
def_name = p.basename(path)
except AttributeError:
def_name = None
file_format = 'csv'
else:
# copy/pasted from utils... fix later
if 'format=' in path:
file_format = path.split('format=')[1].split('&')[0]
else:
file_format = p.splitext(path)[1].lstrip('.')
kwargs.setdefault('name', def_name)
# Will get `ckan.logic.ValidationError` if url isn't set
kwargs.setdefault('url', 'http://example.com')
kwargs['format'] = file_format
resource = {'package_id': package_id}
if self.verbose:
print('Creating new resource in package %s...' % package_id)
func, args, data = self.get_filestore_update_func(resource, **kwargs)
return self._update_filestore(func, *args, **data)
def update_filestore(self, resource_id, **kwargs):
"""Updates a single resource on filestore.
Args:
resource_id (str): The filestore resource id.
**kwargs: Keyword arguments that are passed to resource_create.
Kwargs:
url (str): New file url (for file link).
filepath (str): New file path (for file upload).
fileobj (obj): New file like object (for file upload).
post (bool): Post data using requests instead of ckanapi.
name (str): The resource name.
description (str): The resource description.
hash (str): The resource hash.
Returns:
obj: requests.Response object if `post` option is specified,
ckan resource object otherwise.
Examples:
>>> CKAN(quiet=True).update_filestore('rid')
Resource `rid` was not found in filestore.
"""
err_msg = 'Resource `%s` was not found in filestore.' % resource_id
try:
resource = self.resource_show(id=resource_id)
except NotFound:
print(err_msg)
return None
except ValidationError as err:
if err.error_dict.get('resource_id') == ['Not found: Resource']:
raise NotFound(err_msg)
else:
raise err
else:
resource['package_id'] = self.get_package_id(resource_id)
if self.verbose:
print('Updating resource %s...' % resource_id)
f, args, data = self.get_filestore_update_func(resource, **kwargs)
return self._update_filestore(f, *args, **data)
def update_datastore(self, resource_id, filepath, **kwargs):
verbose = not kwargs.get('quiet')
chunk_rows = kwargs.get('chunksize_rows')
primary_key = kwargs.get('primary_key')
content_type = kwargs.get('content_type')
type_cast = kwargs.get('type_cast')
method = 'upsert' if primary_key else 'insert'
keys = ['aliases', 'primary_key', 'indexes']
try:
extension = p.splitext(filepath)[1].split('.')[1]
except (IndexError, AttributeError):
# no file extension given, e.g., a tempfile
extension = cv.ctype2ext(content_type)
try:
reader = io.get_reader(extension)
except TypeError:
print('Error: plugin for extension `%s` not found!' % extension)
return False
else:
records = reader(filepath, **kwargs)
first = records.next()
keys = first.keys()
records = it.chain([first], records)
if type_cast:
records, results = pr.detect_types(records)
types = results['types']
casted_records = pr.type_cast(records, types)
else:
types = [{'id': key, 'type': 'text'} for key in keys]
casted_records = records
if verbose:
print('Parsed types:')
pprint(types)
create_kwargs = {k: v for k, v in kwargs.items() if k in keys}
if not primary_key:
self.delete_table(resource_id)
insert_kwargs = {'chunksize': chunk_rows, 'method': method}
self.create_table(resource_id, types, **create_kwargs)
args = [resource_id, casted_records]
return self.insert_records(*args, **insert_kwargs)
    def find_ids(self, packages, **kwargs):
        """Finds resource/package id pairs matching the query kwargs.

        Args:
            packages (Iterable[dict]): CKAN packages to search.
            **kwargs: Query options consumed by :meth:`query`
                (`pnamed`, `ptagged`, `rnamed`, `rtagged`).

        Returns:
            The result of ``tabutils.process.find`` — presumably the first
            matching {'rid', 'pname'} entry plus remaining results;
            TODO confirm against the tabutils API.
        """
        default = {'rid': '', 'pname': ''}
        # Delegate the actual matching to self.query via tabutils.
        kwargs.update({'method': self.query, 'default': default})
        return pr.find(packages, **kwargs)
def get_package_id(self, resource_id):
"""Gets the package id of a single resource on filestore.
Args:
resource_id (str): The filestore resource id.
Returns:
str: The package id.
Examples:
>>> CKAN(quiet=True).get_package_id('rid')
Resource `rid` was not found in filestore.
"""
err_msg = 'Resource `%s` was not found in filestore.' % resource_id
try:
resource = self.resource_show(id=resource_id)
except NotFound:
print(err_msg)
return None
except ValidationError as err:
if err.error_dict.get('resource_id') == ['Not found: Resource']:
raise NotFound(err_msg)
else:
raise err
else:
revision = self.revision_show(id=resource['revision_id'])
return revision['packages'][0]
def create_hash_table(self, verbose=False):
kwargs = {
'resource_id': self.hash_table_id,
'fields': [
{'id': 'datastore_id', 'type': 'text'},
{'id': 'hash', 'type': 'text'}],
'primary_key': 'datastore_id'
}
if verbose:
print('Creating hash table...')
self.create_table(**kwargs)
def update_hash_table(self, resource_id, resource_hash, verbose=False):
records = [{'datastore_id': resource_id, 'hash': resource_hash}]
if verbose:
print('Updating hash table...')
self.insert_records(self.hash_table_id, records, method='upsert')
def get_update_date(self, item):
timestamps = {
'revision_timestamp': 'revision',
'last_modified': 'resource',
'metadata_modified': 'package'
}
for key, value in timestamps.items():
if key in item:
timestamp = item[key]
item_type = value
break
else:
keys = timestamps.keys()
msg = 'None of the following keys found in item: %s' % keys
raise TypeError(msg)
if not timestamp and item_type == 'resource':
# print('Resource timestamp is empty. Querying revision.')
timestamp = self.revision_show(id=item['revision_id'])['timestamp']
return dt.strptime(timestamp, '%Y-%m-%dT%H:%M:%S.%f')
def filter(self, items, tagged=None, named=None, updated=None):
for i in items:
if i['state'] != 'active':
continue
if updated and updated(self.get_update_date(i)):
yield i
continue
if named and named.lower() in i['name'].lower():
yield i
continue
tags = it.imap(itemgetter('name'), i['tags'])
is_tagged = tagged and 'tags' in i
if is_tagged and any(it.ifilter(lambda t: t == tagged, tags)):
yield i
continue
if not (named or tagged or updated):
yield i
    def query(self, packages, **kwargs):
        """Generates (resource id, package name) pairs matching a query.

        Packages and their resources are filtered via :meth:`filter` and
        traversed newest-first by update date.

        Args:
            packages (Iterable[dict]): CKAN packages to search.
            **kwargs: Query options: `pnamed`/`ptagged` apply to packages,
                `rnamed`/`rtagged` to their resources.

        Yields:
            dict: `{'rid': <resource id>, 'pname': <package name>}`.
        """
        # Split the combined kwargs into package-level and resource-level
        # filter options.
        pkwargs = {
            'named': kwargs.get('pnamed'),
            'tagged': kwargs.get('ptagged')}
        rkwargs = {
            'named': kwargs.get('rnamed'),
            'tagged': kwargs.get('rtagged')}
        # Sort newest-first by update date.
        skwargs = {'key': self.get_update_date, 'reverse': True}
        filtered_packages = self.filter(packages, **pkwargs)
        for pack in sorted(filtered_packages, **skwargs):
            # Re-fetch the full package; list entries may lack resources.
            package = self.package_show(id=pack['name'])
            resources = self.filter(package['resources'], **rkwargs)
            for resource in sorted(resources, **skwargs):
                yield {'rid': resource['id'], 'pname': package['name']}
| reubano/ckanutils | ckanutils.py | Python | mit | 29,704 |
from django.core import mail
from selenium.webdriver.common.keys import Keys
import re
from .base import FunctionalTest
TEST_EMAIL = 'edith@example.com'
SUBJECT = 'Your login link for Superlists'
class LoginTest(FunctionalTest):
    """Browser-driven test of the passwordless (email link) login flow."""
    def test_can_get_email_link_to_log_in(self):
        """Request a login link by email, follow it, then log out again."""
        # Edith goes to the awesome superlists site
        # and notices a "Log in" section in the navbar for the first time
        # It's telling her to enter her email address, so she does
        self.browser.get(self.live_server_url)
        self.browser.find_element_by_name('email').send_keys(TEST_EMAIL)
        self.browser.find_element_by_name('email').send_keys(Keys.ENTER)
        # A message appears telling her an email has been sent
        self.wait_for(lambda: self.assertIn(
            'Check your email',
            self.browser.find_element_by_tag_name('body').text
        ))
        # She checks her email and finds a message
        # (Django's test email backend records outgoing mail in mail.outbox)
        email = mail.outbox[0]
        self.assertIn(TEST_EMAIL, email.to)
        self.assertEqual(email.subject, SUBJECT)
        # It has a url link in it
        self.assertIn('Use this link to log in', email.body)
        url_search = re.search(r'http://.+/.+$', email.body)
        if not url_search:
            self.fail(f'Could not find url in email body:\n{email.body}')
        url = url_search.group(0)
        self.assertIn(self.live_server_url, url)
        # she clicks it
        self.browser.get(url)
        # she is logged in!
        self.wait_for(
            lambda: self.browser.find_element_by_link_text('Log out')
        )
        navbar = self.browser.find_element_by_css_selector('.navbar')
        self.assertIn(TEST_EMAIL, navbar.text)
        # Now she logs out
        self.browser.find_element_by_link_text('Log out').click()
        # She is logged out
        self.wait_for(
            lambda: self.browser.find_element_by_name('email')
        )
        navbar = self.browser.find_element_by_css_selector('.navbar')
        self.assertNotIn(TEST_EMAIL, navbar.text)
| jtmolon/obey_the_goat | superlists/functional_tests/test_login.py | Python | gpl-3.0 | 2,044 |
#!/usr/bin/env python
# Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
#
# Use of this source code is governed by a BSD-style license
# that can be found in the LICENSE file in the root of the source
# tree. An additional intellectual property rights grant can be found
# in the file PATENTS. All contributing project authors may
# be found in the AUTHORS file in the root of the source tree.
"""
This file emits the list of reasons why a particular build needs to be clobbered
(or a list of 'landmines').
"""
import os
import sys
SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__))
CHECKOUT_ROOT = os.path.abspath(os.path.join(SCRIPT_DIR, os.pardir))
sys.path.insert(0, os.path.join(CHECKOUT_ROOT, 'build'))
import landmine_utils
platform = landmine_utils.platform # pylint: disable=invalid-name
def print_landmines():  # pylint: disable=invalid-name
    """
    ALL LANDMINES ARE EMITTED FROM HERE.
    """
    # DO NOT add landmines as part of a regular CL. Landmines are a last-effort
    # bandaid fix if a CL that got landed has a build dependency bug and all bots
    # need to be cleaned up. If you're writing a new CL that causes build
    # dependency problems, fix the dependency problems instead of adding a
    # landmine.
    # See the Chromium version in src/build/get_landmines.py for usage examples.
    # NOTE: every print below takes a single (possibly implicitly concatenated)
    # string argument, so the call form behaves identically under Python 2's
    # print statement and Python 3's print function.
    print('Clobber to remove out/{Debug,Release}/args.gn (webrtc:5070)')
    if platform() == 'android':
        print('Clobber to remove artifacts on Android causing lint errors after '
              'rolling in https://codereview.webrtc.org/2293863002')
        print('Clobber to remove old AppRTCDemo artifacts after renaming to '
              'AppRTCMobile in https://codereview.webrtc.org/2373443005')
        print('Clobber to fix Android x86/x64 builds after '
              'https://codereview.webrtc.org/1414343008/')
    if platform() == 'win':
        print('Clobber to resolve some issues with corrupt .pdb files on bots.')
        print('Clobber due to corrupt .pdb files (after #14623)')
        print('Clobber due to Win 64-bit Debug linking error (crbug.com/668961)')
        print('Clobber due to Win Clang Debug linking errors in '
              'https://codereview.webrtc.org/2786603002')
        print('Clobber due to Win Debug linking errors in '
              'https://codereview.webrtc.org/2832063003/')
    if platform() == 'mac':
        # platform == 'ios' doesn't work since it assumes GYP_DEFINES is set,
        # which is no longer the case.
        print('Clobber due to iOS compile errors (crbug.com/694721)')
        print('Clobber to unblock https://codereview.webrtc.org/2709573003')
        print('Clobber to fix https://codereview.webrtc.org/2709573003 after '
              'landing')
        # NOTE(review): the next two entries are duplicated in the original;
        # kept as-is since each printed line is a distinct landmine entry.
        print('Clobber to fix https://codereview.webrtc.org/2767383005 before'
              'landing (changing rtc_executable -> rtc_test on iOS)')
        print('Clobber to fix https://codereview.webrtc.org/2767383005 before'
              'landing (changing rtc_executable -> rtc_test on iOS)')
    print('Another landmine for low_bandwidth_audio_test (webrtc:7430)')
def main():
  """Emit all landmines for the current platform; always succeeds."""
  print_landmines()
  return 0


if __name__ == '__main__':
  # NOTE(review): relies on `sys` being imported earlier in the file.
  sys.exit(main())
| wangcy6/storm_app | frame/c++/webrtc-master/tools_webrtc/get_landmines.py | Python | apache-2.0 | 3,131 |
# Copyright (C) 2013 Johnny Vestergaard <jkv@unixcluster.dk>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import poplib
from datetime import datetime
import logging
from beeswarm.drones.client.baits.clientbase import ClientBase
logger = logging.getLogger(__name__)
class Pop3s(ClientBase):
    """Bait client that performs one scripted POP3-over-SSL mailbox check."""

    def __init__(self, options):
        super(Pop3s, self).__init__(options)

    def start(self):
        """Run a single POP3S bait session against the configured server.

        Host, port, credentials and honeypot id are read from
        ``self.options``; connection/login/completion outcomes are recorded
        on the session object, which is always finalized in ``finally``.
        """
        opts = self.options
        host = opts['server']
        port = opts['port']
        user = opts['username']
        secret = opts['password']

        session = self.create_session(host, port, opts['honeypot_id'])

        try:
            logger.debug(
                'Sending {0} bait session to {1}:{2}. (bait id: {3})'.format('pop3', host, port,
                                                                             session.id))
            connection = poplib.POP3_SSL(host, port)
            # Record the ephemeral client-side port of this connection.
            session.source_port = connection.sock.getsockname()[1]
            session.protocol_data['banner'] = connection.getwelcome()
            session.did_connect = True
            connection.user(user)
            connection.pass_(secret)
            # TODO: Handle failed login
            session.add_auth_attempt('plaintext', True, username=user, password=secret)
            session.did_login = True
            session.timestamp = datetime.utcnow()
        except Exception as exc:
            # Best-effort bait: failures are only logged, never raised.
            logger.debug('Caught exception: {0} ({1})'.format(exc, str(type(exc))))
        else:
            # Login succeeded: fetch and delete every message in the mailbox.
            message_entries = connection.list()[1]
            for message_entry in message_entries:
                message_index, _ = message_entry.split(' ')
                connection.retr(message_index)
                connection.dele(message_index)
            logger.debug('Found and deleted {0} messages on {1}'.format(len(message_entries), host))
            connection.quit()
            session.did_complete = True
        finally:
            session.alldone = True
            session.end_session()
| honeynet/beeswarm | beeswarm/drones/client/baits/pop3s.py | Python | gpl-3.0 | 2,819 |
import argparse
import idigbio
import json
import requests
# Command-line interface: read one search value per line from a file and
# query iDigBio for records whose `field` matches any of those values.
parser = argparse.ArgumentParser(description='Read list of values from file and search for records matching in specified field.')
parser.add_argument('-i', '--inputfile', dest='inputfile', required=True, help="Input file must be one value per line.")
parser.add_argument('-f', '--field', dest='field', default='scientificname', help="The specified field must be an iDigbio indexed term.")
parser.add_argument('--header-row', dest='header_row', default=False, action='store_true', help="Use this option if the first line of the input file is a header row.")
parser.add_argument('--stop-count', dest='stopcount', type=int, help="Stop reading inputfile after this many rows. Default: 10")
args = parser.parse_args()
inputfile = args.inputfile
field = args.field
header_row = args.header_row
# argparse declares no default for --stop-count; the documented default of 10
# is applied manually here (and a stopcount of 0 also falls back to 10).
if args.stopcount:
    # seem to hit issues if stopcount > 7000
    stopcount=args.stopcount
else:
    stopcount = 10
# Collect up to `stopcount` raw lines (newline still attached; values are
# lower-cased/stripped later) as a set of unique search terms.
inputset = set()
skip_pending = header_row  # true while the header line is still unread
taken = 0
with open(inputfile, 'r') as infile:
    for raw_line in infile:
        if skip_pending:
            # Discard the header row exactly once.
            skip_pending = False
        else:
            inputset.add(raw_line)
            taken += 1
        if taken >= stopcount:
            break
api = idigbio.json()
answer = dict()
fields = ["uuid", "genus", "specificepithet", "geopoint", "country", "stateprovince", "county", "municipality"]
query = {}
values = list()
place = 0
########## DO ON MONDAY ---- try to get "fields" to work
while len(inputset) > 0:
while (place < 100) and len(inputset) > 0:
value = inputset.pop()
values.append(value.lower().strip())
place += 1
query = { field : values}
query_as_string = json.dumps( { field : values })
# print query
print query_as_string
# record_list =
# answer.append(api.search_records(rq=query))
r = requests.post('http://beta-search.idigbio.org/v2/search/records/',data=query_as_string, headers={'content-type': 'application/json'})
response_json = r.json()
for item in response_json["items"]:
item_uuid = item["indexTerms"]["uuid"]
answer[item_uuid]=[]
for key in fields:
if key in item["indexTerms"]:
answer[item_uuid].append[item["indexTerms"][key]
print type(answer[item_uuid])
#print query
# print answer
values=list()
place = 0
break
#print answer
#query_as_string = '{"query" : "rq":{"scientificname":"puma concolor"},"limit":10},"email":"dstoner"}'
#q_json = json.dumps(query_as_string)
#r = requests.post('http://csv.idigbio.org/', data=json.loads(q_json), headers={'content-type': 'application/json'})
#print r.text
#r = requests.post('http://search.idigbio.org/idigbio/records/_search', data=json.loads(q_json), headers={'content-type': 'application/json'})
raise SystemExit
# ---------------------------------------------------------------------------
# NOTE(review): everything below is UNREACHABLE -- the script exits on the
# `raise SystemExit` above. Kept as-is; looks like an earlier experiment.
# ---------------------------------------------------------------------------
api = idigbio.json()
#record_list = api.search_records(rq={"genus":["abelia","abelmoschus"]})
record_list = api.search_records(rq=query)
## output specification:
# Genus, specific epithet, lat, long, locality (all fields so that I could
# have country, state, county, etc. so that we can check the lat/long
# against the field)
#print record_list["itemCount"]
raise SystemExit
for item in record_list["items"]:
    for key in item["indexTerms"]:
        if key == "genus":
            print item["indexTerms"][key]
| danstoner/idigbio-scratch | pam_10k/pam_10k.py | Python | mit | 3,435 |
from MCNPtools import Gen

# example usage of the module
# first you initialize the tally by defining the bins: segment (surface number), angle (cosine) and energy (MeV)

# Angular bin edges (cosine of the angle), 0 to 1 in steps of 0.1.
cos = [0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1]
# Segment bins: MCNP surface numbers.
seg = [666,667]
# Energy bin edges in MeV, roughly log-spaced from 1e-10 to ~2e1.
erg =[1.000E-10, 1.259E-10, 1.585E-10, 1.995E-10, 2.512E-10, 3.162E-10,
      3.981E-10, 5.012E-10, 6.310E-10, 7.943E-10, 1.000E-09, 1.259E-09,
      1.585E-09, 1.995E-09, 2.512E-09, 3.162E-09, 3.981E-09, 5.012E-09,
      6.310E-09, 7.943E-09, 1.000E-08, 1.259E-08, 1.585E-08, 1.995E-08,
      2.512E-08, 3.162E-08, 3.981E-08, 5.012E-08, 6.310E-08, 7.943E-08,
      1.000E-07, 1.259E-07, 1.585E-07, 1.995E-07, 2.512E-07, 3.162E-07,
      3.981E-07, 5.012E-07, 6.310E-07, 7.943E-07, 1.000E-06, 1.259E-06,
      1.585E-06, 1.995E-06, 2.512E-06, 3.162E-06, 3.981E-06, 5.012E-06,
      6.310E-06, 7.943E-06, 1.000E-05, 1.259E-05, 1.585E-05, 1.995E-05,
      2.512E-05, 3.162E-05, 3.981E-05, 5.012E-05, 6.310E-05, 7.943E-05,
      1.000E-04, 1.259E-04, 1.585E-04, 1.995E-04, 2.512E-04, 3.162E-04,
      3.981E-04, 5.012E-04, 6.310E-04, 7.943E-04, 1.000E-03, 1.259E-03,
      1.585E-03, 1.995E-03, 2.512E-03, 3.162E-03, 3.981E-03, 5.012E-03,
      6.310E-03, 7.943E-03, 1.000E-02, 1.259E-02, 1.585E-02, 1.995E-02,
      2.512E-02, 3.162E-02, 3.981E-02, 5.012E-02, 6.310E-02, 7.943E-02,
      1.000E-01, 1.259E-01, 1.585E-01, 1.995E-01, 2.512E-01, 3.162E-01,
      3.981E-01, 5.012E-01, 6.310E-01, 7.943E-01, 1.000E+00, 1.259E+00,
      1.585E+00, 1.995E+00, 2.512E+00, 3.162E+00, 3.981E+00, 5.012E+00,
      6.310E+00, 7.943E+00, 1.000E+01, 1.259E+01, 1.585E+01, 1.995E+01]
myTally = Gen.Tally(seg,cos,erg)
# Once the object is initlialized it can be used to print out the tally needed by MCNP giving:
# surface -> 999
# title -> test
# normFactor -> 1
# NOTE(review): the comment above does not match the actual call below
# (surface 681, normFactor 9.6E+13) -- confirm which is intended.
myTally.PrintTally(681,"test",9.6E+13)
| ipostuma/MCNPtools | WriteTally.py | Python | gpl-3.0 | 1,830 |
import _plotly_utils.basevalidators
class SizeValidator(_plotly_utils.basevalidators.NumberValidator):
    """Number validator for ``layout.polar.angularaxis.tickfont.size``."""

    def __init__(
        self,
        plotly_name="size",
        parent_name="layout.polar.angularaxis.tickfont",
        **kwargs
    ):
        # Pop the overridable defaults out of **kwargs first, then fold any
        # remaining caller-supplied keywords on top before delegating.
        base_args = dict(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=kwargs.pop("edit_type", "plot"),
            min=kwargs.pop("min", 1),
            role=kwargs.pop("role", "style"),
        )
        base_args.update(kwargs)
        super(SizeValidator, self).__init__(**base_args)
| plotly/python-api | packages/python/plotly/plotly/validators/layout/polar/angularaxis/tickfont/_size.py | Python | mit | 535 |
#!/usr/bin/python
# Learn Python the Hard Way, ex33: while-loop demonstration.
i = 0
numbers = []

# Append 0..5 to `numbers`; note `i` is 6 once the loop exits.
while i < 6:
    print "At the top i is %d" % i
    numbers.append(i)

    i += 1
    print "Numbers now: ", numbers
    print "At the bottom i is %d" % i


print "the Numbers: "

# Echo the collected values, one per line.
for num in numbers:
    print num
| hackinginformation/learnPythonTheHardWay | ex33.py | Python | gpl-2.0 | 252 |
from __future__ import print_function
from __future__ import division
from builtins import str
from builtins import range
import os
import pickle
import zipfile
import numpy as np
import pandas as pd
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
plt.switch_backend('agg')
import seaborn as sns
import logging
rootLogger = logging.getLogger(__name__)
# ****************************************************************************
# BEGIN POST-PROCESSING ...
# ****************************************************************************
def calc_tick_vals(val_list, xstep=0.1):
    """Thin a list of axis values down to a readable set of tick labels.

    The nominal tick count is ``len(val_list)/xstep + 1``; if that lands in
    (12, 20] the density is halved (step 0.2), and anything denser is capped
    at 11 ticks. Float values are formatted to three decimal places.
    """
    n_ticks = 1 + int(round(len(val_list) / xstep))
    if 12 < n_ticks <= 20:
        # Too crowded: recompute with the coarser 0.2 step.
        n_ticks = 1 + int(round(len(val_list) / 0.2))
    elif n_ticks > 20:
        n_ticks = 11
    labels = val_list[::(n_ticks - 1)]
    # Exact `type` check on purpose: only plain floats get reformatted.
    if type(labels[0]) == float:
        labels = ['{:.3f}'.format(v) for v in labels]
    return labels
def plot_mean_econ_loss(scenario, economic_loss_array, hazards):
    """Draws and saves a boxplot of mean economic loss.

    One box per hazard intensity; the mean is overlaid as a coral dot.
    The figure is written to ``<output_path>/fig_lossratio_boxplot.png``.

    :param scenario: provides ``num_samples`` and ``output_path``.
    :param economic_loss_array: array-like of loss ratios; indexed
        [sample, hazard point] -- it is transposed before flattening so
        ordering matches the label vectors built below. (Assumed 2-D;
        confirm against callers.)
    :param hazards: provides ``hazard_scenario_list``, ``num_hazard_pts``
        and the intensity-measure name/unit for the x-axis label.
    """
    # One hazard-intensity label per sample, grouped by hazard point ...
    hazvals_ext = [[str(i)] * scenario.num_samples
                   for i in hazards.hazard_scenario_list]
    x1 = np.ndarray.flatten(np.array(hazvals_ext))

    # ... and the matching sample numbers 1..num_samples, tiled per hazard.
    smpl = list(range(1, scenario.num_samples + 1, 1))
    x2 = np.array(smpl * hazards.num_hazard_pts)

    arrays = [x1, x2]
    econ_loss = np.array(economic_loss_array)
    # Transpose so hazard varies slowest, matching the x1 label order.
    econ_loss = np.ndarray.flatten(econ_loss.transpose())
    econ_loss_flat = np.ndarray.flatten(econ_loss)

    # MultiIndex frame: (hazard intensity, sample number) -> loss ratio.
    econ_loss_df = pd.DataFrame(econ_loss_flat, index=arrays)
    econ_loss_df.index.names = ['Hazard Intensity', 'Sample Num']
    econ_loss_df.columns = ['Econ Loss Ratio']

    fig = plt.figure(figsize=(9, 5), facecolor='white')
    sns.set(style='ticks', palette='Set2')
    ax = sns.boxplot(x=x1, y='Econ Loss Ratio',
                     data=econ_loss_df,
                     linewidth=0.8, color='whitesmoke',
                     showmeans=True,
                     showfliers=True,
                     meanprops=dict(marker='o',
                                    markeredgecolor='coral',
                                    markerfacecolor='coral')
                     )

    # Cosmetic axis/spine styling (order matters: applied after the boxplot).
    sns.despine(bottom=False, top=True, left=True, right=True,
                offset=None, trim=True)
    ax.spines['bottom'].set_linewidth(0.8)
    ax.spines['bottom'].set_color('#555555')
    ax.spines['bottom'].set_position(('axes', 0.0))
    ax.yaxis.grid(True, which="major", linestyle='-',
                  linewidth=0.4, color='#B6B6B6')
    ax.tick_params(axis='x', bottom=True, top=False,
                   width=0.8, labelsize=8, color='#555555')
    ax.tick_params(axis='y', left=False, right=False,
                   width=0.8, labelsize=8, color='#555555')

    # Thin the x tick labels so long hazard lists stay legible.
    hazard_scenario_list = hazards.hazard_scenario_list
    xtick_labels = calc_tick_vals(hazard_scenario_list)
    xtick_pos = []
    for val in xtick_labels:
        xtick_pos.append(hazard_scenario_list.index(val))

    intensity_label = hazards.intensity_measure_param+' ('+\
                      hazards.intensity_measure_unit+')'
    ax.set_xticks(xtick_pos)
    ax.set_xticklabels(xtick_labels, rotation='vertical')
    ax.set_xlabel(intensity_label, labelpad=9, size=10)
    ax.set_yticks(np.linspace(0.0, 1.0, 11, endpoint=True))
    ax.set_ylabel('Loss Fraction (%)', labelpad=9, size=10)
    ax.set_title('Loss Ratio', loc='center', y=1.04,fontsize=12, weight='bold')

    figfile = os.path.join(scenario.output_path, 'fig_lossratio_boxplot.png')
    plt.margins(0.05)
    plt.savefig(figfile, format='png', bbox_inches='tight', dpi=300)
    plt.close(fig)
def write_system_response(response_list, infrastructure, scenario, hazards):
    """Archive raw simulation responses and compute system fragility.

    Pickles/zips the raw per-hazard results, writes the system-output and
    component-type CSV summaries, then derives the probability of
    exceedance of each system damage state from economic loss and saves
    the arrays as ``.npy``.

    :param response_list: positional collection of simulation results as
        used here: [0] hazard -> per-component damage-state indices,
        [1] hazard -> system output, [2] component-instance responses,
        [3] component-type responses, [5] economic loss per
        (sample, hazard point), [6] component-class damage-level
        percentages (only consumed by the commented-out section below).
    :param infrastructure: supplies damage-scale bounds and damage states.
    :param scenario: supplies sample count and output directories.
    :param hazards: supplies the hazard scenario list and point count.
    """
    # ------------------------------------------------------------------------
    # 'ids_comp_vs_haz' is a dict of numpy arrays
    # We pickle it for archival. But the file size can get very large.
    # So we zip it for archival and delete the original
    # ------------------------------------------------------------------------
    idshaz = os.path.join(scenario.raw_output_dir, 'ids_comp_vs_haz.pickle')
    haz_vs_ds_index_of_comp = response_list[0]
    with open(idshaz, 'wb') as handle:
        # One pickle record per hazard key, in sorted order, so the archive
        # is reproducible across runs.
        for response_key in sorted(haz_vs_ds_index_of_comp.keys()):
            pickle.dump(
                {response_key: haz_vs_ds_index_of_comp[response_key]},
                handle
            )
    idshaz_zip = os.path.join(scenario.raw_output_dir, 'ids_comp_vs_haz.zip')
    zf = zipfile.ZipFile(idshaz_zip, mode='w', allowZip64=True)
    zf.write(idshaz, compress_type=zipfile.ZIP_DEFLATED)
    zf.close()
    os.remove(idshaz)

    # ------------------------------------------------------------------------
    # System output file (for given hazard transfer parameter value)
    # ------------------------------------------------------------------------
    sys_output_dict = response_list[1]
    sod_pkl = os.path.join(scenario.raw_output_dir,
                           'sys_output_dict.pickle')
    with open(sod_pkl, 'wb') as handle:
        for response_key in sorted(sys_output_dict.keys()):
            pickle.dump(
                {response_key: sys_output_dict[response_key]},
                handle
            )

    sys_output_df = pd.DataFrame(sys_output_dict)
    sys_output_df = sys_output_df.transpose()
    sys_output_df.index.name = 'Hazard Intensity'

    outfile_sysoutput = os.path.join(scenario.output_path,
                                     'system_output_vs_haz_intensity.csv')
    sys_output_df.to_csv(outfile_sysoutput,
                         sep=',',
                         index_label=[sys_output_df.index.name])

    # ------------------------------------------------------------------------
    # Hazard response for component instances, i.e. components as-installed
    # ------------------------------------------------------------------------
    component_resp_dict = response_list[2]
    crd_pkl = os.path.join(scenario.raw_output_dir,'component_resp_dict.pickle')
    with open(crd_pkl, 'wb') as handle:
        for response_key in sorted(component_resp_dict.keys()):
            pickle.dump(
                {response_key: component_resp_dict[response_key]},
                handle
            )

    # ------------------------------------------------------------------------
    # Hazard response for component types
    # ------------------------------------------------------------------------
    comptype_resp_dict = response_list[3]
    outfile_comptype_resp = os.path.join(
        scenario.output_path, 'comptype_response.csv')
    comptype_resp_df = pd.DataFrame(comptype_resp_dict)
    comptype_resp_df.index.names = ['component_type', 'response']
    comptype_resp_df.to_csv(
        outfile_comptype_resp, sep=',',
        index_label=['component_type', 'response'])

    # ------------------------------------------------------------------------
    # Calculating system fragility:
    # ------------------------------------------------------------------------
    # infrastructure econ loss for sample
    economic_loss_array = response_list[5]
    sys_frag = np.zeros_like(economic_loss_array, dtype=int)
    sys_damage_state_bounds = infrastructure.get_dmg_scale_bounds()
    for j, hazard_level in enumerate(hazards.hazard_scenario_list):
        for i in range(scenario.num_samples):
            # system output and economic loss
            # Damage-state index = number of loss bounds exceeded.
            sys_frag[i, j] = \
                np.sum(economic_loss_array[i, j] > sys_damage_state_bounds)

    # Calculating Probability of Exceedence:
    pe_sys_econloss = np.zeros(
        (len(infrastructure.get_system_damage_states()),
         hazards.num_hazard_pts)
    )
    for j in range(hazards.num_hazard_pts):
        for i in range(len(infrastructure.get_system_damage_states())):
            # Fraction of samples at or above damage state i.
            pe_sys_econloss[i, j] = \
                np.sum(sys_frag[:, j] >= i) / float(scenario.num_samples)

    # NOTE(review): the two values below are computed but not used -- they
    # feed the commented-out class-damage PE calculation that follows.
    compcls_dmg_level_percentages = response_list[6]
    comp_class_list = infrastructure.get_component_classes()
    pe_sys_classdmg = np.zeros(
        (len(infrastructure.get_system_damage_states()),
         hazards.num_hazard_pts)
    )
    ###########################################################################
    # print("****************************")
    # print('compcls_dmg_level_percentages')
    # pp.pprint(compcls_dmg_level_percentages)

    # for j in range(hazards.num_hazard_pts):
    #     for i in range(len(infrastructure.get_system_damage_states())):
    #         pe_sys_classdmg[i, j] = \
    #
    ###########################################################################

    np.save(os.path.join(scenario.raw_output_dir, 'sys_frag.npy'), sys_frag)
    np.save(os.path.join(scenario.raw_output_dir, 'pe_sys_econloss.npy'),
            pe_sys_econloss)
# ------------------------------------------------------------------------------
def pe_by_component_class(response_list, infrastructure, scenario, hazards):
    """
    Calculated probability of exceedence based on component classes.

    For 'Substation' systems, derives per-class failure fractions and the
    resulting system damage-state exceedance probabilities; for all systems
    it computes the expected damage ratio per component and writes the
    summary CSV / ``.npy`` output files.

    :param response_list: positional simulation results as used here:
        [0] hazard -> per-component damage-state indices,
        [2] component-instance responses, [4] calculated system output,
        [5] economic loss per (sample, hazard point).
    :param infrastructure: component map, damage states, class limits.
    :param scenario: sample count and output directories.
    :param hazards: hazard scenarios and point count.
    :return: None (results are written to disk).
    """
    # ------------------------------------------------------------------------
    # For Probability of Exceedence calculations based on component failures
    # Damage state boundaries for Component Type Failures (Substations) are
    # based on HAZUS MH MR3, p 8-66 to 8-68
    # ------------------------------------------------------------------------
    cp_classes_in_system = np.unique(list(infrastructure.
                                          get_component_class_list()))

    # Map each component class to its list of component instances.
    cp_class_map = {k: [] for k in cp_classes_in_system}
    for comp_id, component in list(infrastructure.components.items()):
        cp_class_map[component.component_class].append(component)

    if infrastructure.system_class == 'Substation':
        cp_classes_costed = \
            [x for x in cp_classes_in_system
             if x not in infrastructure.uncosted_classes]

        # --- System fragility - Based on Failure of Component Classes ---
        comp_class_failures = \
            {cc: np.zeros((scenario.num_samples, hazards.num_hazard_pts))
             for cc in cp_classes_costed}

        comp_class_frag = \
            {cc: np.zeros((scenario.num_samples, hazards.num_hazard_pts))
             for cc in cp_classes_costed}

        # TODO check or correctness
        # for j, hazard_level in enumerate(hazard.hazard_range):
        #     for i in range(scenario.num_samples):
        #         for compclass in cp_classes_costed:
        #             for c in cp_class_map[compclass]:
        #                 comp_class_failures[compclass][i, j] += \
        #                     response_list[hazard_level.hazard_intensity]\
        #                                  [i, infrastructure.components[c]]
        #             comp_class_failures[compclass][i, j] /= \
        #                 len(cp_class_map[compclass])
        #
        #             comp_class_frag[compclass][i, j] = \
        #                 np.sum(comp_class_failures[compclass][i, j] > \
        #                        infrastructure.ds_lims_compclasses[compclass])

        for j, (scenario_name, hazard_data) in \
                enumerate(hazards.scenario_hazard_data.items()):
            for i in range(scenario.num_samples):
                for compclass in cp_classes_costed:
                    for comptype in cp_class_map[compclass]:
                        comp_ndx = list(infrastructure.components.keys()).\
                            index(comptype.component_id)
                        # -----------------------------------------------------
                        # A component counts as failed when its sampled damage
                        # state index is 2 or higher.
                        if response_list[0][scenario_name][i, comp_ndx] >= 2:
                            comp_class_failures[compclass][i, j] += 1
                        # comp_class_failures[compclass][i, j] += \
                        #     response_list[0][scenario_name][i, comp_ndx]
                        # -----------------------------------------------------
                    # Normalise to the fraction of failed components in class.
                    comp_class_failures[compclass][i, j] /= \
                        len(cp_class_map[compclass])

                    # Class damage state = number of class limits exceeded.
                    comp_class_frag[compclass][i, j] = \
                        np.sum(comp_class_failures[compclass][i, j] > \
                               infrastructure.ds_lims_compclasses[compclass])

        # Probability of Exceedence -- Based on Failure of Component Classes
        pe_sys_cpfailrate = np.zeros(
            (len(infrastructure.sys_dmg_states), hazards.num_hazard_pts)
        )
        for p in range(hazards.num_hazard_pts):
            for d in range(len(infrastructure.sys_dmg_states)):
                ds_ss_ix = []
                for compclass in cp_classes_costed:
                    ds_ss_ix.append(
                        np.sum(comp_class_frag[compclass][:, p] >= d) /
                        float(scenario.num_samples)
                    )
                # System PE at state d = median across component classes.
                pe_sys_cpfailrate[d, p] = np.median(ds_ss_ix)

        # --- Save prob exceedance data as npy ---
        np.save(os.path.join(scenario.raw_output_dir, 'pe_sys_cpfailrate.npy'),
                pe_sys_cpfailrate)

    # ------------------------------------------------------------------------
    # Validate damage ratio of the system
    # ------------------------------------------------------------------------
    exp_damage_ratio = np.zeros((len(infrastructure.components),
                                 hazards.num_hazard_pts))
    for l, hazard in enumerate(hazards.listOfhazards):
        # compute expected damage ratio
        for j, component in enumerate(infrastructure.components.values()):
            # TODO remove invalid Component accesses !!
            component_pe_ds = np.zeros(len(component.damage_states))
            for damage_state_index in component.damage_states.keys():
                x_loc, y_loc = component.get_location()
                hazard_intensity \
                    = hazard.get_hazard_intensity_at_location(x_loc, y_loc)
                component_pe_ds[damage_state_index] \
                    = component.damage_states[damage_state_index].\
                    response_function(hazard_intensity)
            # Drop damage state 0 ("no damage") before converting PE -> PB.
            component_pe_ds = component_pe_ds[1:]
            pb = pe2pb(component_pe_ds)
            dr = np.array([component.damage_states[int(ds)].damage_ratio
                           for ds in range(len(component.damage_states))])
            cf = component.cost_fraction
            loss_list = dr * cf
            # Expected loss = sum of state probabilities x state losses.
            exp_damage_ratio[j, l] = np.sum(pb * loss_list)

    # ------------------------------------------------------------------------
    # Write analytical outputs to file
    # ------------------------------------------------------------------------

    # --- Output File --- summary output ---
    outfile_sys_response = os.path.join(
        scenario.output_path, 'system_response.csv')
    out_cols = ['INTENSITY_MEASURE',
                'Economic Loss',
                'Mean Output']

    # create the arrays
    comp_response_list = response_list[2]
    economic_loss_array = response_list[5]
    calculated_output_array = response_list[4]

    outdat = {out_cols[0]: hazards.hazard_scenario_list,
              out_cols[1]: np.mean(economic_loss_array, axis=0),
              out_cols[2]: np.mean(calculated_output_array, axis=0)}
    df = pd.DataFrame(outdat)
    df.to_csv(
        outfile_sys_response, sep=',',
        index=False, columns=out_cols
    )

    # --- Output File --- response of each COMPONENT to hazard ---
    outfile_comp_resp = os.path.join(scenario.output_path,
                                     'component_response.csv')
    component_resp_df = pd.DataFrame(comp_response_list)
    component_resp_df.index.names = ['component_id', 'response']
    component_resp_df.columns = hazards.hazard_scenario_name
    component_resp_df.to_csv(
        outfile_comp_resp, sep=',',
        index_label=['component_id', 'response']
    )

    # --- Output File --- mean loss of component ---
    outfile_comp_loss = os.path.join(scenario.output_path,
                                     'component_meanloss.csv')
    # Keep only the 'loss_mean' rows of the per-component response frame.
    component_loss_df = component_resp_df.iloc\
        [component_resp_df.index.get_level_values(1) == 'loss_mean']
    component_loss_df.reset_index(level='response', inplace=True)
    component_loss_df = component_loss_df.drop('response', axis=1)
    component_loss_df.to_csv(
        outfile_comp_loss, sep=',',
        index_label=['component_id']
    )

    # # --- Output File --- DataFrame of mean failures per component CLASS ---
    # outfile_compclass_failures = os.path.join(
    #     output_path, 'comp_class_meanfailures.csv')
    # compclass_failure_df.to_csv(outfile_compclass_failures, sep=',',
    #                         index_label=['component_class'])

    # ------------------------------------------------------------------------
    # *** Saving vars ***
    # ------------------------------------------------------------------------
    if scenario.save_vars_npy:
        np.save(
            os.path.join(scenario.raw_output_dir,
                         'economic_loss_array.npy'),
            economic_loss_array
        )
        np.save(
            os.path.join(scenario.raw_output_dir,
                         'calculated_output_array.npy'),
            calculated_output_array
        )
        np.save(
            os.path.join(scenario.raw_output_dir,
                         'exp_damage_ratio.npy'),
            exp_damage_ratio
        )
# ... END POST-PROCESSING
# **************************************************************************
def pe2pb(pe):
    """
    Convert probability of excedence of damage states, to
    probability of being in each discrete damage state.

    Input probabilities are first sorted in descending order; the result
    has one extra leading entry (probability of no damage) and sums to 1.
    """
    # sorted from max to min
    pex = np.sort(pe)[::-1]
    head = 1 - pex[0]          # probability of being below the first state
    tail = pex[-1]             # probability of the highest state
    # interior entries: pex[k] - pex[k+1]
    return np.concatenate(([head], -np.diff(pex), [tail]))
| GeoscienceAustralia/sifra | sira/infrastructure_response.py | Python | apache-2.0 | 18,123 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from enum import Enum, EnumMeta
from six import with_metaclass
class _CaseInsensitiveEnumMeta(EnumMeta):
    """Enum metaclass that makes member lookup case-insensitive.

    Both item access (``Color['red']``) and attribute access (``Color.red``)
    are routed through an upper-cased member-name lookup, so the generated
    enums accept any casing.
    """

    def __getitem__(self, name):
        # Normalize before delegating to EnumMeta's regular item lookup.
        return super().__getitem__(name.upper())

    def __getattr__(cls, name):
        """Return the enum member matching `name`, ignoring case.

        Only invoked when normal attribute lookup fails, so members spelled
        with their canonical (upper-case) names never reach this hook.
        """
        member = cls._member_map_.get(name.upper())
        if member is None:
            # Mirror standard attribute-lookup failure semantics.
            raise AttributeError(name)
        return member
class AccessPolicyRole(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """A role defining the data plane operations that a principal can perform on a Time Series
    Insights client.
    """

    # String values are the exact wire values from the service specification
    # (AutoRest-generated).
    READER = "Reader"
    CONTRIBUTOR = "Contributor"
class DataStringComparisonBehavior(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """The reference data set key comparison behavior can be set using this property. By default, the
    value is 'Ordinal' - which means case sensitive key comparison will be performed while joining
    reference data with events or while adding new reference data. When 'OrdinalIgnoreCase' is set,
    case insensitive comparison will be used.
    """

    ORDINAL = "Ordinal"                       # case-sensitive (default)
    ORDINAL_IGNORE_CASE = "OrdinalIgnoreCase" # case-insensitive
class EnvironmentKind(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """The kind of the environment.
    """

    # Used as the discriminator when creating/updating environments.
    GEN1 = "Gen1"
    GEN2 = "Gen2"
class EnvironmentResourceKind(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """The kind of the environment.
    """

    # Same values as EnvironmentKind; used as the discriminator on
    # environment *resource* models returned by the service.
    GEN1 = "Gen1"
    GEN2 = "Gen2"
class EventSourceKind(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """The kind of the event source.
    """

    MICROSOFT_EVENT_HUB = "Microsoft.EventHub"
    # NOTE: "IO_T" is AutoRest's mechanical casing split of "IoT"; the wire
    # value is "Microsoft.IoTHub".
    MICROSOFT_IO_T_HUB = "Microsoft.IoTHub"
class EventSourceResourceKind(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """The kind of the event source.
    """

    # Same values as EventSourceKind; discriminator on event source
    # *resource* models.
    MICROSOFT_EVENT_HUB = "Microsoft.EventHub"
    MICROSOFT_IO_T_HUB = "Microsoft.IoTHub"
class IngressState(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """This string represents the state of ingress operations on an environment. It can be "Disabled",
    "Ready", "Running", "Paused" or "Unknown"
    """

    # Read-only status values reported by the service.
    DISABLED = "Disabled"
    READY = "Ready"
    RUNNING = "Running"
    PAUSED = "Paused"
    UNKNOWN = "Unknown"
class LocalTimestampFormat(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """An enum that represents the format of the local timestamp property that needs to be set.
    """

    # Currently the only format defined by the service specification.
    EMBEDDED = "Embedded"
class PropertyType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """The type of the property.
    """

    # Currently the only property type defined by the service specification.
    STRING = "String"
class ProvisioningState(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """Provisioning state of the resource.
    """

    # Standard ARM resource lifecycle states.
    ACCEPTED = "Accepted"
    CREATING = "Creating"
    UPDATING = "Updating"
    SUCCEEDED = "Succeeded"
    FAILED = "Failed"
    DELETING = "Deleting"
class ReferenceDataKeyPropertyType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """The type of the key property.
    """

    # Allowed key-property types for reference data sets.
    STRING = "String"
    DOUBLE = "Double"
    BOOL = "Bool"
    DATE_TIME = "DateTime"
class SkuName(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """The name of this SKU.
    """

    # SKU tiers accepted by the service (S* presumably Gen1, P/L presumably
    # Gen2 -- not stated in this file; confirm against the service docs).
    S1 = "S1"
    S2 = "S2"
    P1 = "P1"
    L1 = "L1"
class StorageLimitExceededBehavior(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """The behavior the Time Series Insights service should take when the environment's capacity has
    been exceeded. If "PauseIngress" is specified, new events will not be read from the event
    source. If "PurgeOldData" is specified, new events will continue to be read and old events will
    be deleted from the environment. The default behavior is PurgeOldData.
    """

    PURGE_OLD_DATA = "PurgeOldData"  # default: drop oldest events, keep ingesting
    PAUSE_INGRESS = "PauseIngress"   # stop reading new events instead
class WarmStoragePropertiesState(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """This string represents the state of warm storage properties usage. It can be "Ok", "Error",
    "Unknown".
    """

    # Read-only status values reported by the service.
    OK = "Ok"
    ERROR = "Error"
    UNKNOWN = "Unknown"
| Azure/azure-sdk-for-python | sdk/timeseriesinsights/azure-mgmt-timeseriesinsights/azure/mgmt/timeseriesinsights/models/_time_series_insights_client_enums.py | Python | mit | 4,853 |
"""
DIRAC Wrapper to execute python and system commands with a wrapper, that might
set a timeout.
3 FUNCTIONS are provided:
- shellCall( iTimeOut, cmdSeq, callbackFunction = None, env = None ):
it uses subprocess.Popen class with "shell = True".
If cmdSeq is a string, it specifies the command string to execute through
the shell. If cmdSeq is a sequence, the first item specifies the command
string, and any additional items will be treated as additional shell arguments.
- systemCall( iTimeOut, cmdSeq, callbackFunction = None, env = None ):
it uses subprocess.Popen class with "shell = False".
cmdSeq should be a string, or a sequence of program arguments.
stderr and stdout are piped. callbackFunction( pipeId, line ) can be
defined to process the stdout (pipeId = 0) and stderr (pipeId = 1) as
they are produced
They return a DIRAC.ReturnValue dictionary with a tuple in Value
( returncode, stdout, stderr ) the tuple will also be available upon
timeout error or buffer overflow error.
- pythonCall( iTimeOut, function, \*stArgs, \*\*stKeyArgs )
calls function with given arguments within a timeout Wrapper
should be used to wrap third party python functions
"""
from multiprocessing import Process, Manager
import threading
import time
import select
import os
import sys
import types
import subprocess
import signal
# Very Important:
# Here we can not import directly from DIRAC, since this file it is imported
# at initialization time therefore the full path is necessary
# from DIRAC import S_OK, S_ERROR
from DIRAC.Core.Utilities.ReturnValues import S_OK, S_ERROR
# from DIRAC import gLogger
from DIRAC.FrameworkSystem.Client.Logger import gLogger
__RCSID__ = "$Id$"
USE_WATCHDOG = False
class Watchdog(object):
"""
.. class Watchdog
timeout watchdog decorator
"""
def __init__(self, func, args=None, kwargs=None):
""" c'tor """
self.func = func if callable(func) else None
self.args = args if args else tuple()
self.kwargs = kwargs if kwargs else {}
self.start = self.end = self.pid = None
self.rwEvent = threading.Event()
self.rwEvent.clear()
self.__watchdogThread = None
self.manager = Manager()
self.s_ok_error = self.manager.dict()
self.__executor = Process(target=self.run_func, args=(self.s_ok_error, ))
def run_func(self, s_ok_error):
""" subprocess target
:param Pipe pipe: pipe used for communication
"""
try:
ret = self.func(*self.args, **self.kwargs)
# set rw event
self.rwEvent.set()
for k in ret:
s_ok_error[k] = ret[k]
except Exception as error:
s_ok_error["OK"] = False
s_ok_error["Message"] = str(error)
finally:
# clear rw event
self.rwEvent.clear()
def watchdog(self):
""" watchdog thread target """
while True:
if self.rwEvent.is_set() or time.time() < self.end:
time.sleep(5)
else:
break
if not self.__executor.is_alive():
return
else:
# wait until r/w operation finishes
while self.rwEvent.is_set():
time.sleep(5)
continue
# SIGTERM
os.kill(self.pid, signal.SIGTERM)
time.sleep(5)
# SIGKILL
if self.__executor.is_alive():
os.kill(self.pid, signal.SIGKILL)
def __call__(self, timeout=0):
""" decorator execution """
timeout = int(timeout)
ret = {"OK": True, "Value": ""}
if timeout:
self.start = int(time.time())
self.end = self.start + timeout + 2
self.__watchdogThread = threading.Thread(target=self.watchdog)
self.__watchdogThread.daemon = True
self.__watchdogThread.start()
ret = {"OK": False, "Message": "Timeout after %s seconds" % timeout,
"Value": (1, '', '')}
try:
self.__executor.start()
time.sleep(0.5)
self.pid = self.__executor.pid
if timeout:
self.__executor.join(timeout)
else:
self.__executor.join()
# get results if any, block watchdog by setting rwEvent
if not self.__executor.is_alive():
self.rwEvent.set()
for k in self.s_ok_error.keys():
ret[k] = self.s_ok_error[k]
self.rwEvent.clear()
except Exception as error:
return {"OK": False, "Message": str(error),
"Value": (2, '', '')}
return ret
class Subprocess:
    """
    .. class:: Subprocess

    Helper wrapping child execution: python functions forked into a child
    process (:meth:`pythonCall`) and external commands run through the
    :mod:`subprocess` module (:meth:`systemCall`), with an optional timeout
    and a hard cap on the amount of output that is buffered.
    """

    def __init__(self, timeout=False, bufferLimit=52428800):
        """ c'tor

        :param int timeout: timeout in seconds (0/False disables the timeout)
        :param int bufferLimit: buffer size limit in bytes, default 5MB
        """
        self.log = gLogger.getSubLogger('Subprocess')
        self.timeout = False
        try:
            self.changeTimeout(timeout)
            self.bufferLimit = int(bufferLimit)  # 5MB limit for data
        except Exception as x:
            self.log.exception('Failed initialisation of Subprocess object')
            raise x
        # per-call state of the currently executing child
        self.child = None
        self.childPID = 0
        self.childKilled = False
        self.callback = None
        self.bufferList = []
        self.cmdSeq = []

    def changeTimeout(self, timeout):
        """ set the time out limit to :timeout: seconds

        :param int timeout: time out in seconds; 0 means "no timeout"
        """
        self.timeout = int(timeout)
        if self.timeout == 0:
            # False is used throughout this class as the "no timeout" marker
            self.timeout = False
        # self.log.debug( 'Timeout set to', timeout )

    def __readFromFD(self, fd, baseLength=0):
        """ read from file descriptor :fd: until EOF or the buffer limit

        :param fd: file descriptor
        :param int baseLength: bytes already accumulated elsewhere, counted
                               towards the total buffer limit
        """
        dataString = ''
        redBuf = " "
        while len(redBuf) > 0:
            redBuf = os.read(fd, 8192)
            dataString += redBuf
            if len(dataString) + baseLength > self.bufferLimit:
                self.log.error('Maximum output buffer length reached',
                               "First and last data in buffer: \n%s \n....\n %s " % (dataString[:100],
                                                                                     dataString[-100:]))
                retDict = S_ERROR('Reached maximum allowed length (%d bytes) '
                                  'for called function return value' % self.bufferLimit)
                # the partial data is still handed back to the caller
                retDict['Value'] = dataString
                return retDict
        return S_OK(dataString)

    def __executePythonFunction(self, function, writePipe, *stArgs, **stKeyArgs):
        """
        execute function :function: using :stArgs: and :stKeyArgs:

        Runs in the forked child: the DEncoded S_OK/S_ERROR result is written
        to :writePipe: and the child always leaves through os._exit(0).
        """
        from DIRAC.Core.Utilities import DEncode
        try:
            os.write(writePipe, DEncode.encode(S_OK(function(*stArgs, **stKeyArgs))))
        except OSError as x:
            if str(x) == '[Errno 32] Broken pipe':
                # the parent has died
                pass
        except Exception as x:
            self.log.exception('Exception while executing', function.__name__)
            os.write(writePipe, DEncode.encode(S_ERROR(str(x))))
        # HACK: Allow some time to flush logs
        time.sleep(1)
        try:
            os.close(writePipe)
        finally:
            os._exit(0)

    def __selectFD(self, readSeq, timeout=False):
        """ select file descriptor from :readSeq: list

        :return: list of descriptors ready for reading, or False when none of
                 the passed descriptors is valid any more
        """
        validList = []
        for fd in readSeq:
            try:
                # fstat raises OSError for descriptors that were closed
                os.fstat(fd)
                validList.append(fd)
            except OSError:
                pass
        if not validList:
            return False
        if self.timeout and not timeout:
            timeout = self.timeout
        if not timeout:
            return select.select(validList, [], [])[0]
        return select.select(validList, [], [], timeout)[0]

    def __killPid(self, pid, sig=9):
        """ send signal :sig: to process :pid:

        :param int pid: process id
        :param int sig: signal to send, default 9 (SIGKILL)
        """
        try:
            os.kill(pid, sig)
        except Exception as x:
            # a process that is already gone is not an error here
            if str(x) != '[Errno 3] No such process':
                self.log.exception('Exception while killing timed out process')
                raise x

    def __poll(self, pid):
        """ wait for :pid: without blocking

        :return: (pid, status) tuple from waitpid ((0, 0) while the child is
                 still running), False if the child was killed by us, or None
                 when waitpid failed for a still-unkilled child
        """
        try:
            return os.waitpid(pid, os.WNOHANG)
        except os.error:
            if self.childKilled:
                return False
            return None

    def killChild(self, recursive=True):
        """ kill child process

        :param boolean recursive: flag to kill all descendants
        :return: exit status of the child, or -1 when there is no child
        """
        if self.childPID < 1:
            self.log.error("Could not kill child", "Child PID is %s" % self.childPID)
            return - 1
        # freeze the process tree first so no new children can be spawned
        os.kill(self.childPID, signal.SIGSTOP)
        if recursive:
            for gcpid in getChildrenPIDs(self.childPID, lambda cpid: os.kill(cpid, signal.SIGSTOP)):
                try:
                    os.kill(gcpid, signal.SIGKILL)
                    self.__poll(gcpid)
                except Exception:
                    pass
        self.__killPid(self.childPID)

        # HACK to avoid python bug
        # self.child.wait()
        # spin briefly until waitpid reports the child as reaped
        exitStatus = self.__poll(self.childPID)
        i = 0
        while exitStatus is None and i < 1000:
            i += 1
            time.sleep(0.000001)
            exitStatus = self.__poll(self.childPID)
        try:
            exitStatus = os.waitpid(self.childPID, 0)
        except os.error:
            pass
        self.childKilled = True
        if exitStatus is None:
            return exitStatus
        return exitStatus[1]

    def pythonCall(self, function, *stArgs, **stKeyArgs):
        """ call python function :function: with :stArgs: and :stKeyArgs:

        The function runs in a forked child; its S_OK/S_ERROR result comes
        back DEncoded over a pipe.
        """
        from DIRAC.Core.Utilities import DEncode
        self.log.verbose('pythonCall:', function.__name__)
        readFD, writeFD = os.pipe()
        pid = os.fork()
        self.childPID = pid
        if pid == 0:
            # child: writes the result, never returns (os._exit inside)
            os.close(readFD)
            self.__executePythonFunction(function, writeFD, *stArgs, **stKeyArgs)
            # FIXME: the close it is done at __executePythonFunction, do we need it here?
            os.close(writeFD)
        else:
            # parent: only reads from the pipe
            os.close(writeFD)
            readSeq = self.__selectFD([readFD])
            if not readSeq:
                return S_ERROR("Can't read from call %s" % (function.__name__))
            try:
                if len(readSeq) == 0:
                    # select timed out: kill the child and report the timeout
                    self.log.debug('Timeout limit reached for pythonCall', function.__name__)
                    self.__killPid(pid)

                    # HACK to avoid python bug
                    # self.wait()
                    retries = 10000
                    while os.waitpid(pid, 0) == -1 and retries > 0:
                        time.sleep(0.001)
                        retries -= 1

                    return S_ERROR('%d seconds timeout for "%s" call' % (self.timeout, function.__name__))
                elif readSeq[0] == readFD:
                    retDict = self.__readFromFD(readFD)
                    os.waitpid(pid, 0)
                    if retDict['OK']:
                        dataStub = retDict['Value']
                        if not dataStub:
                            return S_ERROR("Error decoding data coming from call")
                        retObj, stubLen = DEncode.decode(dataStub)
                        # reject trailing garbage after the encoded payload
                        if stubLen == len(dataStub):
                            return retObj
                        return S_ERROR("Error decoding data coming from call")
                    return retDict
            finally:
                os.close(readFD)

    def __generateSystemCommandError(self, exitStatus, message):
        """ create system command error

        :param int exitStatus: exit status
        :param str message: error message
        :return: S_ERROR with additional 'Value' tuple ( exitStatus, stdoutBuf, stderrBuf )
        """
        retDict = S_ERROR(message)
        retDict['Value'] = (exitStatus,
                            self.bufferList[0][0],
                            self.bufferList[1][0])
        return retDict

    def __readFromFile(self, fd, baseLength):
        """ read available data from file object :fd:

        :param fd: file object (a stdout/stderr pipe of the child)
        :param int baseLength: bytes already buffered, counted towards the limit
        """
        try:
            dataString = ""
            fn = fd.fileno()
            # poll with 1 second granularity for as long as data is available
            while fd in select.select([fd], [], [], 1)[0]:
                if isinstance(fn, int):
                    nB = os.read(fn, self.bufferLimit)
                else:
                    nB = fd.read(1)
                if nB == "":
                    break
                dataString += nB
                # break out of potential infinite loop, indicated by dataString growing beyond reason
                if len(dataString) + baseLength > self.bufferLimit:
                    self.log.error("DataString is getting too long (%s): %s " % (len(dataString),
                                                                                 dataString[-10000:]))
                    break
        except Exception as x:
            self.log.exception("SUBPROCESS: readFromFile exception")
            try:
                self.log.error('Error reading', 'type(nB) =%s' % type(nB))
                self.log.error('Error reading', 'nB =%s' % str(nB))
            except Exception:
                pass
            return S_ERROR('Can not read from output: %s' % str(x))
        if len(dataString) + baseLength > self.bufferLimit:
            self.log.error('Maximum output buffer length reached')
            retDict = S_ERROR('Reached maximum allowed length (%d bytes) for called '
                              'function return value' % self.bufferLimit)
            retDict['Value'] = dataString
            return retDict
        return S_OK(dataString)

    def __readFromSystemCommandOutput(self, fd, bufferIndex):
        """ read output from file object :fd: into buffer :bufferIndex:
        (0 = stdout, 1 = stderr as set up by systemCall) """
        retDict = self.__readFromFile(fd,
                                      len(self.bufferList[bufferIndex][0]))
        if retDict['OK']:
            self.bufferList[bufferIndex][0] += retDict['Value']
            if self.callback is not None:
                # feed complete lines to the callback while there are any
                while self.__callLineCallback(bufferIndex):
                    pass
            return S_OK()
        else:  # buffer size limit reached killing process (see comment on __readFromFile)
            exitStatus = self.killChild()
            return self.__generateSystemCommandError(exitStatus,
                                                     "%s for '%s' call" % (retDict['Message'], self.cmdSeq))

    def systemCall(self, cmdSeq, callbackFunction=None, shell=False, env=None):
        """ system call (no shell) - execute :cmdSeq: """
        if shell:
            self.log.verbose('shellCall:', cmdSeq)
        else:
            self.log.verbose('systemCall:', cmdSeq)
        self.cmdSeq = cmdSeq
        self.callback = callbackFunction
        # close_fds cannot be combined with redirected std handles on Windows
        if sys.platform.find("win") == 0:
            closefd = False
        else:
            closefd = True
        try:
            self.child = subprocess.Popen(self.cmdSeq,
                                          shell=shell,
                                          stdout=subprocess.PIPE,
                                          stderr=subprocess.PIPE,
                                          close_fds=closefd,
                                          env=env)
            self.childPID = self.child.pid
        except OSError as v:
            retDict = S_ERROR(v)
            retDict['Value'] = (-1, '', str(v))
            return retDict
        except Exception as x:
            try:
                self.child.stdout.close()
                self.child.stderr.close()
            except Exception:
                pass
            retDict = S_ERROR(x)
            retDict['Value'] = (-1, '', str(x))
            return retDict

        try:
            # [stdout buffer, line offset], [stderr buffer, line offset]
            self.bufferList = [["", 0], ["", 0]]
            initialTime = time.time()
            exitStatus = self.__poll(self.child.pid)
            while (0, 0) == exitStatus or exitStatus is None:
                retDict = self.__readFromCommand()
                if not retDict['OK']:
                    return retDict
                if self.timeout and time.time() - initialTime > self.timeout:
                    exitStatus = self.killChild()
                    self.__readFromCommand()
                    return self.__generateSystemCommandError(exitStatus,
                                                             "Timeout (%d seconds) for '%s' call" %
                                                             (self.timeout, cmdSeq))
                time.sleep(0.01)
                exitStatus = self.__poll(self.child.pid)
            # drain whatever is left in the pipes
            self.__readFromCommand()
            if exitStatus:
                exitStatus = exitStatus[1]
            # waitpid status word carries the exit code in the high byte
            if exitStatus >= 256:
                exitStatus /= 256
            return S_OK((exitStatus, self.bufferList[0][0], self.bufferList[1][0]))
        finally:
            try:
                self.child.stdout.close()
                self.child.stderr.close()
            except Exception:
                pass

    def getChildPID(self):
        """ child pid getter """
        return self.childPID

    def __readFromCommand(self):
        """ read child stdout and stderr """
        fdList = []
        for i in (self.child.stdout, self.child.stderr):
            try:
                if not i.closed:
                    fdList.append(i.fileno())
            except Exception:
                self.log.exception("SUBPROCESS: readFromCommand exception")
        readSeq = self.__selectFD(fdList, True)
        if readSeq is False:
            # no valid descriptor left to read from
            return S_OK()
        if self.child.stdout.fileno() in readSeq:
            retDict = self.__readFromSystemCommandOutput(self.child.stdout, 0)
            if not retDict['OK']:
                return retDict
        if self.child.stderr.fileno() in readSeq:
            retDict = self.__readFromSystemCommandOutput(self.child.stderr, 1)
            if not retDict['OK']:
                return retDict
        return S_OK()

    def __callLineCallback(self, bufferIndex):
        """ line callback execution

        :return: True when a complete line was consumed from the buffer,
                 False when no newline is pending or the callback raised
        """
        nextLineIndex = self.bufferList[bufferIndex][0][self.bufferList[bufferIndex][1]:].find("\n")
        if nextLineIndex > -1:
            try:
                self.callback(bufferIndex,
                              self.bufferList[bufferIndex][0][self.bufferList[bufferIndex][1]:
                                                              self.bufferList[bufferIndex][1] + nextLineIndex])
                # Each line processed is taken out of the buffer to prevent the limit from killing us
                nL = self.bufferList[bufferIndex][1] + nextLineIndex + 1
                self.bufferList[bufferIndex][0] = self.bufferList[bufferIndex][0][nL:]
                self.bufferList[bufferIndex][1] = 0
            except Exception:
                self.log.exception('Exception while calling callback function',
                                   '%s' % self.callback.__name__)
                self.log.showStack()
                return False
            return True
        return False
def systemCall(timeout, cmdSeq, callbackFunction=None, env=None, bufferLimit=52428800):
    """
    Use SubprocessExecutor class to execute cmdSeq (it can be a string or a sequence)
    with a timeout wrapper; it is executed directly, without calling a shell.
    """
    subProc = Subprocess(timeout=timeout, bufferLimit=bufferLimit)
    if timeout > 0 and USE_WATCHDOG:
        # enforce the timeout with a watchdog thread around the call
        watched = Watchdog(subProc.systemCall,
                           args=(cmdSeq, ),
                           kwargs={"callbackFunction": callbackFunction,
                                   "env": env,
                                   "shell": False})
        subProc.log.verbose('Subprocess Watchdog timeout set to %d' % timeout)
        return watched(timeout + 1)
    return subProc.systemCall(cmdSeq,
                              callbackFunction=callbackFunction,
                              env=env,
                              shell=False)
def shellCall(timeout, cmdSeq, callbackFunction=None, env=None, bufferLimit=52428800):
    """
    Use SubprocessExecutor class to execute cmdSeq (it can be a string or a sequence)
    with a timeout wrapper; cmdSeq is invoked through /bin/sh.
    """
    subProc = Subprocess(timeout=timeout, bufferLimit=bufferLimit)
    if timeout > 0 and USE_WATCHDOG:
        # enforce the timeout with a watchdog thread around the call
        watched = Watchdog(subProc.systemCall,
                           args=(cmdSeq, ),
                           kwargs={"callbackFunction": callbackFunction,
                                   "env": env,
                                   "shell": True})
        subProc.log.verbose('Subprocess Watchdog timeout set to %d' % timeout)
        return watched(timeout + 1)
    return subProc.systemCall(cmdSeq,
                              callbackFunction=callbackFunction,
                              env=env,
                              shell=True)
def pythonCall(timeout, function, *stArgs, **stKeyArgs):
    """
    Use SubprocessExecutor class to execute function with provided arguments,
    with a timeout wrapper.
    """
    subProc = Subprocess(timeout=timeout)
    if timeout > 0 and USE_WATCHDOG:
        # enforce the timeout with a watchdog thread around the call
        watched = Watchdog(subProc.pythonCall, args=(function, ) + stArgs, kwargs=stKeyArgs)
        subProc.log.verbose('Subprocess Watchdog timeout set to %d' % timeout)
        return watched(timeout + 1)
    return subProc.pythonCall(function, *stArgs, **stKeyArgs)
def __getChildrenForPID(ppid):
    """
    Get a list of direct children pids for ppid.

    Tries psutil first; falls back to parsing `ps` output when psutil is
    missing or fails.

    :param int ppid: parent process id
    :return: list of int pids
    """
    try:
        import psutil
        childrenList = []
        for proc in psutil.process_iter():
            # BUGFIX: psutil >= 2.0 turned Process.ppid into a method, so the
            # old `proc.ppid == ppid` compared a bound method with an int and
            # was always False (no children ever found). Support both APIs.
            procPPID = proc.ppid() if callable(proc.ppid) else proc.ppid
            if procPPID == ppid:
                childrenList.append(proc.pid)
        return childrenList
    except Exception:
        # psutil unavailable (or raced with a dying process): use ps instead
        magicCmd = "ps --no-headers --ppid %d -o pid" % ppid
        exc = subprocess.Popen(magicCmd,
                               stdout=subprocess.PIPE,
                               shell=True,
                               close_fds=True)
        exc.wait()
        return [int(pid.strip()) for pid in exc.stdout.readlines() if pid.strip()]
def getChildrenPIDs(ppid, foreachFunc=None):
    """
    Get all children recursively for a given ppid.
    Optional foreachFunc will be executed for each children pid
    """
    descendants = []
    for childPID in __getChildrenForPID(ppid):
        descendants.append(childPID)
        if foreachFunc:
            foreachFunc(childPID)
        # depth-first: collect grandchildren right after their parent
        descendants.extend(getChildrenPIDs(childPID, foreachFunc))
    return descendants
| arrabito/DIRAC | Core/Utilities/Subprocess.py | Python | gpl-3.0 | 20,685 |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Unit Tests for network code."""
from nova import test
from nova.network import linux_net
class IptablesManagerTestCase(test.TestCase):
    """Tests for nova.network.linux_net.IptablesManager rule rewriting.

    NOTE(review): the assertions expect built-in chains to be wrapped into
    chains prefixed 'runner.py-' -- presumably derived from the running
    binary's name under the test runner; confirm against linux_net.
    """

    # Snapshot of `iptables-save` output for the *filter* table, used as the
    # input that _modify_rules merges nova's chains and rules into.
    sample_filter = ['#Generated by iptables-save on Fri Feb 18 15:17:05 2011',
                     '*filter',
                     ':INPUT ACCEPT [2223527:305688874]',
                     ':FORWARD ACCEPT [0:0]',
                     ':OUTPUT ACCEPT [2172501:140856656]',
                     ':nova-compute-FORWARD - [0:0]',
                     ':nova-compute-INPUT - [0:0]',
                     ':nova-compute-local - [0:0]',
                     ':nova-compute-OUTPUT - [0:0]',
                     ':nova-filter-top - [0:0]',
                     '-A FORWARD -j nova-filter-top ',
                     '-A OUTPUT -j nova-filter-top ',
                     '-A nova-filter-top -j nova-compute-local ',
                     '-A INPUT -j nova-compute-INPUT ',
                     '-A OUTPUT -j nova-compute-OUTPUT ',
                     '-A FORWARD -j nova-compute-FORWARD ',
                     '-A INPUT -i virbr0 -p udp -m udp --dport 53 -j ACCEPT ',
                     '-A INPUT -i virbr0 -p tcp -m tcp --dport 53 -j ACCEPT ',
                     '-A INPUT -i virbr0 -p udp -m udp --dport 67 -j ACCEPT ',
                     '-A INPUT -i virbr0 -p tcp -m tcp --dport 67 -j ACCEPT ',
                     '-A FORWARD -s 192.168.122.0/24 -i virbr0 -j ACCEPT ',
                     '-A FORWARD -i virbr0 -o virbr0 -j ACCEPT ',
                     '-A FORWARD -o virbr0 -j REJECT --reject-with '
                     'icmp-port-unreachable ',
                     '-A FORWARD -i virbr0 -j REJECT --reject-with '
                     'icmp-port-unreachable ',
                     'COMMIT',
                     '# Completed on Fri Feb 18 15:17:05 2011']

    # Snapshot of `iptables-save` output for the *nat* table.
    sample_nat = ['# Generated by iptables-save on Fri Feb 18 15:17:05 2011',
                  '*nat',
                  ':PREROUTING ACCEPT [3936:762355]',
                  ':INPUT ACCEPT [2447:225266]',
                  ':OUTPUT ACCEPT [63491:4191863]',
                  ':POSTROUTING ACCEPT [63112:4108641]',
                  ':nova-compute-OUTPUT - [0:0]',
                  ':nova-compute-floating-ip-snat - [0:0]',
                  ':nova-compute-SNATTING - [0:0]',
                  ':nova-compute-PREROUTING - [0:0]',
                  ':nova-compute-POSTROUTING - [0:0]',
                  ':nova-postrouting-bottom - [0:0]',
                  '-A PREROUTING -j nova-compute-PREROUTING ',
                  '-A OUTPUT -j nova-compute-OUTPUT ',
                  '-A POSTROUTING -j nova-compute-POSTROUTING ',
                  '-A POSTROUTING -j nova-postrouting-bottom ',
                  '-A nova-postrouting-bottom -j nova-compute-SNATTING ',
                  '-A nova-compute-SNATTING -j nova-compute-floating-ip-snat ',
                  'COMMIT',
                  '# Completed on Fri Feb 18 15:17:05 2011']

    def setUp(self):
        """Create a fresh IptablesManager for each test."""
        super(IptablesManagerTestCase, self).setUp()
        self.manager = linux_net.IptablesManager()

    def test_filter_rules_are_wrapped(self):
        """A rule added to a built-in chain must land in the wrapped chain,
        and be gone again after remove_rule."""
        current_lines = self.sample_filter

        table = self.manager.ipv4['filter']
        table.add_rule('FORWARD', '-s 1.2.3.4/5 -j DROP')
        new_lines = self.manager._modify_rules(current_lines, table)
        self.assertTrue('-A runner.py-FORWARD '
                        '-s 1.2.3.4/5 -j DROP' in new_lines)

        table.remove_rule('FORWARD', '-s 1.2.3.4/5 -j DROP')
        new_lines = self.manager._modify_rules(current_lines, table)
        self.assertTrue('-A runner.py-FORWARD '
                        '-s 1.2.3.4/5 -j DROP' not in new_lines)

    def test_nat_rules(self):
        """Rewriting the nat table keeps nova's chains, avoids duplicates,
        keeps nova-postrouting-bottom last and wraps the built-ins."""
        current_lines = self.sample_nat
        new_lines = self.manager._modify_rules(current_lines,
                                               self.manager.ipv4['nat'])

        # pre-existing nova-compute chains must survive the rewrite
        for line in [':nova-compute-OUTPUT - [0:0]',
                     ':nova-compute-floating-ip-snat - [0:0]',
                     ':nova-compute-SNATTING - [0:0]',
                     ':nova-compute-PREROUTING - [0:0]',
                     ':nova-compute-POSTROUTING - [0:0]']:
            self.assertTrue(line in new_lines, "One of nova-compute's chains "
                                               "went missing.")

        # no rule/chain line may appear twice
        seen_lines = set()
        for line in new_lines:
            line = line.strip()
            self.assertTrue(line not in seen_lines,
                            "Duplicate line: %s" % line)
            seen_lines.add(line)

        # nova-postrouting-bottom must stay the last POSTROUTING jump
        last_postrouting_line = ''
        for line in new_lines:
            if line.startswith('-A POSTROUTING'):
                last_postrouting_line = line
        self.assertTrue('-j nova-postrouting-bottom' in last_postrouting_line,
                        "Last POSTROUTING rule does not jump to "
                        "nova-postouting-bottom: %s" % last_postrouting_line)

        # every built-in chain must jump into its wrapped counterpart
        for chain in ['POSTROUTING', 'PREROUTING', 'OUTPUT']:
            self.assertTrue('-A %s -j runner.py-%s'
                            % (chain, chain) in new_lines,
                            "Built-in chain %s not wrapped" % (chain,))

    def test_filter_rules(self):
        """Rewriting the filter table keeps nova's chains, avoids duplicates
        and routes FORWARD/OUTPUT through nova-filter-top first."""
        current_lines = self.sample_filter
        new_lines = self.manager._modify_rules(current_lines,
                                               self.manager.ipv4['filter'])

        # pre-existing nova-compute chains must survive the rewrite
        for line in [':nova-compute-FORWARD - [0:0]',
                     ':nova-compute-INPUT - [0:0]',
                     ':nova-compute-local - [0:0]',
                     ':nova-compute-OUTPUT - [0:0]']:
            self.assertTrue(line in new_lines, "One of nova-compute's chains"
                                               " went missing.")

        # no rule/chain line may appear twice
        seen_lines = set()
        for line in new_lines:
            line = line.strip()
            self.assertTrue(line not in seen_lines,
                            "Duplicate line: %s" % line)
            seen_lines.add(line)

        # the first FORWARD/OUTPUT rule must hand off to nova-filter-top
        for chain in ['FORWARD', 'OUTPUT']:
            for line in new_lines:
                if line.startswith('-A %s' % chain):
                    self.assertTrue('-j nova-filter-top' in line,
                                    "First %s rule does not "
                                    "jump to nova-filter-top" % chain)
                    break

        self.assertTrue('-A nova-filter-top '
                        '-j runner.py-local' in new_lines,
                        "nova-filter-top does not jump to wrapped local chain")

        # every built-in chain must jump into its wrapped counterpart
        for chain in ['INPUT', 'OUTPUT', 'FORWARD']:
            self.assertTrue('-A %s -j runner.py-%s'
                            % (chain, chain) in new_lines,
                            "Built-in chain %s not wrapped" % (chain,))
| rcbops/nova-buildpackage | nova/tests/test_iptables_network.py | Python | apache-2.0 | 7,550 |
# -*- coding: utf-8 -*-
# Author(s): Andrea Colangelo (andreacolangelo@openforce.it)
# Copyright 2018 Openforce Srls Unipersonale (www.openforce.it)
# Copyright 2018 Sergio Corato (https://efatto.it)
# Copyright 2018-2019 Lorenzo Battistini <https://github.com/eLBati>
# License LGPL-3.0 or later (https://www.gnu.org/licenses/lgpl).
import logging
import re
from lxml import etree
from odoo import api, fields, models, _
from odoo.exceptions import UserError
from odoo.addons.base.ir.ir_mail_server import MailDeliveryException
_logger = logging.getLogger(__name__)
# Pattern matched against SdI response attachment file names; the third
# underscore-separated field is read as the message type by
# parse_pec_response (response_name.split('_')[2]).
RESPONSE_MAIL_REGEX = '[A-Z]{2}[a-zA-Z0-9]{11,16}_[a-zA-Z0-9]{,5}_[A-Z]{2}_' \
                      '[a-zA-Z0-9]{,3}'
class FatturaPAAttachmentOut(models.Model):
    """Outgoing FatturaPA XML attachment, extended with the PEC sending
    workflow and the parsing of Exchange System (SdI) notifications."""
    _inherit = 'fatturapa.attachment.out'

    # Lifecycle on the SdI side; advanced by send_via_pec() and by
    # parse_pec_response() when notifications come back over PEC.
    state = fields.Selection([('ready', 'Ready to Send'),
                              ('sent', 'Sent'),
                              ('sender_error', 'Sender Error'),
                              ('recipient_error', 'Not delivered'),
                              ('rejected', 'Rejected (PA)'),
                              ('validated', 'Delivered'),
                              ('accepted', 'Accepted'),
                              ],
                             string='State',
                             default='ready', track_visibility='onchange')
    # Raw text of the most recent SdI notification for this attachment
    last_sdi_response = fields.Text(
        string='Last Response from Exchange System', default='No response yet',
        readonly=True)
    sending_date = fields.Datetime("Sent Date", readonly=True)
    delivered_date = fields.Datetime("Delivered Date", readonly=True)
    sending_user = fields.Many2one("res.users", "Sending User", readonly=True)

    @api.multi
    def reset_to_ready(self):
        """Put failed attachments back into the 'ready' state so they can be
        sent again; only allowed from 'sender_error'."""
        for att in self:
            if att.state != 'sender_error':
                raise UserError(
                    _("You can only reset files in 'Sender Error' state.")
                )
            att.state = 'ready'

    @api.model
    def _check_fetchmail(self):
        """Ensure a confirmed incoming PEC server exists (needed to receive
        the SdI notifications); raise otherwise."""
        server = self.env['fetchmail.server'].search([
            ('is_fatturapa_pec', '=', True),
            ('state', '=', 'done')
        ])
        if not server:
            raise UserError(_(
                "No incoming PEC server found. Please configure it."))

    @api.multi
    def send_via_pec(self):
        """Send each attachment to the Exchange System by PEC mail and move
        it to 'sent' (or 'sender_error' on delivery failure)."""
        self._check_fetchmail()
        states = self.mapped('state')
        # refuse mixed/invalid selections: everything must still be 'ready'
        if set(states) != set(['ready']):
            raise UserError(
                _("You can only send files in 'Ready to Send' state.")
            )
        for att in self:
            if not att.datas or not att.datas_fname:
                raise UserError(_("File content and file name are mandatory"))
            mail_message = self.env['mail.message'].create({
                'model': self._name,
                'res_id': att.id,
                'subject': att.name,
                'body': 'XML file for FatturaPA {} sent to Exchange System to '
                        'the email address {}.'
                        .format(
                            att.name,
                            self.env.user.company_id.email_exchange_system),
                'attachment_ids': [(6, 0, att.ir_attachment_id.ids)],
                'email_from': (
                    self.env.user.company_id.email_from_for_fatturaPA),
                'reply_to': (
                    self.env.user.company_id.email_from_for_fatturaPA),
                'mail_server_id': self.env.user.company_id.sdi_channel_id.
                pec_server_id.id,
            })
            mail = self.env['mail.mail'].create({
                'mail_message_id': mail_message.id,
                'body_html': mail_message.body,
                'email_to': self.env.user.company_id.email_exchange_system,
                'headers': {
                    'Return-Path':
                        self.env.user.company_id.email_from_for_fatturaPA
                }
            })
            if mail:
                try:
                    mail.send(raise_exception=True)
                    att.state = 'sent'
                    att.sending_date = fields.Datetime.now()
                    att.sending_user = self.env.user.id
                except MailDeliveryException as e:
                    att.state = 'sender_error'
                    # store the SMTP error detail on the mail record
                    mail.body = e[1]

    @api.multi
    def parse_pec_response(self, message_dict):
        """Route one incoming PEC message from SdI: match each notification
        attachment to the original outgoing invoice (by file name) and update
        its state/last_sdi_response accordingly.

        :param message_dict: mail.thread message dictionary
        :return: message_dict with 'model'/'res_id' pointing at the matched
                 attachment (res_id stays 0 when nothing matched)
        """
        message_dict['model'] = self._name
        message_dict['res_id'] = 0
        regex = re.compile(RESPONSE_MAIL_REGEX)
        attachments = [x for x in message_dict['attachments']
                       if regex.match(x.fname)]
        for attachment in attachments:
            response_name = attachment.fname
            # third underscore-separated token encodes the message type
            message_type = response_name.split('_')[2]
            if attachment.fname.lower().endswith('.zip'):
                # not implemented, case of AT, todo
                continue
            root = etree.fromstring(attachment.content)
            file_name = root.find('NomeFile')
            fatturapa_attachment_out = False
            if file_name is not None:
                file_name = file_name.text
                # match with or without the .p7m (signed) extension
                fatturapa_attachment_out = self.search(
                    ['|',
                     ('datas_fname', '=', file_name),
                     ('datas_fname', '=', file_name.replace('.p7m', ''))])
                if len(fatturapa_attachment_out) > 1:
                    _logger.info('More than 1 out invoice found for incoming'
                                 'message')
                    fatturapa_attachment_out = fatturapa_attachment_out[0]
            if not fatturapa_attachment_out:
                if message_type == 'MT':  # Metadati
                    # out invoice not found, so it is an incoming invoice
                    return message_dict
                else:
                    _logger.info('Error: FatturaPA {} not found.'.format(
                        file_name))
                    # TODO Send a mail warning
                    return message_dict
            if fatturapa_attachment_out:
                # common envelope fields present in every notification
                id_sdi = root.find('IdentificativoSdI')
                receipt_dt = root.find('DataOraRicezione')
                message_id = root.find('MessageId')
                id_sdi = id_sdi.text if id_sdi is not None else False
                receipt_dt = receipt_dt.text if receipt_dt is not None \
                    else False
                message_id = message_id.text if message_id is not None \
                    else False
                if message_type == 'NS':  # 2A. Notifica di Scarto (rejection by SdI)
                    error_list = root.find('ListaErrori')
                    error_str = ''
                    for error in error_list:
                        error_str += u"\n[%s] %s %s" % (
                            error.find('Codice').text if error.find(
                                'Codice') is not None else '',
                            error.find('Descrizione').text if error.find(
                                'Descrizione') is not None else '',
                            error.find('Suggerimento').text if error.find(
                                'Suggerimento') is not None else ''
                        )
                    fatturapa_attachment_out.write({
                        'state': 'sender_error',
                        'last_sdi_response': u'SdI ID: {}; '
                        u'Message ID: {}; Receipt date: {}; '
                        u'Error: {}'.format(
                            id_sdi, message_id, receipt_dt, error_str)
                    })
                elif message_type == 'MC':  # 3A. Mancata consegna (not delivered)
                    missed_delivery_note = root.find('Descrizione').text
                    fatturapa_attachment_out.write({
                        'state': 'recipient_error',
                        'last_sdi_response': u'SdI ID: {}; '
                        u'Message ID: {}; Receipt date: {}; '
                        u'Missed delivery note: {}'.format(
                            id_sdi, message_id, receipt_dt,
                            missed_delivery_note)
                    })
                elif message_type == 'RC':  # 3B. Ricevuta di Consegna (delivered)
                    delivery_dt = root.find('DataOraConsegna').text
                    fatturapa_attachment_out.write({
                        'state': 'validated',
                        'delivered_date': fields.Datetime.now(),
                        'last_sdi_response': 'SdI ID: {}; '
                        'Message ID: {}; Receipt date: {}; '
                        'Delivery date: {}'.format(
                            id_sdi, message_id, receipt_dt, delivery_dt)
                    })
                elif message_type == 'NE':  # 4A. Notifica Esito per PA (outcome)
                    esito_committente = root.find('EsitoCommittente')
                    if esito_committente is not None:
                        # more than one esito?
                        esito = esito_committente.find('Esito')
                        if esito is not None:
                            # EC01 = accepted, EC02 = rejected by the PA
                            if esito.text == 'EC01':
                                state = 'validated'
                            elif esito.text == 'EC02':
                                state = 'rejected'
                            fatturapa_attachment_out.write({
                                'state': state,
                                'last_sdi_response': u'SdI ID: {}; '
                                u'Message ID: {}; Response: {}; '.format(
                                    id_sdi, message_id, esito.text)
                            })
                elif message_type == 'DT':  # 5. Decorrenza Termini per PA (deadline expired)
                    description = root.find('Descrizione')
                    if description is not None:
                        fatturapa_attachment_out.write({
                            'state': 'validated',
                            'last_sdi_response': u'SdI ID: {}; '
                            u'Message ID: {}; Receipt date: {}; '
                            u'Description: {}'.format(
                                id_sdi, message_id, receipt_dt,
                                description.text)
                        })
                # not implemented - todo
                elif message_type == 'AT':  # 6. Avvenuta Trasmissione per PA
                    description = root.find('Descrizione')
                    if description is not None:
                        fatturapa_attachment_out.write({
                            'state': 'accepted',
                            'last_sdi_response': (
                                u'SdI ID: {}; Message ID: {}; '
                                u'Receipt date: {};'
                                u' Description: {}'
                            ).format(
                                id_sdi, message_id, receipt_dt,
                                description.text)
                        })
                message_dict['res_id'] = fatturapa_attachment_out.id
        return message_dict

    @api.multi
    def unlink(self):
        """Forbid deleting attachments that already left the 'ready' state."""
        for att in self:
            if att.state != 'ready':
                raise UserError(_(
                    "You can only delete files in 'Ready to Send' state."
                ))
        return super(FatturaPAAttachmentOut, self).unlink()
| linkitspa/l10n-italy | l10n_it_fatturapa_pec/models/fatturapa_attachment_out.py | Python | agpl-3.0 | 11,390 |
# generated from catkin/cmake/template/pkg.context.pc.in
# Build-time pkg-config context for the catkin package "base_data".
# The template substituted empty strings for the prefix, include dirs,
# dependencies and libraries, so those entries collapse to "" / [].
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = []
PROJECT_CATKIN_DEPENDS = ""
PKG_CONFIG_LIBRARIES_WITH_PREFIX = []
PROJECT_NAME = "base_data"
PROJECT_SPACE_DIR = "/home/robin/github/Botnav/turtlebot_base_data/install"
PROJECT_VERSION = "0.0.0"
| lypRobin/Botnav | turtlebot_base_data/build/base_data/catkin_generated/pkg.installspace.context.pc.py | Python | gpl-2.0 | 396 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from datetime import datetime
from pytest import raises
from shapely.geometry import Polygon
from osmcha.changeset import ChangesetList
from osmcha.changeset import Analyse
from osmcha.changeset import WORDS
from osmcha.changeset import find_words
from osmcha.changeset import InvalidChangesetError
from osmcha.warnings import Warnings
def test_find_words():
    """Test the changeset.find_words function and the regular expressions."""
    suspect_words = WORDS['sources'] + WORDS['common']
    excluded_words = WORDS['exclude']
    # suspect word, no exclusion list supplied
    assert find_words('import buildings', suspect_words)
    # sentences that must be flagged even with the exclusion list active
    flagged = [
        'imported Importação unimportant',
        'GooGle is not important',
        'дані по імпорту',
        'places from яндекс',
        'places from 2gis',
        'places from 2гис',
        'places from yandex',
    ]
    for sentence in flagged:
        assert find_words(sentence, suspect_words, excluded_words)
    # sentences that must NOT be flagged (clean or excluded terms only)
    clean = [
        'important edit',
        'Where is here?',
        'somewhere in the world',
        'Yandex Panorama',
    ]
    for sentence in clean:
        assert not find_words(sentence, suspect_words, excluded_words)
def test_changeset_list():
    """Test ChangesetList class."""
    changesets = ChangesetList('tests/245.osm.gz').changesets
    assert len(changesets) == 25
    first = changesets[0]
    assert first['id'] == '31982803'
    assert first['created_by'] == 'Potlatch 2'
    assert first['user'] == 'GarrettB'
    assert first['uid'] == '352373'
    assert first['comment'] == 'Added Emerald Pool Waterfall'
    assert first['comments_count'] == '0'
    # bbox is closed (first point repeated) as shapely requires
    expected_bbox = Polygon([
        (-71.0646843, 44.2371354), (-71.0048652, 44.2371354),
        (-71.0048652, 44.2430624), (-71.0646843, 44.2430624),
        (-71.0646843, 44.2371354)
    ])
    assert first['bbox'] == expected_bbox
def test_changeset_list_with_filters():
    """Test ChangesetList class filter method."""
    # the geojson boundary must narrow the 25 changesets down to one
    filtered = ChangesetList('tests/245.osm.gz', 'tests/map.geojson')
    assert len(filtered.changesets) == 1
    assert filtered.changesets[0]['id'] == '31982803'
def test_invalid_changeset_error():
    """Analyse must refuse input that is neither an id nor a changeset dict."""
    with raises(InvalidChangesetError):
        Analyse([999])
def test_analyse_init():
    """Analyse should expose core fields and collect the rest as metadata."""
    area = Polygon([
        (-71.0646843, 44.2371354), (-71.0048652, 44.2371354),
        (-71.0048652, 44.2430624), (-71.0646843, 44.2430624),
        (-71.0646843, 44.2371354)
    ])
    changeset = dict(
        created_by='Potlatch 2',
        created_at='2015-04-25T18:08:46Z',
        build='2.3-650-gad99430',
        version='2.3',
        comment='Put data from Google',
        comments_count='12',
        id='1',
        user='JustTest',
        uid='123123',
        bbox=area,
    )
    analysed = Analyse(changeset)
    assert analysed.id == 1
    assert analysed.editor == 'Potlatch 2'
    assert analysed.comment == 'Put data from Google'
    assert analysed.comments_count == 12
    assert analysed.user == 'JustTest'
    assert analysed.uid == '123123'
    assert analysed.date == datetime(2015, 4, 25, 18, 8, 46)
    # non-core keys are preserved under .metadata
    assert analysed.metadata == {'build': '2.3-650-gad99430', 'version': '2.3'}
def test_analyse_label_suspicious():
    """label_suspicious must record the reason and mark the changeset."""
    changeset = dict(
        created_by='Potlatch 2',
        created_at='2015-04-25T18:08:46Z',
        build='2.3-650-gad99430',
        version='2.3',
        comment='Put data from Google',
        comments_count='1',
        id='1',
        user='JustTest',
        uid='123123',
        bbox=Polygon([
            (-71.0646843, 44.2371354), (-71.0048652, 44.2371354),
            (-71.0048652, 44.2430624), (-71.0646843, 44.2430624),
            (-71.0646843, 44.2371354)
        ]),
    )
    analysed = Analyse(changeset)
    analysed.label_suspicious('some reason')
    assert 'some reason' in analysed.suspicion_reasons
    assert analysed.is_suspect
def test_changeset_without_coords():
    """Changeset deleted a relation, so it has not a bbox."""
    # fetched by id from the OSM API; the deletion-only changeset yields an
    # empty geometry collection instead of a polygon
    analysed = Analyse(33624206)
    assert analysed.bbox == 'GEOMETRYCOLLECTION EMPTY'
def _changeset_fixture(**overrides):
    """Return a minimal Potlatch 2 changeset dict for Analyse, with
    :overrides: applied on top (removes the 5x duplicated fixture)."""
    base = {
        'created_by': 'Potlatch 2',
        'created_at': '2015-04-25T18:08:46Z',
        'build': '2.3-650-gad99430',
        'version': '2.3',
        'comments_count': '0',
        'id': '1',
        'user': 'JustTest',
        'uid': '123123',
        'bbox': Polygon([
            (-71.0646843, 44.2371354), (-71.0048652, 44.2371354),
            (-71.0048652, 44.2430624), (-71.0646843, 44.2430624),
            (-71.0646843, 44.2371354)
        ]),
    }
    base.update(overrides)
    return base


def test_analyse_verify_words():
    """verify_words must flag changesets whose comment, source or imagery
    contains a suspect word, and leave clean/excluded ones alone."""
    # suspect word in the comment
    ch = Analyse(_changeset_fixture(comment='Put data from Google',
                                    comments_count='1'))
    ch.verify_words()
    assert ch.is_suspect
    assert 'suspect_word' in ch.suspicion_reasons

    # suspect word in the source field
    ch = Analyse(_changeset_fixture(source='Waze'))
    ch.verify_words()
    assert ch.is_suspect
    assert 'suspect_word' in ch.suspicion_reasons

    # suspect word in the imagery_used field
    ch = Analyse(_changeset_fixture(
        imagery_used='Custom (http://{switch:a,b,c}.tiles.googlemaps.com/{zoom}/{x}/{y}.png)',
        source='Bing'))
    ch.verify_words()
    assert ch.is_suspect
    assert 'suspect_word' in ch.suspicion_reasons

    # innocent comment must not be flagged
    ch = Analyse(_changeset_fixture(comment='Somewhere in Brazil'))
    ch.verify_words()
    assert not ch.is_suspect

    # 'Yandex Panorama' is on the exclusion list and must not be flagged
    ch = Analyse(_changeset_fixture(comment='Somewhere in Brazil',
                                    comments_count='1',
                                    source='Yandex Panorama'))
    ch.verify_words()
    assert not ch.is_suspect
def test_analyse_verify_editor_josm():
    """Test if JOSM is a powerfull_editor."""
    # All tests in this module share the same bbox fixture polygon; only the
    # changeset tags vary between cases.
    ch_dict = {
        'created_by': 'JOSM/1.5 (8339 en)',
        'created_at': '2015-04-25T18:08:46Z',
        'comment': 'add pois',
        'comments_count': '3',
        'id': '1',
        'user': 'JustTest',
        'uid': '123123',
        'bbox': Polygon([
            (-71.0646843, 44.2371354), (-71.0048652, 44.2371354),
            (-71.0048652, 44.2430624), (-71.0646843, 44.2430624),
            (-71.0646843, 44.2371354)
        ])
    }
    ch = Analyse(ch_dict)
    ch.verify_editor()
    assert ch.powerfull_editor


def test_analyse_verify_editor_merkaartor():
    """Test if Merkaartor is a powerfull_editor."""
    ch_dict = {
        'created_by': 'Merkaartor 0.18 (de)',
        'created_at': '2015-04-25T18:08:46Z',
        'comment': 'add pois',
        'comments_count': '3',
        'id': '1',
        'user': 'JustTest',
        'uid': '123123',
        'bbox': Polygon([
            (-71.0646843, 44.2371354), (-71.0048652, 44.2371354),
            (-71.0048652, 44.2430624), (-71.0646843, 44.2430624),
            (-71.0646843, 44.2371354)
        ])
    }
    ch = Analyse(ch_dict)
    ch.verify_editor()
    assert ch.powerfull_editor


def test_analyse_verify_editor_level0():
    """Test if Level0 is a powerfull_editor."""
    ch_dict = {
        'created_by': 'Level0 v1.1',
        'created_at': '2015-04-25T18:08:46Z',
        'comment': 'add pois',
        'comments_count': '0',
        'id': '1',
        'user': 'JustTest',
        'uid': '123123',
        'bbox': Polygon([
            (-71.0646843, 44.2371354), (-71.0048652, 44.2371354),
            (-71.0048652, 44.2430624), (-71.0646843, 44.2430624),
            (-71.0646843, 44.2371354)
        ])
    }
    ch = Analyse(ch_dict)
    ch.verify_editor()
    assert ch.powerfull_editor


def test_analyse_verify_editor_qgis():
    """Test if QGIS is a powerfull_editor."""
    ch_dict = {
        'created_by': 'QGIS plugin',
        'created_at': '2015-04-25T18:08:46Z',
        'comment': 'add pois',
        'comments_count': '0',
        'id': '1',
        'user': 'JustTest',
        'uid': '123123',
        'bbox': Polygon([
            (-71.0646843, 44.2371354), (-71.0048652, 44.2371354),
            (-71.0048652, 44.2430624), (-71.0646843, 44.2430624),
            (-71.0646843, 44.2371354)
        ])
    }
    ch = Analyse(ch_dict)
    ch.verify_editor()
    assert ch.powerfull_editor


def test_analyse_verify_editor_id_osm():
    """Test if iD is not a powerfull_editor and if https://www.openstreetmap.org/edit
    is a trusted instance.
    """
    ch_dict = {
        'created_by': 'iD 1.7.3',
        'host': 'https://www.openstreetmap.org/edit',
        'created_at': '2015-04-25T18:08:46Z',
        'comment': 'add pois',
        'comments_count': '1',
        'id': '1',
        'user': 'JustTest',
        'uid': '123123',
        'bbox': Polygon([
            (-71.0646843, 44.2371354), (-71.0048652, 44.2371354),
            (-71.0048652, 44.2430624), (-71.0646843, 44.2430624),
            (-71.0646843, 44.2371354)
        ])
    }
    ch = Analyse(ch_dict)
    ch.verify_editor()
    assert ch.powerfull_editor is False
    assert ch.suspicion_reasons == []


def test_analyse_verify_editor_id_improveosm():
    """Test if iD is not a powerfull_editor and if http://improveosm.org
    is a trusted instance.
    """
    ch_dict = {
        'created_by': 'iD 1.7.3',
        'host': 'http://improveosm.org/',
        'created_at': '2015-04-25T18:08:46Z',
        'comment': 'add pois',
        'comments_count': '1',
        'id': '1',
        'user': 'JustTest',
        'uid': '123123',
        'bbox': Polygon([
            (-71.0646843, 44.2371354), (-71.0048652, 44.2371354),
            (-71.0048652, 44.2430624), (-71.0646843, 44.2430624),
            (-71.0646843, 44.2371354)
        ])
    }
    ch = Analyse(ch_dict)
    ch.verify_editor()
    assert ch.powerfull_editor is False
    assert ch.suspicion_reasons == []


def test_analyse_verify_editor_id_strava():
    """Test if iD is not a powerfull_editor and if https://strava.github.io/iD/
    is a trusted instance.
    """
    ch_dict = {
        'created_by': 'iD 1.7.3',
        'host': 'https://strava.github.io/iD/',
        'created_at': '2015-04-25T18:08:46Z',
        'comment': 'add pois',
        'comments_count': '0',
        'id': '1',
        'user': 'JustTest',
        'uid': '123123',
        'bbox': Polygon([
            (-71.0646843, 44.2371354), (-71.0048652, 44.2371354),
            (-71.0048652, 44.2430624), (-71.0646843, 44.2430624),
            (-71.0646843, 44.2371354)
        ])
    }
    ch = Analyse(ch_dict)
    ch.verify_editor()
    assert ch.powerfull_editor is False
    assert ch.suspicion_reasons == []


def test_analyse_verify_editor_rapid():
    """Test if RapiD is not a powerfull_editor and a trusted instance."""
    ch_dict = {
        'created_by': 'RapiD 0.9.0',
        'host': 'https://mapwith.ai/rapid',
        'created_at': '2015-04-25T18:08:46Z',
        'comment': 'add pois',
        'comments_count': '0',
        'id': '1',
        'user': 'JustTest',
        'uid': '123123',
        'bbox': Polygon([
            (-71.0646843, 44.2371354), (-71.0048652, 44.2371354),
            (-71.0048652, 44.2430624), (-71.0646843, 44.2430624),
            (-71.0646843, 44.2371354)
        ])
    }
    ch = Analyse(ch_dict)
    ch.verify_editor()
    assert ch.powerfull_editor is False
    assert ch.suspicion_reasons == []


def test_analyse_verify_editor_rapid_test():
    """Test if RapiD test is not a powerfull_editor and a trusted instance."""
    ch_dict = {
        'created_by': 'RapiD 0.9.0',
        'host': 'https://mapwith.ai/rapidtest',
        'created_at': '2015-04-25T18:08:46Z',
        'comment': 'add pois',
        'comments_count': '5',
        'id': '1',
        'user': 'JustTest',
        'uid': '123123',
        'bbox': Polygon([
            (-71.0646843, 44.2371354), (-71.0048652, 44.2371354),
            (-71.0048652, 44.2430624), (-71.0646843, 44.2430624),
            (-71.0646843, 44.2371354)
        ])
    }
    ch = Analyse(ch_dict)
    ch.verify_editor()
    assert ch.powerfull_editor is False
    assert ch.suspicion_reasons == []


def test_verify_editor_id_unknown_instance():
    """Test if iD is not a powerfull_editor and if 'Unknown iD instance' is added
    to suspicion_reasons.
    """
    ch_dict = {
        'created_by': 'iD 1.7.3',
        'host': 'http://anotherhost.com/iD',
        'created_at': '2015-04-25T18:08:46Z',
        'comment': 'add pois',
        'comments_count': '2',
        'id': '1',
        'user': 'JustTest',
        'uid': '123123',
        'bbox': Polygon([
            (-71.0646843, 44.2371354), (-71.0048652, 44.2371354),
            (-71.0048652, 44.2430624), (-71.0646843, 44.2430624),
            (-71.0646843, 44.2371354)
        ])
    }
    ch = Analyse(ch_dict)
    ch.verify_editor()
    assert ch.powerfull_editor is False
    assert 'Unknown iD instance' in ch.suspicion_reasons
    assert ch.is_suspect


def test_verify_editor_id_is_known_instance():
    """Test if iD is not a powerfull_editor and if 'Unknown iD instance' is NOT
    added to suspicion_reasons for a known instance.
    """
    ch_dict = {
        'created_by': 'iD 1.7.3',
        'host': 'https://www.openstreetmap.org/iD',
        'created_at': '2015-04-25T18:08:46Z',
        'comment': 'add pois',
        'comments_count': '1',
        'id': '1',
        'user': 'JustTest',
        'uid': '123123',
        'bbox': Polygon([
            (-71.0646843, 44.2371354), (-71.0048652, 44.2371354),
            (-71.0048652, 44.2430624), (-71.0646843, 44.2430624),
            (-71.0646843, 44.2371354)
        ])
    }
    ch = Analyse(ch_dict)
    ch.verify_editor()
    assert ch.powerfull_editor is False
    assert 'Unknown iD instance' not in ch.suspicion_reasons
    assert ch.is_suspect is False


def test_verify_editor_netlify_id_is_known_instance():
    """Test if iD is not a powerfull_editor and if 'Unknown iD instance' is NOT
    added to suspicion_reasons for the netlify preview instance.
    """
    ch_dict = {
        'created_by': 'iD 2.17.3',
        'host': 'https://ideditor.netlify.app/',
        'created_at': '2015-04-25T18:08:46Z',
        'comment': 'add pois',
        'comments_count': '4',
        'id': '1',
        'user': 'JustTest',
        'uid': '123123',
        'bbox': Polygon([
            (-71.0646843, 44.2371354), (-71.0048652, 44.2371354),
            (-71.0048652, 44.2430624), (-71.0646843, 44.2430624),
            (-71.0646843, 44.2371354)
        ])
    }
    ch = Analyse(ch_dict)
    ch.verify_editor()
    assert ch.powerfull_editor is False
    assert 'Unknown iD instance' not in ch.suspicion_reasons
    assert ch.is_suspect is False


def test_verify_id_editor_amazon_is_known_instance():
    """Test if iD is not a powerfull_editor and if 'Unknown iD instance' is NOT
    added to suspicion_reasons for the Amazon instance.
    """
    ch_dict = {
        'created_by': 'iD 2.17.3',
        'host': 'https://ideditor.amazon.com/',
        'created_at': '2020-09-25T18:08:46Z',
        'comment': 'add pois',
        'comments_count': '4',
        'id': '1',
        'user': 'JustTest',
        'uid': '123123',
        'bbox': Polygon([
            (-71.0646843, 44.2371354), (-71.0048652, 44.2371354),
            (-71.0048652, 44.2430624), (-71.0646843, 44.2430624),
            (-71.0646843, 44.2371354)
        ])
    }
    ch = Analyse(ch_dict)
    ch.verify_editor()
    assert ch.powerfull_editor is False
    assert 'Unknown iD instance' not in ch.suspicion_reasons
    assert ch.is_suspect is False


def test_verify_id_editor_lyft_is_known_instance():
    """Test if iD is not a powerfull_editor and 'Unknown iD instance' not added
    to suspicion_reasons.
    """
    ch_dict = {
        'created_by': 'iD 2.17.3',
        'host': 'https://lyft.com/',
        'created_at': '2020-09-25T18:08:46Z',
        'comment': 'add pois',
        'comments_count': '4',
        'id': '1',
        'user': 'JustTest',
        'uid': '123123',
        'bbox': Polygon([
            (-71.0646843, 44.2371354), (-71.0048652, 44.2371354),
            (-71.0048652, 44.2430624), (-71.0646843, 44.2430624),
            (-71.0646843, 44.2371354)
        ])
    }
    ch = Analyse(ch_dict)
    ch.verify_editor()
    assert ch.powerfull_editor is False
    assert 'Unknown iD instance' not in ch.suspicion_reasons
    assert ch.is_suspect is False


def test_verify_hotosm_id_is_known_instance():
    """Test if iD is not a powerfull_editor and if 'Unknown iD instance' is NOT
    added to suspicion_reasons for the teachosm and hotosm tasking managers.
    """
    ch1 = {
        'created_by': 'iD 1.7.3',
        'host': 'https://tasks.teachosm.org/projects/23/map/',
        'created_at': '2015-04-25T18:08:46Z',
        'comment': 'add pois',
        'comments_count': '0',
        'id': '1',
        'user': 'JustTest',
        'uid': '123123',
        'bbox': Polygon([
            (-71.0646843, 44.2371354), (-71.0048652, 44.2371354),
            (-71.0048652, 44.2430624), (-71.0646843, 44.2430624),
            (-71.0646843, 44.2371354)
        ])
    }
    ch2 = {
        'created_by': 'iD 1.7.3',
        'host': 'https://tasks.hotosm.org/projects/23/map/',
        'created_at': '2015-04-25T18:08:46Z',
        'comment': 'add pois',
        'comments_count': '1',
        'id': '1',
        'user': 'JustTest',
        'uid': '123123',
        'bbox': Polygon([
            (-71.0646843, 44.2371354), (-71.0048652, 44.2371354),
            (-71.0048652, 44.2430624), (-71.0646843, 44.2430624),
            (-71.0646843, 44.2371354)
        ])
    }
    ch = Analyse(ch1)
    ch.verify_editor()
    assert ch.powerfull_editor is False
    assert 'Unknown iD instance' not in ch.suspicion_reasons
    assert ch.is_suspect is False
    ch_2 = Analyse(ch2)
    ch_2.verify_editor()
    assert ch_2.powerfull_editor is False
    assert 'Unknown iD instance' not in ch_2.suspicion_reasons
    assert ch_2.is_suspect is False


def test_analyse_verify_editor_Potlatch2():
    """Test if Potlatch 2 is not a powerfull_editor."""
    ch_dict = {
        'created_by': 'Potlatch 2',
        'created_at': '2015-04-25T18:08:46Z',
        'comment': 'add pois',
        'comments_count': '0',
        'id': '1',
        'user': 'JustTest',
        'uid': '123123',
        'bbox': Polygon([
            (-71.0646843, 44.2371354), (-71.0048652, 44.2371354),
            (-71.0048652, 44.2430624), (-71.0646843, 44.2430624),
            (-71.0646843, 44.2371354)
        ])
    }
    ch = Analyse(ch_dict)
    ch.verify_editor()
    assert ch.powerfull_editor is False
def test_analyse_count():
    """Created: 8. Modified: 3. Deleted: 2 / changeset 32663070."""
    ch = Analyse(32663070)
    ch.full_analysis()
    assert ch.create == 8
    assert ch.modify == 3
    assert ch.delete == 2
    assert ch.is_suspect is False
    assert len(ch.suspicion_reasons) == 0


def test_analyse_import():
    """Created: 1900. Modified: 16. Deleted: 320 / JOSM"""
    ch = Analyse(10013029)
    ch.full_analysis()
    assert ch.is_suspect
    assert 'possible import' in ch.suspicion_reasons


def test_new_user_custom_create_value():
    """Created: 1900. Modified: 16. Deleted: 320 / JOSM"""
    # Raising create_threshold above the changeset's create count disables the
    # 'possible import' flag; only 'New mapper' remains.
    ch = Analyse(10013029, create_threshold=2000)
    ch.full_analysis()
    assert ch.is_suspect is True
    assert 'possible import' not in ch.suspicion_reasons
    assert 'New mapper' in ch.suspicion_reasons
    assert len(ch.suspicion_reasons) == 1


def test_analyse_mass_modification():
    """Created: 322. Modified: 1115. Deleted: 140 / Potlatch 2"""
    ch = Analyse(19863853)
    ch.full_analysis()
    assert ch.is_suspect
    assert 'mass modification' in ch.suspicion_reasons


def test_custom_modify_value():
    """Created: 322. Modified: 1115. Deleted: 140 / Potlatch 2"""
    # A modify_threshold above the modify count clears the suspicion.
    ch = Analyse(19863853, modify_threshold=1200)
    ch.full_analysis()
    assert ch.is_suspect is False
    assert len(ch.suspicion_reasons) == 0


def test_analyse_mass_deletion():
    """Created: 0. Modified: 0. Deleted: 1019 / Potlatch 2"""
    ch = Analyse(31450443)
    ch.full_analysis()
    assert ch.is_suspect
    assert 'mass deletion' in ch.suspicion_reasons


def test_custom_delete_value():
    """C/M/D = 0 0 61 / iD"""
    ch = Analyse(45901540, delete_threshold=100)
    ch.full_analysis()
    assert ch.is_suspect is False
    assert len(ch.suspicion_reasons) == 0


def test_custom_percentage():
    """C/M/D = 481 620 80 / JOSM"""
    ch = Analyse(45082154)
    ch.full_analysis()
    assert ch.is_suspect is False
    assert len(ch.suspicion_reasons) == 0
    # A lower percentage threshold turns the same changeset suspect.
    ch = Analyse(45082154, percentage=0.5)
    ch.full_analysis()
    assert ch.is_suspect
    assert 'mass modification' in ch.suspicion_reasons


def test_custom_top_threshold():
    """C/M/D = 1072 124 282 / made with iD"""
    ch = Analyse(45862717)
    ch.full_analysis()
    assert ch.is_suspect
    assert 'possible import' in ch.suspicion_reasons
    # A higher top_threshold keeps the same changeset below the import bar.
    ch = Analyse(45862717, top_threshold=1100)
    ch.full_analysis()
    assert ch.is_suspect is False
    assert len(ch.suspicion_reasons) == 0


def test_no_duplicated_reason():
    """Changeset with word import in comment and source fields."""
    # The reason must appear only once even when two fields trigger it.
    ch = Analyse(45632780)
    ch.full_analysis()
    assert ch.is_suspect
    assert ch.suspicion_reasons == ['suspect_word']


def test_redacted_changeset():
    """Redacted changesets have no metadata so those cases need to be treated
    to avoid a ZeroDivisionError in the Analyse.count() method.
    """
    ch = Analyse(34495147)
    ch.full_analysis()
    assert ch.is_suspect is False
def test_get_dict():
    """Test if get_dict function return only the fields that osmcha-django needs
    to save in the database.

    The same 17 keys are expected for every changeset, so they are checked in
    a loop instead of one assert per key, and get_dict() is called once per
    changeset instead of once per assertion.
    """
    expected_keys = [
        'id', 'user', 'uid', 'editor', 'bbox', 'date', 'comment',
        'comments_count', 'source', 'imagery_used', 'is_suspect',
        'powerfull_editor', 'suspicion_reasons', 'create', 'modify',
        'delete', 'metadata',
    ]

    # An iD changeset
    ch = Analyse(46286980)
    ch.full_analysis()
    ch_dict = ch.get_dict()
    for key in expected_keys:
        assert key in ch_dict
    assert ch_dict['metadata']['host'] == 'https://www.openstreetmap.org/id'
    assert len(ch_dict) == 17

    # An iD changeset with warnings:
    ch = Analyse(72783703)
    ch.full_analysis()
    ch_dict = ch.get_dict()
    for key in expected_keys:
        assert key in ch_dict
    assert ch_dict['metadata']['host'] == 'https://www.openstreetmap.org/edit'
    assert ch_dict['metadata']['locale'] == 'en-US'
    assert ch_dict['metadata']['warnings:crossing_ways'] == 1
    assert ch_dict['metadata']['changesets_count'] == 5970
    assert ch_dict['comments_count'] == 2
    assert len(ch_dict) == 17

    # A JOSM changeset
    ch = Analyse(46315321)
    ch.full_analysis()
    ch_dict = ch.get_dict()
    for key in expected_keys:
        assert key in ch_dict
    assert ch_dict['metadata'] == {}
    assert len(ch_dict) == 17
def test_changeset_without_tags():
    """A changeset without any tags must be flagged as suspect."""
    ch = Analyse(46755934)
    ch.full_analysis()
    assert ch.powerfull_editor
    assert ch.is_suspect
    assert 'Software editor was not declared' in ch.suspicion_reasons


def test_changeset_by_user_with_more_than_one_block():
    """A changeset by a user with multiple blocks must be flagged."""
    changeset = Analyse(34879408)
    changeset.full_analysis()
    assert 'User has multiple blocks' in changeset.suspicion_reasons
    assert changeset.is_suspect


def test_changeset_by_new_mapper():
    """A changeset by a new user gets the 'New mapper' suspicion reason."""
    changeset = Analyse(46756461)
    changeset.full_analysis()
    assert 'New mapper' in changeset.suspicion_reasons
    assert changeset.is_suspect


def test_changeset_by_another_new_mapper():
    """Same as test_changeset_by_new_mapper with a different changeset."""
    changeset = Analyse(36700893)
    changeset.full_analysis()
    assert 'New mapper' in changeset.suspicion_reasons
    assert changeset.is_suspect


def test_changeset_by_old_mapper_with_unicode_username():
    """An experienced mapper must not be flagged (unicode username case)."""
    changeset = Analyse(46790192)
    changeset.full_analysis()
    assert 'New mapper' not in changeset.suspicion_reasons
    assert not changeset.is_suspect


def test_changeset_by_old_mapper_with_special_character_username():
    """An experienced mapper must not be flagged (special character case)."""
    changeset = Analyse(46141825)
    changeset.full_analysis()
    assert 'New mapper' not in changeset.suspicion_reasons
    assert not changeset.is_suspect
def test_changeset_with_review_requested():
    """A changeset tagged with review_requested=yes must be flagged."""
    ch_dict = {
        'created_by': 'Potlatch 2',
        'created_at': '2015-04-25T18:08:46Z',
        'comment': 'add pois',
        'comments_count': '1',
        'id': '1',
        'user': 'JustTest',
        'uid': '123123',
        'review_requested': 'yes',
        'bbox': Polygon([
            (-71.0646843, 44.2371354), (-71.0048652, 44.2371354),
            (-71.0048652, 44.2430624), (-71.0646843, 44.2430624),
            (-71.0646843, 44.2371354)
        ])
    }
    changeset = Analyse(ch_dict)
    changeset.full_analysis()
    assert 'Review requested' in changeset.suspicion_reasons
    assert changeset.is_suspect


def test_changeset_with_warning_tag_almost_junction():
    """Each non-zero iD 'warnings:*' tag must map to its suspicion reason."""
    ch_dict = {
        'created_by': 'iD',
        'created_at': '2019-04-25T18:08:46Z',
        'host': 'https://www.openstreetmap.org/edit',
        'comment': 'add pois',
        'comments_count': '3',
        'id': '1',
        'user': 'JustTest',
        'uid': '123123',
        'warnings:almost_junction:highway-highway': '1',
        'warnings:missing_role': '1',
        'warnings:missing_tag:any': '1',
        'warnings:private_data': '1',
        'warnings:mismatched_geometry': '1',
        'warnings:unsquare_way': '1',
        'bbox': Polygon([
            (-71.0646843, 44.2371354), (-71.0048652, 44.2371354),
            (-71.0048652, 44.2430624), (-71.0646843, 44.2430624),
            (-71.0646843, 44.2371354)
        ])
    }
    changeset = Analyse(ch_dict)
    changeset.full_analysis()
    assert 'Almost junction' in changeset.suspicion_reasons
    assert 'Missing role' in changeset.suspicion_reasons
    assert 'Missing tag' in changeset.suspicion_reasons
    assert 'Private information' in changeset.suspicion_reasons
    assert 'Mismatched geometry' in changeset.suspicion_reasons
    assert 'Unsquare corners' in changeset.suspicion_reasons
    assert changeset.is_suspect


def test_changeset_with_warning_tag_close_nodes():
    """The close_nodes warning must map to 'Very close points'."""
    ch_dict = {
        'created_by': 'iD',
        'created_at': '2019-04-25T18:08:46Z',
        'host': 'https://www.openstreetmap.org/edit',
        'comment': 'add pois',
        'comments_count': '13',
        'id': '1',
        'user': 'JustTest',
        'uid': '123123',
        'warnings:close_nodes:detached': '1',
        'bbox': Polygon([
            (-71.0646843, 44.2371354), (-71.0048652, 44.2371354),
            (-71.0048652, 44.2430624), (-71.0646843, 44.2430624),
            (-71.0646843, 44.2371354)
        ])
    }
    changeset = Analyse(ch_dict)
    changeset.full_analysis()
    assert 'Very close points' in changeset.suspicion_reasons
    assert changeset.is_suspect


def test_changeset_with_warning_tag_crossing_ways():
    """The crossing_ways warning must map to 'Crossing ways'."""
    ch_dict = {
        'created_by': 'iD',
        'created_at': '2019-04-25T18:08:46Z',
        'host': 'https://www.openstreetmap.org/edit',
        'comment': 'add pois',
        'comments_count': '0',
        'id': '1',
        'user': 'JustTest',
        'uid': '123123',
        'warnings:crossing_ways:building-building': '1',
        'bbox': Polygon([
            (-71.0646843, 44.2371354), (-71.0048652, 44.2371354),
            (-71.0048652, 44.2430624), (-71.0646843, 44.2430624),
            (-71.0646843, 44.2371354)
        ])
    }
    changeset = Analyse(ch_dict)
    changeset.full_analysis()
    assert 'Crossing ways' in changeset.suspicion_reasons
    assert changeset.is_suspect


def test_changeset_with_warning_tag_disconnected_way():
    """Several warnings together must each yield their own reason."""
    ch_dict = {
        'created_by': 'iD',
        'created_at': '2019-04-25T18:08:46Z',
        'host': 'https://www.openstreetmap.org/edit',
        'comment': 'add pois',
        'comments_count': '2',
        'id': '1',
        'user': 'JustTest',
        'uid': '123123',
        'warnings:disconnected_way:highway': '4',
        'warnings:suspicious_name:generic_name': '4',
        'warnings:impossible_oneway:highway': '4',
        'warnings:incompatible_source': '4',
        'warnings:outdated_tags:incomplete_tags': '9',
        'bbox': Polygon([
            (-71.0646843, 44.2371354), (-71.0048652, 44.2371354),
            (-71.0048652, 44.2430624), (-71.0646843, 44.2430624),
            (-71.0646843, 44.2371354)
        ])
    }
    changeset = Analyse(ch_dict)
    changeset.full_analysis()
    assert 'Disconnected way' in changeset.suspicion_reasons
    assert 'Generic name' in changeset.suspicion_reasons
    assert 'Impossible oneway' in changeset.suspicion_reasons
    assert 'suspect_word' in changeset.suspicion_reasons
    assert 'Outdated tags' in changeset.suspicion_reasons
    assert changeset.is_suspect


def test_changeset_with_warning_tag_fix_me():
    """A warning tag whose value is '0' must not flag the changeset."""
    ch_dict = {
        'created_by': 'iD',
        'created_at': '2019-04-25T18:08:46Z',
        'host': 'https://www.openstreetmap.org/edit',
        'comment': 'add pois',
        'comments_count': '3',
        'id': '1',
        'user': 'JustTest',
        'uid': '123123',
        'warnings:fix_me': '0',
        'bbox': Polygon([
            (-71.0646843, 44.2371354), (-71.0048652, 44.2371354),
            (-71.0048652, 44.2430624), (-71.0646843, 44.2430624),
            (-71.0646843, 44.2371354)
        ])
    }
    changeset = Analyse(ch_dict)
    changeset.full_analysis()
    assert changeset.suspicion_reasons == []
    assert not changeset.is_suspect


def test_changeset_with_warning_tag_invalid_format():
    """A zero-valued invalid_format warning must not flag the changeset."""
    ch_dict = {
        'created_by': 'iD',
        'created_at': '2019-04-25T18:08:46Z',
        'host': 'https://www.openstreetmap.org/edit',
        'comment': 'add pois',
        'id': '1',
        'user': 'JustTest',
        'uid': '123123',
        'warnings:invalid_format': '0',
        'bbox': Polygon([
            (-71.0646843, 44.2371354), (-71.0048652, 44.2371354),
            (-71.0048652, 44.2430624), (-71.0646843, 44.2430624),
            (-71.0646843, 44.2371354)
        ])
    }
    changeset = Analyse(ch_dict)
    changeset.full_analysis()
    assert changeset.suspicion_reasons == []
    assert not changeset.is_suspect
def test_enabled_warnings():
    """Check the catalog of iD warnings handled by the Warnings class."""
    warnings = Warnings()
    assert warnings.get_non_exact_match_warnings() == [
        {'tag': 'warnings:almost_junction', 'reason': 'Almost junction', 'exact_match': False},
        {'tag': 'warnings:close_nodes', 'reason': 'Very close points', 'exact_match': False},
        {'tag': 'warnings:crossing_ways', 'reason': 'Crossing ways', 'exact_match': False},
        {'tag': 'warnings:disconnected_way', 'reason': 'Disconnected way', 'exact_match': False},
        {'tag': 'warnings:impossible_oneway', 'reason': 'Impossible oneway', 'exact_match': False},
        {'tag': 'warnings:incompatible_source', 'reason': 'suspect_word', 'exact_match': False},
        {'tag': 'warnings:mismatched_geometry', 'reason': 'Mismatched geometry', 'exact_match': False},
        {'tag': 'warnings:missing_role', 'reason': 'Missing role', 'exact_match': False},
        {'tag': 'warnings:missing_tag', 'reason': 'Missing tag', 'exact_match': False},
        {'tag': 'warnings:outdated_tags', 'reason': 'Outdated tags', 'exact_match': False},
        {'tag': 'warnings:private_data', 'reason': 'Private information', 'exact_match': False},
        {'tag': 'warnings:unsquare_way', 'reason': 'Unsquare corners', 'exact_match': False},
    ]
    assert warnings.get_exact_match_warnings() == [
        {'tag': 'warnings:suspicious_name:generic_name', 'reason': 'Generic name', 'exact_match': True},
    ]
    # Non exact-match warnings are matched by tag prefix...
    assert warnings.is_enabled('warnings:crossing_ways:building-building') == 'Crossing ways'
    assert warnings.is_enabled('warnings:crossing_ways:highway-building') == 'Crossing ways'
    assert warnings.is_enabled('warnings:impossible_oneway:highway') == 'Impossible oneway'
    # ...while suspicious_name requires the exact tag.
    assert warnings.is_enabled('warnings:suspicious_name:not-name') is None
    assert warnings.is_enabled('warnings:suspicious_name:') is None
    assert warnings.is_enabled('warnings:') is None
    assert warnings.is_enabled('warnings') is None
    assert warnings.is_enabled('warnings:suspicious_name:generic_name') == 'Generic name'
| willemarcel/osmcha | tests/test_mod.py | Python | gpl-3.0 | 36,086 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
.. py:currentmodule:: pysemeels.si.test_map
.. moduleauthor:: Hendrix Demers <hendrix.demers@mail.mcgill.ca>
Tests for the module :py:mod:`pysemeels.si.map`.
"""
###############################################################################
# Copyright 2017 Hendrix Demers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
# Standard library modules.
import unittest
import os
# Third party modules.
import h5py
import pytest
# Local modules.
# Project modules.
from pysemeels import get_current_module_path
from pysemeels.si.map import Map
from tests import is_bad_file
# Globals and constants variables.
class TestMap(unittest.TestCase):
    """
    TestCase class for the module `pysemeels.si.map`.
    """

    def setUp(self):
        """
        Setup method: create the Map under test and resolve the test data folder.
        """
        unittest.TestCase.setUp(self)

        self.name_ref = "TestMap"
        self.map = Map(self.name_ref)

        self.test_data_path = get_current_module_path(__file__, '../../test_data')

    def tearDown(self):
        """
        Teardown method.
        """
        unittest.TestCase.tearDown(self)

    def testSkeleton(self):
        """
        First test to check if the testcase is working with the testing framework.
        """
        # assert_() is a deprecated alias removed in Python 3.12.
        self.assertTrue(True)

    def test_init(self):
        """
        Test __init__ method.
        """
        name_ref = "TestMap_init"
        # Named si_map instead of "map" to avoid shadowing the builtin.
        si_map = Map(name_ref)

        self.assertEqual(name_ref, si_map.name)

    def test_write_hdf5(self):
        """
        Test write_hdf5 method.
        """
        filepath = os.path.join(self.test_data_path, "test_map_write_hdf5.hdf5")
        try:
            with h5py.File(filepath, "w") as hdf5_file:
                self.map.write_hdf5(hdf5_file)

                self.assertIn(self.name_ref, hdf5_file)
        finally:
            # Clean up the temporary file even when an assertion fails.
            if os.path.isfile(filepath):
                os.remove(filepath)

    def test_read_hdf5(self):
        """
        Test read_hdf5 method.
        """
        filepath = os.path.join(self.test_data_path, "test_map_read_hdf5.hdf5")
        if is_bad_file(filepath):
            pytest.skip("File not found: {}".format(filepath))

        with h5py.File(filepath, "r") as hdf5_file:
            self.map.read_hdf5(hdf5_file)

    def test_read_hdf5_bad_project(self):
        """
        Test read_hdf5 method with a different project name.
        """
        name_ref = "TestMap_init"
        si_map = Map(name_ref)

        filepath = os.path.join(self.test_data_path, "test_map_read_hdf5.hdf5")
        if is_bad_file(filepath):
            pytest.skip("File not found: {}".format(filepath))

        with h5py.File(filepath, "r") as hdf5_file:
            self.assertRaises(ValueError, si_map.read_hdf5, hdf5_file)

    def test_import_data(self):
        """
        Test import_data method.
        """
        si_map_folder = os.path.join(self.test_data_path, "hitachi/eels_su/zlp_2.5kx_60eV_map_lower")
        if not os.path.isdir(si_map_folder):
            pytest.skip("File not found: {}".format(si_map_folder))

        name = os.path.basename(si_map_folder)
        si_map = Map(name)
        si_map.import_data(si_map_folder)

        # TODO: enable once the reference values for this data set are confirmed.
        # self.assertEqual(-32.00, map.energies_eV[0])
        # self.assertEqual(2282, map.raw_counts[0])
        # self.assertEqual(21.84, map.energies_eV[-1])
        # self.assertEqual(0, map.raw_counts[-1])
        # self.assertEqual(1024, len(map.energies_eV))
        # self.assertEqual(1024, len(map.raw_counts))
| drix00/pysemeels | tests/si/test_map.py | Python | apache-2.0 | 4,419 |
# Copyright (C) 2020 Xin Liang <XLiang@suse.com>
# See COPYING for license information.
import re
import time
from contextlib import contextmanager
from . import utils
from . import config
from . import log
logger = log.setup_logger(__name__)
class SSHError(Exception):
    """
    Custom exception raised when the ssh connection to the remote node
    fails (ssh client exit code 255)
    """
class ClaimLockError(Exception):
    """
    Custom exception raised if claiming the lock failed or waiting for the
    lock release timed out
    """
class Lock(object):
    """
    A base class define a lock mechanism used to exclude other nodes

    The lock is a directory: "mkdir" either creates it atomically or fails
    because it already exists, so whoever creates it owns the lock.
    """

    LOCK_DIR = "/run/.crmsh_lock_directory"
    MKDIR_CMD = "mkdir {}".format(LOCK_DIR)
    RM_CMD = "rm -rf {}".format(LOCK_DIR)

    def __init__(self):
        """
        Init function
        """
        # only the lock owner can unlock
        self.lock_owner = False

    def _run(self, cmd):
        """
        Run command on local
        """
        return utils.get_stdout_stderr(cmd)

    def _create_lock_dir(self):
        """
        Try to create the lock directory; success (rc 0) means this
        instance now owns the lock
        """
        rc, _, _ = self._run(self.MKDIR_CMD)
        if rc == 0:
            self.lock_owner = True
            return True
        return False

    def _lock_or_fail(self):
        """
        Claim the lock once
        Raise ClaimLockError if the lock directory already exists
        """
        if not self._create_lock_dir():
            raise ClaimLockError("Failed to claim lock (the lock directory exists at {})".format(self.LOCK_DIR))

    def _unlock(self):
        """
        Remove the lock directory, but only when we are the owner
        """
        if self.lock_owner:
            self._run(self.RM_CMD)

    @contextmanager
    def lock(self):
        """
        Create lock directory on local, and remove it finally
        Might raise ClaimLockError

        try/finally is enough here: any exception propagates on its own,
        while the lock is always released when we own it.
        """
        try:
            self._lock_or_fail()
            yield
        finally:
            self._unlock()
class RemoteLock(Lock):
    """
    A class inherited from Lock class
    Define the behavior how to claim lock on remote node and how to wait the lock released
    """

    SSH_TIMEOUT = 10
    SSH_OPTION = "-o ConnectTimeout={} -o StrictHostKeyChecking=no".format(SSH_TIMEOUT)
    SSH_EXIT_ERR = 255
    MIN_LOCK_TIMEOUT = 120
    WAIT_INTERVAL = 10

    def __init__(self, remote_node):
        """
        Init function
        """
        self.remote_node = remote_node
        super().__init__()

    def _run(self, cmd):
        """
        Run command on remote node through ssh
        Raise SSHError when the ssh connection itself fails (exit code 255)
        """
        cmd = "ssh {} root@{} \"{}\"".format(self.SSH_OPTION, self.remote_node, cmd)
        rc, out, err = utils.get_stdout_stderr(cmd)
        if rc == self.SSH_EXIT_ERR:
            raise SSHError(err)
        return rc, out, err

    @property
    def lock_timeout(self):
        """
        Get lock_timeout from config.core
        Raise ValueError on a non-numeric or too small value
        """
        try:
            value = int(config.core.lock_timeout)
        except ValueError:
            raise ValueError("Invalid format of core.lock_timeout(should be a number)")
        if value < self.MIN_LOCK_TIMEOUT:
            raise ValueError("Minimum value of core.lock_timeout should be {}".format(self.MIN_LOCK_TIMEOUT))
        return value

    def _get_online_nodelist(self):
        """
        Get the online node list from remote node
        """
        rc, out, err = self._run("crm_node -l")
        if rc != 0 and err:
            raise ValueError(err)
        return re.findall('[0-9]+ (.*) member', out)

    def _lock_or_wait(self):
        """
        Try to claim lock on remote node, wait if failed to claim
        raise ClaimLockError if reached the lock_timeout
        """
        warned_once = False
        online_list = []
        pre_online_list = []
        expired_error_str = "Cannot continue since the lock directory exists at the node ({}:{})".format(self.remote_node, self.LOCK_DIR)
        current_time = int(time.time())
        timeout = current_time + self.lock_timeout
        while current_time <= timeout:
            # Try to claim the lock
            if self._create_lock_dir():
                # Success
                break
            # Might lose claiming lock again, start to wait again
            online_list = self._get_online_nodelist()
            if pre_online_list and pre_online_list != online_list:
                # Cluster membership changed while waiting; restart the
                # timeout window
                timeout = current_time + self.lock_timeout
            pre_online_list = online_list
            if not warned_once:
                warned_once = True
                logger.warning("Might have unfinished process on other nodes, wait %ss...", self.lock_timeout)
            time.sleep(self.WAIT_INTERVAL)
            current_time = int(time.time())
        else:
            # Loop ran out without a successful break
            raise ClaimLockError("Timed out after {} seconds. {}".format(self.lock_timeout, expired_error_str))

    @contextmanager
    def lock(self):
        """
        Create lock directory on remote, and remove it finally
        Might raise SSHError, ClaimLockError and ValueError
        """
        try:
            self._lock_or_wait()
            yield
        finally:
            self._unlock()
| ClusterLabs/crmsh | crmsh/lock.py | Python | gpl-2.0 | 5,164 |
import re
from collections import namedtuple
import sqlparse
from django.db.backends.base.introspection import (
BaseDatabaseIntrospection, FieldInfo as BaseFieldInfo, TableInfo,
)
from django.db.models import Index
from django.utils.regex_helper import _lazy_re_compile
# SQLite-specific FieldInfo: extends the base driver tuple with 'pk'
# (integer primary key detection) and 'has_json_constraint' (mapped to
# JSONField by DatabaseIntrospection.get_field_type).
FieldInfo = namedtuple('FieldInfo', BaseFieldInfo._fields + ('pk', 'has_json_constraint'))
# Matches type names like "char(11)" / "varchar(30)" and captures the size.
field_size_re = _lazy_re_compile(r'^\s*(?:var)?char\s*\(\s*(\d+)\s*\)\s*$')
def get_field_size(name):
    """Return the declared size of a "varchar(n)" type name, or None."""
    match = field_size_re.search(name)
    if match:
        return int(match[1])
    return None
# A dictionary-like mapping from SQLite type names to Django field names.
# It cannot be a plain dict because SQLite type declarations may embed
# parameters -- e.g. "varchar(30)" -- and SQLite accepts arbitrary casing,
# so the key has to be normalized before lookup.
class FlexibleFieldLookupDict:
    # SQLite does not normalize declared column types; it stores whatever the
    # CREATE TABLE statement said. Several spellings therefore map to the
    # same Django field.
    base_data_types_reverse = {
        'bool': 'BooleanField',
        'boolean': 'BooleanField',
        'smallint': 'SmallIntegerField',
        'smallint unsigned': 'PositiveSmallIntegerField',
        'smallinteger': 'SmallIntegerField',
        'int': 'IntegerField',
        'integer': 'IntegerField',
        'bigint': 'BigIntegerField',
        'integer unsigned': 'PositiveIntegerField',
        'bigint unsigned': 'PositiveBigIntegerField',
        'decimal': 'DecimalField',
        'real': 'FloatField',
        'text': 'TextField',
        'char': 'CharField',
        'varchar': 'CharField',
        'blob': 'BinaryField',
        'date': 'DateField',
        'datetime': 'DateTimeField',
        'time': 'TimeField',
    }
    def __getitem__(self, key):
        # Normalize e.g. "VARCHAR(30)" to "varchar" before looking it up.
        base_type = key.lower().split('(', 1)[0].strip()
        return self.base_data_types_reverse[base_type]
class DatabaseIntrospection(BaseDatabaseIntrospection):
    """
    SQLite implementation of database introspection.

    SQLite has no information schema, so metadata is recovered either from
    PRAGMA statements or by re-parsing the original CREATE TABLE statements
    stored in the sqlite_master table.
    """
    data_types_reverse = FlexibleFieldLookupDict()
    def get_field_type(self, data_type, description):
        """Map an SQL type name to a Django field name for one column."""
        field_type = super().get_field_type(data_type, description)
        if description.pk and field_type in {'BigIntegerField', 'IntegerField', 'SmallIntegerField'}:
            # No support for BigAutoField or SmallAutoField as SQLite treats
            # all integer primary keys as signed 64-bit integers.
            return 'AutoField'
        if description.has_json_constraint:
            return 'JSONField'
        return field_type
    def get_table_list(self, cursor):
        """Return a list of table and view names in the current database."""
        # Skip the sqlite_sequence system table used for autoincrement key
        # generation.
        cursor.execute("""
            SELECT name, type FROM sqlite_master
            WHERE type in ('table', 'view') AND NOT name='sqlite_sequence'
            ORDER BY name""")
        # row[1][0] is 't' for a table or 'v' for a view.
        return [TableInfo(row[0], row[1][0]) for row in cursor.fetchall()]
    def get_table_description(self, cursor, table_name):
        """
        Return a description of the table with the DB-API cursor.description
        interface.
        """
        # PRAGMA table_info returns (cid, name, type, notnull, dflt_value, pk)
        # per column.
        cursor.execute('PRAGMA table_info(%s)' % self.connection.ops.quote_name(table_name))
        table_info = cursor.fetchall()
        json_columns = set()
        if self.connection.features.can_introspect_json_field:
            # A column is treated as JSON when the table's CREATE statement
            # contains a json_valid("<column>") CHECK constraint for it.
            for line in table_info:
                column = line[1]
                json_constraint_sql = '%%json_valid("%s")%%' % column
                has_json_constraint = cursor.execute("""
                    SELECT sql
                    FROM sqlite_master
                    WHERE
                        type = 'table' AND
                        name = %s AND
                        sql LIKE %s
                """, [table_name, json_constraint_sql]).fetchone()
                if has_json_constraint:
                    json_columns.add(column)
        return [
            FieldInfo(
                name, data_type, None, get_field_size(data_type), None, None,
                not notnull, default, pk == 1, name in json_columns
            )
            for cid, name, data_type, notnull, default, pk in table_info
        ]
    def get_sequences(self, cursor, table_name, table_fields=()):
        """Return the auto-increment "sequence" for the table's primary key."""
        pk_col = self.get_primary_key_column(cursor, table_name)
        return [{'table': table_name, 'column': pk_col}]
    def get_relations(self, cursor, table_name):
        """
        Return a dictionary of {field_name: (field_name_other_table, other_table)}
        representing all relationships to the given table.
        """
        # Dictionary of relations to return
        relations = {}
        # Schema for this table
        cursor.execute(
            "SELECT sql, type FROM sqlite_master "
            "WHERE tbl_name = %s AND type IN ('table', 'view')",
            [table_name]
        )
        create_sql, table_type = cursor.fetchone()
        if table_type == 'view':
            # It might be a view, then no results will be returned
            return relations
        # Strip down to the column/constraint list between the outer parens.
        results = create_sql[create_sql.index('(') + 1:create_sql.rindex(')')]
        # Walk through and look for references to other tables. SQLite doesn't
        # really have enforced references, but since it echoes out the SQL used
        # to create the table we can look for REFERENCES statements used there.
        for field_desc in results.split(','):
            field_desc = field_desc.strip()
            if field_desc.startswith("UNIQUE"):
                continue
            m = re.search(r'references (\S*) ?\(["|]?(.*)["|]?\)', field_desc, re.I)
            if not m:
                continue
            table, column = [s.strip('"') for s in m.groups()]
            if field_desc.startswith("FOREIGN KEY"):
                # Find name of the target FK field
                m = re.match(r'FOREIGN KEY\s*\(([^\)]*)\).*', field_desc, re.I)
                field_name = m[1].strip('"')
            else:
                field_name = field_desc.split()[0].strip('"')
            # Look up the referenced table's schema to resolve the column.
            cursor.execute("SELECT sql FROM sqlite_master WHERE tbl_name = %s", [table])
            result = cursor.fetchall()[0]
            other_table_results = result[0].strip()
            li, ri = other_table_results.index('('), other_table_results.rindex(')')
            other_table_results = other_table_results[li + 1:ri]
            for other_desc in other_table_results.split(','):
                other_desc = other_desc.strip()
                if other_desc.startswith('UNIQUE'):
                    continue
                other_name = other_desc.split(' ', 1)[0].strip('"')
                if other_name == column:
                    relations[field_name] = (other_name, table)
                    break
        return relations
    def get_key_columns(self, cursor, table_name):
        """
        Return a list of (column_name, referenced_table_name, referenced_column_name)
        for all key columns in given table.
        """
        key_columns = []
        # Schema for this table
        cursor.execute("SELECT sql FROM sqlite_master WHERE tbl_name = %s AND type = %s", [table_name, "table"])
        results = cursor.fetchone()[0].strip()
        results = results[results.index('(') + 1:results.rindex(')')]
        # Walk through and look for references to other tables. SQLite doesn't
        # really have enforced references, but since it echoes out the SQL used
        # to create the table we can look for REFERENCES statements used there.
        for field_index, field_desc in enumerate(results.split(',')):
            field_desc = field_desc.strip()
            if field_desc.startswith("UNIQUE"):
                continue
            m = re.search(r'"(.*)".*references (.*) \(["|](.*)["|]\)', field_desc, re.I)
            if not m:
                continue
            # This will append (column_name, referenced_table_name, referenced_column_name) to key_columns
            key_columns.append(tuple(s.strip('"') for s in m.groups()))
        return key_columns
    def get_primary_key_column(self, cursor, table_name):
        """Return the column name of the primary key for the given table."""
        # Don't use PRAGMA because that causes issues with some transactions
        cursor.execute(
            "SELECT sql, type FROM sqlite_master "
            "WHERE tbl_name = %s AND type IN ('table', 'view')",
            [table_name]
        )
        row = cursor.fetchone()
        if row is None:
            raise ValueError("Table %s does not exist" % table_name)
        create_sql, table_type = row
        if table_type == 'view':
            # Views don't have a primary key.
            return None
        fields_sql = create_sql[create_sql.index('(') + 1:create_sql.rindex(')')]
        for field_desc in fields_sql.split(','):
            field_desc = field_desc.strip()
            # The column name may be quoted with ", `, or [] -- group 1 -- or
            # be a bare word -- group 2.
            m = re.match(r'(?:(?:["`\[])(.*)(?:["`\]])|(\w+)).*PRIMARY KEY.*', field_desc)
            if m:
                return m[1] if m[1] else m[2]
        return None
    def _get_foreign_key_constraints(self, cursor, table_name):
        """Return constraint dicts for the table's foreign keys via PRAGMA."""
        constraints = {}
        cursor.execute('PRAGMA foreign_key_list(%s)' % self.connection.ops.quote_name(table_name))
        for row in cursor.fetchall():
            # Remaining on_update/on_delete/match values are of no interest.
            id_, _, table, from_, to = row[:5]
            # SQLite doesn't name FK constraints; invent one from the FK id.
            constraints['fk_%d' % id_] = {
                'columns': [from_],
                'primary_key': False,
                'unique': False,
                'foreign_key': (table, to),
                'check': False,
                'index': False,
            }
        return constraints
    def _parse_column_or_constraint_definition(self, tokens, columns):
        """
        Consume one column or table-constraint definition from the token
        stream and return (constraint_name, unique_constraint, check_constraint,
        last_token), where the constraint dicts are None when not present.
        """
        token = None
        is_constraint_definition = None
        field_name = None
        constraint_name = None
        unique = False
        unique_columns = []
        check = False
        check_columns = []
        braces_deep = 0
        for token in tokens:
            if token.match(sqlparse.tokens.Punctuation, '('):
                braces_deep += 1
            elif token.match(sqlparse.tokens.Punctuation, ')'):
                braces_deep -= 1
                if braces_deep < 0:
                    # End of columns and constraints for table definition.
                    break
            elif braces_deep == 0 and token.match(sqlparse.tokens.Punctuation, ','):
                # End of current column or constraint definition.
                break
            # Detect column or constraint definition by first token.
            if is_constraint_definition is None:
                is_constraint_definition = token.match(sqlparse.tokens.Keyword, 'CONSTRAINT')
                if is_constraint_definition:
                    continue
            if is_constraint_definition:
                # Detect constraint name by second token.
                if constraint_name is None:
                    if token.ttype in (sqlparse.tokens.Name, sqlparse.tokens.Keyword):
                        constraint_name = token.value
                    elif token.ttype == sqlparse.tokens.Literal.String.Symbol:
                        # Quoted name: strip the surrounding quote characters.
                        constraint_name = token.value[1:-1]
                # Start constraint columns parsing after UNIQUE keyword.
                if token.match(sqlparse.tokens.Keyword, 'UNIQUE'):
                    unique = True
                    unique_braces_deep = braces_deep
                elif unique:
                    if unique_braces_deep == braces_deep:
                        if unique_columns:
                            # Stop constraint parsing.
                            unique = False
                        continue
                    if token.ttype in (sqlparse.tokens.Name, sqlparse.tokens.Keyword):
                        unique_columns.append(token.value)
                    elif token.ttype == sqlparse.tokens.Literal.String.Symbol:
                        unique_columns.append(token.value[1:-1])
            else:
                # Detect field name by first token.
                if field_name is None:
                    if token.ttype in (sqlparse.tokens.Name, sqlparse.tokens.Keyword):
                        field_name = token.value
                    elif token.ttype == sqlparse.tokens.Literal.String.Symbol:
                        field_name = token.value[1:-1]
                # Column-level UNIQUE applies to this single column.
                if token.match(sqlparse.tokens.Keyword, 'UNIQUE'):
                    unique_columns = [field_name]
            # Start constraint columns parsing after CHECK keyword.
            if token.match(sqlparse.tokens.Keyword, 'CHECK'):
                check = True
                check_braces_deep = braces_deep
            elif check:
                if check_braces_deep == braces_deep:
                    if check_columns:
                        # Stop constraint parsing.
                        check = False
                    continue
                if token.ttype in (sqlparse.tokens.Name, sqlparse.tokens.Keyword):
                    if token.value in columns:
                        check_columns.append(token.value)
                elif token.ttype == sqlparse.tokens.Literal.String.Symbol:
                    if token.value[1:-1] in columns:
                        check_columns.append(token.value[1:-1])
        unique_constraint = {
            'unique': True,
            'columns': unique_columns,
            'primary_key': False,
            'foreign_key': None,
            'check': False,
            'index': False,
        } if unique_columns else None
        check_constraint = {
            'check': True,
            'columns': check_columns,
            'primary_key': False,
            'unique': False,
            'foreign_key': None,
            'index': False,
        } if check_columns else None
        return constraint_name, unique_constraint, check_constraint, token
    def _parse_table_constraints(self, sql, columns):
        """
        Parse inline UNIQUE and CHECK constraints out of a CREATE TABLE
        statement and return them as a {name: constraint_dict} mapping.
        Unnamed constraints get generated '__unnamed_constraint_N__' names.
        """
        # Check constraint parsing is based of SQLite syntax diagram.
        # https://www.sqlite.org/syntaxdiagrams.html#table-constraint
        statement = sqlparse.parse(sql)[0]
        constraints = {}
        unnamed_constrains_index = 0
        tokens = (token for token in statement.flatten() if not token.is_whitespace)
        # Go to columns and constraint definition
        for token in tokens:
            if token.match(sqlparse.tokens.Punctuation, '('):
                break
        # Parse columns and constraint definition
        while True:
            constraint_name, unique, check, end_token = self._parse_column_or_constraint_definition(tokens, columns)
            if unique:
                if constraint_name:
                    constraints[constraint_name] = unique
                else:
                    unnamed_constrains_index += 1
                    constraints['__unnamed_constraint_%s__' % unnamed_constrains_index] = unique
            if check:
                if constraint_name:
                    constraints[constraint_name] = check
                else:
                    unnamed_constrains_index += 1
                    constraints['__unnamed_constraint_%s__' % unnamed_constrains_index] = check
            if end_token.match(sqlparse.tokens.Punctuation, ')'):
                break
        return constraints
    def get_constraints(self, cursor, table_name):
        """
        Retrieve any constraints or keys (unique, pk, fk, check, index) across
        one or more columns.
        """
        constraints = {}
        # Find inline check constraints.
        try:
            table_schema = cursor.execute(
                "SELECT sql FROM sqlite_master WHERE type='table' and name=%s" % (
                    self.connection.ops.quote_name(table_name),
                )
            ).fetchone()[0]
        except TypeError:
            # table_name is a view.
            pass
        else:
            columns = {info.name for info in self.get_table_description(cursor, table_name)}
            constraints.update(self._parse_table_constraints(table_schema, columns))
        # Get the index info
        cursor.execute("PRAGMA index_list(%s)" % self.connection.ops.quote_name(table_name))
        for row in cursor.fetchall():
            # SQLite 3.8.9+ has 5 columns, however older versions only give 3
            # columns. Discard last 2 columns if there.
            number, index, unique = row[:3]
            cursor.execute(
                "SELECT sql FROM sqlite_master "
                "WHERE type='index' AND name=%s" % self.connection.ops.quote_name(index)
            )
            # There's at most one row.
            sql, = cursor.fetchone() or (None,)
            # Inline constraints are already detected in
            # _parse_table_constraints(). The reasons to avoid fetching inline
            # constraints from `PRAGMA index_list` are:
            # - Inline constraints can have a different name and information
            #   than what `PRAGMA index_list` gives.
            # - Not all inline constraints may appear in `PRAGMA index_list`.
            if not sql:
                # An inline constraint
                continue
            # Get the index info for that index
            cursor.execute('PRAGMA index_info(%s)' % self.connection.ops.quote_name(index))
            for index_rank, column_rank, column in cursor.fetchall():
                if index not in constraints:
                    constraints[index] = {
                        "columns": [],
                        "primary_key": False,
                        "unique": bool(unique),
                        "foreign_key": None,
                        "check": False,
                        "index": True,
                    }
                constraints[index]['columns'].append(column)
            # Add type and column orders for indexes
            if constraints[index]['index'] and not constraints[index]['unique']:
                # SQLite doesn't support any index type other than b-tree
                constraints[index]['type'] = Index.suffix
                order_info = sql.split('(')[-1].split(')')[0].split(',')
                orders = ['DESC' if info.endswith('DESC') else 'ASC' for info in order_info]
                constraints[index]['orders'] = orders
        # Get the PK
        pk_column = self.get_primary_key_column(cursor, table_name)
        if pk_column:
            # SQLite doesn't actually give a name to the PK constraint,
            # so we invent one. This is fine, as the SQLite backend never
            # deletes PK constraints by name, as you can't delete constraints
            # in SQLite; we remake the table with a new PK instead.
            constraints["__primary__"] = {
                "columns": [pk_column],
                "primary_key": True,
                "unique": False,  # It's not actually a unique constraint.
                "foreign_key": None,
                "check": False,
                "index": False,
            }
        constraints.update(self._get_foreign_key_constraints(cursor, table_name))
        return constraints
| theo-l/django | django/db/backends/sqlite3/introspection.py | Python | bsd-3-clause | 19,222 |
"""
Enumerates active processes as seen under windows Task Manager on Win NT/2k/XP using PSAPI.dll
(new api for processes) and using ctypes.Use it as you please.
Based on information from http://support.microsoft.com/default.aspx?scid=KB;EN-US;Q175030&ID=KB;EN-US;Q175030
By Eric Koome
email ekoome@yahoo.com
license GPL
"""
from ctypes import *
# NOTE(review): Python 2 source (print statement) and Windows-only
# (ctypes.windll); it cannot run on Python 3 or non-Windows platforms as-is.
#PSAPI.DLL
psapi = windll.psapi
#Kernel32.DLL
kernel = windll.kernel32
def EnumProcesses():
    # Print the base module name of every process the caller may open,
    # using EnumProcesses/EnumProcessModules/GetModuleBaseNameA from PSAPI.
    # Room for up to 256 process ids; processes beyond that are silently
    # dropped since cbNeeded is not checked for truncation.
    arr = c_ulong * 256
    lpidProcess= arr()
    cb = sizeof(lpidProcess)
    cbNeeded = c_ulong()
    hModule = c_ulong()
    count = c_ulong()
    # Fixed 30-byte buffer for the module name; longer names are truncated.
    modname = c_buffer(30)
    PROCESS_QUERY_INFORMATION = 0x0400
    PROCESS_VM_READ = 0x0010
    #Call Enumprocesses to get hold of process id's
    psapi.EnumProcesses(byref(lpidProcess),
                        cb,
                        byref(cbNeeded))
    #Number of processes returned
    nReturned = cbNeeded.value/sizeof(c_ulong())
    pidProcess = [i for i in lpidProcess][:nReturned]
    for pid in pidProcess:
        #Get handle to the process based on PID
        hProcess = kernel.OpenProcess(PROCESS_QUERY_INFORMATION | PROCESS_VM_READ,
                                      False, pid)
        # OpenProcess returns NULL (falsy) for processes we lack access to.
        if hProcess:
            psapi.EnumProcessModules(hProcess, byref(hModule), sizeof(hModule), byref(count))
            psapi.GetModuleBaseNameA(hProcess, hModule.value, modname, sizeof(modname))
            # Drop NUL padding before printing the module name.
            print "".join([ i for i in modname if i != '\x00'])
            #-- Clean up
            # Zero the shared buffer so a shorter name next iteration doesn't
            # keep trailing bytes from the previous one.
            for i in range(modname._length_):
                modname[i]='\x00'
            kernel.CloseHandle(hProcess)
if __name__ == '__main__':
    EnumProcesses()
| ActiveState/code | recipes/Python/305279_getting_process/recipe-305279.py | Python | mit | 1,710 |
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    def forwards(self, orm):
        """Apply the migration: replace per-investigator/rapid-survey fields
        on Survey with sample_size and type, and drop the questions M2M table."""
        # Deleting field 'Survey.number_of_household_per_investigator'
        db.delete_column(u'survey_survey', 'number_of_household_per_investigator')
        # Deleting field 'Survey.rapid_survey'
        db.delete_column(u'survey_survey', 'rapid_survey')
        # Adding field 'Survey.sample_size'
        db.add_column(u'survey_survey', 'sample_size',
                      self.gf('django.db.models.fields.PositiveIntegerField')(default=10, max_length=2),
                      keep_default=False)
        # Adding field 'Survey.type'
        db.add_column(u'survey_survey', 'type',
                      self.gf('django.db.models.fields.BooleanField')(default=False),
                      keep_default=False)
        # Removing M2M table for field questions on 'Survey'
        db.delete_table('survey_survey_questions')
    def backwards(self, orm):
        """Reverse the migration: restore the dropped columns and the
        questions M2M table, and remove sample_size/type.

        NOTE(review): data in the dropped columns is not restored -- the
        reverse only recreates the schema with defaults.
        """
        # Adding field 'Survey.number_of_household_per_investigator'
        db.add_column(u'survey_survey', 'number_of_household_per_investigator',
                      self.gf('django.db.models.fields.PositiveIntegerField')(default=10, max_length=2),
                      keep_default=False)
        # Adding field 'Survey.rapid_survey'
        db.add_column(u'survey_survey', 'rapid_survey',
                      self.gf('django.db.models.fields.BooleanField')(default=False),
                      keep_default=False)
        # Deleting field 'Survey.sample_size'
        db.delete_column(u'survey_survey', 'sample_size')
        # Deleting field 'Survey.type'
        db.delete_column(u'survey_survey', 'type')
        # Adding M2M table for field questions on 'Survey'
        db.create_table(u'survey_survey_questions', (
            ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
            ('survey', models.ForeignKey(orm['survey.survey'], null=False)),
            ('question', models.ForeignKey(orm['survey.question'], null=False))
        ))
        db.create_unique(u'survey_survey_questions', ['survey_id', 'question_id'])
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'locations.location': {
'Meta': {'object_name': 'Location'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'parent_id': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'parent_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']", 'null': 'True', 'blank': 'True'}),
'point': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['locations.Point']", 'null': 'True', 'blank': 'True'}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'tree_parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': u"orm['locations.Location']"}),
'type': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'locations'", 'null': 'True', 'to': u"orm['locations.LocationType']"})
},
u'locations.locationtype': {
'Meta': {'object_name': 'LocationType'},
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50', 'primary_key': 'True'})
},
u'locations.point': {
'Meta': {'object_name': 'Point'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'latitude': ('django.db.models.fields.DecimalField', [], {'max_digits': '13', 'decimal_places': '10'}),
'longitude': ('django.db.models.fields.DecimalField', [], {'max_digits': '13', 'decimal_places': '10'})
},
'survey.answerrule': {
'Meta': {'object_name': 'AnswerRule'},
'action': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'condition': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'next_question': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'parent_question_rules'", 'null': 'True', 'to': "orm['survey.Question']"}),
'question': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'rule'", 'unique': 'True', 'null': 'True', 'to': "orm['survey.Question']"}),
'validate_with_option': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['survey.QuestionOption']", 'null': 'True'}),
'validate_with_question': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['survey.Question']", 'null': 'True'}),
'validate_with_value': ('django.db.models.fields.PositiveIntegerField', [], {'max_length': '2', 'null': 'True'})
},
'survey.backend': {
'Meta': {'object_name': 'Backend'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '20'})
},
'survey.batch': {
'Meta': {'object_name': 'Batch'},
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '300', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True'}),
'order': ('django.db.models.fields.PositiveIntegerField', [], {'max_length': '2', 'null': 'True'})
},
'survey.batchlocationstatus': {
'Meta': {'object_name': 'BatchLocationStatus'},
'batch': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'open_locations'", 'null': 'True', 'to': "orm['survey.Batch']"}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'location': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'open_batches'", 'null': 'True', 'to': u"orm['locations.Location']"}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'})
},
'survey.formula': {
'Meta': {'object_name': 'Formula'},
'batch': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'formula'", 'null': 'True', 'to': "orm['survey.Batch']"}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'denominator': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'as_denominator'", 'to': "orm['survey.Question']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '50'}),
'numerator': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'as_numerator'", 'to': "orm['survey.Question']"})
},
'survey.groupcondition': {
'Meta': {'object_name': 'GroupCondition'},
'attribute': ('django.db.models.fields.CharField', [], {'default': "'AGE'", 'max_length': '20'}),
'condition': ('django.db.models.fields.CharField', [], {'default': "'EQUALS'", 'max_length': '20'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'conditions'", 'symmetrical': 'False', 'to': "orm['survey.HouseholdMemberGroup']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'survey.household': {
'Meta': {'object_name': 'Household'},
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'investigator': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'households'", 'null': 'True', 'to': "orm['survey.Investigator']"}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'uid': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'unique': 'True'})
},
'survey.householdbatchcompletion': {
'Meta': {'object_name': 'HouseholdBatchCompletion'},
'batch': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'completed_households'", 'null': 'True', 'to': "orm['survey.Batch']"}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'household': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'completed_batches'", 'null': 'True', 'to': "orm['survey.Household']"}),
'householdmember': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'completed_member_batches'", 'null': 'True', 'to': "orm['survey.HouseholdMember']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'investigator': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'completed_batches'", 'null': 'True', 'to': "orm['survey.Investigator']"}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'})
},
'survey.householdhead': {
'Meta': {'object_name': 'HouseholdHead', '_ormbases': ['survey.HouseholdMember']},
u'householdmember_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['survey.HouseholdMember']", 'unique': 'True', 'primary_key': 'True'}),
'level_of_education': ('django.db.models.fields.CharField', [], {'default': "'Primary'", 'max_length': '100', 'null': 'True'}),
'occupation': ('django.db.models.fields.CharField', [], {'default': "'16'", 'max_length': '100'}),
'resident_since_month': ('django.db.models.fields.PositiveIntegerField', [], {'default': '5'}),
'resident_since_year': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1984'})
},
'survey.householdmember': {
'Meta': {'object_name': 'HouseholdMember'},
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'date_of_birth': ('django.db.models.fields.DateField', [], {}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '25', 'null': 'True', 'blank': 'True'}),
'household': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'household_member'", 'to': "orm['survey.Household']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'male': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'surname': ('django.db.models.fields.CharField', [], {'max_length': '25'})
},
'survey.householdmembergroup': {
'Meta': {'object_name': 'HouseholdMemberGroup'},
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'order': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'unique': 'True', 'max_length': '5'})
},
'survey.investigator': {
'Meta': {'object_name': 'Investigator'},
'age': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True'}),
'backend': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['survey.Backend']", 'null': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'default': "'English'", 'max_length': '100', 'null': 'True'}),
'level_of_education': ('django.db.models.fields.CharField', [], {'default': "'Primary'", 'max_length': '100', 'null': 'True'}),
'location': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['locations.Location']", 'null': 'True'}),
'male': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'mobile_number': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '10'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'weights': ('django.db.models.fields.FloatField', [], {'default': '0'})
},
'survey.locationautocomplete': {
'Meta': {'object_name': 'LocationAutoComplete'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'location': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['locations.Location']", 'null': 'True'}),
'text': ('django.db.models.fields.CharField', [], {'max_length': '500'})
},
'survey.multichoiceanswer': {
'Meta': {'object_name': 'MultiChoiceAnswer'},
'answer': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['survey.QuestionOption']", 'null': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'household': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'multichoiceanswer'", 'null': 'True', 'to': "orm['survey.Household']"}),
'householdmember': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'multichoiceanswer'", 'null': 'True', 'to': "orm['survey.HouseholdMember']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'investigator': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'multichoiceanswer'", 'null': 'True', 'to': "orm['survey.Investigator']"}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['survey.Question']", 'null': 'True'}),
'rule_applied': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['survey.AnswerRule']", 'null': 'True'})
},
'survey.numericalanswer': {
'Meta': {'object_name': 'NumericalAnswer'},
'answer': ('django.db.models.fields.PositiveIntegerField', [], {'max_length': '5', 'null': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'household': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'numericalanswer'", 'null': 'True', 'to': "orm['survey.Household']"}),
'householdmember': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'numericalanswer'", 'null': 'True', 'to': "orm['survey.HouseholdMember']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'investigator': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'numericalanswer'", 'null': 'True', 'to': "orm['survey.Investigator']"}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['survey.Question']", 'null': 'True'}),
'rule_applied': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['survey.AnswerRule']", 'null': 'True'})
},
'survey.question': {
'Meta': {'object_name': 'Question'},
'answer_type': ('django.db.models.fields.CharField', [], {'max_length': '15'}),
'batch': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'questions'", 'null': 'True', 'to': "orm['survey.Batch']"}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'question_group'", 'null': 'True', 'to': "orm['survey.HouseholdMemberGroup']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'identifier': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'order': ('django.db.models.fields.PositiveIntegerField', [], {'max_length': '2', 'null': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'children'", 'null': 'True', 'to': "orm['survey.Question']"}),
'subquestion': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'text': ('django.db.models.fields.CharField', [], {'max_length': '150'})
},
'survey.questionoption': {
'Meta': {'object_name': 'QuestionOption'},
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'order': ('django.db.models.fields.PositiveIntegerField', [], {'max_length': '2', 'null': 'True'}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'options'", 'null': 'True', 'to': "orm['survey.Question']"}),
'text': ('django.db.models.fields.CharField', [], {'max_length': '150'})
},
'survey.randomhouseholdselection': {
'Meta': {'object_name': 'RandomHouseHoldSelection'},
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'mobile_number': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '10'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'no_of_households': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True'}),
'selected_households': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'survey.survey': {
'Meta': {'object_name': 'Survey'},
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '300', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True'}),
'sample_size': ('django.db.models.fields.PositiveIntegerField', [], {'default': '10', 'max_length': '2'}),
'type': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'survey.textanswer': {
'Meta': {'object_name': 'TextAnswer'},
'answer': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'household': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'textanswer'", 'null': 'True', 'to': "orm['survey.Household']"}),
'householdmember': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'textanswer'", 'null': 'True', 'to': "orm['survey.HouseholdMember']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'investigator': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'textanswer'", 'null': 'True', 'to': "orm['survey.Investigator']"}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['survey.Question']", 'null': 'True'}),
'rule_applied': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['survey.AnswerRule']", 'null': 'True'})
},
'survey.userprofile': {
'Meta': {'object_name': 'UserProfile'},
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'mobile_number': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '10'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'userprofile'", 'unique': 'True', 'to': u"orm['auth.User']"})
}
}
complete_apps = ['survey'] | antsmc2/mics | survey/migrations/0063_auto__del_field_survey_number_of_household_per_investigator__del_field.py | Python | bsd-3-clause | 27,171 |
# ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
# <pep8 compliant>
# DXF types that are imported as mesh-like geometry.
_MESH_ENTITIES = frozenset(["POLYFACE", "POLYMESH", "MESH", "POINT", "3DFACE", "SOLID", "TRACE"])


def mesh_entity(entity):
    """Return True if *entity*'s dxftype is one of the mesh-like DXF types."""
    return mesh(entity.dxftype)


def mesh(typestr):
    """Return True if *typestr* names a mesh-like DXF type."""
    return typestr in _MESH_ENTITIES
# DXF types that are imported as curve geometry.
_CURVE_ENTITIES = frozenset(("POLYLINE", "POLYGON", "LWPOLYLINE", "SPLINE",
                             "CIRCLE", "ARC", "ELLIPSE", "LINE", "HELIX"))


def curve_entity(entity):
    """Return True if *entity*'s dxftype is a curve-like DXF type."""
    return curve(entity.dxftype)


def curve(typestr):
    """Return True if *typestr* names a curve-like DXF type."""
    return typestr in _CURVE_ENTITIES
# DXF types backed by ACIS/NURBS solid or surface data.
_NURBS_ENTITIES = frozenset(("BODY", "REGION", "PLANESURFACE", "SURFACE", "3DSOLID"))


def nurbs_entity(entity):
    """Return True if *entity*'s dxftype is a NURBS/solid DXF type."""
    return nurbs(entity.dxftype)


def nurbs(typestr):
    """Return True if *typestr* names a NURBS/solid DXF type."""
    return typestr in _NURBS_ENTITIES
# DXF types carrying text content.
_TEXT_ENTITIES = frozenset(("MTEXT", "TEXT"))


def text_entity(entity):
    """Return True if *entity* is a text-bearing DXF entity."""
    return text(entity.dxftype)


def text(typestr):
    """Return True if *typestr* names a text-bearing DXF type."""
    return typestr in _TEXT_ENTITIES
def insert_entity(entity):
    """Return True if *entity* is an INSERT (block reference) entity."""
    return insert(entity.dxftype)


def insert(typestr):
    """Return True if *typestr* is the INSERT DXF type."""
    return typestr == "INSERT"
def light_entity(entity):
    """Return True if *entity* is a LIGHT entity."""
    return light(entity.dxftype)


def light(typestr):
    """Return True if *typestr* is the LIGHT DXF type."""
    return typestr == "LIGHT"
def attrib_entity(entity):
    """Return True if *entity* is an attribute-definition (ATTDEF) entity.

    Bugfix: this function used to be named ``attrib`` and was immediately
    shadowed (and thus made unreachable) by the ``attrib(typestr)`` definition
    below. It is renamed to follow the module's ``*_entity`` convention;
    ``attrib(typestr)`` keeps its original name and behaviour.
    """
    return entity.dxftype == "ATTDEF"


def attrib(typestr):
    """Return True if *typestr* is the ATTDEF DXF type."""
    return typestr == "ATTDEF"
_2D_ENTITIES = frozenset(("CIRCLE", "ARC", "SOLID", "TRACE", "TEXT", "ATTRIB", "ATTDEF", "SHAPE",
"INSERT", "LWPOLYLINE", "HATCH", "IMAGE", "ELLIPSE"))
def _2D_entity(entity):
return entity.dxftype in _2D_ENTITIES or (entity.dxftype == "POLYGON" and entity.mode == "spline2d")
def varying_width(entity):
    """Return True if *entity* carries non-constant width data.

    ``entity.width`` (when present and iterable) holds one
    (start_width, end_width) pair per segment; the width is "varying" if any
    pair differs from the first one, or the first pair's start and end differ.
    """
    widths = getattr(entity, "width", None)
    if widths is None or not hasattr(widths, "__iter__"):
        return False
    return widths.count(widths[0]) != len(widths) or widths[0][0] != widths[0][1]


# DXF types that always get an own Blender object.
_SEPERATED_ENTITIES = frozenset(("POLYFACE", "POLYMESH", "LIGHT", "MTEXT", "TEXT", "INSERT", "BLOCK"))


def separated_entity(entity):
    """
    Indicates if the entity should be imported to one single Blender object or if it can be merged with other entities.
    This depends not only on the type of a dxf-entity but also whether the width values are varying or all the same.
    """
    return separated(entity.dxftype) or varying_width(entity)


def separated(typestr):
    """Return True if *typestr* always maps to its own Blender object."""
    return typestr in _SEPERATED_ENTITIES
# Separated types plus ATTDEF may never be merged with other entities.
_NOT_COMBINED_ENTITIES = frozenset(tuple(_SEPERATED_ENTITIES) + ("ATTDEF",))


def combined_entity(entity):
    """Return True if *entity* may be merged with others into one object."""
    if separated_entity(entity):
        return False
    return entity.dxftype != "ATTDEF"


def combined(typestr):
    """Return True if *typestr* may be merged with other entities."""
    return typestr not in _NOT_COMBINED_ENTITIES
| Passtechsoft/TPEAlpGen | blender/release/scripts/addons/io_import_dxf/dxfimport/is_.py | Python | gpl-3.0 | 3,391 |
#!usr/bin/python3
#-*- coding:utf-8 -*-
import random
def startGame():
    """Print the opening banner and the rules of the guessing game."""
    banner = "<<< 游戏开始 >>>"
    rules = "请输入1~100之间任意一个数字,一共有5次猜中的机会。"
    print(banner)
    print(rules)
def endGame(number,isGuess=False):
    """Print the closing message and the game-over banner.

    number  -- the secret number to reveal
    isGuess -- True if the player guessed correctly
    """
    if isGuess:
        outcome = "恭喜你,答对了,就是它:{0}".format(number)
    else:
        outcome = "很遗憾,机会已用完,正确答案是:{0}".format(number)
    print(outcome)
    print("<<< 游戏结束 >>>")
def guessNum():
    """Run one round of the guessing game: 5 tries for a number in 1..100.

    Invalid (non-digit) input does not consume a try.
    """
    secret = random.randint(1,100)
    tries_left = 5
    startGame()
    while tries_left > 0:
        raw = input("请输入:")
        if not raw.isdigit():
            print("输入有误")
            continue
        tries_left -= 1
        guess = int(raw)
        if guess == secret:
            endGame(secret,True)
            break
        if tries_left > 0:
            if guess > secret:
                print("数字大了,还有{0}次机会".format(tries_left))
            else:
                print("数字小了,还有{0}次机会".format(tries_left))
        if tries_left == 0:
            endGame(secret)
# Start a game only when the file is executed as a script (not on import).
if __name__ == '__main__':
    guessNum()
| pythonzhichan/DailyQuestion | chenfeng/question2.py | Python | mit | 1,237 |
import time as tm
from threading import Lock
import rospy
from rospy.rostime import Duration, Time
from python_qt_binding.QtCore import QTranslator, QObject
from helper_functions import prepare_number_for_representation, topic_statistics_state_to_string, \
ALIVE_TIMER_CALLBACK, MAXIMUM_OFFLINE_TIME, WARNING_TIMEOUT
class AbstractItem(QObject):
    """
    Provides a unified interface to access the items of the model.
    INTERNAL: WARNING! Whenever the key-values at the beginning are not set right, the oddest things may occur!
    """
    def __init__(self, logger, seuid, parent=None):
        """
        Initializes the AbstractItem.
        :param seuid: the seuid of the AbstractItem
        :type seuid: str
        :param logger: a logger where to log when special events occur
        :type logger: ModelLogger
        :param parent: the parent-item
        :type parent: AbstractItem
        """
        super(AbstractItem, self).__init__(parent)
        self._logger = logger
        # raw statistics store: attribute name -> list of samples
        self._data = {}
        self.counter = 0
        """
        _rated_data is dict containing the rated data. state, window_start and window_end are simply lists
        with the corresponding entries. Any other values typically is a list containing lists which however contain the
        values. This is equivalent to the representation in the RatedStatistics/Entity.
        """
        self._rated_data = {}
        # child items in the tree model
        self._child_items = []
        self.__parent = parent
        self.seuid = seuid
        # type identifier; overridden by subclasses
        self._type = "type"
        self.__data_attribute = "data"
        # history of aggregated states; the last entry is the current state
        self.__state = []
        # self.__last_update = Time.now()
        self.__creation_time = Time.now()
        self.marked = False
        # self.markation_date = Time.now()
        # every item tracks the measurement window bounds in raw and rated data
        self._add_data_list("window_start")
        self._add_data_list("window_stop")
        self._add_rated_data_list("window_start")
        self._add_rated_data_list("window_stop")
        self._length_of_data = 0
        self._length_of_rated_data = 0
        # guard concurrent access to _data / _rated_data
        self._data_lock = Lock()
        self._rated_data_lock = Lock()
        # keys expected in _rated_data; subclasses append their own attributes
        self._rated_attributes = []
        self._rated_attributes.append("alive.actual_value")
        self._rated_attributes.append("alive.expected_value")
        self._rated_attributes.append("alive.state")
        self.is_subscriber = False
def get_type(self):
"""
Returns the type of the item
:return: the type
:rtype: str
"""
return self._type
def get_seuid(self):
"""
Returns the seuid as a string.
:returns: seuid of the item
:rtype: str
"""
return self.seuid
def add_state(self, state):
"""
Used to simply add a state to the list of states.
"""
self.__state.append(state)
def set_state(self, state):
if len(self.__state) is not 0:
self.__state[-1] = state
else:
self.__state.append(state)
def get_state(self):
"""
Returns the state as a string.
:returns: state of the item
:rtype: str
"""
if self.__state:
return self.__state[-1]
return "unknown"
def _add_data_list(self, name):
"""
Adds keys to the data_list.
:param name: the key to be added
:type name: str
"""
self._data[name] = []
def _add_rated_data_list(self, name):
"""
Adds keys to the rated_data_list.
:param name: the key to be added
:type name: str
"""
self._rated_data[name] = []
def append_child(self, child):
"""
Append a child to the list of childs.
:param child: the child item
:type child: AbstractItem
"""
self._child_items.append(child)
def _update_current_state(self):
"""
This method updates the current state of the AbstractItem.
:raises TypeError: at the initialization, it's possible that last_states["state"] has no entries and a TypeError occures
"""
if self.get_state():
if self.get_state() is not "error":
last_states = self.get_rated_items_younger_than(Time.now() - (
Duration(secs=WARNING_TIMEOUT) if int(Duration(secs=5).to_sec()) <= int(Time.now().to_sec()) else Time(0)),
"state")
try:
for i in range(0, len(last_states["state"])):
if last_states["state"][i] is "error":
self.set_state("warning")
break
except TypeError:
return
def append_data(self, message):
"""
Appends data to the data of the AbstractItem.
:param message: the message to append
:type message: one of the different message types TopicStatistics, HostStatistics or NodeStatistics
:raises KeyError: if an entry is in the rated dictionary but not found in the message
"""
self._data_lock.acquire()
#self._alive_timer = rospy.Time.now()
for attribute in self._data:
try:
if attribute is "frequency":
self._data[attribute].append(message.delivered_msgs / (message.window_stop - message.window_start).to_sec())
elif attribute is "bandwidth":
self._data[attribute].append(message.traffic / (message.window_stop - message.window_start).to_sec())
else:
self._data[attribute].append(getattr(message, attribute))
except KeyError:
print("KeyError occurred when trying to access %s", attribute)
raise
self._length_of_data += 1
self._data_lock.release()
def update_rated_data(self, data):
"""
Appends data to the rated_data of the AbstractItem.
:param data: the data to append in key value form
:type data: RatedStatistics
:raises KeyError: if an entry is in the rated dictionary but not found in the message
"""
self._rated_data_lock.acquire()
self._rated_data["window_start"].append(data.window_start)
self._rated_data["window_stop"].append(data.window_stop)
last_state = self.get_state()
new_state = "unknown"
for element in data.rated_statistics_entity:
self._rated_data[element.statistic_type + ".actual_value"].append(element.actual_value)
self._rated_data[element.statistic_type + ".expected_value"].append(element.expected_value)
for i in range(0, len(element.state)):
state = topic_statistics_state_to_string(element, element.state[i])
self._rated_data[element.statistic_type + ".state"].append(state)
if (state is "low" or state is "high") and state is not "ok" and state is not "unkown":
new_state = "error"
elif state is "ok" and new_state is not "error":
new_state = "ok"
self.add_state(new_state)
self._update_current_state()
if new_state is "error" and last_state is not "error":
self._logger.log("error", Time.now(), self.seuid, self.get_erroneous_entries_for_log())
self._rated_data_lock.release()
def child_count(self, parent=None):
"""
Returns the number of children from the AbstractItem.
:returns: number of childs
:rtype: int
"""
return len(self._child_items)
def column_count(self):
"""
Returns the number of columns.
:returns: the number of columns
:rtype: int
"""
return 4
def get_childs(self, parent=None):
"""
Returns a list with all children.
:returns: list of children
:rtype: list
"""
return self._child_items
def get_child(self, row, parent=None):
"""
Returns the child at the position row.
:param row: the index of the row
:type row: int
:returns: the child at the position row
:rtype: AbstractItem
"""
return self._child_items[row]
def row(self, parent=None):
"""
Returns the index of the Item.
:returns: the index of the Item
:rtype: int
"""
if self.__parent:
return self.__parent.get_childs().index(self)
return 0
def get_amount_of_entries(self):
"""
Returns the amount of entries in the data part of the item
:return: amount of entries
:rtype: int
"""
return self._length_of_data
def get_latest_data(self, *args):
"""
Returns the latest dict of the data_list or the item of the dict with the given key.
:param kwargs: the keys to the dict
:type kwargs: str
:returns: dict of the item
:rtype: dict
:raises KeyError: if an element in args cannot be found in any of the dictionaries (data vs rated data) or attributes (namely name, type, data and state)
"""
self._data_lock.acquire()
return_dict = {}
if args:
for key in args:
if key is 'name':
return_dict['name'] = self.seuid
elif key is 'type':
return_dict['type'] = self._type
# elif key is 'data':
# return_dict['data'] = self.get_short_data()
elif key is 'state':
if len(self.__state) is not 0:
return_dict['state'] = self.get_state()
else:
return_dict["state"] = "unknown"
else:
if key in self._data:
if self._data[key]:
return_dict[key] = self._data[key][-1]
else:
if key == 'window_stop':
return_dict[key] = Time(0)
elif key in self.get_list_items():
return_dict[key] = [self.tr("Currently no value available")]
else:
return_dict[key] = self.tr("Currently no value available")
elif key in self._rated_data:
if self._rated_data[key]:
return_dict[key] = self._rated_data[key][-1]
else:
return_dict[key] = self.tr("Currently no value available")
# raise KeyError("item " + key + "was not found")
else:
return_dict['name'] = self.seuid
return_dict['type'] = self._type
# return_dict['data'] = self.get_short_data()
for entry in self._data:
if self._data[entry]:
return_dict[entry] = self._data[entry][-1]
else:
if entry == 'window_stop':
return_dict[entry] = Time(0)
elif entry in self.get_list_items():
return_dict[entry] = [self.tr("Currently no value available")]
else:
return_dict[entry] = self.tr("Currently no value available")
for entry in self._rated_data:
if entry == 'window_start' or entry == 'window_stop':
continue
if self._rated_data[entry]:
return_dict[entry] = self._rated_data[entry][-1]
else:
return_dict[entry] = self.tr("Currently no value available")
if len(self.__state) is not 0:
return_dict['state'] = self.get_state()
else:
return_dict['state'] = "unknown"
self._data_lock.release()
return return_dict
def parent(self):
"""
Returns the parent of this or None if there is no parent.
:returns: parent
:rtype: AbstractItem
"""
return self.__parent
    def get_items_older_than(self, time):
        """
        Returns all items which are older than time.
        Warning: Method assumes data is sorted by time if this is not true will return too few or too much data.
        WARNING: This method is only thread-safe if used via delete_items_older_than() otherwise the
        method may result in undetermined behaviour.

        NOTE(review): in the "everything is older" branch only the
        "window_stop" key is copied (return_values contains just that key at
        that point) -- verify whether all raw-data keys were intended there.

        :param time: the upper bound in seconds
        :type time: rospy.Time
        :returns: dict of lists with the data
        :rtype: dict
        """
        return_values = {}
        breakpoint = 0
        list_of_time = self._data["window_stop"]
        return_values["window_stop"] = []
        length = len(list_of_time)
        if length is not 0:
            if list_of_time[-1] < time:
                # every stored entry is older than *time*
                for key in return_values:
                    return_values[key] = self._data[key]
            else:
                # walk backwards to the newest entry that is still older
                i = length - 1
                while i > 0 and list_of_time[i] > time:
                    i -= 1
                breakpoint = i
                for key in self._data:
                    return_values[key] = self._data[key][0:breakpoint]
                # todo: currently this is not right for rated data... FIX!!! --> probably move this to another function!
                return_values["state"] = self.__state[breakpoint:length]
        # self._data_lock.release()
        return return_values
def delete_items_older_than(self, time):
"""
Deletes all items which are older than time.
:param time: the upper bound
:type time: rospy.Time
"""
self._data_lock.acquire()
self._rated_data_lock.acquire()
list_of_time = self._data["window_stop"]
if len(list_of_time) is not 0:
i = 0
entries_to_delete = self.get_items_older_than(time)
i += len(entries_to_delete["window_stop"])
for j in range(0, len(entries_to_delete["window_stop"])):
for value in self._data.values():
del value[0]
self._length_of_data -= i
self.delete_rated_items_older_than(time)
self._rated_data_lock.release()
self._data_lock.release()
    def get_rated_items_older_than(self, time):
        """
        Returns all items which are older than time.
        Warning: Method assumes data is sorted by time if this is not true will return too few or too much data.
        WARNING: This method is only thread-safe if used via delete_items_older_than() otherwise the
        method may result in undetermined behaviour.

        NOTE(review): as in get_items_older_than(), the "everything is older"
        branch only copies the "window_stop" key -- confirm this is intended.

        :param time: the upper bound in seconds
        :type time: rospy.Time
        :returns: dict of lists with the data
        :rtype: dict
        """
        return_values = {}
        breakpoint = 0
        list_of_time = self._rated_data["window_stop"]
        return_values["window_stop"] = []
        length = len(list_of_time)
        if length is not 0:
            if list_of_time[-1] < time:
                # every stored entry is older than *time*
                for key in return_values:
                    return_values[key] = self._rated_data[key]
            else:
                # walk backwards to the newest entry that is still older
                i = length - 1
                while i > 0 and list_of_time[i] > time:
                    i -= 1
                breakpoint = i
                for key in self._rated_data:
                    return_values[key] = self._rated_data[key][0:breakpoint]
                # the aggregated state history is sliced alongside
                return_values["state"] = self.__state[breakpoint:length]
        return return_values
def delete_rated_items_older_than(self, time):
"""
Deletes all items which are older than time.
:param time: the upper bound
:type time: rospy.Time
:raises IndexError: Because in most cases not all values are monitored, it is possible that a reated_data_value is empty
"""
list_of_time = self._rated_data["window_stop"]
if len(list_of_time) is not 0:
i = 0
entries_to_delete = self.get_rated_items_older_than(time)
i += len(entries_to_delete["window_stop"])
for j in range(0, len(entries_to_delete["window_stop"])):
for value in self._rated_data.values():
try:
del value[0]
except IndexError:
j += 1
self._length_of_rated_data -= i
    def get_items_younger_than(self, time, *args):
        """
        Returns all entries that are younger than time either in all keys of self._data or if args not empty in
        all key corresponding to args.
        Warning: Method assumes data is sorted by time if this is not true will return too few or too much data.

        :param time: the lower bound in seconds
        :type time: rospy.Time
        :param args: the keys to the dict
        :type args: str
        :returns: dict of lists
        :rtype: dict
        :raises KeyError: if an element in args cannot be found in any of the dictionaries (data vs rated data)
        """
        self._data_lock.acquire()
        return_values = {}
        if args:
            for key in args:
                return_values[key] = None
            if "window_stop" not in args:
                # the window bound is always returned as time reference
                return_values["window_stop"] = None
        else:
            return_values["window_stop"] = None
            for key in self._data:
                return_values[key] = None
        breakpoint = 0
        list_of_time = self._data["window_stop"]
        length = len(list_of_time)
        if length is not 0:
            if list_of_time[0] >= time:
                # every stored entry is young enough: copy the full lists
                for key in return_values:
                    try:
                        return_values[key] = self._data[key][:]
                    except KeyError:
                        print("Accessed key was: " + key + ". Available keys are: ")
                        print(self._data)
                        raise
            else:
                # search backwards for the newest entry older than *time*
                for i in range(length - 1, -1, -1):
                    if list_of_time[i] < time:
                        breakpoint = i + 1
                        for key in return_values:
                            if key in self._data:
                                return_values[key] = self._data[key][breakpoint:length]
                            else:
                                raise IndexError("IndexError! length of the list %s, accessed index %s. length of data"
                                                 " at given point %s, key is %s", length, i, len(self._data[key]), key)
                        break
        self._data_lock.release()
        return return_values
    def get_rated_items_younger_than(self, time, *args):
        """
        Returns all entries that are younger than time either in all keys of self._rated_data or if args not empty in
        all key corresponding to args.
        Warning: Method assumes data is sorted by time if this is not true will return too few or too much data.

        :param time: the lower bound in seconds
        :type time: rospy.Time
        :param args: the keys to the dict
        :type args: str
        :returns: dict of lists
        :rtype: dict
        :raises KeyError: if an element in args cannot be found in any of the dictionaries (data vs rated data)
        """
        return_values = {}
        if args:
            for key in args:
                return_values[key] = None
            if "window_stop" not in args:
                return_values["window_stop"] = None
        else:
            for key in self._rated_data:
                return_values[key] = None
            # the aggregated state history is returned as well
            return_values["state"] = None
        breakpoint = 0
        list_of_time = self._rated_data["window_stop"]
        length = len(list_of_time)
        if length is not 0:
            if list_of_time[0] >= time:
                for key in return_values:
                    # NOTE(review): identity comparison on strings; works for
                    # the interned 'state' literal here but == would be safer
                    if key is 'state':
                        return_values[key] = self.__state
                    else:
                        try:
                            return_values[key] = self._rated_data[key]
                        except KeyError:
                            print("Accessed key was: " + key + ". Available keys are: ")
                            print(self._rated_data)
                            raise
            else:
                # search backwards for the newest entry older than *time*
                for i in range(length - 1, -1, -1):
                    if list_of_time[i] < time:
                        breakpoint = i + 1
                        for key in return_values:
                            if key in self._rated_data:
                                return_values[key] = self._rated_data[key][breakpoint:length]
                            elif key is "state":
                                return_values[key] = self.__state[breakpoint:length]
                            else:
                                raise IndexError("IndexError! length of the list %s, accessed index %s. length of data"
                                                 " at given point %s, key is %s", length, i, len(self._data[key]), key)
                        break
        return return_values
    def execute_action(self, action):
        """
        Executes a action on the current item like stop or restart. Calls to this method should be redirected to the remote host on executed there.

        Default implementation is a no-op; subclasses that support remote
        actions override this.

        :param action: the action which should be executed
        :type action: RemoteAction
        """
        pass
    def get_detailed_data(self):
        """
        Returns detailed description of current state as html text. Has to be implemented in subclasses.

        :returns: detailed data as html
        :rtype: str
        :raises NotImplementedError: always, unless overridden by a subclass
        """
        raise NotImplementedError()
    def get_plotable_items(self):
        """
        Returns the plotable entries in the item. Has to be implemented in subclasses.

        :return: list of the items(str)
        :rtype: list
        :raises NotImplementedError: always, unless overridden by a subclass
        """
        raise NotImplementedError()
def get_erroneous_entries(self):
"""
Returns the erroneous entries as a html string
:returns: an html string containing the erroneous entries yet preformatted
:rtype: str
"""
self._data_lock.acquire()
content = "<p class=\"get_erroneous_entries\">"
return_values = {}
if self.__state:
if self.get_state() is not "ok" and self.get_state() is not "unknown":
if self._rated_data["alive.state"]:
if self._rated_data["alive.state"][-1] is "high" or self._rated_data["alive.state"][-1] is "low":
content += self.tr("alive actual_value:") + \
" <span class=\"erroneous_entry\">" + prepare_number_for_representation(
self._rated_data["alive.actual_value"][-1][0]) + "</span>" + \
"<br>"
content += self.tr("alive expected_value:") + \
" <span class=\"erroneous_entry\">" + str(
self._rated_data["alive.expected_value"][-1][0]) + "</span>" + \
"<br>"
content += self.tr("alive state:") + \
" <span class=\"erroneous_entry\">" + str(
self._rated_data["alive.state"][-1]) + "</span>" + "<br>"
for entry in self._attributes:
if self._rated_data[entry + ".state"]:
if self._rated_data[entry + ".state"][-1] is "high" or self._rated_data[entry + ".state"][
-1] is "low":
content += self.tr(entry) + \
self.tr(" actual_value:") + \
" <span class=\"erroneous_entry\">" + prepare_number_for_representation(
self._rated_data[entry + ".actual_value"][-1][0]) + "</span> " + \
self.tr(entry + "_unit") + "<br>"
content += self.tr(entry) + \
self.tr(" expected_value:") + \
" <span class=\"erroneous_entry\">" + str(
self._rated_data[entry + ".expected_value"][-1][0]) + "</span> " + \
self.tr(entry + "_unit") + "<br>"
content += self.tr(entry) + \
self.tr(" state:") + \
" <span class=\"erroneous_entry\">" + str(
self._rated_data[entry + ".state"][-1]) + "</span>" + "<br>"
content += "<br>"
content += "</p>"
self._data_lock.release()
return content
def can_execute_actions(self):
"""
This item cannot execute actions
:return: False
"""
return False
def get_short_data(self):
return self.get_erroneous_entries_for_log()
def get_erroneous_entries_for_log(self):
"""
Returns the erroneous entries for the log as a string
:returns: an string containing the erroneous entries yet preformatted
:rtype: str
"""
self._data_lock.acquire()
content = ""
if len(self._rated_data["window_stop"]) != 0:
if self.get_state() is not "ok" and self.get_state() is not "unknown":
if self._rated_data["alive.state"][-1] == "high" or self._rated_data["alive.state"][-1] == "low":
content += self.tr("alive") + ": " + str(self._rated_data["alive.actual_value"][-1][-1]) + ", "
for entry in self._attributes:
if self._rated_data[entry + ".state"]:
if self._rated_data[entry + ".state"][-1] == "high" or self._rated_data[entry + ".state"][-1] is "low":
content += self.tr(entry) + ": " + str(self._rated_data[entry + ".state"][-1]) + ", "
else:
return "Not sufficient rated data yet"
self._data_lock.release()
return content | ROS-PSE/arni | arni_gui/src/arni_gui/abstract_item.py | Python | bsd-2-clause | 26,534 |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012, Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Client side of the compute RPC API.
"""
from oslo.config import cfg
from nova import exception
from nova.openstack.common import jsonutils
from nova.openstack.common import rpc
import nova.openstack.common.rpc.proxy
# Configuration options of this module: the message topic that compute
# nodes subscribe to; registered with the global oslo.config object below.
rpcapi_opts = [
    cfg.StrOpt('compute_topic',
               default='compute',
               help='the topic compute nodes listen on'),
]

CONF = cfg.CONF
CONF.register_opts(rpcapi_opts)
def _compute_topic(topic, ctxt, host, instance):
    '''Get the topic to use for a message.

    :param topic: the base topic
    :param ctxt: request context
    :param host: explicit host to send the message to.
    :param instance: If an explicit host was not specified, use
                     instance['host']
    :returns: A topic string
    :raises NovaException: if neither an explicit host nor a host on the
                           instance is available
    '''
    target_host = host
    if not target_host:
        # fall back to the host recorded on the instance
        if not instance:
            raise exception.NovaException(_('No compute host specified'))
        target_host = instance['host']
        if not target_host:
            raise exception.NovaException(_('Unable to find host for '
                                            'Instance %s') % instance['uuid'])
    return rpc.queue_get_for(ctxt, topic, target_host)
class ComputeAPI(nova.openstack.common.rpc.proxy.RpcProxy):
    '''Client side of the compute rpc API.

    API version history:

        1.0 - Initial version.
        1.1 - Adds get_host_uptime()
        1.2 - Adds check_can_live_migrate_[destination|source]
        1.3 - Adds change_instance_metadata()
        1.4 - Remove instance_uuid, add instance argument to reboot_instance()
        1.5 - Remove instance_uuid, add instance argument to pause_instance(),
              unpause_instance()
        1.6 - Remove instance_uuid, add instance argument to suspend_instance()
        1.7 - Remove instance_uuid, add instance argument to
              get_console_output()
        1.8 - Remove instance_uuid, add instance argument to
              add_fixed_ip_to_instance()
        1.9 - Remove instance_uuid, add instance argument to attach_volume()
        1.10 - Remove instance_id, add instance argument to
               check_can_live_migrate_destination()
        1.11 - Remove instance_id, add instance argument to
               check_can_live_migrate_source()
        1.12 - Remove instance_uuid, add instance argument to confirm_resize()
        1.13 - Remove instance_uuid, add instance argument to detach_volume()
        1.14 - Remove instance_uuid, add instance argument to finish_resize()
        1.15 - Remove instance_uuid, add instance argument to
               finish_revert_resize()
        1.16 - Remove instance_uuid, add instance argument to get_diagnostics()
        1.17 - Remove instance_uuid, add instance argument to get_vnc_console()
        1.18 - Remove instance_uuid, add instance argument to inject_file()
        1.19 - Remove instance_uuid, add instance argument to
               inject_network_info()
        1.20 - Remove instance_id, add instance argument to
               post_live_migration_at_destination()
        1.21 - Remove instance_uuid, add instance argument to
               power_off_instance() and stop_instance()
        1.22 - Remove instance_uuid, add instance argument to
               power_on_instance() and start_instance()
        1.23 - Remove instance_id, add instance argument to
               pre_live_migration()
        1.24 - Remove instance_uuid, add instance argument to
               rebuild_instance()
        1.25 - Remove instance_uuid, add instance argument to
               remove_fixed_ip_from_instance()
        1.26 - Remove instance_id, add instance argument to
               remove_volume_connection()
        1.27 - Remove instance_uuid, add instance argument to
               rescue_instance()
        1.28 - Remove instance_uuid, add instance argument to reset_network()
        1.29 - Remove instance_uuid, add instance argument to resize_instance()
        1.30 - Remove instance_uuid, add instance argument to resume_instance()
        1.31 - Remove instance_uuid, add instance argument to revert_resize()
        1.32 - Remove instance_id, add instance argument to
               rollback_live_migration_at_destination()
        1.33 - Remove instance_uuid, add instance argument to
               set_admin_password()
        1.34 - Remove instance_uuid, add instance argument to
               snapshot_instance()
        1.35 - Remove instance_uuid, add instance argument to
               unrescue_instance()
        1.36 - Remove instance_uuid, add instance argument to
               change_instance_metadata()
        1.37 - Remove instance_uuid, add instance argument to
               terminate_instance()
        1.38 - Changes to prep_resize():
            - remove instance_uuid, add instance
            - remove instance_type_id, add instance_type
            - remove topic, it was unused
        1.39 - Remove instance_uuid, add instance argument to run_instance()
        1.40 - Remove instance_id, add instance argument to live_migration()
        1.41 - Adds refresh_instance_security_rules()
        1.42 - Add reservations arg to prep_resize(), resize_instance(),
               finish_resize(), confirm_resize(), revert_resize() and
               finish_revert_resize()
        1.43 - Add migrate_data to live_migration()
        1.44 - Adds reserve_block_device_name()

        2.0 - Remove 1.x backwards compat
        2.1 - Adds orig_sys_metadata to rebuild_instance()
        2.2 - Adds slave_info parameter to add_aggregate_host() and
              remove_aggregate_host()
        2.3 - Adds volume_id to reserve_block_device_name()
        2.4 - Add bdms to terminate_instance
        2.5 - Add block device and network info to reboot_instance
        2.6 - Remove migration_id, add migration to resize_instance
        2.7 - Remove migration_id, add migration to confirm_resize
        2.8 - Remove migration_id, add migration to finish_resize
        2.9 - Add publish_service_capabilities()
        2.10 - Adds filter_properties and request_spec to prep_resize()
        2.11 - Adds soft_delete_instance() and restore_instance()
        2.12 - Remove migration_id, add migration to revert_resize
        2.13 - Remove migration_id, add migration to finish_revert_resize
        2.14 - Remove aggregate_id, add aggregate to add_aggregate_host
        2.15 - Remove aggregate_id, add aggregate to remove_aggregate_host
        2.16 - Add instance_type to resize_instance
        2.17 - Add get_backdoor_port()
        2.18 - Add bdms to rebuild_instance
        2.19 - Add node to run_instance
        2.20 - Add node to prep_resize
        2.21 - Add migrate_data dict param to pre_live_migration()
        2.22 - Add recreate, on_shared_storage and host arguments to
               rebuild_instance()
        2.23 - Remove network_info from reboot_instance
        2.24 - Added get_spice_console method
        2.25 - Add attach_interface() and detach_interface()
        2.26 - Add validate_console_port to ensure the service connects to
               vnc on the correct port
        2.27 - Adds 'reservations' to terminate_instance() and
               soft_delete_instance()
    '''

    #
    # NOTE(russellb): This is the default minimum version that the server
    # (manager) side must implement unless otherwise specified using a version
    # argument to self.call()/cast()/etc. here.  It should be left as X.0 where
    # X is the current major API version (1.0, 2.0, ...).  For more information
    # about rpc API versioning, see the docs in
    # openstack/common/rpc/dispatcher.py.
    #
    BASE_RPC_API_VERSION = '2.0'

    def __init__(self):
        super(ComputeAPI, self).__init__(
            topic=CONF.compute_topic,
            default_version=self.BASE_RPC_API_VERSION)

    # NOTE(review): throughout this class, instance/migration/aggregate dicts
    # are converted with jsonutils.to_primitive() before being put on the
    # wire, and each message pins the lowest rpc API version (version=...)
    # that introduced its current signature.

    def add_aggregate_host(self, ctxt, aggregate, host_param, host,
                           slave_info=None):
        '''Add aggregate host.

        :param ctxt: request context
        :param aggregate_id:
        :param host_param: This value is placed in the message to be the 'host'
                           parameter for the remote method.
        :param host: This is the host to send the message to.
        '''
        aggregate_p = jsonutils.to_primitive(aggregate)
        self.cast(ctxt, self.make_msg('add_aggregate_host',
                                      aggregate=aggregate_p, host=host_param,
                                      slave_info=slave_info),
                  topic=_compute_topic(self.topic, ctxt, host, None),
                  version='2.14')

    def add_fixed_ip_to_instance(self, ctxt, instance, network_id):
        instance_p = jsonutils.to_primitive(instance)
        self.cast(ctxt, self.make_msg('add_fixed_ip_to_instance',
                                      instance=instance_p, network_id=network_id),
                  topic=_compute_topic(self.topic, ctxt, None, instance))

    def attach_interface(self, ctxt, instance, network_id, port_id,
                         requested_ip):
        instance_p = jsonutils.to_primitive(instance)
        return self.call(ctxt, self.make_msg('attach_interface',
                                             instance=instance_p, network_id=network_id,
                                             port_id=port_id, requested_ip=requested_ip),
                         topic=_compute_topic(self.topic, ctxt, None, instance),
                         version='2.25')

    def attach_volume(self, ctxt, instance, volume_id, mountpoint):
        instance_p = jsonutils.to_primitive(instance)
        self.cast(ctxt, self.make_msg('attach_volume',
                                      instance=instance_p, volume_id=volume_id,
                                      mountpoint=mountpoint),
                  topic=_compute_topic(self.topic, ctxt, None, instance))

    def change_instance_metadata(self, ctxt, instance, diff):
        instance_p = jsonutils.to_primitive(instance)
        self.cast(ctxt, self.make_msg('change_instance_metadata',
                                      instance=instance_p, diff=diff),
                  topic=_compute_topic(self.topic, ctxt, None, instance))

    def check_can_live_migrate_destination(self, ctxt, instance, destination,
                                           block_migration, disk_over_commit):
        instance_p = jsonutils.to_primitive(instance)
        return self.call(ctxt,
                         self.make_msg('check_can_live_migrate_destination',
                                       instance=instance_p,
                                       block_migration=block_migration,
                                       disk_over_commit=disk_over_commit),
                         topic=_compute_topic(self.topic,
                                              ctxt, destination, None))

    def check_can_live_migrate_source(self, ctxt, instance, dest_check_data):
        instance_p = jsonutils.to_primitive(instance)
        return self.call(ctxt, self.make_msg('check_can_live_migrate_source',
                                             instance=instance_p,
                                             dest_check_data=dest_check_data),
                         topic=_compute_topic(self.topic, ctxt, None,
                                              instance))

    def confirm_resize(self, ctxt, instance, migration, host,
                       reservations=None, cast=True):
        # cast=True fires-and-forgets; cast=False blocks for the result.
        rpc_method = self.cast if cast else self.call
        instance_p = jsonutils.to_primitive(instance)
        migration_p = jsonutils.to_primitive(migration)
        return rpc_method(ctxt, self.make_msg('confirm_resize',
                                              instance=instance_p, migration=migration_p,
                                              reservations=reservations),
                          topic=_compute_topic(self.topic, ctxt, host, instance),
                          version='2.7')

    def detach_interface(self, ctxt, instance, port_id):
        instance_p = jsonutils.to_primitive(instance)
        self.cast(ctxt, self.make_msg('detach_interface',
                                      instance=instance_p, port_id=port_id),
                  topic=_compute_topic(self.topic, ctxt, None, instance),
                  version='2.25')

    def detach_volume(self, ctxt, instance, volume_id):
        instance_p = jsonutils.to_primitive(instance)
        self.cast(ctxt, self.make_msg('detach_volume',
                                      instance=instance_p, volume_id=volume_id),
                  topic=_compute_topic(self.topic, ctxt, None, instance))

    def finish_resize(self, ctxt, instance, migration, image, disk_info,
                      host, reservations=None):
        instance_p = jsonutils.to_primitive(instance)
        migration_p = jsonutils.to_primitive(migration)
        self.cast(ctxt, self.make_msg('finish_resize',
                                      instance=instance_p, migration=migration_p,
                                      image=image, disk_info=disk_info, reservations=reservations),
                  topic=_compute_topic(self.topic, ctxt, host, None),
                  version='2.8')

    def finish_revert_resize(self, ctxt, instance, migration, host,
                             reservations=None):
        instance_p = jsonutils.to_primitive(instance)
        migration_p = jsonutils.to_primitive(migration)
        self.cast(ctxt, self.make_msg('finish_revert_resize',
                                      instance=instance_p, migration=migration_p,
                                      reservations=reservations),
                  topic=_compute_topic(self.topic, ctxt, host, None),
                  version='2.13')

    def get_console_output(self, ctxt, instance, tail_length):
        instance_p = jsonutils.to_primitive(instance)
        return self.call(ctxt, self.make_msg('get_console_output',
                                             instance=instance_p, tail_length=tail_length),
                         topic=_compute_topic(self.topic, ctxt, None, instance))

    def get_console_pool_info(self, ctxt, console_type, host):
        return self.call(ctxt, self.make_msg('get_console_pool_info',
                                             console_type=console_type),
                         topic=_compute_topic(self.topic, ctxt, host, None))

    def get_console_topic(self, ctxt, host):
        return self.call(ctxt, self.make_msg('get_console_topic'),
                         topic=_compute_topic(self.topic, ctxt, host, None))

    def get_diagnostics(self, ctxt, instance):
        instance_p = jsonutils.to_primitive(instance)
        return self.call(ctxt, self.make_msg('get_diagnostics',
                                             instance=instance_p),
                         topic=_compute_topic(self.topic, ctxt, None, instance))

    def get_vnc_console(self, ctxt, instance, console_type):
        instance_p = jsonutils.to_primitive(instance)
        return self.call(ctxt, self.make_msg('get_vnc_console',
                                             instance=instance_p, console_type=console_type),
                         topic=_compute_topic(self.topic, ctxt, None, instance))

    def get_spice_console(self, ctxt, instance, console_type):
        instance_p = jsonutils.to_primitive(instance)
        return self.call(ctxt, self.make_msg('get_spice_console',
                                             instance=instance_p, console_type=console_type),
                         topic=_compute_topic(self.topic, ctxt, None, instance),
                         version='2.24')

    def validate_console_port(self, ctxt, instance, port, console_type):
        instance_p = jsonutils.to_primitive(instance)
        return self.call(ctxt, self.make_msg('validate_console_port',
                                             instance=instance_p, port=port, console_type=console_type),
                         topic=_compute_topic(self.topic, ctxt, None, instance),
                         version='2.26')

    def host_maintenance_mode(self, ctxt, host_param, mode, host):
        '''Set host maintenance mode

        :param ctxt: request context
        :param host_param: This value is placed in the message to be the 'host'
                           parameter for the remote method.
        :param mode:
        :param host: This is the host to send the message to.
        '''
        return self.call(ctxt, self.make_msg('host_maintenance_mode',
                                             host=host_param, mode=mode),
                         topic=_compute_topic(self.topic, ctxt, host, None))

    def host_power_action(self, ctxt, action, host):
        topic = _compute_topic(self.topic, ctxt, host, None)
        return self.call(ctxt, self.make_msg('host_power_action',
                                             action=action), topic)

    def inject_file(self, ctxt, instance, path, file_contents):
        instance_p = jsonutils.to_primitive(instance)
        self.cast(ctxt, self.make_msg('inject_file',
                                      instance=instance_p, path=path,
                                      file_contents=file_contents),
                  topic=_compute_topic(self.topic, ctxt, None, instance))

    def inject_network_info(self, ctxt, instance):
        instance_p = jsonutils.to_primitive(instance)
        self.cast(ctxt, self.make_msg('inject_network_info',
                                      instance=instance_p),
                  topic=_compute_topic(self.topic, ctxt, None, instance))

    def live_migration(self, ctxt, instance, dest, block_migration, host,
                       migrate_data=None):
        instance_p = jsonutils.to_primitive(instance)
        self.cast(ctxt, self.make_msg('live_migration', instance=instance_p,
                                      dest=dest, block_migration=block_migration,
                                      migrate_data=migrate_data),
                  topic=_compute_topic(self.topic, ctxt, host, None))

    def pause_instance(self, ctxt, instance):
        instance_p = jsonutils.to_primitive(instance)
        self.cast(ctxt, self.make_msg('pause_instance',
                                      instance=instance_p),
                  topic=_compute_topic(self.topic, ctxt, None, instance))

    def post_live_migration_at_destination(self, ctxt, instance,
                                           block_migration, host):
        instance_p = jsonutils.to_primitive(instance)
        return self.call(ctxt,
                         self.make_msg('post_live_migration_at_destination',
                                       instance=instance_p, block_migration=block_migration),
                         _compute_topic(self.topic, ctxt, host, None))

    def power_off_instance(self, ctxt, instance):
        instance_p = jsonutils.to_primitive(instance)
        self.cast(ctxt, self.make_msg('power_off_instance',
                                      instance=instance_p),
                  topic=_compute_topic(self.topic, ctxt, None, instance))

    def power_on_instance(self, ctxt, instance):
        instance_p = jsonutils.to_primitive(instance)
        self.cast(ctxt, self.make_msg('power_on_instance',
                                      instance=instance_p),
                  topic=_compute_topic(self.topic, ctxt, None, instance))

    def pre_live_migration(self, ctxt, instance, block_migration, disk,
                           host, migrate_data=None):
        instance_p = jsonutils.to_primitive(instance)
        return self.call(ctxt, self.make_msg('pre_live_migration',
                                             instance=instance_p,
                                             block_migration=block_migration,
                                             disk=disk, migrate_data=migrate_data),
                         _compute_topic(self.topic, ctxt, host, None),
                         version='2.21')

    def prep_resize(self, ctxt, image, instance, instance_type, host,
                    reservations=None, request_spec=None,
                    filter_properties=None, node=None):
        instance_p = jsonutils.to_primitive(instance)
        instance_type_p = jsonutils.to_primitive(instance_type)
        self.cast(ctxt, self.make_msg('prep_resize',
                                      instance=instance_p, instance_type=instance_type_p,
                                      image=image, reservations=reservations,
                                      request_spec=request_spec,
                                      filter_properties=filter_properties,
                                      node=node),
                  _compute_topic(self.topic, ctxt, host, None),
                  version='2.20')

    def reboot_instance(self, ctxt, instance, block_device_info,
                        reboot_type):
        instance_p = jsonutils.to_primitive(instance)
        self.cast(ctxt, self.make_msg('reboot_instance',
                                      instance=instance_p,
                                      block_device_info=block_device_info,
                                      reboot_type=reboot_type),
                  topic=_compute_topic(self.topic, ctxt, None, instance),
                  version='2.23')

    def rebuild_instance(self, ctxt, instance, new_pass, injected_files,
                         image_ref, orig_image_ref, orig_sys_metadata, bdms,
                         recreate=False, on_shared_storage=False, host=None):
        instance_p = jsonutils.to_primitive(instance)
        bdms_p = jsonutils.to_primitive(bdms)
        self.cast(ctxt, self.make_msg('rebuild_instance',
                                      instance=instance_p, new_pass=new_pass,
                                      injected_files=injected_files, image_ref=image_ref,
                                      orig_image_ref=orig_image_ref,
                                      orig_sys_metadata=orig_sys_metadata, bdms=bdms_p,
                                      recreate=recreate, on_shared_storage=on_shared_storage),
                  topic=_compute_topic(self.topic, ctxt, host, instance),
                  version='2.22')

    def refresh_provider_fw_rules(self, ctxt, host):
        self.cast(ctxt, self.make_msg('refresh_provider_fw_rules'),
                  _compute_topic(self.topic, ctxt, host, None))

    def remove_aggregate_host(self, ctxt, aggregate, host_param, host,
                              slave_info=None):
        '''Remove aggregate host.

        :param ctxt: request context
        :param aggregate_id:
        :param host_param: This value is placed in the message to be the 'host'
                           parameter for the remote method.
        :param host: This is the host to send the message to.
        '''
        aggregate_p = jsonutils.to_primitive(aggregate)
        self.cast(ctxt, self.make_msg('remove_aggregate_host',
                                      aggregate=aggregate_p, host=host_param,
                                      slave_info=slave_info),
                  topic=_compute_topic(self.topic, ctxt, host, None),
                  version='2.15')

    def remove_fixed_ip_from_instance(self, ctxt, instance, address):
        instance_p = jsonutils.to_primitive(instance)
        self.cast(ctxt, self.make_msg('remove_fixed_ip_from_instance',
                                      instance=instance_p, address=address),
                  topic=_compute_topic(self.topic, ctxt, None, instance))

    def remove_volume_connection(self, ctxt, instance, volume_id, host):
        instance_p = jsonutils.to_primitive(instance)
        return self.call(ctxt, self.make_msg('remove_volume_connection',
                                             instance=instance_p, volume_id=volume_id),
                         topic=_compute_topic(self.topic, ctxt, host, None))

    def rescue_instance(self, ctxt, instance, rescue_password):
        instance_p = jsonutils.to_primitive(instance)
        self.cast(ctxt, self.make_msg('rescue_instance',
                                      instance=instance_p,
                                      rescue_password=rescue_password),
                  topic=_compute_topic(self.topic, ctxt, None, instance))

    def reset_network(self, ctxt, instance):
        instance_p = jsonutils.to_primitive(instance)
        self.cast(ctxt, self.make_msg('reset_network',
                                      instance=instance_p),
                  topic=_compute_topic(self.topic, ctxt, None, instance))

    def resize_instance(self, ctxt, instance, migration, image, instance_type,
                        reservations=None):
        topic = _compute_topic(self.topic, ctxt, None, instance)
        instance_p = jsonutils.to_primitive(instance)
        migration_p = jsonutils.to_primitive(migration)
        instance_type_p = jsonutils.to_primitive(instance_type)
        self.cast(ctxt, self.make_msg('resize_instance',
                                      instance=instance_p, migration=migration_p,
                                      image=image, reservations=reservations,
                                      instance_type=instance_type_p), topic,
                  version='2.16')

    def resume_instance(self, ctxt, instance):
        instance_p = jsonutils.to_primitive(instance)
        self.cast(ctxt, self.make_msg('resume_instance',
                                      instance=instance_p),
                  topic=_compute_topic(self.topic, ctxt, None, instance))

    def revert_resize(self, ctxt, instance, migration, host,
                      reservations=None):
        instance_p = jsonutils.to_primitive(instance)
        migration_p = jsonutils.to_primitive(migration)
        self.cast(ctxt, self.make_msg('revert_resize',
                                      instance=instance_p, migration=migration_p,
                                      reservations=reservations),
                  topic=_compute_topic(self.topic, ctxt, host, instance),
                  version='2.12')

    def rollback_live_migration_at_destination(self, ctxt, instance, host):
        instance_p = jsonutils.to_primitive(instance)
        self.cast(ctxt, self.make_msg('rollback_live_migration_at_destination',
                                      instance=instance_p),
                  topic=_compute_topic(self.topic, ctxt, host, None))

    def run_instance(self, ctxt, instance, host, request_spec,
                     filter_properties, requested_networks,
                     injected_files, admin_password,
                     is_first_time, node=None):
        instance_p = jsonutils.to_primitive(instance)
        self.cast(ctxt, self.make_msg('run_instance', instance=instance_p,
                                      request_spec=request_spec, filter_properties=filter_properties,
                                      requested_networks=requested_networks,
                                      injected_files=injected_files, admin_password=admin_password,
                                      is_first_time=is_first_time, node=node),
                  topic=_compute_topic(self.topic, ctxt, host, None),
                  version='2.19')

    def set_admin_password(self, ctxt, instance, new_pass):
        instance_p = jsonutils.to_primitive(instance)
        return self.call(ctxt, self.make_msg('set_admin_password',
                                             instance=instance_p, new_pass=new_pass),
                         topic=_compute_topic(self.topic, ctxt, None, instance))

    def set_host_enabled(self, ctxt, enabled, host):
        topic = _compute_topic(self.topic, ctxt, host, None)
        return self.call(ctxt, self.make_msg('set_host_enabled',
                                             enabled=enabled), topic)

    def get_host_uptime(self, ctxt, host):
        topic = _compute_topic(self.topic, ctxt, host, None)
        return self.call(ctxt, self.make_msg('get_host_uptime'), topic)

    def reserve_block_device_name(self, ctxt, instance, device, volume_id):
        instance_p = jsonutils.to_primitive(instance)
        return self.call(ctxt, self.make_msg('reserve_block_device_name',
                                             instance=instance_p, device=device, volume_id=volume_id),
                         topic=_compute_topic(self.topic, ctxt, None, instance),
                         version='2.3')

    def snapshot_instance(self, ctxt, instance, image_id, image_type,
                          backup_type=None, rotation=None):
        instance_p = jsonutils.to_primitive(instance)
        self.cast(ctxt, self.make_msg('snapshot_instance',
                                      instance=instance_p, image_id=image_id,
                                      image_type=image_type, backup_type=backup_type,
                                      rotation=rotation),
                  topic=_compute_topic(self.topic, ctxt, None, instance))

    def start_instance(self, ctxt, instance):
        instance_p = jsonutils.to_primitive(instance)
        self.cast(ctxt, self.make_msg('start_instance',
                                      instance=instance_p),
                  topic=_compute_topic(self.topic, ctxt, None, instance))

    def stop_instance(self, ctxt, instance, cast=True):
        # cast=True fires-and-forgets; cast=False blocks for the result.
        rpc_method = self.cast if cast else self.call
        instance_p = jsonutils.to_primitive(instance)
        return rpc_method(ctxt, self.make_msg('stop_instance',
                                              instance=instance_p),
                          topic=_compute_topic(self.topic, ctxt, None, instance))

    def suspend_instance(self, ctxt, instance):
        instance_p = jsonutils.to_primitive(instance)
        self.cast(ctxt, self.make_msg('suspend_instance',
                                      instance=instance_p),
                  topic=_compute_topic(self.topic, ctxt, None, instance))

    def terminate_instance(self, ctxt, instance, bdms, reservations=None):
        instance_p = jsonutils.to_primitive(instance)
        bdms_p = jsonutils.to_primitive(bdms)
        self.cast(ctxt, self.make_msg('terminate_instance',
                                      instance=instance_p, bdms=bdms_p,
                                      reservations=reservations),
                  topic=_compute_topic(self.topic, ctxt, None, instance),
                  version='2.27')

    def unpause_instance(self, ctxt, instance):
        instance_p = jsonutils.to_primitive(instance)
        self.cast(ctxt, self.make_msg('unpause_instance',
                                      instance=instance_p),
                  topic=_compute_topic(self.topic, ctxt, None, instance))

    def unrescue_instance(self, ctxt, instance):
        instance_p = jsonutils.to_primitive(instance)
        self.cast(ctxt, self.make_msg('unrescue_instance',
                                      instance=instance_p),
                  topic=_compute_topic(self.topic, ctxt, None, instance))

    def get_backdoor_port(self, ctxt, host):
        return self.call(ctxt, self.make_msg('get_backdoor_port'),
                         topic=_compute_topic(self.topic, ctxt, host, None))

    def publish_service_capabilities(self, ctxt):
        # Fanout: every compute service on the topic receives this message.
        self.fanout_cast(ctxt, self.make_msg('publish_service_capabilities'))

    def soft_delete_instance(self, ctxt, instance, reservations=None):
        instance_p = jsonutils.to_primitive(instance)
        self.cast(ctxt, self.make_msg('soft_delete_instance',
                                      instance=instance_p, reservations=reservations),
                  topic=_compute_topic(self.topic, ctxt, None, instance),
                  version='2.27')

    def restore_instance(self, ctxt, instance):
        instance_p = jsonutils.to_primitive(instance)
        self.cast(ctxt, self.make_msg('restore_instance',
                                      instance=instance_p),
                  topic=_compute_topic(self.topic, ctxt, None, instance))
class SecurityGroupAPI(nova.openstack.common.rpc.proxy.RpcProxy):
    '''Client side of the security group rpc API.

    API version history:

        1.0 - Initial version.
        1.41 - Adds refresh_instance_security_rules()

        2.0 - Remove 1.x backwards compat
    '''

    #
    # NOTE(russellb): This is the default minimum version that the server
    # (manager) side must implement unless otherwise specified using a version
    # argument to self.call()/cast()/etc. here.  It should be left as X.0 where
    # X is the current major API version (1.0, 2.0, ...).  For more information
    # about rpc API versioning, see the docs in
    # openstack/common/rpc/dispatcher.py.
    #
    BASE_RPC_API_VERSION = '2.0'

    def __init__(self):
        super(SecurityGroupAPI, self).__init__(
            topic=CONF.compute_topic,
            default_version=self.BASE_RPC_API_VERSION)

    def refresh_security_group_rules(self, ctxt, security_group_id, host):
        # Fire-and-forget: ask the compute node on `host` to reload the
        # rules of one security group.
        self.cast(ctxt, self.make_msg('refresh_security_group_rules',
                                      security_group_id=security_group_id),
                  topic=_compute_topic(self.topic, ctxt, host, None))

    def refresh_security_group_members(self, ctxt, security_group_id,
                                       host):
        self.cast(ctxt, self.make_msg('refresh_security_group_members',
                                      security_group_id=security_group_id),
                  topic=_compute_topic(self.topic, ctxt, host, None))

    def refresh_instance_security_rules(self, ctxt, host, instance):
        # NOTE(review): the message is routed to instance['host'], not the
        # `host` argument, which is otherwise unused here.
        instance_p = jsonutils.to_primitive(instance)
        self.cast(ctxt, self.make_msg('refresh_instance_security_rules',
                                      instance=instance_p),
                  topic=_compute_topic(self.topic, ctxt, instance['host'],
                                       instance))
| gspilio/nova | nova/compute/rpcapi.py | Python | apache-2.0 | 32,188 |
'''
logger_setup.py customizes the app's logging module. Each time an event is
logged the logger checks the level of the event (eg. debug, warning, info...).
If the event is above the approved threshold then it goes through. The handlers
do the same thing; they output to a file/shell if the event level is above their
threshold.
:Example:
>> from website import logger
>> logger.info('event', foo='bar')
**Levels**:
- logger.debug('For debugging purposes')
- logger.info('An event occured, for example a database update')
- logger.warning('Rare situation')
- logger.error('Something went wrong')
- logger.critical('Very very bad')
You can build a log incrementally as so:
>> log = logger.new(date='now')
>> log = log.bind(weather='rainy')
>> log.info('user logged in', user='John')
'''
import datetime as dt
import logging
from logging.handlers import RotatingFileHandler
import pytz
from flask import request, session
from structlog import wrap_logger
from structlog.processors import JSONRenderer
from app import app
# Set the logging level from application config (e.g. logging.DEBUG)
app.logger.setLevel(app.config['LOG_LEVEL'])

# Remove the default stdout handler Flask installs, so records only go to
# the handlers configured below
app.logger.removeHandler(app.logger.handlers[0])

# Timezone used to localize log record timestamps
TZ = pytz.timezone(app.config['TIMEZONE'])
def add_fields(_, level, event_dict):
    '''Structlog processor: add timestamp, level, and client IP to a record.

    :param _: the wrapped logger (unused)
    :param level: name of the log method that was called (e.g. 'info')
    :param event_dict: the record being built; mutated and returned
    :returns: event_dict with 'timestamp', 'level' and, inside a request
        context, 'ip_address' added
    '''
    now = dt.datetime.now()
    event_dict['timestamp'] = TZ.localize(now, True).astimezone(
        pytz.timezone(app.config['TIMEZONE'])).strftime(app.config['TIME_FMT'])
    event_dict['level'] = level
    if request:
        try:
            # Prefer the proxy-supplied client address, falling back to the
            # direct peer address.
            event_dict['ip_address'] = request.headers.get(
                'X-Forwarded-For', request.remote_addr)
        except Exception:
            # Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt
            # are never swallowed; logging must not break the request.
            event_dict['ip_address'] = 'unknown'
    return event_dict
# Add a rotating file handler when file logging is configured.
# NOTE(review): the guard checks LOG_FILE but the path comes from
# LOG_FILENAME — confirm both keys are defined together in the config.
if app.config.get('LOG_FILE'):
    file_handler = RotatingFileHandler(filename=app.config['LOG_FILENAME'],
                                       maxBytes=app.config['LOG_MAXBYTES'],
                                       backupCount=app.config['LOG_BACKUPS'],
                                       mode='a',
                                       encoding='utf-8')
    file_handler.setLevel(logging.DEBUG)
    app.logger.addHandler(file_handler)

# Wrap the application logger with structlog so every record is run through
# add_fields and rendered as a single JSON line
logger = wrap_logger(
    app.logger,
    processors=[
        add_fields,
        JSONRenderer(indent=None)
    ]
)
#!/usr/bin/env python
# to run the script with the correct version of uvcdat:
# source /usr/local/uvcdat/1.4.0/bin/setup_runtime.sh
import cdms2
from cdms2 import MV2
import numpy
import glob
import sys
import os
from os import path
import shutil
import re
import string
import random
import gc
import logging
import logging.handlers
# ____________________________
def usage():
    """Return the command-line help text for this script.

    Fixes two defects of the original help string: '-maxVar MAXVAL\\tn-model'
    was a mangled '-maxVar MAXVAL\\n\\t-model' (the -model option was never
    displayed on its own line), and MODELLIST said "a model name per name"
    instead of "a model name per line".
    """
    parts = [
        'SYNOPSIS:\n\tmake_ensemble_Mean_tzyx.py -v VARIABLE -path PATHIN -outdir PATHOUT [-tmpdir TMPPATH] [keepTmp] \n\t-minVar MINVAL -maxVar MAXVAL\n\t-model MODELLIST -startYear STARTYEAR -endYear ENDYEAR [-monthList MONTHLIST]\n\t[-regridFirst REGRIDBOOL] [-deleteGrid DELETEBOOL] -rcp RCP\n',
        '\tVARIABLE: a netcdf CMIP5 variable name, such as tos, zos, so, thetao;\n',
        '\tPATHIN: input data directory (does not support sub-directories);\n',
        '\tPATHOUT: output directory, created if does not exist;\n',
        '\tTMPPATH: temporary path. Default: a random pathname is defined at runtime, as a leaf of PATHOUT;\n',
        '\tkeepTmp: do not remove temporary directories;\n',
        '\tMINVAL: any value below minVar is considered as nodata;\n',
        '\tMAXVAL: any value above maxVar is considered as nodata;\n',
        '\tMODELLIST: a text file with a model name per line, the model name is used to select the files to process;\n',
        '\tSTARTYEAR: first year in the series of dates to process;\n',
        '\tENDYEAR: last year in the series of date to process;\n',
        '\tMONTHLIST: a comma separated list of month, such as "1,2,3" or "1,6,12". Values range is [1, 12].\n',
        'In first place, the programme will average model output per model (if a model output has several rXiYpZ ensemble, they are averaged. Then, the averages are averaged to produce the ensemble mean;\n',
        '\tREGRIDBOOL\n',
        '\tDELETEBOOL\n',
        '\tRCP a string corresponding to the RCP string to match in filenames.\n',
        'Averages are computed for each month of the year.\n',
    ]
    return ''.join(parts)
# ____________________________
def exitMessage(msg, exitCode='1'):
    """Log *msg* as critical, echo it with the usage text, and terminate.

    :param msg: description of the fatal condition
    :param exitCode: value handed to sys.exit; note the default is the
        string '1', not the integer 1
    """
    thisLogger.critical(msg)
    print msg
    print
    print usage()
    sys.exit(exitCode)
# ___________________________
def boolConvert(code):
    """Translate a textual flag into a boolean.

    '0' / 'false' / 'no' (any case) map to False, '1' / 'true' / 'yes'
    map to True; anything else falls through and yields None, exactly as
    the original chain of comparisons did.
    """
    token = code.lower()
    if token in ('0', 'false', 'no'):
        return False
    if token in ('1', 'true', 'yes'):
        return True
    return None
# ____________________________
def decodeMonthList(parameter):
    """Parse a comma separated month list such as "1,6,12" into ints.

    Exits the process with code 100 if any value is outside [1, 12].
    :param parameter: comma separated month numbers, e.g. "1,2,3"
    :returns: list of ints in [1, 12]
    """
    listMonth = [int(x) for x in parameter.strip().split(',')]
    for ii in listMonth:
        # hard exit rather than an exception: this is a CLI parameter check
        if ii<1 or ii>12:
            print 'month defined in the month list must be in [1, 12]. Exit(100).'
            sys.exit(100)
    return listMonth
# ____________________________
def id_generator(size=6, chars=string.ascii_uppercase + string.digits):
    """Return a random identifier of *size* symbols drawn from *chars*."""
    picks = [random.choice(chars) for _ in range(size)]
    return ''.join(picks)
#_____________________________
def flatten(nested):
    """Yield the leaves of an arbitrarily nested iterable, depth first.

    An element counts as a leaf when it has no __iter__ attribute;
    otherwise it is flattened recursively.
    """
    for element in nested:
        if not hasattr(element, '__iter__'):
            yield element
        else:
            for leaf in flatten(element):
                yield leaf
# ____________________________
# dict{date:[filename]}
def agregateDict(refDict, newDict):
    """Merge two {date: [filename, ...]} dictionaries.

    Keys present in both inputs get the concatenation (deep-flattened via
    flatten()) of both value lists.  If one input is None or empty, the
    other is returned unchanged; if both are None, None is returned.

    Fix: the original computed ``refDict.keys() + newDict.keys()``, which
    fails under Python 3 where dict.keys() returns a view that does not
    support '+'.  A set union gives the same key collection in both
    Python 2 and 3.
    """
    if refDict is None and newDict is None:
        return None
    # an empty or missing side contributes nothing: return the other as-is
    if not refDict:
        return newDict
    if not newDict:
        return refDict
    keyList = sorted(set(refDict) | set(newDict))
    result = {}
    for ikey in keyList:
        val = []
        if ikey in refDict:
            val.append(refDict[ikey])
        if ikey in newDict:
            val.append(newDict[ikey])
        result[ikey] = [x for x in flatten(val)]
    return result
# ____________________________
def make_levels():
    """Build the cdms2 vertical (depth) axis used for the ensemble means.

    :returns: a cdms2 level axis (id 'levels', units meters) with cell
        bounds at 0, the midpoints between consecutive levels, and the
        deepest level extended by half of the last spacing.
    """
    values = [3.3, 10, 20, 30, 50, 75, 100, 125, 150, 200, 250, 300, 400, 500]
    levelAxis = cdms2.createAxis(values)
    bounds = [0]
    for ii in range(len(values) - 1):
        bounds.append(0.5 * (values[ii] + values[ii + 1]))
    # BUG FIX: the original appended values[-1] + 0.5*(values[-1]+values[-2])
    # (= 950 m), i.e. half the *sum* of the two deepest levels; the upper
    # bound of the deepest layer should extend by half the last interval.
    bounds.append(values[-1] + 0.5 * (values[-1] - values[-2]))
    levelAxis.setBounds(numpy.array(bounds))
    levelAxis.id = 'levels'
    levelAxis.designateLevel(True)
    levelAxis.units = 'meters'
    return levelAxis
# ____________________________
def makeGrid(thisStep=0.5):
    """Build a global generic grid: lon 0..360, lat -85..85, step *thisStep*.

    :param thisStep: cell size in degrees for both axes (default 0.5)
    :returns: (genericGrid, latAxis, lonAxis, lat_bnds, lon_bnds)
    """
    xstart, xend, xstep = 0, 360, thisStep
    ystart, yend, ystep = -85, 85, thisStep

    # cell lower edges; centres sit half a step above each edge
    lonEdges = numpy.arange(xstart, xend, xstep)
    lon_bnds = numpy.array([[edge, edge + xstep] for edge in lonEdges])
    lon = numpy.array([edge + 0.5 * xstep for edge in lonEdges])

    latEdges = numpy.arange(ystart, yend, ystep)
    lat_bnds = numpy.array([[edge, edge + ystep] for edge in latEdges])
    lat = numpy.array([edge + 0.5 * ystep for edge in latEdges])

    latAxis = cdms2.createAxis(lat, lat_bnds)
    latAxis.designateLatitude(True)
    latAxis.units = 'degrees_north'
    latAxis.id = 'latitude'
    latAxis.long_name = 'Latitude'

    lonAxis = cdms2.createAxis(lon, lon_bnds)
    lonAxis.designateLongitude(True, xend)
    lonAxis.designateCircular(xend)
    lonAxis.units = 'degrees_east'
    lonAxis.id = 'longitude'
    lonAxis.long_name = 'Longitude'

    return ((cdms2.createGenericGrid(latAxis, lonAxis, lat_bnds, lon_bnds),
             latAxis, lonAxis, lat_bnds, lon_bnds))
# ____________________________
def do_cleanNodataLines(var, nodata):
    """Detect vertical lines made only of no-data in the first slice of *var*.

    Bug fixes vs. the original: the row product is now computed on the 0/1
    ``test`` mask (the original multiplied the raw data values, which never
    yields the expected 1), the loop iterates over rows (shape[0]) instead of
    columns, and the log message counts flagged columns instead of reporting
    the total column count.

    NOTE(review): like the original, this only *detects* the lines; the
    actual correction is still to be implemented. Returns *var* unchanged.
    """
    oneSlice = numpy.squeeze(var[:, :, 0])
    refShape = oneSlice.shape
    # 1./ transform the slice into a mask: 0=data, 1=nodata
    test = numpy.zeros(oneSlice.shape)
    wto1 = oneSlice >= nodata
    if wto1.any():
        test[wto1] = 1
    else:
        thisLogger.info('do_cleanNodataLines: no-data is missing from this dataset. Return.')
        return var
    # 2./ multiply mask rows together: a column that is all no-data yields 1
    line = numpy.array(test[0, :])  # copy first line
    for il in range(test.shape[0]):
        line = line * test[il, :]
    # 3./ any remaining 1 marks a column that contained only no-data
    wone = line == 1
    if wone.any():
        thisLogger.info('do_cleanNodataLines: found {0} lines to correct.'.format(int(wone.sum())))
    else:
        thisLogger.info('do_cleanNodataLines: found no line to correct.')
    return var
# ____________________________
# auto mask based on the principle that the mask does not change in-between dates
def autoMask(var, nodata):
    """Force pixels whose value never changes along the time axis to ``nodata``.

    Principle: the mask does not change in-between dates, so a pixel whose
    max-min spread across all dates is below 0.001 is considered a masked /
    fill pixel and is overwritten with ``nodata`` at every date.

    Generalized to any array of rank >= 2 whose leading axis is time (the
    original only handled rank 3 and 4 and raised NameError otherwise).

    Args:
        var: array-like with time as first dimension; modified in place.
        nodata: fill value written into time-invariant pixels.

    Returns:
        The (possibly modified) input array.
    """
    refshape = var.shape
    # collapse all non-time axes so each column is one pixel's time series
    tmp = numpy.reshape(var, (refshape[0], -1))
    wtnodata = (tmp.max(axis=0) - tmp.min(axis=0)) < 0.001
    if wtnodata.any():
        for ii in range(refshape[0]):
            tmp[ii, wtnodata] = nodata
        var[:] = numpy.reshape(tmp, refshape)
    return var
# ____________________________
def updateCounters(accum, N, mini, maxi, data, minVar, maxVar, nodata=1.e20):
    """Accumulate per-pixel sum, count, minimum and maximum across calls.

    Only values in [minVar, maxVar) are counted; counter cells still holding
    ``nodata`` are (re)initialised the first time a valid value shows up.

    Bug fix: the "replace an uninitialised maximum" mask tested ``mini``
    instead of ``maxi``; it now tests ``maxi``. The stray debug prints of the
    original were removed.

    Args:
        accum, N, mini, maxi: running counters (arrays), or None on the first
            call, in which case they are created from ``data``.
        data: flattened data array for the current date.
        minVar, maxVar: validity range.
        nodata: no-data marker used inside the counters.

    Returns:
        [accum, N, mini, maxi], updated.
    """
    if data is None:
        print('updateCounters: no data passed!')
        sys.exit()
        return [accum, N, mini, maxi]

    dim = numpy.squeeze(data[:]).shape
    if accum is None:
        # first call: counters start as all-nodata, extrema start as the data
        accum = numpy.zeros(dim) + nodata
        N = numpy.zeros(dim) + nodata
        mini = data.copy()
        maxi = data.copy()

    inRange = (data >= minVar) * (data < maxVar)
    wtadd = inRange * (accum < nodata)       # accumulate where a sum already exists
    wtreplace = inRange * (accum >= nodata)  # first valid value for this pixel
    wmax = (data >= maxi) * (data < nodata) * inRange
    wmaxReplace = (maxi >= nodata) * (data < nodata) * (data >= minVar)
    wmin = (data <= mini) * inRange * (maxi < nodata)
    wminReplace = (mini >= nodata) * (data < nodata) * (data >= minVar)

    if wtadd.any():
        accum[wtadd] = accum[wtadd] + data[wtadd]
        N[wtadd] = N[wtadd] + 1
    if wtreplace.any():
        accum[wtreplace] = data[wtreplace]
        N[wtreplace] = 1
    if wmax.any():
        maxi[wmax] = data[wmax]
    if wmin.any():
        mini[wmin] = data[wmin]
    if wmaxReplace.any():
        maxi[wmaxReplace] = data[wmaxReplace]
    if wminReplace.any():
        mini[wminReplace] = data[wminReplace]

    return [accum, N, mini, maxi]
# ___________________________
def do_regrid(variable, lstInFile, outdir, stringBefore, yearStart, yearEnd, topLevel=0, bottomLevel=1000):
    """Regrid each input file onto the common 0.5 deg grid (and, for 3-D
    ocean variables, onto the common vertical axis from make_levels()).

    Only dates in [yearStart, yearEnd] are kept. Output files go to
    ``outdir`` with ``stringBefore`` prepended to the original basename.

    Bug fix: the time-window selection used the module-level globals
    ``startYear``/``endYear`` instead of the ``yearStart``/``yearEnd``
    parameters; it now honours the parameters. Input files are also closed
    when a file is skipped.

    Returns:
        list of created file names (process exits if there is no input).
    """
    createdFiles = []
    nodata = 1.e20
    if lstInFile is None:
        thisLogger.info('do_regrid: No file to process. Return')
        sys.exit()
        return None
    if len(lstInFile) == 0:
        thisLogger.info('do_regrid: Found no file to process, consider revising search pattern. Return.')
        sys.exit()
        return None

    (newGrid, latAxis, lonAxis, lat_bnds, lon_bnds) = makeGrid()

    for fileName in lstInFile:
        thisLogger.info('Regriding file: {0}'.format(fileName))
        thisFile = cdms2.open(fileName)
        # to reduce output file size and memory use, collect start/end times
        # according to the internal file encoding
        startTime = [t for t in thisFile[variable].getTime().asComponentTime() if (t.year == yearStart)]
        endTime = [t for t in thisFile[variable].getTime().asComponentTime() if (t.year == yearEnd)]
        if len(startTime) == 0 and len(endTime) == 0:
            # this file does not contain useful data, next iteration
            thisFile.close()
            continue
        if len(startTime) == 0:
            # the first date is not in this file, process from the start
            startTime = thisFile[variable].getTime().asComponentTime()
        if len(endTime) == 0:
            # the last date is not in this file, process up to the end
            endTime = thisFile[variable].getTime().asComponentTime()
        thisLogger.info('start time = {0}-{1:02}'.format(startTime[0].year, startTime[0].month))
        thisLogger.info('end time = {0}-{1:02}'.format(endTime[-1].year, endTime[-1].month))

        if thisFile[variable].getLevel() is None:
            # some files do not have nodata set to 1.e20 (EC-EARTH), some have
            # masked values set to something else (0 and 1.e20, for MRI):
            # build our own mask by flagging time-invariant pixels
            tmp = cdms2.createVariable(thisFile[variable].subRegion(time=(startTime[0], endTime[-1], 'cc'), level=(topLevel, bottomLevel, 'cc')))
            data = autoMask(tmp, nodata)
            gc.collect()
        else:
            verticalGrid = make_levels()
            thisLogger.info('vertical bounds: {0} to {1}'.format(verticalGrid.getBounds().min(), verticalGrid.getBounds().max()))
            topLevel = verticalGrid.getBounds().min()
            bottomLevel = verticalGrid.getBounds().max()
            if thisFile[variable].getMissing() is None:
                tmp = cdms2.createVariable(thisFile[variable].subRegion(time=(startTime[0], endTime[-1], 'cc'), level=(topLevel, bottomLevel, 'cc')))
                data = autoMask(tmp, nodata)
            else:
                data = cdms2.createVariable(thisFile[variable].subRegion(time=(startTime[0], endTime[-1], 'cc'), level=(topLevel, bottomLevel, 'cc')))

        mask = numpy.array(data) < nodata
        if thisFile[variable].getLevel() is None:
            regrided = data.regrid(newGrid, missing=nodata, order=thisFile[variable].getOrder(), mask=mask)
        else:
            # horizontal first, then vertical interpolation
            tmp = data.regrid(newGrid, missing=nodata, order=thisFile[variable].getOrder(), mask=mask)
            regrided = tmp.pressureRegrid(verticalGrid, method='linear')
        regrided.id = variable

        outfilename = '{0}/{1}{2}'.format(outdir, stringBefore, os.path.basename(fileName))
        createdFiles.append(outfilename)
        if os.path.exists(outfilename): os.remove(outfilename)
        outfile = cdms2.open(outfilename, 'w')
        outfile.write(regrided)
        outfile.close()
        thisFile.close()

    return createdFiles
# ___________________________
# for a list of files: open all files, go from date 1 to date 2, compute avg for thisdate, save thisdate
# if a new grid is passed: regrid
def do_stats(variable, validYearList, monthList, lstInFile, outdir, stringBefore, outnameBase, minVar=-1.e20, maxVar=1.e20, doSTD=False):
    """For each (year, month), accumulate *variable* across all input files
    (the ensemble members) and save mean/count/min/max to one netCDF file.

    Args:
        variable: netCDF variable name to process.
        validYearList, monthList: dates to aggregate.
        lstInFile: list of input file names (or a single file name).
        outdir, stringBefore, outnameBase: used to build output names
            '<outdir>/<stringBefore>_<outnameBase>_<year><month>.nc'.
        minVar, maxVar: validity range forwarded to updateCounters.
        doSTD: placeholder; std computation is not implemented.

    Returns:
        dict {'<year><month:02>': output file name} (or None when there is
        nothing to process).
    """
    if validYearList is None:
        exitMessage('List of years to process is undefined, edit code. Exit 5.',5)

    createdFiles={}
    nodata=1.e20

    print 'in do_stats, variable={0}'.format(variable)

    if lstInFile is None:
        thisLogger.info('do_stats: No file to process. Return.')
        return
    if len(lstInFile)==0:
        thisLogger.info('do_stats: Found no file to process, consider revising search pattern.')
        return

    # open all files
    listFID=[]
    if type(lstInFile)==type([]):
        # NOTE(review): a 1-character first element is taken to mean that
        # lstInFile is a single file name split into characters; this
        # misfires for a genuine list whose first name has length 1 -- confirm.
        if len(lstInFile[0]) == 1: #the first element is a char, so there is only 1 file
            ifile = ''.join(lstInFile)
            thisLogger.debug('Case 2, lstInFile={0}'.format(ifile))
            if not os.path.isfile(ifile):
                exitMessage('File {0} not found. Exit 202'.format(lstInFile), 202)
            listFID.append(cdms2.open(ifile, 'r'))
        else:
            for ifile in lstInFile:
                thisLogger.debug('Case 1, ifile={0}'.format(ifile))
                if not os.path.isfile(ifile):
                    exitMessage('File {0} not found. Exit 201.'.format(ifile), 201)
                listFID.append(cdms2.open(ifile, 'r'))
    else:
        exitMessage('Unknown type for object lstInFile. Exit(200)',200)

    # go through the list of dates, compute ensemble average
    for iyear in validYearList:
        thisLogger.info('Processing year {0}'.format(iyear))
        for imonth in monthList:
            # reset the counters for this (year, month)
            accumVar=None
            accumN=None
            mini=None
            maxi=None
            refGrid=None
            dims=None
            units=None
            for ifile in listFID:
                print 'in do_stats, ifile loop,'
                print 'ready to get data for time: ',iyear, imonth
                if ifile[variable].getTime() is None: # no time reference
                    print 'no time found'
                    if refGrid is None:
                        # remember grid/shape from the first file seen
                        print 'no refgrid'
                        refGrid = ifile[variable].getGrid()
                        # axis=ifile[variable].getAxisList(omit='time')
                        dims=numpy.squeeze(ifile[variable]).shape
                    [accumVar, accumN, mini, maxi] = updateCounters( accumVar, accumN, mini, maxi,
                        numpy.array(ifile[variable]).ravel(),
                        minVar, maxVar, nodata)
                else: # we can do some time slice
                    print "GETTTING TIME COMPONENET for ",iyear, imonth
                    # select the (single) date matching the current year/month
                    thisTime = [ii for ii in ifile[variable].getTime().asComponentTime() if (ii.year==iyear and ii.month==imonth)]
                    for ii in ifile[variable].getTime().asComponentTime():
                        print ii, ii.year , ii.month
                    if len(thisTime)==1: # it must be one
                        if refGrid is None:
                            refGrid = ifile[variable].getGrid()
                            dims = numpy.squeeze(ifile[variable].subRegion(time=thisTime[0])).shape
                            units= ifile[variable].units
                        [accumVar, accumN, mini, maxi]= updateCounters(accumVar, accumN, mini, maxi,
                            numpy.array( ifile[variable].subRegion(time=thisTime[0])).ravel(),
                            minVar, maxVar, nodata )
                    else:
                        # more than one date for this year/month: abort
                        print ifile[variable].getTime().asComponentTime()
                        print '================'
                        print monthList
                        # NOTE(review): thisTime is a list, so thisTime.shape
                        # below would raise AttributeError before exitMessage runs.
                        print 'iyear',iyear, ' imonth',imonth, len(thisTime), thisTime.shape
                        for ii in ifile[variable].getTime().asComponentTime():
                            print '::',ii, ii.year, ii.month
                        print '________'
                        exitMessage('Found more that 1 date!!! Stop processing')
                    units= ifile[variable].units
            # compute average
            # it can happen that there is no data to process: if the input files for the current model has an ending date before the current date
            # in this case, accumN is None: do not save stats, and do not add a file name in createdFiles
            # compute average
            if accumN is not None:
                print 'accumN is not None'
                wtdivide = (accumN < nodata) * (accumN > 0)
                if wtdivide.any():
                    accumVar[wtdivide] = accumVar[wtdivide] / accumN[wtdivide]
                # compute std
                if doSTD:
                    thisLogger.info('Computing std: to be implemented')
                # create and save variables
                meanVar = cdms2.createVariable( accumVar.reshape(dims), typecode='f', id='mean_{0}'.format(variable), fill_value=nodata, attributes=dict(long_name='mean', units=units) )
                meanVar.setGrid(refGrid)
                counter = cdms2.createVariable(accumN.reshape(dims), typecode='i', id='count', fill_value=nodata, attributes=dict(long_name='count', units='None') )
                counter.setGrid(refGrid)
                miniVar = cdms2.createVariable(mini.reshape(dims), typecode='f', id='minimum', fill_value=nodata, attributes=dict(long_name='minimum', units=units) )
                miniVar.setGrid(refGrid)
                maxiVar = cdms2.createVariable(maxi.reshape(dims), typecode='f', id='maximum', fill_value=nodata, attributes=dict(long_name='maximum', units=units) )
                maxiVar.setGrid(refGrid)
                outfilename = '{0}/{1}_{2}_{3}{4:02}.nc'.format(outdir, stringBefore, outnameBase, iyear, imonth )
                if os.path.exists(outfilename): os.remove(outfilename)
                thisLogger.debug('Saving stats to file {0}'.format(outfilename))
                outfile = cdms2.open(outfilename, 'w')
                outfile.write(meanVar)
                outfile.write(counter)
                outfile.write(miniVar)
                outfile.write(maxiVar)
                outfile.close()
                createdFiles['{0}{1:02}'.format(iyear,imonth)] = outfilename
                print 'in do_stats, variable={0}, adding in createdFiles {1}'.format(variable, outfilename)
            else:
                # NOTE(review): sys.exit() here kills the whole run as soon as
                # one (year, month) has no data -- confirm this is intended.
                print 'accumN is None, nothing to do'
                sys.exit()

    # close input files
    for ii in listFID: ii.close()

    print 'in do_stats, variable={0}, returning with'.format(variable)
    print createdFiles
    return(createdFiles)
#___________________________
if __name__=="__main__":
variable = None
indir = None
tmpdir = None
outdir = None
modelListFile=None
startYear=None
endYear=None
monthList=range(1,13)
regridFirst = True
deleteRegrid = False
modelStat = True
rcp=None
logFile='{0}.log'.format(__file__)
minVar=-1.e20
maxVar=1.e20
topLevel=0
bottomLevel=300
deleteTmp=True
ii = 1
while ii < len(sys.argv):
arg = sys.argv[ii].lower()
if arg == '-path':
ii = ii + 1
indir = sys.argv[ii]
elif arg == '-outdir':
ii = ii + 1
outdir = sys.argv[ii]
elif arg == '-tmpdir':
ii = ii + 1
tmpdir = sys.argv[ii]
elif arg == '-keeptmp':
deleteTmp=False
elif arg == '-v':
ii = ii + 1
variable = sys.argv[ii]
elif arg=='-minVar':
ii = ii + 1
minVar = float(sys.argv[ii])
elif arg == '-maxVar':
ii = ii + 1
maxVar = float(sys.argv[ii])
elif arg =='-modellist':
ii = ii + 1
modelListFile = sys.argv[ii]
elif arg=='-startyear':
ii = ii + 1
startYear = int(sys.argv[ii])
elif arg=='-endyear':
ii = ii + 1
endYear = int(sys.argv[ii]) + 1
elif arg=='-monthlist':
ii = ii + 1
monthList=decodeMonthList(sys.argv[ii])
elif arg=='-regridfirst':
ii=ii+1
regridFirst=boolConvert(sys.argv[ii])
elif arg=='-deleteregrid':
ii = ii + 1
deleteRegrid = boolConvert(sys.argv[ii])
elif arg=='-rcp':
ii=ii+1
rcp=sys.argv[ii]
elif arg=='-log':
ii = ii + 1
logFile = sys.argv[ii]
ii = ii + 1
logging.basicConfig(format='%(asctime)s %(levelname)-8s %(message)s', datefmt='%m/%d/%Y %I:%M:%S %p')
thisLogger = logging.getLogger('MyLogger')
thisLogger.setLevel(logging.DEBUG)
handler = logging.handlers.RotatingFileHandler(logFile, maxBytes=1024*500, backupCount=5)
thisLogger.addHandler(handler)
if variable is None:
exitMessage('Missing variable name, use option -v. Exit(1).', 1)
if indir is None:
exitMessage('Missing input directory, use option -path. Exit(2).',2)
if outdir is None:
exitMessage('Missing output directory, use option -outdir. Exit(3).', 3)
if modelListFile is None:
exitMessage('Missing a model list file, use option -modellist. Exit(12).',12)
if startYear is None:
exitMessage('Please define a starting year, use option -startyear. Exit(13).',13)
if endYear is None:
exitMessage('Please define an ending year, use option -endyear. Exit(14).',14)
if rcp is None:
exitMessage('Please define an rcp, use option -rcp. Exit(15).',15)
if tmpdir is None:
tmpdir = '{0}/tmp_{1}'.format(outdir, id_generator() )
if not os.path.exists(outdir): os.makedirs(outdir)
if not os.path.exists(tmpdir): os.makedirs(tmpdir)
# for netcdf3: set flag to 0
cdms2.setNetcdfShuffleFlag(1)
cdms2.setNetcdfDeflateFlag(1)
cdms2.setNetcdfDeflateLevelFlag(3)
# models list
modelList=[]
try:
with open(modelListFile,"r") as f:
for textLine in f:
thisStr = textLine.replace(" ","").replace('\n','')
if not (thisStr==""):
modelList.append( thisStr )
except IOError as e:
exitMessage('I/O Error {1} while processing text file {0}:{2}. Exit(10).'.format(modelListFile, e.errno, e.strerror), 10)
except:
exitMessage('Unexpected error while processing text file {0}. Exit(11).'.format(modeListFile), 11)
validYearList=range(startYear, endYear)
if len(validYearList)==0:
exitMessage('No date to process, startYear={0}, endYear{1}. Exit(20).'.format(startYear, endYear),20)
processedFiles=None
for thisModel in modelList:
thisLogger.info('Model {0}'.format(thisModel))
pattern=re.compile('{0}_{1}_{2}_{3}_{4}_{5}.nc'.format(variable, 'Omon', thisModel, rcp, 'r.*i.*p.*', '.*') )
print 'indir: {0}'.format(indir)
print 'expression {0}_{1}_{2}_{3}_{4}_{5}.nc'.format(variable, 'Omon', thisModel, rcp, 'r.*i.*p.*', '.*')
lstInFile=[f for f in glob.glob('{0}/*.nc'.format(indir)) if (os.stat(f).st_size and pattern.match(os.path.basename(f) ) ) ]
if regridFirst:
print 'Calling do_regrid with lstInFile=', lstInFile
regridedFiles = do_regrid(variable, lstInFile, tmpdir, 'regrid_', startYear, endYear, topLevel, bottomLevel)
else:
regridedFiles = lstInFile
thisModelFiles = do_stats(variable, validYearList, monthList, regridedFiles, tmpdir, 'stats', '{0}_{1}_{2}'.format(variable,thisModel, rcp), minVar, maxVar )
print 'accumulating thisModelFiles ',thisModelFiles
if deleteRegrid:
for ii in regridedFiles: os.remove(ii)
processedFiles = agregateDict(processedFiles, thisModelFiles)
gc.collect()
if len(modelList)==1:
thisLogger.info('>>> 1 model in input: job finished after first averaging round.')
elif len(processedFiles)==0:
thisLogger.info('>>>> no data to process')
else:
thisLogger.info( '>> Averaging models averages, for each date')
for idate in processedFiles: # iteration over keys
thisYear = int(idate[0:4])
thisMonth= int(idate[4:6])
thisLogger.info('>> Averaging date {0}'.format(idate))
listFiles = [x for x in flatten(processedFiles[idate])]
thisLogger.info('>> averaging files '.format(listFiles))
returnedList = do_stats('mean_{0}'.format(variable), [thisYear], [thisMonth], listFiles, outdir, 'ensemble', '{0}_{1}'.format(variable, rcp) , minVar, maxVar)
gc.collect()
# delete tmpdir
if deleteTmp:
shutil.rmtree(tmpdir)
# end of file
| BrunoCombal/climate | make_ensembleMean_tzyx.py | Python | gpl-2.0 | 26,188 |
# -*- coding: utf-8 -*-
#
# Copyright 2015 VNG Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import unittest
from hdfs import InsecureClient
from hdfs.ext.kerberos import KerberosClient
from nose.plugins.attrib import attr
from helpers import with_config
from webhdfs_minicluster import WebHdfsMiniClusterTestCase
from contrib.hdfs_test import HdfsTargetTestMixin
from luigi.contrib.hdfs import WebHdfsClient
@attr('minicluster')
class WebHdfsTargetTest(WebHdfsMiniClusterTestCase, HdfsTargetTestMixin):
    """Runs the shared HDFS target tests against a WebHDFS-backed client."""

    def run(self, result=None):
        # Force every test of this case to use the webhdfs client, pointed
        # at the mini-cluster's WebHDFS port.
        config = {
            'hdfs': {'client': 'webhdfs'},
            'webhdfs': {'port': str(self.cluster.webhdfs_port)},
        }
        with_config(config)(super(WebHdfsTargetTest, self).run)(result)

    def test_actually_using_webhdfs(self):
        """Sanity check: the target's filesystem really is a WebHdfsClient."""
        target_fs = self.create_target().fs
        self.assertTrue(isinstance(target_fs, WebHdfsClient))

    # Inherited tests that WebHdfsClient cannot pass yet are disabled by
    # shadowing them with None (as the WebHdfsClient docs note, it is not
    # feature complete).
    test_slow_exists = None
    test_glob_exists = None
    test_with_close = None
    test_with_exception = None

    # This one fails when run together with the whole test suite
    test_write_cleanup_no_close = None
@attr('apache')
class TestWebHdfsClient(unittest.TestCase):
    """Checks that the configured client_type yields the right hdfs client."""

    @with_config({'webhdfs': {'client_type': 'insecure'}})
    def test_insecure_client_type(self):
        underlying = WebHdfsClient(host='localhost').client
        self.assertIsInstance(underlying, InsecureClient)

    @with_config({'webhdfs': {'client_type': 'kerberos'}})
    def test_kerberos_client_type(self):
        underlying = WebHdfsClient(host='localhost').client
        self.assertIsInstance(underlying, KerberosClient)
| rayrrr/luigi | test/contrib/hdfs/webhdfs_client_test.py | Python | apache-2.0 | 2,227 |
"""
Tar archive parser.
Author: Victor Stinner
"""
from resources.lib.externals.hachoir.hachoir_parser import Parser
from resources.lib.externals.hachoir.hachoir_core.field import (FieldSet,
Enum, UInt8, SubFile, String, NullBytes)
from resources.lib.externals.hachoir.hachoir_core.tools import humanFilesize, paddingSize, timestampUNIX
from resources.lib.externals.hachoir.hachoir_core.endian import BIG_ENDIAN
import re
class FileEntry(FieldSet):
    """One 512-byte (u)tar header block, optionally followed by the file
    content padded with NUL bytes to the next 512-byte boundary."""

    # raw "type" byte of the header -> human readable description
    type_name = {
        # 48 is "0", 49 is "1", ...
        0: u"Normal disk file (old format)",
        48: u"Normal disk file",
        49: u"Link to previously dumped file",
        50: u"Symbolic link",
        51: u"Character special file",
        52: u"Block special file",
        53: u"Directory",
        54: u"FIFO special file",
        55: u"Contiguous file"
    }

    def getOctal(self, name):
        """Decode the octal text stored in field *name* into an int (0 on error)."""
        return self.octal2int(self[name].value)

    def getDatetime(self):
        """
        Create modification date as Unicode string, may raise ValueError.
        """
        timestamp = self.getOctal("mtime")
        return timestampUNIX(timestamp)

    def createFields(self):
        # fixed ustar header layout: field offsets and sizes are dictated by
        # the tar format, do not reorder
        yield String(self, "name", 100, "Name", strip="\0", charset="ISO-8859-1")
        yield String(self, "mode", 8, "Mode", strip=" \0", charset="ASCII")
        yield String(self, "uid", 8, "User ID", strip=" \0", charset="ASCII")
        yield String(self, "gid", 8, "Group ID", strip=" \0", charset="ASCII")
        yield String(self, "size", 12, "Size", strip=" \0", charset="ASCII")
        yield String(self, "mtime", 12, "Modification time", strip=" \0", charset="ASCII")
        yield String(self, "check_sum", 8, "Check sum", strip=" \0", charset="ASCII")
        yield Enum(UInt8(self, "type", "Type"), self.type_name)
        yield String(self, "lname", 100, "Link name", strip=" \0", charset="ISO-8859-1")
        yield String(self, "magic", 8, "Magic", strip=" \0", charset="ASCII")
        yield String(self, "uname", 32, "User name", strip=" \0", charset="ISO-8859-1")
        yield String(self, "gname", 32, "Group name", strip=" \0", charset="ISO-8859-1")
        yield String(self, "devmajor", 8, "Dev major", strip=" \0", charset="ASCII")
        yield String(self, "devminor", 8, "Dev minor", strip=" \0", charset="ASCII")
        yield NullBytes(self, "padding", 167, "Padding (zero)")

        filesize = self.getOctal("size")
        if filesize:
            yield SubFile(self, "content", filesize, filename=self["name"].value)

        # content is padded with NUL bytes up to the next 512-byte boundary
        size = paddingSize(self.current_size//8, 512)
        if size:
            yield NullBytes(self, "padding_end", size, "Padding (512 align)")

    def convertOctal(self, chunk):
        return self.octal2int(chunk.value)

    def isEmpty(self):
        # an all-zero header (empty name) marks the archive terminator
        return self["name"].value == ""

    def octal2int(self, text):
        """Parse *text* as octal; malformed or empty fields decode to 0."""
        try:
            return int(text, 8)
        except ValueError:
            return 0

    def createDescription(self):
        if self.isEmpty():
            desc = "(terminator, empty header)"
        else:
            filename = self["name"].value
            filesize = humanFilesize(self.getOctal("size"))
            desc = "(%s: %s, %s)" % \
                (filename, self["type"].display, filesize)
        return "Tar File " + desc
class TarFile(Parser):
    """Parser for tar archives: a sequence of FileEntry blocks terminated
    by an all-zero 512-byte header."""
    endian = BIG_ENDIAN
    PARSER_TAGS = {
        "id": "tar",
        "category": "archive",
        "file_ext": ("tar",),
        "mime": (u"application/x-tar", u"application/x-gtar"),
        "min_size": 512*8,  # at least one full header block (size in bits)
        "magic": (("ustar \0", 257*8),),
        "subfile": "skip",
        "description": "TAR archive",
    }
    # accepts the ustar magic or an all blank/NUL field (old tar format)
    _sign = re.compile("ustar *\0|[ \0]*$")

    def validate(self):
        # magic lives at byte offset 257 of the first header block
        if not self._sign.match(self.stream.readBytes(257*8, 8)):
            return "Invalid magic number"
        if self[0].name == "terminator":
            return "Don't contain any file"
        try:
            # uid/gid/size must all be valid octal strings
            int(self["file[0]/uid"].value, 8)
            int(self["file[0]/gid"].value, 8)
            int(self["file[0]/size"].value, 8)
        except ValueError:
            return "Invalid file size"
        return True

    def createFields(self):
        while not self.eof:
            field = FileEntry(self, "file[]")
            if field.isEmpty():
                # empty header: archive terminator
                yield NullBytes(self, "terminator", 512)
                break
            yield field
        if self.current_size < self._size:
            # trailing padding after the terminator
            yield self.seekBit(self._size, "end")

    def createContentSize(self):
        # the real archive content ends right after the terminator block
        return self["terminator"].address + self["terminator"].size
| azumimuo/family-xbmc-addon | plugin.video.bubbles/resources/lib/externals/hachoir/hachoir_parser/archive/tar.py | Python | gpl-2.0 | 4,571 |
# -*- coding: utf-8 -*-
###############################################################################
#
# GetGroup
# Returns a list of users that are in the specified group.
#
# Python versions 2.6, 2.7, 3.x
#
# Copyright 2014, Temboo Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
#
#
###############################################################################
from temboo.core.choreography import Choreography
from temboo.core.choreography import InputSet
from temboo.core.choreography import ResultSet
from temboo.core.choreography import ChoreographyExecution
import json
class GetGroup(Choreography):
    """Choreo wrapper for the Amazon IAM GetGroup operation: returns the
    users that belong to a specified group."""

    def __init__(self, temboo_session):
        """
        Create a new instance of the GetGroup Choreo. A TembooSession object, containing a valid
        set of Temboo credentials, must be supplied.
        """
        Choreography.__init__(self, temboo_session, '/Library/Amazon/IAM/GetGroup')

    def new_input_set(self):
        return GetGroupInputSet()

    def _make_result_set(self, result, path):
        return GetGroupResultSet(result, path)

    def _make_execution(self, session, exec_id, path):
        return GetGroupChoreographyExecution(session, exec_id, path)
class GetGroupInputSet(InputSet):
    """
    An InputSet with methods appropriate for specifying the inputs to the GetGroup
    Choreo. The InputSet object is used to specify input parameters when executing this Choreo.
    """

    def set_AWSAccessKeyId(self, value):
        """((required, string) The Access Key ID provided by Amazon Web Services.)"""
        self._set_input('AWSAccessKeyId', value)

    def set_AWSSecretKeyId(self, value):
        """((required, string) The Secret Key ID provided by Amazon Web Services.)"""
        self._set_input('AWSSecretKeyId', value)

    def set_GroupName(self, value):
        """((required, string) The name of the group to return.)"""
        self._set_input('GroupName', value)

    def set_Marker(self, value):
        """((optional, string) Used for pagination to indicate the starting point of the results to return.)"""
        self._set_input('Marker', value)

    def set_MaxItems(self, value):
        """((optional, integer) Used for pagination to limit the number of results returned. Defaults to 100.)"""
        self._set_input('MaxItems', value)

    def set_ResponseFormat(self, value):
        """((optional, string) The format that the response should be in. Valid values are "xml" (the default) and "json".)"""
        self._set_input('ResponseFormat', value)
class GetGroupResultSet(ResultSet):
    """
    A ResultSet with methods tailored to the values returned by the GetGroup Choreo.
    The ResultSet object is used to retrieve the results of a Choreo execution.
    """

    def getJSONFromString(self, str):
        """Parse *str* as JSON and return the resulting object."""
        return json.loads(str)

    def get_Response(self):
        """Retrieve the value for the "Response" output from this Choreo execution. (The response from Amazon.)"""
        return self._output.get('Response', None)
class GetGroupChoreographyExecution(ChoreographyExecution):
    # Execution handle for GetGroup: builds result sets of the matching type.
    def _make_result_set(self, response, path):
        return GetGroupResultSet(response, path)
| jordanemedlock/psychtruths | temboo/core/Library/Amazon/IAM/GetGroup.py | Python | apache-2.0 | 4,279 |
# -*- coding: utf-8 -*-
"""Queue management implementation for Plaso.
This file contains an implementation of a queue used by plaso for
queue management.
The queue has been abstracted in order to provide support for different
implementations of the queueing mechanism, to support multi processing and
scalability.
"""
import abc
class QueueAbort(object):
    """Sentinel object pushed onto a queue to signal consumers to abort."""
class Queue(object):
    """Class that implements the queue interface.

    NOTE(review): the methods below are decorated with @abc.abstractmethod
    but the class neither derives from abc.ABC nor sets ABCMeta as its
    metaclass, so instantiating incomplete subclasses is NOT blocked at
    runtime -- confirm whether this is intentional.
    """

    @abc.abstractmethod
    def IsEmpty(self):
        """Determines if the queue is empty."""

    @abc.abstractmethod
    def PushItem(self, item, block=True):
        """Pushes an item onto the queue.

        Args:
          item (object): item to add.
          block (bool): whether to block if the queue is full.

        Raises:
          QueueFull: if the queue is full, and the item could not be added.
        """

    @abc.abstractmethod
    def PopItem(self):
        """Pops an item off the queue.

        Raises:
          QueueEmpty: when the queue is empty.
        """

    @abc.abstractmethod
    def Close(self, abort=False):
        """Closes the queue.

        Args:
          abort (Optional[bool]): whether the Close is the result of an abort
              condition. If True, queue contents may be lost.
        """

    @abc.abstractmethod
    def Open(self):
        """Opens the queue, ready to enqueue or dequeue items."""
| log2timeline/plaso | plaso/multi_process/plaso_queue.py | Python | apache-2.0 | 1,342 |
#!/usr/bin/python3
from pyrob.api import *
@task
def task_8_22():
    """Drive the robot up to the wall above, then slide along it to a corner."""
    # climb until blocked from above
    while not wall_is_above():
        move_up()
    # then run sideways: rightwards when possible, otherwise leftwards
    if not wall_is_on_the_right():
        while not wall_is_on_the_right():
            move_right()
    else:
        while not wall_is_on_the_left():
            move_left()
if __name__ == '__main__':
    # Entry point: hand control to the pyrob task runner.
    run_tasks()
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from frappe.utils import cint, cstr, flt
from frappe import _
from frappe.model.document import Document
from operator import itemgetter
class BOM(Document):
    def autoname(self):
        """Autoname the BOM as 'BOM/<item_code>/<3-digit index>', continuing
        from the highest existing BOM name for this item."""
        last_name = frappe.db.sql("""select max(name) from `tabBOM`
            where name like "BOM/%s/%%" """ % frappe.db.escape(self.item))

        if last_name:
            # the trailing segment of the name carries the numeric index
            idx = cint(cstr(last_name[0][0]).split('/')[-1].split('-')[0]) + 1
        else:
            idx = 1
        self.name = 'BOM/' + self.item + ('/%.3i' % idx)

    def validate(self):
        """Run all document-level validations and (re)compute costs."""
        self.clear_operations()
        self.validate_main_item()

        from erpnext.utilities.transaction_base import validate_uom_is_integer
        validate_uom_is_integer(self, "stock_uom", "qty", "BOM Item")

        self.validate_materials()
        self.set_bom_material_details()
        self.calculate_cost()
        self.validate_operations()

    def on_update(self):
        # keep the BOM tree consistent on every change
        self.check_recursion()
        self.update_exploded_items()

    def on_submit(self):
        self.manage_default_bom()

    def on_cancel(self):
        # a cancelled BOM can be neither active nor the item's default
        frappe.db.set(self, "is_active", 0)
        frappe.db.set(self, "is_default", 0)

        # check if used in any other bom
        self.validate_bom_links()
        self.manage_default_bom()

    def on_update_after_submit(self):
        self.validate_bom_links()
        self.manage_default_bom()
    def get_item_det(self, item_code):
        """Fetch the Item row (list of dicts) for *item_code*; throws if missing."""
        item = frappe.db.sql("""select name, item_name, is_asset_item, is_purchase_item,
            docstatus, description, image, is_sub_contracted_item, stock_uom, default_bom,
            last_purchase_rate
            from `tabItem` where name=%s""", item_code, as_dict = 1)

        if not item:
            frappe.throw(_("Item: {0} does not exist in the system").format(item_code))

        return item

    def validate_rm_item(self, item):
        # a raw material must not be the BOM's finished item itself
        if item[0]['name'] == self.item:
            frappe.throw(_("Raw material cannot be same as main Item"))

    def set_bom_material_details(self):
        """Fill in missing fields (rate, uom, description, ...) on each BOM item row."""
        for item in self.get("items"):
            ret = self.get_bom_material_detail({"item_code": item.item_code, "item_name": item.item_name, "bom_no": item.bom_no,
                "qty": item.qty})

            for r in ret:
                # only set values that are not already provided on the row
                if not item.get(r):
                    item.set(r, ret[r])

    def get_bom_material_detail(self, args=None):
        """ Get raw material details like uom, desc and rate"""
        if not args:
            # called from the client side: arguments arrive via the form dict
            args = frappe.form_dict.get('args')

        if isinstance(args, basestring):
            import json
            args = json.loads(args)

        item = self.get_item_det(args['item_code'])
        self.validate_rm_item(item)

        # fall back to the item's default BOM when none is given
        args['bom_no'] = args['bom_no'] or item and cstr(item[0]['default_bom']) or ''
        args.update(item[0])

        rate = self.get_rm_rate(args)
        ret_item = {
            'item_name' : item and args['item_name'] or '',
            'description' : item and args['description'] or '',
            'image' : item and args['image'] or '',
            'stock_uom' : item and args['stock_uom'] or '',
            'bom_no' : args['bom_no'],
            'rate' : rate
        }
        return ret_item

    def get_rm_rate(self, arg):
        """ Get raw material rate as per selected method, if bom exists takes bom cost """
        rate = 0
        if arg['bom_no']:
            # sub-assembly: unit cost of its own BOM wins over any price source
            rate = self.get_bom_unitcost(arg['bom_no'])
        elif arg and (arg['is_purchase_item'] == 1 or arg['is_sub_contracted_item'] == 1):
            if self.rm_cost_as_per == 'Valuation Rate':
                rate = self.get_valuation_rate(arg)
            elif self.rm_cost_as_per == 'Last Purchase Rate':
                rate = arg['last_purchase_rate']
            elif self.rm_cost_as_per == "Price List":
                if not self.buying_price_list:
                    frappe.throw(_("Please select Price List"))
                rate = frappe.db.get_value("Item Price", {"price_list": self.buying_price_list,
                    "item_code": arg["item_code"]}, "price_list_rate") or 0
        return rate
def update_cost(self):
if self.docstatus == 2:
return
items_rate = frappe._dict()
for d in self.get("items"):
rate = self.get_bom_material_detail({'item_code': d.item_code, 'bom_no': d.bom_no,
'qty': d.qty})["rate"]
if rate:
d.rate = rate
items_rate.setdefault(d.item_code, d.rate)
for e in self.get("exploded_items"):
if items_rate.get(e.item_code):
e.rate = items_rate.get(e.item_code)
if self.docstatus == 1:
self.flags.ignore_validate_update_after_submit = True
self.calculate_cost()
self.save()
frappe.msgprint(_("Cost Updated"))
def get_bom_unitcost(self, bom_no):
bom = frappe.db.sql("""select name, total_cost/quantity as unit_cost from `tabBOM`
where is_active = 1 and name = %s""", bom_no, as_dict=1)
return bom and bom[0]['unit_cost'] or 0
	def get_valuation_rate(self, args):
		""" Get weighted average of valuation rate from all warehouses """
		total_qty, total_value, valuation_rate = 0.0, 0.0, 0.0
		for d in frappe.db.sql("""select actual_qty, stock_value from `tabBin`
			where item_code=%s""", args['item_code'], as_dict=1):
				total_qty += flt(d.actual_qty)
				total_value += flt(d.stock_value)

		if total_qty:
			valuation_rate = total_value / total_qty

		if valuation_rate <= 0:
			# No stock on hand (or negative rate): fall back to the most recent
			# positive valuation rate from the stock ledger.
			last_valuation_rate = frappe.db.sql("""select valuation_rate
				from `tabStock Ledger Entry`
				where item_code = %s and valuation_rate > 0
				order by posting_date desc, posting_time desc, name desc limit 1""", args['item_code'])

			valuation_rate = flt(last_valuation_rate[0][0]) if last_valuation_rate else 0

		return valuation_rate
	def manage_default_bom(self):
		""" Uncheck others if current one is selected as default,
			update default bom in item master
		"""
		if self.is_default and self.is_active:
			from frappe.model.utils import set_default
			# Clears is_default on all other BOMs of the same item.
			set_default(self, "item")
			item = frappe.get_doc("Item", self.item)
			if item.default_bom != self.name:
				item.default_bom = self.name
				item.save()
		else:
			# Not default (or inactive): make sure neither this BOM nor the
			# item master still point at it as the default.
			frappe.db.set(self, "is_default", 0)
			item = frappe.get_doc("Item", self.item)
			if item.default_bom == self.name:
				item.default_bom = None
				item.save()
def clear_operations(self):
if not self.with_operations:
self.set('operations', [])
def validate_main_item(self):
""" Validate main FG item"""
item = self.get_item_det(self.item)
if not item:
frappe.throw(_("Item {0} does not exist in the system or has expired").format(self.item))
else:
ret = frappe.db.get_value("Item", self.item, ["description", "stock_uom", "item_name"])
self.description = ret[0]
self.uom = ret[1]
self.item_name= ret[2]
def validate_materials(self):
""" Validate raw material entries """
if not self.get('items'):
frappe.throw(_("Raw Materials cannot be blank."))
check_list = []
for m in self.get('items'):
if m.bom_no:
validate_bom_no(m.item_code, m.bom_no)
if flt(m.qty) <= 0:
frappe.throw(_("Quantity required for Item {0} in row {1}").format(m.item_code, m.idx))
check_list.append(cstr(m.item_code))
unique_chk_list = set(check_list)
if len(unique_chk_list) != len(check_list):
frappe.throw(_("Same item has been entered multiple times."))
def check_recursion(self):
""" Check whether recursion occurs in any bom"""
check_list = [['parent', 'bom_no', 'parent'], ['bom_no', 'parent', 'child']]
for d in check_list:
bom_list, count = [self.name], 0
while (len(bom_list) > count ):
boms = frappe.db.sql(" select %s from `tabBOM Item` where %s = %s " %
(d[0], d[1], '%s'), cstr(bom_list[count]))
count = count + 1
for b in boms:
if b[0] == self.name:
frappe.throw(_("BOM recursion: {0} cannot be parent or child of {2}").format(b[0], self.name))
if b[0]:
bom_list.append(b[0])
def update_cost_and_exploded_items(self, bom_list=[]):
bom_list = self.traverse_tree(bom_list)
for bom in bom_list:
bom_obj = frappe.get_doc("BOM", bom)
bom_obj.on_update()
return bom_list
def traverse_tree(self, bom_list=[]):
def _get_children(bom_no):
return [cstr(d[0]) for d in frappe.db.sql("""select bom_no from `tabBOM Item`
where parent = %s and ifnull(bom_no, '') != ''""", bom_no)]
count = 0
if self.name not in bom_list:
bom_list.append(self.name)
while(count < len(bom_list)):
for child_bom in _get_children(bom_list[count]):
if child_bom not in bom_list:
bom_list.append(child_bom)
count += 1
bom_list.reverse()
return bom_list
def calculate_cost(self):
"""Calculate bom totals"""
self.calculate_op_cost()
self.calculate_rm_cost()
self.total_cost = self.operating_cost + self.raw_material_cost
def calculate_op_cost(self):
"""Update workstation rate and calculates totals"""
self.operating_cost = 0
for d in self.get('operations'):
if d.workstation:
if not d.hour_rate:
d.hour_rate = flt(frappe.db.get_value("Workstation", d.workstation, "hour_rate"))
if d.hour_rate and d.time_in_mins:
d.operating_cost = flt(d.hour_rate) * flt(d.time_in_mins) / 60.0
self.operating_cost += flt(d.operating_cost)
def calculate_rm_cost(self):
"""Fetch RM rate as per today's valuation rate and calculate totals"""
total_rm_cost = 0
for d in self.get('items'):
if d.bom_no:
d.rate = self.get_bom_unitcost(d.bom_no)
d.amount = flt(d.rate, self.precision("rate", d)) * flt(d.qty, self.precision("qty", d))
d.qty_consumed_per_unit = flt(d.qty, self.precision("qty", d)) / flt(self.quantity, self.precision("quantity"))
total_rm_cost += d.amount
self.raw_material_cost = total_rm_cost
def update_exploded_items(self):
""" Update Flat BOM, following will be correct data"""
self.get_exploded_items()
self.add_exploded_items()
	def get_exploded_items(self):
		""" Get all raw materials including items from child bom"""
		# Accumulator: item_code -> flattened row (qty summed on duplicates).
		self.cur_exploded_items = {}
		for d in self.get('items'):
			if d.bom_no:
				# Sub-assembly: recurse into its (already exploded) child rows.
				self.get_child_exploded_items(d.bom_no, d.qty)
			else:
				self.add_to_cur_exploded_items(frappe._dict({
					'item_code' : d.item_code,
					'item_name' : d.item_name,
					'description' : d.description,
					'image' : d.image,
					'stock_uom' : d.stock_uom,
					'qty' : flt(d.qty),
					'rate' : flt(d.rate),
				}))
def add_to_cur_exploded_items(self, args):
if self.cur_exploded_items.get(args.item_code):
self.cur_exploded_items[args.item_code]["qty"] += args.qty
else:
self.cur_exploded_items[args.item_code] = args
	def get_child_exploded_items(self, bom_no, qty):
		""" Add all items from Flat BOM of child BOM"""
		# Did not use qty_consumed_per_unit in the query, as it leads to rounding loss
		child_fb_items = frappe.db.sql("""select bom_item.item_code, bom_item.item_name, bom_item.description,
			bom_item.stock_uom, bom_item.qty, bom_item.rate,
			bom_item.qty / ifnull(bom.quantity, 1) as qty_consumed_per_unit
			from `tabBOM Explosion Item` bom_item, tabBOM bom
			where bom_item.parent = bom.name and bom.name = %s and bom.docstatus = 1""", bom_no, as_dict = 1)

		for d in child_fb_items:
			self.add_to_cur_exploded_items(frappe._dict({
				'item_code' : d['item_code'],
				'item_name' : d['item_name'],
				'description' : d['description'],
				'stock_uom' : d['stock_uom'],
				# Scale the child's per-unit consumption by the qty used here.
				'qty' : d['qty_consumed_per_unit']*qty,
				'rate' : flt(d['rate']),
			}))
	def add_exploded_items(self):
		"Add items to Flat BOM table"
		frappe.db.sql("""delete from `tabBOM Explosion Item` where parent=%s""", self.name)
		self.set('exploded_items', [])
		# NOTE(review): itemgetter(0) on a string key returns its FIRST CHARACTER,
		# so rows are ordered only by the first letter of item_code — presumably
		# sorted(self.cur_exploded_items) (full item_code) was intended; confirm
		# before changing, as it alters row order in saved documents.
		for d in sorted(self.cur_exploded_items, key=itemgetter(0)):
			ch = self.append('exploded_items', {})
			for i in self.cur_exploded_items[d].keys():
				ch.set(i, self.cur_exploded_items[d][i])
			ch.amount = flt(ch.qty) * flt(ch.rate)
			ch.qty_consumed_per_unit = flt(ch.qty) / flt(self.quantity)
			ch.docstatus = self.docstatus
			ch.db_insert()
	def validate_bom_links(self):
		"""Block deactivation/cancellation while an active, submitted parent BOM uses this one."""
		if not self.is_active:
			act_pbom = frappe.db.sql("""select distinct bom_item.parent from `tabBOM Item` bom_item
				where bom_item.bom_no = %s and bom_item.docstatus = 1
				and exists (select * from `tabBOM` where name = bom_item.parent
					and docstatus = 1 and is_active = 1)""", self.name)

			if act_pbom and act_pbom[0][0]:
				frappe.throw(_("Cannot deactivate or cancel BOM as it is linked with other BOMs"))
def validate_operations(self):
if self.with_operations and not self.get('operations'):
frappe.throw(_("Operations cannot be left blank."))
def get_bom_items_as_dict(bom, company, qty=1, fetch_exploded=1):
	"""Return {item_code: row} of stock items required by a BOM, scaled to ``qty``.

	:param bom: BOM name.
	:param company: company whose default expense account / cost center /
		warehouse are used to backfill rows whose own defaults are missing or
		belong to another company.
	:param qty: multiplier applied to the per-unit BOM quantities.
	:param fetch_exploded: when truthy, read the flattened `BOM Explosion Item`
		table (sub-contracted items excluded); otherwise read the direct
		`BOM Item` rows.
	"""
	item_dict = {}

	# Did not use qty_consumed_per_unit in the query, as it leads to rounding loss
	query = """select
				bom_item.item_code,
				item.item_name,
				sum(bom_item.qty/ifnull(bom.quantity, 1)) * %(qty)s as qty,
				item.description,
				item.image,
				item.stock_uom,
				item.default_warehouse,
				item.expense_account as expense_account,
				item.buying_cost_center as cost_center
			from
				`tab{table}` bom_item, `tabBOM` bom, `tabItem` item
			where
				bom_item.parent = bom.name
				and bom_item.docstatus < 2
				and bom_item.parent = %(bom)s
				and item.name = bom_item.item_code
				and is_stock_item = 1
				{conditions}
				group by item_code, stock_uom"""

	if fetch_exploded:
		query = query.format(table="BOM Explosion Item",
			conditions="""and item.is_sub_contracted_item = 0""")
		items = frappe.db.sql(query, { "qty": qty, "bom": bom }, as_dict=True)
	else:
		query = query.format(table="BOM Item", conditions="")
		items = frappe.db.sql(query, { "qty": qty, "bom": bom }, as_dict=True)

	# make unique (dict.has_key is Python-2-only; `in` works on both 2 and 3)
	for item in items:
		if item.item_code in item_dict:
			item_dict[item.item_code]["qty"] += flt(item.qty)
		else:
			item_dict[item.item_code] = item

	for item, item_details in item_dict.items():
		for d in [["Account", "expense_account", "default_expense_account"],
			["Cost Center", "cost_center", "cost_center"], ["Warehouse", "default_warehouse", ""]]:
				company_in_record = frappe.db.get_value(d[0], item_details.get(d[1]), "company")
				if not item_details.get(d[1]) or (company_in_record and company != company_in_record):
					item_dict[item][d[1]] = frappe.db.get_value("Company", company, d[2]) if d[2] else None

	return item_dict
@frappe.whitelist()
def get_bom_items(bom, company, qty=1, fetch_exploded=1):
	"""Return the BOM's required items as a list sorted by item_code.

	Uses ``sort(key=...)`` instead of the old comparator: ``sort(cmp)`` is
	Python-2-only, and the old lambda returned a boolean-derived 1/-1 (never 0),
	which is not a valid three-way comparison for equal keys. ``list(...)`` also
	keeps this working on Python 3, where ``dict.values()`` is a view.
	"""
	items = list(get_bom_items_as_dict(bom, company, qty, fetch_exploded).values())
	items.sort(key=lambda item: item.item_code)
	return items
def validate_bom_no(item, bom_no):
	"""Validate BOM No of sub-contracted items"""
	bom = frappe.get_doc("BOM", bom_no)
	if not bom.is_active:
		frappe.throw(_("BOM {0} must be active").format(bom_no))
	if bom.docstatus != 1:
		# Tests are allowed to use draft BOMs; real usage requires submission.
		if not getattr(frappe.flags, "in_test", False):
			frappe.throw(_("BOM {0} must be submitted").format(bom_no))
	# The BOM must be for the item itself or for the item's template (variant_of).
	if item and not (bom.item.lower() == item.lower() or \
		bom.item.lower() == cstr(frappe.db.get_value("Item", item, "variant_of")).lower()):
		frappe.throw(_("BOM {0} does not belong to Item {1}").format(bom_no, item))
| indictranstech/trufil-erpnext | erpnext/manufacturing/doctype/bom/bom.py | Python | agpl-3.0 | 14,720 |
from django.contrib import admin
from school_year_manager.models import SchoolYear, Semester
class SchoolYearAdmin(admin.ModelAdmin):
    """Admin options for SchoolYear: searchable by year range, filterable on activity."""
    model = SchoolYear
    search_fields = ('start_year', 'end_year', )
    list_filter = ('is_active', )
    list_display = ('__unicode__', 'start_year', 'end_year', 'is_active', )
    ordering = ('start_year', )
class SemesterAdmin(admin.ModelAdmin):
    """Admin options for Semester, including an M2M student picker widget."""
    model = Semester
    search_fields = ('semester', )
    list_filter = ('semester', 'school_year__start_year', )
    list_display = ('__unicode__', 'semester', 'school_year', 'get_student_count', )
    # Horizontal filter widget for the many-to-many 'students' field.
    filter_horizontal = ('students', )
    ordering = ('school_year', )
# Wire the admin customisations above to their models.
admin.site.register(SchoolYear, SchoolYearAdmin)
admin.site.register(Semester, SemesterAdmin)
| njncalub/pct-infosys | school_year_manager/admin.py | Python | mit | 812 |
import h5py
import numpy as np
#
# Build a small HDF5 file with two compound datasets, one atomic dataset,
# and a dimension scale attached to the atomic dataset.
#
file = h5py.File('FakeDim_remove.h5','w')
#
# Create dataset under the Root group.
#
comp_type = np.dtype([('Orbit', 'i'), ('Temperature', 'f8')])
dataset = file.create_dataset("DSC",(2,), maxshape=(None,),chunks=(2,),dtype=comp_type)
data = np.array([(1153, 53.23), (1184, 55.12)], dtype = comp_type)
dataset[...] = data
comp_type1 = np.dtype([('Pressure', 'f8'), ('index', 'i')])
dataset = file.create_dataset("dummy",(3,), maxshape=(None,),chunks=(3,),dtype=comp_type1)
data = np.array([(999.0, 1), (997.0, 2),(996.0,3)], dtype = comp_type1)
dataset[...] = data
atomic_array = np.arange(4)
dataset = file.create_dataset("atomic",(4,),maxshape=(None,),chunks=(4,),data=atomic_array,dtype='i2')
dataset_dim = file.create_dataset("dima",(4,),maxshape=(None,),chunks=(4,),data=atomic_array,dtype='i2')
# NOTE(review): Dimension.create_scale was deprecated and later removed in
# h5py (replaced by Dataset.make_scale) — confirm the h5py version this
# generator targets before running it on a modern install.
file["dima"].dims.create_scale(file["dima"])
file["atomic"].dims[0].attach_scale(file["dima"])
#
# Close the file before exiting
#
file.close()
| OPENDAP/hdf5_handler | data/src/compound_unlimited.py | Python | lgpl-2.1 | 983 |
from django.conf.urls import patterns
from django.conf.urls import include
from django.conf.urls import url
from django.contrib import admin
# NOTE(review): django.conf.urls.patterns() was deprecated in Django 1.8 and
# removed in 1.10; on upgrade, replace this with a plain list of url() entries.
urlpatterns = patterns('',
    url(r'^', include('app1.urls', namespace='app1')),
    url(r'^admin/', include(admin.site.urls)),
)
| arthuralvim/django-reversion-example | rev/urls.py | Python | mit | 274 |
from malcolm.yamlutil import check_yaml_names, make_block_creator
# Each creator builds a block from the YAML definition of the same name
# that lives alongside this module.
pva_server_block = make_block_creator(__file__, "pva_server_block.yaml")
pva_client_block = make_block_creator(__file__, "pva_client_block.yaml")
# Export only the names that check_yaml_names validates against their YAML files.
__all__ = check_yaml_names(globals())
| dls-controls/pymalcolm | malcolm/modules/pva/blocks/__init__.py | Python | apache-2.0 | 252 |
# -*- coding: utf-8 -*-
# CREATED ON DATE: 06.05.15
__author__ = 'mail@pythonic.ninija' | PythonicNinja/UrlShortener | url_shortener/shorter/management/commands/__init__.py | Python | bsd-3-clause | 87 |
from django.test import SimpleTestCase, override_settings
from django.test.utils import require_jinja2
@override_settings(ROOT_URLCONF='shortcuts.urls')
class RenderTests(SimpleTestCase):
    """Exercise django.shortcuts.render() via the views wired in shortcuts/urls."""

    def test_render(self):
        response = self.client.get('/render/')
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.content, b'FOO.BAR../render/\n')
        self.assertEqual(response['Content-Type'], 'text/html; charset=utf-8')
        # render() must not set current_app on the request.
        self.assertFalse(hasattr(response.context.request, 'current_app'))

    def test_render_with_multiple_templates(self):
        response = self.client.get('/render/multiple_templates/')
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.content, b'FOO.BAR../render/multiple_templates/\n')

    def test_render_with_content_type(self):
        response = self.client.get('/render/content_type/')
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.content, b'FOO.BAR../render/content_type/\n')
        self.assertEqual(response['Content-Type'], 'application/x-rendertest')

    def test_render_with_status(self):
        response = self.client.get('/render/status/')
        self.assertEqual(response.status_code, 403)
        self.assertEqual(response.content, b'FOO.BAR../render/status/\n')

    @require_jinja2
    def test_render_with_using(self):
        # The 'using' query param selects the template engine in the test view.
        response = self.client.get('/render/using/')
        self.assertEqual(response.content, b'DTL\n')
        response = self.client.get('/render/using/?using=django')
        self.assertEqual(response.content, b'DTL\n')
        response = self.client.get('/render/using/?using=jinja2')
        self.assertEqual(response.content, b'Jinja2\n')
| Beauhurst/django | tests/shortcuts/tests.py | Python | bsd-3-clause | 1,737 |
# coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import logging
from pants.backend.core.tasks.task import Task
from pants.base.payload_field import DeferredSourcesField
from pants.base.source_root import SourceRoot
from pants.build_graph.address_lookup_error import AddressLookupError
logger = logging.getLogger(__name__)
class DeferredSourcesMapper(Task):
  """Maps DeferredSourcesFields to files produced in the 'unpacked_archives' product
  (e.g. by UnpackJars).

  If you want a task to be able to map sources like this, make it require the 'deferred_sources'
  product.
  """

  class SourcesTargetLookupError(AddressLookupError):
    """Raised when the referenced target cannot be found in the build graph"""
    pass

  class NoUnpackedSourcesError(AddressLookupError):
    """Raised when there are no files found unpacked from the archive"""
    pass

  @classmethod
  def product_types(cls):
    """Declare the product produced by this task.

    'deferred_sources' does not have any data associated with it. Downstream tasks can
    depend on it just to make sure that this task completes first.
    """
    return ['deferred_sources']

  @classmethod
  def prepare(cls, options, round_manager):
    round_manager.require_data('unpacked_archives')

  def execute(self):
    """Populate every DeferredSourcesField in the target graph from unpacked archives.

    :raises SourcesTargetLookupError: if a referenced sources target is not in the build graph.
    :raises NoUnpackedSourcesError: if the referenced target unpacked no sources.
    """
    deferred_sources_fields = []

    def find_deferred_sources_fields(target):
      for name, payload_field in target.payload.fields:
        if isinstance(payload_field, DeferredSourcesField):
          deferred_sources_fields.append((target, name, payload_field))

    addresses = [target.address for target in self.context.targets()]
    self.context.build_graph.walk_transitive_dependency_graph(addresses,
                                                              find_deferred_sources_fields)

    unpacked_sources = self.context.products.get_data('unpacked_archives')
    for (target, name, payload_field) in deferred_sources_fields:
      sources_target = self.context.build_graph.get_target(payload_field.address)
      if not sources_target:
        raise self.SourcesTargetLookupError(
          "Couldn't find {sources_spec} referenced from {target} field {name} in build graph"
          .format(sources_spec=payload_field.address.spec, target=target.address.spec, name=name))
      if sources_target not in unpacked_sources:
        # BUG FIX: the kwarg must match the {sources_spec} placeholder; it was
        # previously passed as spec=, which raised KeyError instead of this error.
        raise self.NoUnpackedSourcesError(
          "Target {sources_spec} referenced from {target} field {name} did not unpack any sources"
          .format(sources_spec=sources_target.address.spec, target=target.address.spec, name=name))
      sources, rel_unpack_dir = unpacked_sources[sources_target]
      SourceRoot.register_mutable(rel_unpack_dir)
      payload_field.populate(sources, rel_unpack_dir)
| slyphon/pants | src/python/pants/backend/core/tasks/deferred_sources_mapper.py | Python | apache-2.0 | 2,979 |
import sys
import re
import numpy as np
import json
import pickle
from string import ascii_letters
from keras.models import Sequential, model_from_json
from keras.layers import Dense, Activation, Dropout
from keras.layers import LSTM
from ivanatrumpalot import clean_text, predict, sample
# This code is heavily influenced by the Keras example code on LSTM for text generation :
# https://github.com/fchollet/keras/blob/master/examples/lstm_text_generation.py
# USAGE :
# python train_lstm.py [mode]
# If no arguments are passed, this will train a new model, saving the model's architecture
# to model.json and its weights to weights.h5.
# If [mode] is passed, valid options are "extend" and "predict".
# If the string "extend" is passed, the previously saved model.json and weights.h5 are
# loaded and training continues from them.
# If the string "predict" is passed, the saved model is loaded and used only to generate
# text, with no further training.
# Code directory
import os  # BUG FIX: os.chdir is used below but os was never imported at the top
os.chdir("/root/ivanatrumpalot/code")

# Read and clean corpus; the context manager closes the file handle promptly.
with open("../data/trump_corpus") as corpus_file:
    text = clean_text(corpus_file.read())

# Corpus length
print("Corpus : {} characters, approximately {} sentences.".format(len(text), len(text.split("."))))
# Generate dictionaries mapping from characters in our alphabet to an index, and the reverse.
# NOTE(review): set iteration order varies between interpreter runs (hash
# randomisation), so these index mappings are only consistent because they are
# pickled below and reloaded for prediction — confirm consumers use the pickle.
alphabet = set(text).union(set(ascii_letters)).union(set("1234567890"))
alphabet_size = len(alphabet)
alphabet_indices = dict((c, i) for i, c in enumerate(alphabet))
indices_alphabet = dict((i, c) for i, c in enumerate(alphabet))
print("Size of the alphabet : {} characters.".format(alphabet_size))
# Generate sequences of characters that the RNN will use to predict the next character.
# Windows of primer_length characters, sliding by `step`, each labelled with the
# character that follows the window.
primer_length = 50
step = 3
sentences = []
next_character = []
for i in range(0, len(text) - primer_length, step):
    sentences.append(text[i : i + primer_length])
    next_character.append(text[i + primer_length])
print("Number of sequences generated from the corpus : {}.".format(len(sentences)))
# Vectorise the text sequences : go from N sentences of length primer_length to
# a binary array of size (N, primer_length, alphabet_size). Do the same for the
# next_character array.
print("Vectorising.")
# One-hot encodings: X[i, t, c] marks character c at position t of sentence i;
# y[i, c] marks the character that follows sentence i.
# NOTE(review): np.bool is deprecated in newer NumPy — confirm pinned version.
X = np.zeros((len(sentences), primer_length, alphabet_size), dtype=np.bool)
y = np.zeros((len(sentences), alphabet_size), dtype=np.bool)
for i, sentence in enumerate(sentences):
    for t, char in enumerate(sentence):
        X[i, t, alphabet_indices[char]] = 1
    y[i, alphabet_indices[next_character[i]]] = 1
# Pickle the necessary objects for future prediction
required_objects = { "alphabet" : alphabet,
                     "alphabet_indices" : alphabet_indices,
                     "indices_alphabet" : indices_alphabet,
                     "primer_length" : primer_length
                   }
with open("required_objects.pickle", "wb") as f:
    pickle.dump(required_objects, f)
# The current model is a four-layer LSTM network with a dropout layer between each hidden layer.
# The final LSTM returns only its last output, which feeds a softmax over the alphabet.
print("Building the model.")
model = Sequential()
model.add(LSTM(128, return_sequences=True, init="glorot_uniform",
               input_shape=(primer_length, len(alphabet))))
model.add(Dropout(0.2))
model.add(LSTM(256, return_sequences=True, init="glorot_uniform"))
model.add(Dropout(0.2))
model.add(LSTM(512, return_sequences=True, init="glorot_uniform"))
model.add(Dropout(0.2))
model.add(LSTM(512, return_sequences=False, init="glorot_uniform"))
model.add(Dropout(0.2))
model.add(Dense(len(alphabet)))
model.add(Activation('softmax'))
model.compile(loss='categorical_crossentropy', optimizer='adam')
model.summary()
# Train the model in 49 outer iterations of five epochs each (245 epochs total),
# outputting some generated text every five epochs.
# Save the model every five epochs, just in case training is interrupted
for iteration in range(1, 50):
    print("\n" + "-" * 50)
    print("Iteration {}".format(iteration))
    # Train the model for five epochs
    model.fit(X, y, batch_size=128, nb_epoch=5, shuffle=True)
    # Pick a random part of the text to use as a prompt
    start_index = np.random.randint(0, len(text) - primer_length - 1)
    # For various energies in the probability distribution,
    # create some 200-character sample strings
    for diversity in [0.2, 0.5, 1.0, 1.2]:
        print("\n----- Diversity : {}".format(diversity))
        generated = ""
        sentence = text[start_index : start_index + primer_length]
        generated += sentence
        print("----- Generating with prompt : {}".format(sentence))
        sys.stdout.write(generated)
        # Generate 100 characters
        for i in range(100):
            # One-hot encode the current window, predict the next character,
            # then slide the window forward by one.
            x = np.zeros((1, primer_length, len(alphabet)))
            for t, char in enumerate(sentence):
                x[0, t, alphabet_indices[char]] = 1.
            predictions = model.predict(x, verbose=0)[0]
            next_index = sample(predictions, diversity)
            next_char = indices_alphabet[next_index]
            generated += next_char
            sentence = sentence[1:] + next_char
            sys.stdout.write(next_char)
            sys.stdout.flush()
        print("\n")
    # Save the model architecture and weights to file
    model.save_weights("weights.h5", overwrite=True)
    with open("model.json", "w") as f:
        f.write(model.to_json())
| QCaudron/ivanatrumpalot | code/train_lstm.py | Python | mit | 5,169 |
#!/usr/bin/env python3
# https://leetcode.com/problems/two-sum/
import unittest
from typing import List
class Solution:
    """Classic two-sum solved in a single pass with a value -> index hash map."""

    def twoSum(self, nums: List[int], target: int) -> List[int]:
        """Return indices [i, j] (i < j) such that nums[i] + nums[j] == target.

        Returns None when nums is None or no pair sums to target.
        """
        if nums is None:
            return None
        seen = {}
        for index, value in enumerate(nums):
            complement = target - value
            if complement in seen:
                return [seen[complement], index]
            # Record after the lookup so an element never pairs with itself.
            seen[value] = index
        return None
class TestCode(unittest.TestCase):
    """Unit tests for Solution.twoSum, including None/empty edge cases."""
    def test_0(self):
        self.assertEqual(Solution().twoSum(None, 0), None)
    def test_00(self):
        self.assertEqual(Solution().twoSum([], 0), None)
    def test_1(self):
        self.assertEqual(Solution().twoSum([2, 7, 11, 15], 9), [0, 1])
    def test_2(self):
        self.assertEqual(Solution().twoSum([3, 2, 4], 6), [1, 2])
    def test_3(self):
        # Duplicate values must resolve to two distinct indices.
        self.assertEqual(Solution().twoSum([3, 3], 6), [0, 1])
| altermarkive/Coding-Interviews | algorithm-design/leetcode/lc001_two_sum/lc001_two_sum.py | Python | mit | 959 |
from conans import python_requires
import os
common = python_requires('llvm-common/0.0.0@orbitdeps/stable')
class LLVMCore(common.LLVMModulePackage):
    """Conan recipe for the LLVM 'Core' module, built on the shared llvm-common base."""
    version = common.LLVMModulePackage.version
    name = 'llvm_core'
    # LLVM component/module names consumed by the llvm-common build logic.
    llvm_component = 'llvm'
    llvm_module = 'Core'
    llvm_requires = ['llvm_headers', 'llvm_binary_format', 'llvm_remarks',
                     'llvm_support']
| pierricgimmig/orbitprofiler | contrib/conan/recipes/llvm_core/conanfile.py | Python | bsd-2-clause | 387 |
#!/usr/bin/python2
# encoding: utf-8
"""
Storage.py
Created by Alexander Rössler on 2015-01-03.
"""
import time
import sys
import os
import argparse
import ConfigParser
import hal
class Pin:
    """Bookkeeping record tying one HAL pin to its config-file section/option."""

    def __init__(self):
        # Live HAL pin object (replaced later via hal.component.newpin); 0 until created.
        self.halPin = 0
        # Full HAL pin name, "<section>.<option>" in lower case.
        self.halName = ''
        # Config-file section and option this pin is persisted under.
        self.section = ''
        self.name = ''
        # Last value seen, used for change detection in autosave mode.
        self.lastValue = 0.0
def savePins(cfg, filename, pins):
    """Copy every pin's current HAL value into cfg, then persist cfg to filename."""
    for pin in pins:
        cfg.set(pin.section, pin.name, str(pin.halPin.value))
    # The context manager closes the file for us.
    with open(filename, 'w') as config_file:
        cfg.write(config_file)
def readPins(cfg, filename, pins):
    """Load filename into cfg and push each stored float value onto its HAL pin."""
    cfg.read(filename)
    for pin in pins:
        stored = float(cfg.get(pin.section, pin.name))
        pin.lastValue = stored
        pin.halPin.value = stored
# Command-line interface for the storage component.
parser = argparse.ArgumentParser(description='HAL component to store and load values')
parser.add_argument('-n', '--name', help='HAL component name', required=True)
parser.add_argument('-f', '--file', help='Filename to store values', required=True)
parser.add_argument('-x', '--on_exit', help='Save on exit', action='store_true')
parser.add_argument('-a', '--autosave', help='Automatically save on value change', action='store_true')
parser.add_argument('-l', '--autoload', help='Automatically load the file values', action='store_true')
parser.add_argument('-i', '--interval', help='Update interval', default=1.00)
args = parser.parse_args()
updateInterval = float(args.interval)
autosave = args.autosave
autoload = args.autoload
saveOnExit = args.on_exit
filename = args.file
loaded = False
# Create pins
pins = []
if not os.path.isfile(filename):
    sys.stderr.write('Error: File does not exist.\n')
    sys.exit(1)
# One HAL IO pin per (section, option) pair found in the config file.
cfg = ConfigParser.ConfigParser()
cfg.read(filename)
h = hal.component(args.name)
for section in cfg.sections():
    for item in cfg.items(section):
        pin = Pin()
        pin.section = section
        pin.name = item[0]
        pin.halName = section.lower() + '.' + item[0].lower()
        pin.halPin = h.newpin(pin.halName, hal.HAL_FLOAT, hal.HAL_IO)
        pins.append(pin)
# Edge-triggered control pins: a change on either triggers a read/write below.
halReadTriggerPin = h.newpin("read-trigger", hal.HAL_BIT, hal.HAL_IN)
halWriteTriggerPin = h.newpin("write-trigger", hal.HAL_BIT, hal.HAL_IN)
h.ready()
if autoload:
    readPins(cfg, filename, pins)
    loaded = True
lastReadTrigger = 0
lastWriteTrigger = 0
try:
    while (True):
        # XOR with the last seen value detects any toggle of the trigger bit.
        if lastReadTrigger ^ halReadTriggerPin.value:
            lastReadTrigger = halReadTriggerPin.value
            readPins(cfg, filename, pins)
            loaded = True
        if lastWriteTrigger ^ halWriteTriggerPin.value:
            lastWriteTrigger = halWriteTriggerPin.value
            savePins(cfg, filename, pins)
        # Autosave only after an initial load, so defaults don't clobber the file.
        if autosave and loaded:
            for pin in pins:
                if pin.halPin.value != pin.lastValue:
                    pin.lastValue = pin.halPin.value
                    savePins(cfg, filename, pins)
        time.sleep(updateInterval)
except KeyboardInterrupt:
    if saveOnExit:
        savePins(cfg, filename, pins)
    print(("exiting HAL component " + args.name))
    h.exit()
| strahlex/machinekit | src/hal/user_comps/hal_storage.py | Python | lgpl-2.1 | 3,146 |
# Copyright 2019 Silvio Gregorini <silviogregorini@openforce.it>
# License AGPL-3.0 or later (https://www.gnu.org/licenses/lgpl).
# Odoo module manifest: metadata, dependencies and data files for this addon.
{
    'name': 'Fields Relation Data',
    'summary': "Show relations data in ir.model.fields tree views",
    'version': '11.0.1.0.0',
    'category': 'Tools',
    'author': 'Openforce, Odoo Community Association (OCA)',
    'website': 'https://github.com/OCA/server-tools',
    'license': 'AGPL-3',
    'depends': [
        'base'
    ],
    # View extensions loaded on module install/upgrade.
    'data': [
        'views/ir_model.xml',
        'views/ir_model_fields.xml',
    ],
    'installable': True,
}
| brain-tec/server-tools | fields_relation_data/__manifest__.py | Python | agpl-3.0 | 585 |
# This file is part of cloud-init. See LICENSE file for license information.
"""cloud-init Integration Test Verify Script."""
from tests.cloud_tests.testcases import base
class TestHostname(base.CloudTestCase):
    """Test hostname module."""

    # Hostname that the set_hostname cloud-config is expected to apply.
    ex_hostname = "cloudinit2"

    def test_hostname(self):
        """Verify the collected `hostname` output contains the expected name."""
        hostname_output = self.get_data_file('hostname')
        self.assertIn(self.ex_hostname, hostname_output)
# vi: ts=4 expandtab
| larsks/cloud-init | tests/cloud_tests/testcases/modules/set_hostname.py | Python | gpl-3.0 | 478 |
from __future__ import unicode_literals
import re
import six
from moto.core.utils import str_to_rfc_1123_datetime
from six.moves.urllib.parse import parse_qs, urlparse, unquote
import xmltodict
from moto.packages.httpretty.core import HTTPrettyRequest
from moto.core.responses import _TemplateEnvironmentMixin
from moto.s3bucket_path.utils import bucket_name_from_url as bucketpath_bucket_name_from_url, \
parse_key_name as bucketpath_parse_key_name, is_delete_keys as bucketpath_is_delete_keys
from .exceptions import BucketAlreadyExists, S3ClientError, MissingBucket, MissingKey, InvalidPartOrder, MalformedXML, \
MalformedACLError, InvalidNotificationARN, InvalidNotificationEvent
from .models import s3_backend, get_canned_acl, FakeGrantee, FakeGrant, FakeAcl, FakeKey, FakeTagging, FakeTagSet, \
FakeTag
from .utils import bucket_name_from_url, metadata_from_headers, parse_region_from_url
from xml.dom import minidom
DEFAULT_REGION_NAME = 'us-east-1'
def parse_key_name(pth):
    """Return the S3 key for a virtual-host-style URL path (leading slashes removed)."""
    key = pth.lstrip("/")
    return key
def is_delete_keys(request, path, bucket_name):
    """True when the request targets the multi-object-delete API (?delete on the bucket root)."""
    if path == u'/?delete':
        return True
    # Flask-style requests carry the query string separately from the path.
    return path == u'/' and getattr(request, "query_string", "") == "delete"
class ResponseObject(_TemplateEnvironmentMixin):
    def __init__(self, backend):
        # backend: the S3 backend instance every response method delegates to.
        super(ResponseObject, self).__init__()
        self.backend = backend
    @property
    def should_autoescape(self):
        # S3 XML responses must escape user-supplied values (key names etc.).
        return True
    def all_buckets(self):
        """Render the ListAllMyBuckets XML response."""
        # No bucket specified. Listing all buckets
        all_buckets = self.backend.get_all_buckets()
        template = self.response_template(S3_ALL_BUCKETS)
        return template.render(buckets=all_buckets)
    def subdomain_based_buckets(self, request):
        """Return True when the request uses virtual-host-style addressing
        (bucket name in the Host header) rather than path-style."""
        host = request.headers.get('host', request.headers.get('Host'))
        if not host:
            host = urlparse(request.url).netloc

        if (not host or host.startswith('localhost') or host.startswith('localstack') or
                re.match(r'^[^.]+$', host) or re.match(r'^.*\.svc\.cluster\.local$', host)):
            # Default to path-based buckets for (1) localhost, (2) localstack hosts (e.g. localstack.dev),
            # (3) local host names that do not contain a "." (e.g., Docker container host names), or
            # (4) kubernetes host names
            return False

        # Bare IPv4 literals (optionally with a port) are path-style.
        match = re.match(r'^([^\[\]:]+)(:\d+)?$', host)
        if match:
            match = re.match(r'((25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)(\.|$)){4}',
                             match.groups()[0])
            if match:
                return False

        # Bracketed IPv6 literals (optionally with a port) are path-style too.
        match = re.match(r'^\[(.+)\](:\d+)?$', host)
        if match:
            match = re.match(
                r'^(((?=.*(::))(?!.*\3.+\3))\3?|[\dA-F]{1,4}:)([\dA-F]{1,4}(\3|:\b)|\2){5}(([\dA-F]{1,4}(\3|:\b|$)|\2){2}|(((2[0-4]|1\d|[1-9])?\d|25[0-5])\.?\b){4})\Z',
                match.groups()[0], re.IGNORECASE)
            if match:
                return False

        # The plain service endpoint (with or without a region) is path-style;
        # anything else on the host is treated as a bucket subdomain.
        path_based = (host == 's3.amazonaws.com' or re.match(
            r"s3[\.\-]([^.]*)\.amazonaws\.com", host))

        return not path_based
def is_delete_keys(self, request, path, bucket_name):
if self.subdomain_based_buckets(request):
return is_delete_keys(request, path, bucket_name)
else:
return bucketpath_is_delete_keys(request, path, bucket_name)
def parse_bucket_name_from_url(self, request, url):
if self.subdomain_based_buckets(request):
return bucket_name_from_url(url)
else:
return bucketpath_bucket_name_from_url(url)
def parse_key_name(self, request, url):
if self.subdomain_based_buckets(request):
return parse_key_name(url)
else:
return bucketpath_parse_key_name(url)
def ambiguous_response(self, request, full_url, headers):
# Depending on which calling format the client is using, we don't know
# if this is a bucket or key request so we have to check
if self.subdomain_based_buckets(request):
return self.key_response(request, full_url, headers)
else:
# Using path-based buckets
return self.bucket_response(request, full_url, headers)
    def bucket_response(self, request, full_url, headers):
        """Handle a bucket-level request, normalising the result to
        (status_code, headers, bytes) and mapping S3ClientError to its
        error code/description."""
        try:
            response = self._bucket_response(request, full_url, headers)
        except S3ClientError as s3error:
            response = s3error.code, {}, s3error.description

        if isinstance(response, six.string_types):
            # Bare string result implies a 200 with no extra headers.
            return 200, {}, response.encode("utf-8")
        else:
            status_code, headers, response_content = response
            if not isinstance(response_content, six.binary_type):
                response_content = response_content.encode("utf-8")

            return status_code, headers, response_content
    def _bucket_response(self, request, full_url, headers):
        """Parse the URL/body and dispatch to the per-HTTP-method bucket handler."""
        parsed_url = urlparse(full_url)
        querystring = parse_qs(parsed_url.query, keep_blank_values=True)
        method = request.method
        region_name = parse_region_from_url(full_url)

        bucket_name = self.parse_bucket_name_from_url(request, full_url)
        if not bucket_name:
            # If no bucket specified, list all buckets
            return self.all_buckets()

        # Normalise the request body to utf-8 bytes regardless of the server shim.
        if hasattr(request, 'body'):
            # Boto
            body = request.body
        else:
            # Flask server
            body = request.data
        if body is None:
            body = b''
        if isinstance(body, six.binary_type):
            body = body.decode('utf-8')
        body = u'{0}'.format(body).encode('utf-8')

        if method == 'HEAD':
            return self._bucket_response_head(bucket_name, headers)
        elif method == 'GET':
            return self._bucket_response_get(bucket_name, querystring, headers)
        elif method == 'PUT':
            return self._bucket_response_put(request, body, region_name, bucket_name, querystring, headers)
        elif method == 'DELETE':
            return self._bucket_response_delete(body, bucket_name, querystring, headers)
        elif method == 'POST':
            return self._bucket_response_post(request, body, bucket_name, headers)
        else:
            raise NotImplementedError(
                "Method {0} has not been impelemented in the S3 backend yet".format(method))
def _bucket_response_head(self, bucket_name, headers):
    """HEAD bucket: 200 if the bucket exists, a bare 404 otherwise."""
    try:
        self.backend.get_bucket(bucket_name)
    except MissingBucket:
        # A bare 404 (no XML body) keeps boto3 raising ClientError --
        # matching the real API -- instead of a NoSuchBucket exception.
        status = 404
    else:
        status = 200
    return status, {}, ""
def _bucket_response_get(self, bucket_name, querystring, headers):
    """GET on a bucket URL: dispatch on sub-resource query parameters.

    Handles ?uploads, ?location, ?lifecycle, ?versioning, ?policy,
    ?website, ?acl, ?tagging, ?logging, ?cors, ?notification,
    ?versions and list-type=2; otherwise performs a plain ListObjects
    (V1). Returns either a bare template string (treated as HTTP 200
    by the caller) or a (status, headers, body) triple.
    """
    if 'uploads' in querystring:
        for unsup in ('delimiter', 'max-uploads'):
            if unsup in querystring:
                raise NotImplementedError(
                    "Listing multipart uploads with {} has not been implemented yet.".format(unsup))
        multiparts = list(
            self.backend.get_all_multiparts(bucket_name).values())
        if 'prefix' in querystring:
            prefix = querystring.get('prefix', [None])[0]
            multiparts = [
                upload for upload in multiparts if upload.key_name.startswith(prefix)]
        template = self.response_template(S3_ALL_MULTIPARTS)
        return template.render(
            bucket_name=bucket_name,
            uploads=multiparts)
    elif 'location' in querystring:
        bucket = self.backend.get_bucket(bucket_name)
        template = self.response_template(S3_BUCKET_LOCATION)
        return template.render(location=bucket.location)
    elif 'lifecycle' in querystring:
        bucket = self.backend.get_bucket(bucket_name)
        if not bucket.rules:
            # No lifecycle configured -> 404 with an error document.
            template = self.response_template(S3_NO_LIFECYCLE)
            return 404, {}, template.render(bucket_name=bucket_name)
        template = self.response_template(
            S3_BUCKET_LIFECYCLE_CONFIGURATION)
        return template.render(rules=bucket.rules)
    elif 'versioning' in querystring:
        versioning = self.backend.get_bucket_versioning(bucket_name)
        template = self.response_template(S3_BUCKET_GET_VERSIONING)
        return template.render(status=versioning)
    elif 'policy' in querystring:
        policy = self.backend.get_bucket_policy(bucket_name)
        if not policy:
            template = self.response_template(S3_NO_POLICY)
            return 404, {}, template.render(bucket_name=bucket_name)
        # The policy is stored as the raw document; return it verbatim.
        return 200, {}, policy
    elif 'website' in querystring:
        website_configuration = self.backend.get_bucket_website_configuration(
            bucket_name)
        if not website_configuration:
            template = self.response_template(S3_NO_BUCKET_WEBSITE_CONFIG)
            return 404, {}, template.render(bucket_name=bucket_name)
        return 200, {}, website_configuration
    elif 'acl' in querystring:
        bucket = self.backend.get_bucket(bucket_name)
        # Bucket ACLs reuse the object-ACL template.
        template = self.response_template(S3_OBJECT_ACL_RESPONSE)
        return template.render(obj=bucket)
    elif 'tagging' in querystring:
        bucket = self.backend.get_bucket(bucket_name)
        # "Special Error" if no tags:
        if len(bucket.tagging.tag_set.tags) == 0:
            template = self.response_template(S3_NO_BUCKET_TAGGING)
            return 404, {}, template.render(bucket_name=bucket_name)
        template = self.response_template(S3_BUCKET_TAGGING_RESPONSE)
        return template.render(bucket=bucket)
    elif 'logging' in querystring:
        bucket = self.backend.get_bucket(bucket_name)
        if not bucket.logging:
            # Disabled logging is still a 200 with an empty config doc.
            template = self.response_template(S3_NO_LOGGING_CONFIG)
            return 200, {}, template.render()
        template = self.response_template(S3_LOGGING_CONFIG)
        return 200, {}, template.render(logging=bucket.logging)
    elif "cors" in querystring:
        bucket = self.backend.get_bucket(bucket_name)
        if len(bucket.cors) == 0:
            template = self.response_template(S3_NO_CORS_CONFIG)
            return 404, {}, template.render(bucket_name=bucket_name)
        template = self.response_template(S3_BUCKET_CORS_RESPONSE)
        return template.render(bucket=bucket)
    elif "notification" in querystring:
        bucket = self.backend.get_bucket(bucket_name)
        if not bucket.notification_configuration:
            # Real S3 answers an empty 200 when nothing is configured.
            return 200, {}, ""
        template = self.response_template(S3_GET_BUCKET_NOTIFICATION_CONFIG)
        return template.render(bucket=bucket)
    elif 'versions' in querystring:
        delimiter = querystring.get('delimiter', [None])[0]
        encoding_type = querystring.get('encoding-type', [None])[0]
        key_marker = querystring.get('key-marker', [None])[0]
        max_keys = querystring.get('max-keys', [None])[0]
        prefix = querystring.get('prefix', [''])[0]
        version_id_marker = querystring.get('version-id-marker', [None])[0]

        bucket = self.backend.get_bucket(bucket_name)
        versions = self.backend.get_bucket_versions(
            bucket_name,
            delimiter=delimiter,
            encoding_type=encoding_type,
            key_marker=key_marker,
            max_keys=max_keys,
            version_id_marker=version_id_marker,
            prefix=prefix
        )
        latest_versions = self.backend.get_bucket_latest_versions(
            bucket_name=bucket_name
        )
        key_list = []
        delete_marker_list = []
        # Split real object versions from delete markers for the template.
        for version in versions:
            if isinstance(version, FakeKey):
                key_list.append(version)
            else:
                delete_marker_list.append(version)
        template = self.response_template(S3_BUCKET_GET_VERSIONS)
        # NOTE(review): prefix/max_keys/delimiter are rendered with fixed
        # defaults here rather than the parsed values above -- confirm
        # whether that is intentional.
        return 200, {}, template.render(
            key_list=key_list,
            delete_marker_list=delete_marker_list,
            latest_versions=latest_versions,
            bucket=bucket,
            prefix='',
            max_keys=1000,
            delimiter='',
            is_truncated='false',
        )
    elif querystring.get('list-type', [None])[0] == '2':
        return 200, {}, self._handle_list_objects_v2(bucket_name, querystring)

    # Plain ListObjects (V1) with marker-based pagination.
    bucket = self.backend.get_bucket(bucket_name)
    prefix = querystring.get('prefix', [None])[0]
    if prefix and isinstance(prefix, six.binary_type):
        prefix = prefix.decode("utf-8")
    delimiter = querystring.get('delimiter', [None])[0]
    max_keys = int(querystring.get('max-keys', [1000])[0])
    marker = querystring.get('marker', [None])[0]
    result_keys, result_folders = self.backend.prefix_query(
        bucket, prefix, delimiter)

    if marker:
        result_keys = self._get_results_from_token(result_keys, marker)

    result_keys, is_truncated, _ = self._truncate_result(result_keys, max_keys)

    template = self.response_template(S3_BUCKET_GET_RESPONSE)
    return 200, {}, template.render(
        bucket=bucket,
        prefix=prefix,
        delimiter=delimiter,
        result_keys=result_keys,
        result_folders=result_folders,
        is_truncated=is_truncated,
        max_keys=max_keys
    )
def _handle_list_objects_v2(self, bucket_name, querystring):
    """Render a ListObjectsV2 response body.

    Applies prefix/delimiter filtering via the backend, then paginates:
    continuation-token (or, failing that, start-after) selects the
    window start and max-keys bounds the page size. Returns the
    rendered XML string.
    """
    template = self.response_template(S3_BUCKET_GET_RESPONSE_V2)
    bucket = self.backend.get_bucket(bucket_name)

    prefix = querystring.get('prefix', [None])[0]
    if prefix and isinstance(prefix, six.binary_type):
        prefix = prefix.decode("utf-8")
    delimiter = querystring.get('delimiter', [None])[0]
    result_keys, result_folders = self.backend.prefix_query(
        bucket, prefix, delimiter)

    fetch_owner = querystring.get('fetch-owner', [False])[0]
    max_keys = int(querystring.get('max-keys', [1000])[0])
    continuation_token = querystring.get('continuation-token', [None])[0]
    start_after = querystring.get('start-after', [None])[0]

    if continuation_token or start_after:
        # A continuation token takes precedence over start-after.
        limit = continuation_token or start_after
        result_keys = self._get_results_from_token(result_keys, limit)

    result_keys, is_truncated, next_continuation_token = self._truncate_result(result_keys, max_keys)

    return template.render(
        bucket=bucket,
        prefix=prefix or '',
        delimiter=delimiter,
        result_keys=result_keys,
        result_folders=result_folders,
        fetch_owner=fetch_owner,
        max_keys=max_keys,
        is_truncated=is_truncated,
        next_continuation_token=next_continuation_token,
        start_after=None if continuation_token else start_after
    )
def _get_results_from_token(self, result_keys, token):
continuation_index = 0
for key in result_keys:
if key.name > token:
break
continuation_index += 1
return result_keys[continuation_index:]
def _truncate_result(self, result_keys, max_keys):
if len(result_keys) > max_keys:
is_truncated = 'true'
result_keys = result_keys[:max_keys]
next_continuation_token = result_keys[-1].name
else:
is_truncated = 'false'
next_continuation_token = None
return result_keys, is_truncated, next_continuation_token
def _bucket_response_put(self, request, body, region_name, bucket_name, querystring, headers):
    """PUT on a bucket URL: set a sub-resource or create the bucket.

    Dispatches on the querystring (?versioning, ?lifecycle, ?policy,
    ?acl, ?tagging, ?website, ?cors, ?logging, ?notification); with no
    sub-resource, creates the bucket itself, honoring an optional
    CreateBucketConfiguration LocationConstraint in the body.
    """
    if not request.headers.get('Content-Length'):
        return 411, {}, "Content-Length required"
    if 'versioning' in querystring:
        ver = re.search('<Status>([A-Za-z]+)</Status>', body.decode())
        if ver:
            self.backend.set_bucket_versioning(bucket_name, ver.group(1))
            template = self.response_template(S3_BUCKET_VERSIONING)
            return template.render(bucket_versioning_status=ver.group(1))
        else:
            return 404, {}, ""
    elif 'lifecycle' in querystring:
        rules = xmltodict.parse(body)['LifecycleConfiguration']['Rule']
        if not isinstance(rules, list):
            # If there is only one rule, xmldict returns just the item
            rules = [rules]
        self.backend.set_bucket_lifecycle(bucket_name, rules)
        return ""
    elif 'policy' in querystring:
        self.backend.set_bucket_policy(bucket_name, body)
        return 'True'
    elif 'acl' in querystring:
        # Headers are first. If not set, then look at the body (consistent with the documentation):
        acls = self._acl_from_headers(request.headers)
        if not acls:
            acls = self._acl_from_xml(body)
        self.backend.set_bucket_acl(bucket_name, acls)
        return ""
    elif "tagging" in querystring:
        tagging = self._bucket_tagging_from_xml(body)
        self.backend.put_bucket_tagging(bucket_name, tagging)
        return ""
    elif 'website' in querystring:
        self.backend.set_bucket_website_configuration(bucket_name, body)
        return ""
    elif "cors" in querystring:
        try:
            self.backend.put_bucket_cors(bucket_name, self._cors_from_xml(body))
            return ""
        except KeyError:
            # Missing required elements in the CORS document.
            raise MalformedXML()
    elif "logging" in querystring:
        try:
            self.backend.put_bucket_logging(bucket_name, self._logging_from_xml(body))
            return ""
        except KeyError:
            raise MalformedXML()
    elif "notification" in querystring:
        try:
            self.backend.put_bucket_notification_configuration(bucket_name,
                                                               self._notification_config_from_xml(body))
            return ""
        except KeyError:
            # Structural problems in the notification document map to
            # MalformedXML; anything else propagates unchanged.
            raise MalformedXML()
        except Exception as e:
            raise e
    else:
        if body:
            # The request body may carry a region; fall back to the
            # URL-derived region_name when it is absent.
            try:
                region_name = xmltodict.parse(body)['CreateBucketConfiguration']['LocationConstraint']
            except KeyError:
                pass
        try:
            new_bucket = self.backend.create_bucket(
                bucket_name, region_name)
        except BucketAlreadyExists:
            if region_name == DEFAULT_REGION_NAME:
                # us-east-1 has different behavior
                new_bucket = self.backend.get_bucket(bucket_name)
            else:
                raise
        if 'x-amz-acl' in request.headers:
            # TODO: Support the XML-based ACL format
            self.backend.set_bucket_acl(bucket_name, self._acl_from_headers(request.headers))
        template = self.response_template(S3_BUCKET_CREATE_RESPONSE)
        return 200, {}, template.render(bucket=new_bucket)
def _bucket_response_delete(self, body, bucket_name, querystring, headers):
    """DELETE on a bucket URL: remove a sub-resource or the bucket.

    ?policy, ?tagging, ?cors and ?lifecycle each delete the matching
    configuration (204); otherwise the bucket itself is deleted, which
    fails with 409 when it still contains keys.
    """
    if 'policy' in querystring:
        self.backend.delete_bucket_policy(bucket_name, body)
        return 204, {}, ""
    if "tagging" in querystring:
        self.backend.delete_bucket_tagging(bucket_name)
        return 204, {}, ""
    if "cors" in querystring:
        self.backend.delete_bucket_cors(bucket_name)
        return 204, {}, ""
    if 'lifecycle' in querystring:
        self.backend.get_bucket(bucket_name).delete_lifecycle()
        return 204, {}, ""

    removed_bucket = self.backend.delete_bucket(bucket_name)
    if not removed_bucket:
        # Deletion refused: the bucket still has keys in it.
        template = self.response_template(
            S3_DELETE_BUCKET_WITH_ITEMS_ERROR)
        return 409, {}, template.render(bucket=removed_bucket)

    template = self.response_template(S3_DELETE_BUCKET_SUCCESS)
    return 204, {}, template.render(bucket=removed_bucket)
def _bucket_response_post(self, request, body, bucket_name, headers):
    """POST to a bucket URL: bulk key delete or form-based key upload.

    A ?delete-style request is routed to _bucket_response_delete_keys;
    otherwise the multipart/urlencoded form is parsed and a new key is
    created from its 'key'/'file' fields.
    """
    if not request.headers.get('Content-Length'):
        return 411, {}, "Content-Length required"

    if isinstance(request, HTTPrettyRequest):
        path = request.path
    else:
        path = request.full_path if hasattr(request, 'full_path') else request.path_url

    if self.is_delete_keys(request, path, bucket_name):
        return self._bucket_response_delete_keys(request, body, bucket_name, headers)

    # POST to bucket-url should create file from form
    if hasattr(request, 'form'):
        # Not HTTPretty
        form = request.form
    else:
        # HTTPretty, build new form object
        body = body.decode()

        form = {}
        for kv in body.split('&'):
            # Bug fix: split on the first '=' only, so values that
            # themselves contain '=' (e.g. base64-encoded policies) no
            # longer raise ValueError.
            k, v = kv.split('=', 1)
            form[k] = v

    key = form['key']
    if 'file' in form:
        f = form['file']
    else:
        f = request.files['file'].stream.read()

    new_key = self.backend.set_key(bucket_name, key, f)

    # Metadata
    metadata = metadata_from_headers(form)
    new_key.set_metadata(metadata)

    return 200, {}, ""
def _bucket_response_delete_keys(self, request, body, bucket_name, headers):
    """Multi-object delete: parse <Key> elements and delete each one.

    Renders a response listing the keys that were deleted and those
    that could not be.
    """
    deleted_names, error_names = [], []
    for element in minidom.parseString(body).getElementsByTagName('Key'):
        key_name = element.firstChild.nodeValue
        if self.backend.delete_key(bucket_name, key_name):
            deleted_names.append(key_name)
        else:
            error_names.append(key_name)

    template = self.response_template(S3_DELETE_KEYS_RESPONSE)
    return 200, {}, template.render(
        deleted=deleted_names, delete_errors=error_names)
def _handle_range_header(self, request, headers, response_content):
response_headers = {}
length = len(response_content)
last = length - 1
_, rspec = request.headers.get('range').split('=')
if ',' in rspec:
raise NotImplementedError(
"Multiple range specifiers not supported")
def toint(i):
return int(i) if i else None
begin, end = map(toint, rspec.split('-'))
if begin is not None: # byte range
end = last if end is None else min(end, last)
elif end is not None: # suffix byte range
begin = length - min(end, length)
end = last
else:
return 400, response_headers, ""
if begin < 0 or end > last or begin > min(end, last):
return 416, response_headers, ""
response_headers['content-range'] = "bytes {0}-{1}/{2}".format(
begin, end, length)
return 206, response_headers, response_content[begin:end + 1]
def key_response(self, request, full_url, headers):
    """Entry point for key-level requests.

    Normalizes the handler's result into (status, headers, content)
    and applies Range handling to successful responses when the
    request carries a range header.
    """
    response_headers = {}
    try:
        result = self._key_response(request, full_url, headers)
    except S3ClientError as err:
        result = err.code, {}, err.description

    if isinstance(result, six.string_types):
        # A bare string from the handler means a plain 200 response.
        status_code, response_content = 200, result
    else:
        status_code, response_headers, response_content = result

    if status_code == 200 and 'range' in request.headers:
        return self._handle_range_header(request, response_headers, response_content)
    return status_code, response_headers, response_content
def _key_response(self, request, full_url, headers):
    """Dispatch a key-level request to the verb-specific handler.

    Also enforces a minimal public-access check: anonymous requests
    (no Authorization header) are denied with 403 unless the key's ACL
    grants public read or the URL carries a Signature parameter.
    """
    parsed_url = urlparse(full_url)
    query = parse_qs(parsed_url.query, keep_blank_values=True)
    method = request.method

    key_name = self.parse_key_name(request, parsed_url.path)
    bucket_name = self.parse_bucket_name_from_url(request, full_url)

    # Because we patch the requests library the boto/boto3 API
    # requests go through this method but so do
    # `requests.get("https://bucket-name.s3.amazonaws.com/file-name")`
    # Here we deny public access to private files by checking the
    # ACL and checking for the mere presence of an Authorization
    # header.
    if 'Authorization' not in request.headers:
        # Bug fix: signed_url was previously assigned only inside the
        # hasattr() branches, raising NameError for request objects with
        # neither attribute. Default to "not pre-signed".
        signed_url = False
        if hasattr(request, 'url'):
            signed_url = 'Signature=' in request.url
        elif hasattr(request, 'requestline'):
            signed_url = 'Signature=' in request.path
        key = self.backend.get_key(bucket_name, key_name)

        if key:
            if not key.acl.public_read and not signed_url:
                return 403, {}, ""

    if hasattr(request, 'body'):
        # Boto
        body = request.body
        if hasattr(body, 'read'):
            body = body.read()
    else:
        # Flask server
        body = request.data
    if body is None:
        body = b''

    if method == 'GET':
        return self._key_response_get(bucket_name, query, key_name, headers)
    elif method == 'PUT':
        return self._key_response_put(request, body, bucket_name, query, key_name, headers)
    elif method == 'HEAD':
        return self._key_response_head(bucket_name, query, key_name, headers=request.headers)
    elif method == 'DELETE':
        return self._key_response_delete(bucket_name, query, key_name, headers)
    elif method == 'POST':
        return self._key_response_post(request, body, bucket_name, query, key_name, headers)
    else:
        raise NotImplementedError(
            "Method {0} has not been implemented in the S3 backend yet".format(method))
def _key_response_get(self, bucket_name, query, key_name, headers):
    """GET on a key: multipart part listing, ACL, tagging, or content.

    ?uploadId lists the parts of an in-progress multipart upload;
    ?acl / ?tagging render the respective XML documents; otherwise the
    (optionally version-specific) object bytes are returned together
    with the key's metadata and standard response headers. Raises
    MissingKey for unknown keys.
    """
    response_headers = {}
    if query.get('uploadId'):
        upload_id = query['uploadId'][0]
        parts = self.backend.list_multipart(bucket_name, upload_id)
        template = self.response_template(S3_MULTIPART_LIST_RESPONSE)
        return 200, response_headers, template.render(
            bucket_name=bucket_name,
            key_name=key_name,
            upload_id=upload_id,
            count=len(parts),
            parts=parts
        )
    version_id = query.get('versionId', [None])[0]
    key = self.backend.get_key(
        bucket_name, key_name, version_id=version_id)
    if key is None:
        raise MissingKey(key_name)
    if 'acl' in query:
        template = self.response_template(S3_OBJECT_ACL_RESPONSE)
        return 200, response_headers, template.render(obj=key)
    if 'tagging' in query:
        template = self.response_template(S3_OBJECT_TAGGING_RESPONSE)
        return 200, response_headers, template.render(obj=key)

    response_headers.update(key.metadata)
    response_headers.update(key.response_dict)
    return 200, response_headers, key.value
def _key_response_put(self, request, body, bucket_name, query, key_name, headers):
    """PUT on a key: part upload, ACL, tagging, copy, or plain upload.

    Order matters: multipart part uploads (?uploadId&partNumber,
    including x-amz-copy-source part copies) are handled first, then
    ?acl / ?tagging sub-resources, then whole-object copies, and
    finally regular (possibly streaming/chunked) object uploads.
    """
    response_headers = {}
    if query.get('uploadId') and query.get('partNumber'):
        upload_id = query['uploadId'][0]
        part_number = int(query['partNumber'][0])
        if 'x-amz-copy-source' in request.headers:
            # UploadPartCopy: source is "<bucket>/<key>", optionally
            # restricted to a byte range.
            src = unquote(request.headers.get("x-amz-copy-source")).lstrip("/")
            src_bucket, src_key = src.split("/", 1)
            src_range = request.headers.get(
                'x-amz-copy-source-range', '').split("bytes=")[-1]

            try:
                start_byte, end_byte = src_range.split("-")
                start_byte, end_byte = int(start_byte), int(end_byte)
            except ValueError:
                # No (or unparseable) range header: copy the whole part.
                start_byte, end_byte = None, None

            key = self.backend.copy_part(
                bucket_name, upload_id, part_number, src_bucket,
                src_key, start_byte, end_byte)
            template = self.response_template(S3_MULTIPART_UPLOAD_RESPONSE)
            response = template.render(part=key)
        else:
            key = self.backend.set_part(
                bucket_name, upload_id, part_number, body)
            response = ""
        response_headers.update(key.response_dict)
        return 200, response_headers, response

    storage_class = request.headers.get('x-amz-storage-class', 'STANDARD')
    acl = self._acl_from_headers(request.headers)
    if acl is None:
        # No explicit ACL headers: inherit the bucket's ACL.
        acl = self.backend.get_bucket(bucket_name).acl
    tagging = self._tagging_from_headers(request.headers)

    if 'acl' in query:
        key = self.backend.get_key(bucket_name, key_name)
        # TODO: Support the XML-based ACL format
        key.set_acl(acl)
        return 200, response_headers, ""

    if 'tagging' in query:
        tagging = self._tagging_from_xml(body)
        self.backend.set_key_tagging(bucket_name, key_name, tagging)
        return 200, response_headers, ""

    if 'x-amz-copy-source' in request.headers:
        # Copy key
        # you can have a quoted ?version=abc with a version Id, so work on
        # we need to parse the unquoted string first
        src_key_parsed = urlparse(request.headers.get("x-amz-copy-source"))
        src_bucket, src_key = unquote(src_key_parsed.path).\
            lstrip("/").split("/", 1)
        src_version_id = parse_qs(src_key_parsed.query).get(
            'versionId', [None])[0]
        self.backend.copy_key(src_bucket, src_key, bucket_name, key_name,
                              storage=storage_class, acl=acl, src_version_id=src_version_id)
        new_key = self.backend.get_key(bucket_name, key_name)
        mdirective = request.headers.get('x-amz-metadata-directive')
        if mdirective is not None and mdirective == 'REPLACE':
            # REPLACE swaps the copied metadata for the request's own.
            metadata = metadata_from_headers(request.headers)
            new_key.set_metadata(metadata, replace=True)
        template = self.response_template(S3_OBJECT_COPY_RESPONSE)
        response_headers.update(new_key.response_dict)
        return 200, response_headers, template.render(key=new_key)

    streaming_request = hasattr(request, 'streaming') and request.streaming
    closing_connection = headers.get('connection') == 'close'
    if closing_connection and streaming_request:
        # Closing the connection of a streaming request. No more data
        new_key = self.backend.get_key(bucket_name, key_name)
    elif streaming_request:
        # Streaming request, more data
        new_key = self.backend.append_to_key(bucket_name, key_name, body)
    else:
        # Initial data
        new_key = self.backend.set_key(bucket_name, key_name, body,
                                       storage=storage_class)
        request.streaming = True
        metadata = metadata_from_headers(request.headers)
        new_key.set_metadata(metadata)
        new_key.set_acl(acl)
        new_key.website_redirect_location = request.headers.get('x-amz-website-redirect-location')
        new_key.set_tagging(tagging)

    template = self.response_template(S3_OBJECT_RESPONSE)
    response_headers.update(new_key.response_dict)
    return 200, response_headers, template.render(key=new_key)
def _key_response_head(self, bucket_name, query, key_name, headers):
    """HEAD on a key: metadata headers only, honoring If-Modified-Since.

    Returns 200 with the key's metadata/response headers, 304 when the
    key was not modified after the If-Modified-Since date, or 404 when
    the (optionally version-specific) key does not exist.
    """
    response_headers = {}
    version_id = query.get('versionId', [None])[0]

    if_modified_since = headers.get('If-Modified-Since', None)
    if if_modified_since:
        if_modified_since = str_to_rfc_1123_datetime(if_modified_since)

    key = self.backend.get_key(
        bucket_name, key_name, version_id=version_id)
    if key:
        response_headers.update(key.metadata)
        response_headers.update(key.response_dict)

        if if_modified_since and key.last_modified < if_modified_since:
            return 304, response_headers, 'Not Modified'
        else:
            return 200, response_headers, ""
    else:
        return 404, response_headers, ""
def _acl_from_xml(self, xml):
    """Parse an <AccessControlPolicy> document into a FakeAcl.

    Raises MalformedACLError for structurally invalid documents.
    NOTE(review): an empty AccessControlList returns a plain list []
    rather than a FakeAcl -- callers appear to tolerate both; confirm.
    """
    parsed_xml = xmltodict.parse(xml)
    if not parsed_xml.get("AccessControlPolicy"):
        raise MalformedACLError()

    # The owner is needed for some reason...
    if not parsed_xml["AccessControlPolicy"].get("Owner"):
        # TODO: Validate that the Owner is actually correct.
        raise MalformedACLError()

    # If empty, then no ACLs:
    if parsed_xml["AccessControlPolicy"].get("AccessControlList") is None:
        return []

    if not parsed_xml["AccessControlPolicy"]["AccessControlList"].get("Grant"):
        raise MalformedACLError()

    permissions = [
        "READ",
        "WRITE",
        "READ_ACP",
        "WRITE_ACP",
        "FULL_CONTROL"
    ]

    # xmltodict collapses a single <Grant> into a dict; normalize to a list.
    if not isinstance(parsed_xml["AccessControlPolicy"]["AccessControlList"]["Grant"], list):
        parsed_xml["AccessControlPolicy"]["AccessControlList"]["Grant"] = \
            [parsed_xml["AccessControlPolicy"]["AccessControlList"]["Grant"]]

    grants = self._get_grants_from_xml(parsed_xml["AccessControlPolicy"]["AccessControlList"]["Grant"],
                                       MalformedACLError, permissions)
    return FakeAcl(grants)
def _get_grants_from_xml(self, grant_list, exception_type, permissions):
grants = []
for grant in grant_list:
if grant.get("Permission", "") not in permissions:
raise exception_type()
if grant["Grantee"].get("@xsi:type", "") not in ["CanonicalUser", "AmazonCustomerByEmail", "Group"]:
raise exception_type()
# TODO: Verify that the proper grantee data is supplied based on the type.
grants.append(FakeGrant(
[FakeGrantee(id=grant["Grantee"].get("ID", ""), display_name=grant["Grantee"].get("DisplayName", ""),
uri=grant["Grantee"].get("URI", ""))],
[grant["Permission"]])
)
return grants
def _acl_from_headers(self, headers):
canned_acl = headers.get('x-amz-acl', '')
if canned_acl:
return get_canned_acl(canned_acl)
grants = []
for header, value in headers.items():
if not header.startswith('x-amz-grant-'):
continue
permission = {
'read': 'READ',
'write': 'WRITE',
'read-acp': 'READ_ACP',
'write-acp': 'WRITE_ACP',
'full-control': 'FULL_CONTROL',
}[header[len('x-amz-grant-'):]]
grantees = []
for key_and_value in value.split(","):
key, value = re.match(
'([^=]+)="([^"]+)"', key_and_value.strip()).groups()
if key.lower() == 'id':
grantees.append(FakeGrantee(id=value))
else:
grantees.append(FakeGrantee(uri=value))
grants.append(FakeGrant(grantees, [permission]))
if grants:
return FakeAcl(grants)
else:
return None
def _tagging_from_headers(self, headers):
    """Parse the x-amz-tagging header (querystring format) into FakeTagging."""
    header_value = headers.get('x-amz-tagging')
    if not header_value:
        # No tagging header: return an empty tagging object.
        return FakeTagging()
    parsed_header = parse_qs(header_value, keep_blank_values=True)
    tags = [FakeTag(name, values[0]) for name, values in parsed_header.items()]
    return FakeTagging(FakeTagSet(tags))
def _tagging_from_xml(self, xml):
    """Parse a <Tagging> XML document into a FakeTagging object."""
    # force_list guarantees Tag is always a list, even for one element.
    parsed_xml = xmltodict.parse(xml, force_list={'Tag': True})
    tags = [FakeTag(tag['Key'], tag['Value'])
            for tag in parsed_xml['Tagging']['TagSet']['Tag']]
    return FakeTagging(FakeTagSet(tags))
def _bucket_tagging_from_xml(self, xml):
    """Parse a bucket <Tagging> document; tolerates an absent TagSet."""
    parsed_xml = xmltodict.parse(xml)

    tags = []
    # Optional if no tags are being sent:
    if parsed_xml['Tagging'].get('TagSet'):
        tag_node = parsed_xml['Tagging']['TagSet']['Tag']
        # xmltodict collapses a single <Tag> into a plain dict.
        if not isinstance(tag_node, list):
            tag_node = [tag_node]
        tags = [FakeTag(tag['Key'], tag['Value']) for tag in tag_node]

    return FakeTagging(FakeTagSet(tags))
def _cors_from_xml(self, xml):
    """Parse a <CORSConfiguration> document into a list of rule dicts.

    xmltodict collapses a single <CORSRule> into a plain dict, so wrap
    it in a list; a multi-rule document already parses as a list.
    """
    parsed_xml = xmltodict.parse(xml)
    rules = parsed_xml["CORSConfiguration"]["CORSRule"]
    if isinstance(rules, list):
        # list(...) replaces the previous no-op identity comprehension,
        # still copying so callers can't mutate the parsed document.
        return list(rules)
    return [rules]
def _logging_from_xml(self, xml):
    """Parse a <BucketLoggingStatus> document.

    Returns {} when logging is disabled; otherwise returns the
    LoggingEnabled dict with TargetPrefix defaulted to "" and any
    TargetGrants converted to FakeGrant objects. Raises MalformedXML
    when TargetBucket is missing.
    """
    parsed_xml = xmltodict.parse(xml)

    if not parsed_xml["BucketLoggingStatus"].get("LoggingEnabled"):
        return {}

    if not parsed_xml["BucketLoggingStatus"]["LoggingEnabled"].get("TargetBucket"):
        raise MalformedXML()

    if not parsed_xml["BucketLoggingStatus"]["LoggingEnabled"].get("TargetPrefix"):
        parsed_xml["BucketLoggingStatus"]["LoggingEnabled"]["TargetPrefix"] = ""

    # Get the ACLs:
    if parsed_xml["BucketLoggingStatus"]["LoggingEnabled"].get("TargetGrants"):
        permissions = [
            "READ",
            "WRITE",
            "FULL_CONTROL"
        ]
        # xmltodict collapses a single <Grant> into a dict; wrap it in a
        # list before handing off to the shared grant parser.
        if not isinstance(parsed_xml["BucketLoggingStatus"]["LoggingEnabled"]["TargetGrants"]["Grant"], list):
            target_grants = self._get_grants_from_xml(
                [parsed_xml["BucketLoggingStatus"]["LoggingEnabled"]["TargetGrants"]["Grant"]],
                MalformedXML,
                permissions
            )
        else:
            target_grants = self._get_grants_from_xml(
                parsed_xml["BucketLoggingStatus"]["LoggingEnabled"]["TargetGrants"]["Grant"],
                MalformedXML,
                permissions
            )

        parsed_xml["BucketLoggingStatus"]["LoggingEnabled"]["TargetGrants"] = target_grants

    return parsed_xml["BucketLoggingStatus"]["LoggingEnabled"]
def _notification_config_from_xml(self, xml):
    """Parse and validate a <NotificationConfiguration> document.

    Validates each Topic/Queue/CloudFunction configuration's ARN
    prefix, event names and S3Key filter rules, normalizing singleton
    nodes into lists in place. Returns the (mutated) parsed dict, or
    {} when no configurations are present. Raises
    InvalidNotificationARN / InvalidNotificationEvent on bad values;
    assert/KeyError failures are mapped to MalformedXML by the caller.
    """
    parsed_xml = xmltodict.parse(xml)

    if not len(parsed_xml["NotificationConfiguration"]):
        return {}

    # The types of notifications, and their required fields (apparently lambda is categorized by the API as
    # "CloudFunction"):
    notification_fields = [
        ("Topic", "sns"),
        ("Queue", "sqs"),
        ("CloudFunction", "lambda")
    ]

    event_names = [
        's3:ReducedRedundancyLostObject',
        's3:ObjectCreated:*',
        's3:ObjectCreated:Put',
        's3:ObjectCreated:Post',
        's3:ObjectCreated:Copy',
        's3:ObjectCreated:CompleteMultipartUpload',
        's3:ObjectRemoved:*',
        's3:ObjectRemoved:Delete',
        's3:ObjectRemoved:DeleteMarkerCreated'
    ]

    found_notifications = 0  # Tripwire -- if this is not ever set, then there were no notifications
    for name, arn_string in notification_fields:
        # 1st verify that the proper notification configuration has been passed in (with an ARN that is close
        # to being correct -- nothing too complex in the ARN logic):
        the_notification = parsed_xml["NotificationConfiguration"].get("{}Configuration".format(name))
        if the_notification:
            found_notifications += 1
            # Normalize a singleton configuration into a list, both in the
            # local variable and back into the parsed document.
            if not isinstance(the_notification, list):
                the_notification = parsed_xml["NotificationConfiguration"]["{}Configuration".format(name)] \
                    = [the_notification]

            for n in the_notification:
                if not n[name].startswith("arn:aws:{}:".format(arn_string)):
                    raise InvalidNotificationARN()

                # 2nd, verify that the Events list is correct:
                assert n["Event"]
                if not isinstance(n["Event"], list):
                    n["Event"] = [n["Event"]]

                for event in n["Event"]:
                    if event not in event_names:
                        raise InvalidNotificationEvent()

                # Parse out the filters:
                if n.get("Filter"):
                    # Error if S3Key is blank:
                    if not n["Filter"]["S3Key"]:
                        raise KeyError()

                    if not isinstance(n["Filter"]["S3Key"]["FilterRule"], list):
                        n["Filter"]["S3Key"]["FilterRule"] = [n["Filter"]["S3Key"]["FilterRule"]]

                    for filter_rule in n["Filter"]["S3Key"]["FilterRule"]:
                        assert filter_rule["Name"] in ["suffix", "prefix"]
                        assert filter_rule["Value"]

    if not found_notifications:
        return {}

    return parsed_xml["NotificationConfiguration"]
def _key_response_delete(self, bucket_name, query, key_name, headers):
    """DELETE on a key: abort a multipart upload or delete the key."""
    upload_id_values = query.get('uploadId')
    if upload_id_values:
        # Aborting an in-progress multipart upload.
        self.backend.cancel_multipart(bucket_name, upload_id_values[0])
        return 204, {}, ""

    version_id = query.get('versionId', [None])[0]
    self.backend.delete_key(bucket_name, key_name, version_id=version_id)
    template = self.response_template(S3_DELETE_OBJECT_SUCCESS)
    return 204, {}, template.render()
def _complete_multipart_body(self, body):
ps = minidom.parseString(body).getElementsByTagName('Part')
prev = 0
for p in ps:
pn = int(p.getElementsByTagName(
'PartNumber')[0].firstChild.wholeText)
if pn <= prev:
raise InvalidPartOrder()
yield (pn, p.getElementsByTagName('ETag')[0].firstChild.wholeText)
def _key_response_post(self, request, body, bucket_name, query, key_name, headers):
    """POST on a key: multipart initiate/complete, or restore.

    An empty body with ?uploads initiates a multipart upload;
    ?uploadId completes one; ?restore triggers a Glacier-style restore
    for the requested number of Days (202 on first restore, 200 when
    the key is already restored). Other POSTs are unimplemented.
    """
    if body == b'' and 'uploads' in query:
        metadata = metadata_from_headers(request.headers)
        multipart = self.backend.initiate_multipart(
            bucket_name, key_name, metadata)

        template = self.response_template(S3_MULTIPART_INITIATE_RESPONSE)
        response = template.render(
            bucket_name=bucket_name,
            key_name=key_name,
            upload_id=multipart.id,
        )
        return 200, {}, response

    if query.get('uploadId'):
        # Completion body is a lazily-parsed (part_number, etag) stream.
        body = self._complete_multipart_body(body)
        upload_id = query['uploadId'][0]
        key = self.backend.complete_multipart(bucket_name, upload_id, body)
        template = self.response_template(S3_MULTIPART_COMPLETE_RESPONSE)
        return template.render(
            bucket_name=bucket_name,
            key_name=key.name,
            etag=key.etag,
        )
    elif 'restore' in query:
        es = minidom.parseString(body).getElementsByTagName('Days')
        days = es[0].childNodes[0].wholeText
        key = self.backend.get_key(bucket_name, key_name)
        r = 202
        if key.expiry_date is not None:
            # Already restored: real S3 answers 200 instead of 202.
            r = 200
        key.restore(int(days))
        return r, {}, ""
    else:
        raise NotImplementedError(
            "Method POST had only been implemented for multipart uploads and restore operations, so far")
# Module-level singleton handler bound to the shared in-memory s3_backend
# (presumably referenced by the URL routing module — confirm against urls.py).
S3ResponseInstance = ResponseObject(s3_backend)
# GET Service: list all buckets for the single canned account owner.
S3_ALL_BUCKETS = """<ListAllMyBucketsResult xmlns="http://s3.amazonaws.com/doc/2006-03-01">
<Owner>
<ID>bcaf1ffd86f41161ca5fb16fd081034f</ID>
<DisplayName>webfile</DisplayName>
</Owner>
<Buckets>
{% for bucket in buckets %}
<Bucket>
<Name>{{ bucket.name }}</Name>
<CreationDate>2006-02-03T16:45:09.000Z</CreationDate>
</Bucket>
{% endfor %}
</Buckets>
</ListAllMyBucketsResult>"""

# GET Bucket (ListObjects v1): keys plus CommonPrefixes when a delimiter is set.
S3_BUCKET_GET_RESPONSE = """<?xml version="1.0" encoding="UTF-8"?>
<ListBucketResult xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
<Name>{{ bucket.name }}</Name>
<Prefix>{{ prefix }}</Prefix>
<MaxKeys>{{ max_keys }}</MaxKeys>
<Delimiter>{{ delimiter }}</Delimiter>
<IsTruncated>{{ is_truncated }}</IsTruncated>
{% for key in result_keys %}
<Contents>
<Key>{{ key.name }}</Key>
<LastModified>{{ key.last_modified_ISO8601 }}</LastModified>
<ETag>{{ key.etag }}</ETag>
<Size>{{ key.size }}</Size>
<StorageClass>{{ key.storage_class }}</StorageClass>
<Owner>
<ID>75aa57f09aa0c8caeab4f8c24e99d10f8e7faeebf76c078efc7c6caea54ba06a</ID>
<DisplayName>webfile</DisplayName>
</Owner>
</Contents>
{% endfor %}
{% if delimiter %}
{% for folder in result_folders %}
<CommonPrefixes>
<Prefix>{{ folder }}</Prefix>
</CommonPrefixes>
{% endfor %}
{% endif %}
</ListBucketResult>"""
# GET Bucket (ListObjectsV2): adds KeyCount / continuation token / StartAfter,
# and only emits Owner when fetch-owner was requested.
S3_BUCKET_GET_RESPONSE_V2 = """<?xml version="1.0" encoding="UTF-8"?>
<ListBucketResult xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
<Name>{{ bucket.name }}</Name>
<Prefix>{{ prefix }}</Prefix>
<MaxKeys>{{ max_keys }}</MaxKeys>
<KeyCount>{{ result_keys | length }}</KeyCount>
{% if delimiter %}
<Delimiter>{{ delimiter }}</Delimiter>
{% endif %}
<IsTruncated>{{ is_truncated }}</IsTruncated>
{% if next_continuation_token %}
<NextContinuationToken>{{ next_continuation_token }}</NextContinuationToken>
{% endif %}
{% if start_after %}
<StartAfter>{{ start_after }}</StartAfter>
{% endif %}
{% for key in result_keys %}
<Contents>
<Key>{{ key.name }}</Key>
<LastModified>{{ key.last_modified_ISO8601 }}</LastModified>
<ETag>{{ key.etag }}</ETag>
<Size>{{ key.size }}</Size>
<StorageClass>{{ key.storage_class }}</StorageClass>
{% if fetch_owner %}
<Owner>
<ID>75aa57f09aa0c8caeab4f8c24e99d10f8e7faeebf76c078efc7c6caea54ba06a</ID>
<DisplayName>webfile</DisplayName>
</Owner>
{% endif %}
</Contents>
{% endfor %}
{% if delimiter %}
{% for folder in result_folders %}
<CommonPrefixes>
<Prefix>{{ folder }}</Prefix>
</CommonPrefixes>
{% endfor %}
{% endif %}
</ListBucketResult>"""
# PUT Bucket success response.
S3_BUCKET_CREATE_RESPONSE = """<CreateBucketResponse xmlns="http://s3.amazonaws.com/doc/2006-03-01">
<CreateBucketResponse>
<Bucket>{{ bucket.name }}</Bucket>
</CreateBucketResponse>
</CreateBucketResponse>"""

# DELETE Bucket success response (204 No Content body).
S3_DELETE_BUCKET_SUCCESS = """<DeleteBucketResponse xmlns="http://s3.amazonaws.com/doc/2006-03-01">
<DeleteBucketResponse>
<Code>204</Code>
<Description>No Content</Description>
</DeleteBucketResponse>
</DeleteBucketResponse>"""

# Error returned when deleting a non-empty bucket.
S3_DELETE_BUCKET_WITH_ITEMS_ERROR = """<?xml version="1.0" encoding="UTF-8"?>
<Error><Code>BucketNotEmpty</Code>
<Message>The bucket you tried to delete is not empty</Message>
<BucketName>{{ bucket.name }}</BucketName>
<RequestId>asdfasdfsdafds</RequestId>
<HostId>sdfgdsfgdsfgdfsdsfgdfs</HostId>
</Error>"""

# GET Bucket location response.
S3_BUCKET_LOCATION = """<?xml version="1.0" encoding="UTF-8"?>
<LocationConstraint xmlns="http://s3.amazonaws.com/doc/2006-03-01/">{{ location }}</LocationConstraint>"""
# GET Bucket lifecycle: rules with optional Filter (prefix/tag/And),
# Transition, Expiration, noncurrent-version and abort-multipart settings.
S3_BUCKET_LIFECYCLE_CONFIGURATION = """<?xml version="1.0" encoding="UTF-8"?>
<LifecycleConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
{% for rule in rules %}
<Rule>
<ID>{{ rule.id }}</ID>
{% if rule.filter %}
<Filter>
<Prefix>{{ rule.filter.prefix }}</Prefix>
{% if rule.filter.tag %}
<Tag>
<Key>{{ rule.filter.tag.key }}</Key>
<Value>{{ rule.filter.tag.value }}</Value>
</Tag>
{% endif %}
{% if rule.filter.and_filter %}
<And>
<Prefix>{{ rule.filter.and_filter.prefix }}</Prefix>
{% for tag in rule.filter.and_filter.tags %}
<Tag>
<Key>{{ tag.key }}</Key>
<Value>{{ tag.value }}</Value>
</Tag>
{% endfor %}
</And>
{% endif %}
</Filter>
{% else %}
<Prefix>{{ rule.prefix if rule.prefix != None }}</Prefix>
{% endif %}
<Status>{{ rule.status }}</Status>
{% if rule.storage_class %}
<Transition>
{% if rule.transition_days %}
<Days>{{ rule.transition_days }}</Days>
{% endif %}
{% if rule.transition_date %}
<Date>{{ rule.transition_date }}</Date>
{% endif %}
<StorageClass>{{ rule.storage_class }}</StorageClass>
</Transition>
{% endif %}
{% if rule.expiration_days or rule.expiration_date or rule.expired_object_delete_marker %}
<Expiration>
{% if rule.expiration_days %}
<Days>{{ rule.expiration_days }}</Days>
{% endif %}
{% if rule.expiration_date %}
<Date>{{ rule.expiration_date }}</Date>
{% endif %}
{% if rule.expired_object_delete_marker %}
<ExpiredObjectDeleteMarker>{{ rule.expired_object_delete_marker }}</ExpiredObjectDeleteMarker>
{% endif %}
</Expiration>
{% endif %}
{% if rule.nvt_noncurrent_days and rule.nvt_storage_class %}
<NoncurrentVersionTransition>
<NoncurrentDays>{{ rule.nvt_noncurrent_days }}</NoncurrentDays>
<StorageClass>{{ rule.nvt_storage_class }}</StorageClass>
</NoncurrentVersionTransition>
{% endif %}
{% if rule.nve_noncurrent_days %}
<NoncurrentVersionExpiration>
<NoncurrentDays>{{ rule.nve_noncurrent_days }}</NoncurrentDays>
</NoncurrentVersionExpiration>
{% endif %}
{% if rule.aimu_days %}
<AbortIncompleteMultipartUpload>
<DaysAfterInitiation>{{ rule.aimu_days }}</DaysAfterInitiation>
</AbortIncompleteMultipartUpload>
{% endif %}
</Rule>
{% endfor %}
</LifecycleConfiguration>
"""
# PUT Bucket versioning acknowledgement.
S3_BUCKET_VERSIONING = """<?xml version="1.0" encoding="UTF-8"?>
<VersioningConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
<Status>{{ bucket_versioning_status }}</Status>
</VersioningConfiguration>
"""

# GET Bucket versioning: empty element when versioning was never configured.
S3_BUCKET_GET_VERSIONING = """<?xml version="1.0" encoding="UTF-8"?>
{% if status is none %}
<VersioningConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/"/>
{% else %}
<VersioningConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
<Status>{{ status }}</Status>
</VersioningConfiguration>
{% endif %}
"""

# GET Bucket object versions: Version entries plus DeleteMarker entries;
# IsLatest is computed against the latest_versions mapping.
S3_BUCKET_GET_VERSIONS = """<?xml version="1.0" encoding="UTF-8"?>
<ListVersionsResult xmlns="http://s3.amazonaws.com/doc/2006-03-01">
<Name>{{ bucket.name }}</Name>
<Prefix>{{ prefix }}</Prefix>
<KeyMarker>{{ key_marker }}</KeyMarker>
<MaxKeys>{{ max_keys }}</MaxKeys>
<IsTruncated>{{ is_truncated }}</IsTruncated>
{% for key in key_list %}
<Version>
<Key>{{ key.name }}</Key>
<VersionId>{{ key.version_id }}</VersionId>
<IsLatest>{% if latest_versions[key.name] == key.version_id %}true{% else %}false{% endif %}</IsLatest>
<LastModified>{{ key.last_modified_ISO8601 }}</LastModified>
<ETag>{{ key.etag }}</ETag>
<Size>{{ key.size }}</Size>
<StorageClass>{{ key.storage_class }}</StorageClass>
<Owner>
<ID>75aa57f09aa0c8caeab4f8c24e99d10f8e7faeebf76c078efc7c6caea54ba06a</ID>
<DisplayName>webfile</DisplayName>
</Owner>
</Version>
{% endfor %}
{% for marker in delete_marker_list %}
<DeleteMarker>
<Key>{{ marker.name }}</Key>
<VersionId>{{ marker.version_id }}</VersionId>
<IsLatest>{% if latest_versions[marker.name] == marker.version_id %}true{% else %}false{% endif %}</IsLatest>
<LastModified>{{ marker.last_modified_ISO8601 }}</LastModified>
<Owner>
<ID>75aa57f09aa0c8caeab4f8c24e99d10f8e7faeebf76c078efc7c6caea54ba06a</ID>
<DisplayName>webfile</DisplayName>
</Owner>
</DeleteMarker>
{% endfor %}
</ListVersionsResult>
"""
# POST ?delete (multi-object delete): per-key success/error entries.
S3_DELETE_KEYS_RESPONSE = """<?xml version="1.0" encoding="UTF-8"?>
<DeleteResult xmlns="http://s3.amazonaws.com/doc/2006-03-01">
{% for k in deleted %}
<Deleted>
<Key>{{k}}</Key>
</Deleted>
{% endfor %}
{% for k in delete_errors %}
<Error>
<Key>{{k}}</Key>
</Error>
{% endfor %}
</DeleteResult>"""

# DELETE Object success response.
S3_DELETE_OBJECT_SUCCESS = """<DeleteObjectResponse xmlns="http://s3.amazonaws.com/doc/2006-03-01">
<DeleteObjectResponse>
<Code>200</Code>
<Description>OK</Description>
</DeleteObjectResponse>
</DeleteObjectResponse>"""

# PUT Object success response with the stored key's ETag and mtime.
S3_OBJECT_RESPONSE = """<PutObjectResponse xmlns="http://s3.amazonaws.com/doc/2006-03-01">
<PutObjectResponse>
<ETag>{{ key.etag }}</ETag>
<LastModified>{{ key.last_modified_ISO8601 }}</LastModified>
</PutObjectResponse>
</PutObjectResponse>"""

# GET Object ACL: renders the object's grants/grantees.
S3_OBJECT_ACL_RESPONSE = """<?xml version="1.0" encoding="UTF-8"?>
<AccessControlPolicy xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
<Owner>
<ID>75aa57f09aa0c8caeab4f8c24e99d10f8e7faeebf76c078efc7c6caea54ba06a</ID>
<DisplayName>webfile</DisplayName>
</Owner>
<AccessControlList>
{% for grant in obj.acl.grants %}
<Grant>
{% for grantee in grant.grantees %}
<Grantee xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:type="{{ grantee.type }}">
{% if grantee.uri %}
<URI>{{ grantee.uri }}</URI>
{% endif %}
{% if grantee.id %}
<ID>{{ grantee.id }}</ID>
{% endif %}
{% if grantee.display_name %}
<DisplayName>{{ grantee.display_name }}</DisplayName>
{% endif %}
</Grantee>
{% endfor %}
{% for permission in grant.permissions %}
<Permission>{{ permission }}</Permission>
{% endfor %}
</Grant>
{% endfor %}
</AccessControlList>
</AccessControlPolicy>"""

# GET Object tagging response.
S3_OBJECT_TAGGING_RESPONSE = """\
<?xml version="1.0" encoding="UTF-8"?>
<Tagging xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
<TagSet>
{% for tag in obj.tagging.tag_set.tags %}
<Tag>
<Key>{{ tag.key }}</Key>
<Value>{{ tag.value }}</Value>
</Tag>
{% endfor %}
</TagSet>
</Tagging>"""
# GET Bucket tagging response.
S3_BUCKET_TAGGING_RESPONSE = """<?xml version="1.0" encoding="UTF-8"?>
<Tagging>
<TagSet>
{% for tag in bucket.tagging.tag_set.tags %}
<Tag>
<Key>{{ tag.key }}</Key>
<Value>{{ tag.value }}</Value>
</Tag>
{% endfor %}
</TagSet>
</Tagging>"""

# GET Bucket CORS: one CORSRule per configured rule; optional sections are
# emitted only when present on the rule.
S3_BUCKET_CORS_RESPONSE = """<?xml version="1.0" encoding="UTF-8"?>
<CORSConfiguration>
{% for cors in bucket.cors %}
<CORSRule>
{% for origin in cors.allowed_origins %}
<AllowedOrigin>{{ origin }}</AllowedOrigin>
{% endfor %}
{% for method in cors.allowed_methods %}
<AllowedMethod>{{ method }}</AllowedMethod>
{% endfor %}
{% if cors.allowed_headers is not none %}
{% for header in cors.allowed_headers %}
<AllowedHeader>{{ header }}</AllowedHeader>
{% endfor %}
{% endif %}
{% if cors.exposed_headers is not none %}
{% for header in cors.exposed_headers %}
<ExposedHeader>{{ header }}</ExposedHeader>
{% endfor %}
{% endif %}
{% if cors.max_age_seconds is not none %}
<MaxAgeSeconds>{{ cors.max_age_seconds }}</MaxAgeSeconds>
{% endif %}
</CORSRule>
{% endfor %}
</CORSConfiguration>
"""

# PUT Object - Copy response.
S3_OBJECT_COPY_RESPONSE = """\
<CopyObjectResult xmlns="http://doc.s3.amazonaws.com/2006-03-01">
<ETag>{{ key.etag }}</ETag>
<LastModified>{{ key.last_modified_ISO8601 }}</LastModified>
</CopyObjectResult>"""
# POST ?uploads: multipart upload initiation response.
S3_MULTIPART_INITIATE_RESPONSE = """<?xml version="1.0" encoding="UTF-8"?>
<InitiateMultipartUploadResult xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
<Bucket>{{ bucket_name }}</Bucket>
<Key>{{ key_name }}</Key>
<UploadId>{{ upload_id }}</UploadId>
</InitiateMultipartUploadResult>"""

# Upload Part - Copy response.
S3_MULTIPART_UPLOAD_RESPONSE = """<?xml version="1.0" encoding="UTF-8"?>
<CopyPartResult xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
<LastModified>{{ part.last_modified_ISO8601 }}</LastModified>
<ETag>{{ part.etag }}</ETag>
</CopyPartResult>"""

# GET ?uploadId (ListParts) response.
# NOTE(review): <StorageClass> appears twice in this template — looks like a
# copy-paste duplicate; confirm before removing either occurrence.
S3_MULTIPART_LIST_RESPONSE = """<?xml version="1.0" encoding="UTF-8"?>
<ListPartsResult xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
<Bucket>{{ bucket_name }}</Bucket>
<Key>{{ key_name }}</Key>
<UploadId>{{ upload_id }}</UploadId>
<StorageClass>STANDARD</StorageClass>
<Initiator>
<ID>75aa57f09aa0c8caeab4f8c24e99d10f8e7faeebf76c078efc7c6caea54ba06a</ID>
<DisplayName>webfile</DisplayName>
</Initiator>
<Owner>
<ID>75aa57f09aa0c8caeab4f8c24e99d10f8e7faeebf76c078efc7c6caea54ba06a</ID>
<DisplayName>webfile</DisplayName>
</Owner>
<StorageClass>STANDARD</StorageClass>
<PartNumberMarker>1</PartNumberMarker>
<NextPartNumberMarker>{{ count }}</NextPartNumberMarker>
<MaxParts>{{ count }}</MaxParts>
<IsTruncated>false</IsTruncated>
{% for part in parts %}
<Part>
<PartNumber>{{ part.name }}</PartNumber>
<LastModified>{{ part.last_modified_ISO8601 }}</LastModified>
<ETag>{{ part.etag }}</ETag>
<Size>{{ part.size }}</Size>
</Part>
{% endfor %}
</ListPartsResult>"""

# POST ?uploadId (CompleteMultipartUpload) response.
S3_MULTIPART_COMPLETE_RESPONSE = """<?xml version="1.0" encoding="UTF-8"?>
<CompleteMultipartUploadResult xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
<Location>http://{{ bucket_name }}.s3.amazonaws.com/{{ key_name }}</Location>
<Bucket>{{ bucket_name }}</Bucket>
<Key>{{ key_name }}</Key>
<ETag>{{ etag }}</ETag>
</CompleteMultipartUploadResult>
"""

# GET ?uploads (ListMultipartUploads) response.
# NOTE(review): <IsTruncated>False</IsTruncated> uses Python-style
# capitalization; AWS emits lowercase "false" — confirm before changing.
S3_ALL_MULTIPARTS = """<?xml version="1.0" encoding="UTF-8"?>
<ListMultipartUploadsResult xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
<Bucket>{{ bucket_name }}</Bucket>
<KeyMarker></KeyMarker>
<UploadIdMarker></UploadIdMarker>
<MaxUploads>1000</MaxUploads>
<IsTruncated>False</IsTruncated>
{% for upload in uploads %}
<Upload>
<Key>{{ upload.key_name }}</Key>
<UploadId>{{ upload.id }}</UploadId>
<Initiator>
<ID>arn:aws:iam::123456789012:user/user1-11111a31-17b5-4fb7-9df5-b111111f13de</ID>
<DisplayName>user1-11111a31-17b5-4fb7-9df5-b111111f13de</DisplayName>
</Initiator>
<Owner>
<ID>75aa57f09aa0c8caeab4f8c24e99d10f8e7faeebf76c078efc7c6caea54ba06a</ID>
<DisplayName>webfile</DisplayName>
</Owner>
<StorageClass>STANDARD</StorageClass>
<Initiated>2010-11-10T20:48:33.000Z</Initiated>
</Upload>
{% endfor %}
</ListMultipartUploadsResult>
"""
# Error: bucket has no policy configured.
S3_NO_POLICY = """<?xml version="1.0" encoding="UTF-8"?>
<Error>
<Code>NoSuchBucketPolicy</Code>
<Message>The bucket policy does not exist</Message>
<BucketName>{{ bucket_name }}</BucketName>
<RequestId>0D68A23BB2E2215B</RequestId>
<HostId>9Gjjt1m+cjU4OPvX9O9/8RuvnG41MRb/18Oux2o5H5MY7ISNTlXN+Dz9IG62/ILVxhAGI0qyPfg=</HostId>
</Error>
"""

# Error: bucket has no lifecycle configuration.
S3_NO_LIFECYCLE = """<?xml version="1.0" encoding="UTF-8"?>
<Error>
<Code>NoSuchLifecycleConfiguration</Code>
<Message>The lifecycle configuration does not exist</Message>
<BucketName>{{ bucket_name }}</BucketName>
<RequestId>44425877V1D0A2F9</RequestId>
<HostId>9Gjjt1m+cjU4OPvX9O9/8RuvnG41MRb/18Oux2o5H5MY7ISNTlXN+Dz9IG62/ILVxhAGI0qyPfg=</HostId>
</Error>
"""

# Error: bucket has no tag set.
S3_NO_BUCKET_TAGGING = """<?xml version="1.0" encoding="UTF-8"?>
<Error>
<Code>NoSuchTagSet</Code>
<Message>The TagSet does not exist</Message>
<BucketName>{{ bucket_name }}</BucketName>
<RequestId>44425877V1D0A2F9</RequestId>
<HostId>9Gjjt1m+cjU4OPvX9O9/8RuvnG41MRb/18Oux2o5H5MY7ISNTlXN+Dz9IG62/ILVxhAGI0qyPfg=</HostId>
</Error>
"""

# Error: bucket has no website configuration.
S3_NO_BUCKET_WEBSITE_CONFIG = """<?xml version="1.0" encoding="UTF-8"?>
<Error>
<Code>NoSuchWebsiteConfiguration</Code>
<Message>The specified bucket does not have a website configuration</Message>
<BucketName>{{ bucket_name }}</BucketName>
<RequestId>44425877V1D0A2F9</RequestId>
<HostId>9Gjjt1m+cjU4OPvX9O9/8RuvnG41MRb/18Oux2o5H5MY7ISNTlXN+Dz9IG62/ILVxhAGI0qyPfg=</HostId>
</Error>
"""

# Error used for invalid CORS requests.
# NOTE(review): Code/Message here look copy-pasted from the website-config
# error above; a malformed CORS request would normally get a different error
# code (e.g. InvalidRequest) — confirm against callers before changing, since
# this string is runtime behavior.
S3_INVALID_CORS_REQUEST = """<?xml version="1.0" encoding="UTF-8"?>
<Error>
<Code>NoSuchWebsiteConfiguration</Code>
<Message>The specified bucket does not have a website configuration</Message>
<BucketName>{{ bucket_name }}</BucketName>
<RequestId>44425877V1D0A2F9</RequestId>
<HostId>9Gjjt1m+cjU4OPvX9O9/8RuvnG41MRb/18Oux2o5H5MY7ISNTlXN+Dz9IG62/ILVxhAGI0qyPfg=</HostId>
</Error>
"""

# Error: bucket has no CORS configuration.
S3_NO_CORS_CONFIG = """<?xml version="1.0" encoding="UTF-8"?>
<Error>
<Code>NoSuchCORSConfiguration</Code>
<Message>The CORS configuration does not exist</Message>
<BucketName>{{ bucket_name }}</BucketName>
<RequestId>44425877V1D0A2F9</RequestId>
<HostId>9Gjjt1m+cjU4OPvX9O9/8RuvnG41MRb/18Oux2o5H5MY7ISNTlXN+Dz9IG62/ILVxhAGI0qyPfg=</HostId>
</Error>
"""
# GET Bucket logging: target bucket/prefix plus optional TargetGrants.
S3_LOGGING_CONFIG = """<?xml version="1.0" encoding="UTF-8"?>
<BucketLoggingStatus xmlns="http://doc.s3.amazonaws.com/2006-03-01">
<LoggingEnabled>
<TargetBucket>{{ logging["TargetBucket"] }}</TargetBucket>
<TargetPrefix>{{ logging["TargetPrefix"] }}</TargetPrefix>
{% if logging.get("TargetGrants") %}
<TargetGrants>
{% for grant in logging["TargetGrants"] %}
<Grant>
<Grantee xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:type="{{ grant.grantees[0].type }}">
{% if grant.grantees[0].uri %}
<URI>{{ grant.grantees[0].uri }}</URI>
{% endif %}
{% if grant.grantees[0].id %}
<ID>{{ grant.grantees[0].id }}</ID>
{% endif %}
{% if grant.grantees[0].display_name %}
<DisplayName>{{ grant.grantees[0].display_name }}</DisplayName>
{% endif %}
</Grantee>
<Permission>{{ grant.permissions[0] }}</Permission>
</Grant>
{% endfor %}
</TargetGrants>
{% endif %}
</LoggingEnabled>
</BucketLoggingStatus>
"""

# GET Bucket logging when logging is disabled: empty status element.
S3_NO_LOGGING_CONFIG = """<?xml version="1.0" encoding="UTF-8"?>
<BucketLoggingStatus xmlns="http://doc.s3.amazonaws.com/2006-03-01" />
"""

# GET Bucket notification: topic, queue and cloud-function configurations,
# each with events and optional S3Key filter rules.
S3_GET_BUCKET_NOTIFICATION_CONFIG = """<?xml version="1.0" encoding="UTF-8"?>
<NotificationConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
{% for topic in bucket.notification_configuration.topic %}
<TopicConfiguration>
<Id>{{ topic.id }}</Id>
<Topic>{{ topic.arn }}</Topic>
{% for event in topic.events %}
<Event>{{ event }}</Event>
{% endfor %}
{% if topic.filters %}
<Filter>
<S3Key>
{% for rule in topic.filters["S3Key"]["FilterRule"] %}
<FilterRule>
<Name>{{ rule["Name"] }}</Name>
<Value>{{ rule["Value"] }}</Value>
</FilterRule>
{% endfor %}
</S3Key>
</Filter>
{% endif %}
</TopicConfiguration>
{% endfor %}
{% for queue in bucket.notification_configuration.queue %}
<QueueConfiguration>
<Id>{{ queue.id }}</Id>
<Queue>{{ queue.arn }}</Queue>
{% for event in queue.events %}
<Event>{{ event }}</Event>
{% endfor %}
{% if queue.filters %}
<Filter>
<S3Key>
{% for rule in queue.filters["S3Key"]["FilterRule"] %}
<FilterRule>
<Name>{{ rule["Name"] }}</Name>
<Value>{{ rule["Value"] }}</Value>
</FilterRule>
{% endfor %}
</S3Key>
</Filter>
{% endif %}
</QueueConfiguration>
{% endfor %}
{% for cf in bucket.notification_configuration.cloud_function %}
<CloudFunctionConfiguration>
<Id>{{ cf.id }}</Id>
<CloudFunction>{{ cf.arn }}</CloudFunction>
{% for event in cf.events %}
<Event>{{ event }}</Event>
{% endfor %}
{% if cf.filters %}
<Filter>
<S3Key>
{% for rule in cf.filters["S3Key"]["FilterRule"] %}
<FilterRule>
<Name>{{ rule["Name"] }}</Name>
<Value>{{ rule["Value"] }}</Value>
</FilterRule>
{% endfor %}
</S3Key>
</Filter>
{% endif %}
</CloudFunctionConfiguration>
{% endfor %}
</NotificationConfiguration>
"""
| rocky4570/moto | moto/s3/responses.py | Python | apache-2.0 | 65,762 |
#!/usr/bin/env python
"""Django's command-line entry point for the ``pigeon`` project.

Run as ``python manage.py <command>`` (runserver, migrate, test, ...).
"""
import os
import sys

if __name__ == "__main__":
    # Point Django at this project's settings unless the caller already
    # selected a settings module via the environment.
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "pigeon.settings")
    try:
        from django.core.management import execute_from_command_line
    except ImportError:
        # The above import may fail for some other reason. Ensure that the
        # issue is really that Django is missing to avoid masking other
        # exceptions on Python 2.
        try:
            import django
        except ImportError:
            raise ImportError(
                "Couldn't import Django. Are you sure it's installed and "
                "available on your PYTHONPATH environment variable? Did you "
                "forget to activate a virtual environment?"
            )
        raise
    # Delegate to Django's CLI dispatcher with the raw argv.
    execute_from_command_line(sys.argv)
| xouillet/RendezMoiMesPlumes | pigeon/manage.py | Python | gpl-3.0 | 804 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.