code stringlengths 3 1.05M | repo_name stringlengths 5 104 | path stringlengths 4 251 | language stringclasses 1 value | license stringclasses 15 values | size int64 3 1.05M |
|---|---|---|---|---|---|
#!/usr/bin/env python
# -*- coding: utf-8
# ----------------------------------------------------------------------
# Flask web service for POS taggin
# ----------------------------------------------------------------------
# Ivan Vladimir Meza-Ruiz/ ivanvladimir at turing.iimas.unam.mx
# 2015/IIMAS/UNAM
# ----------------------------------------------------------------------
from __future__ import print_function
from flask import Flask, request
from tempfile import NamedTemporaryFile
from os import remove
from subprocess import Popen, PIPE
import codecs
import json
import argparse
# Flask application object for the POS-tagging web service.
app = Flask('poswebservice')

# Supported languages. 'cmd' is the shell command template for the external
# Java tagger; {0} is replaced with the path of a temporary input file.
languages={
    'es':{
        'cmd':'java -classpath lib/*:src SpanishTagger {0}'},
    'en':{
        'cmd':'java -classpath lib/*:src EnglishTagger {0}'},
}
@app.route('/', methods=['GET'])
def index():
    """Liveness probe: plain-text confirmation that the service is up."""
    return "Service up"

@app.route('/api/v1.0/languages', methods=['GET'])
def get_languages():
    """Return the supported language codes as a JSON document."""
    return json.dumps({"languages": list(languages)})
@app.route('/api/v1.0/tag/<string:sntc>',defaults={'lang':None},methods=['GET'] )
@app.route('/api/v1.0/tag/<string:lang>/<string:sntc>',methods=['GET'] )
def tag(lang, sntc):
    """Tag the sentence *sntc* with the POS tagger for *lang* (default 'es').

    Writes the sentence to a temporary file, runs the external Java tagger
    on it and returns ``{"POS": [(token, tag), ...]}`` as JSON.
    """
    if not lang:
        lang = "es"
    if lang not in languages:
        # FIX: report unsupported languages instead of raising a KeyError (500).
        return json.dumps({"error": "unsupported language", "language": lang}), 400
    pos = languages[lang]
    pos_ = []
    temp = None  # FIX: defined before try so the finally clause cannot NameError
    try:
        temp = NamedTemporaryFile(delete=False)
        temp.close()
        # FIX: `with` guarantees the handle is closed even if write() fails.
        with codecs.open(temp.name, "w", "utf-8") as infile:
            infile.write(sntc)
        cmd = pos['cmd'].format(temp.name).split()
        p = Popen(cmd, stdin=None, stdout=PIPE, stderr=None)
        output, err = p.communicate()
        for line in output.decode('utf-8').split(u'\n'):
            line = line.strip()
            if not line:
                continue
            fields = line.split()
            # FIX: guard against malformed tagger lines (fewer than 2 fields).
            if len(fields) >= 2:
                pos_.append((fields[0], fields[1]))
    finally:
        if temp is not None:
            remove(temp.name)
    return json.dumps({"POS": pos_}, ensure_ascii=False)
@app.route('/api/v1.0/tag',defaults={'lang':None},methods=['POST'] )
@app.route('/api/v1.0/tag/<string:lang>',methods=['POST'] )
def tag_post(lang):
    """Tag the UTF-8 request body with the POS tagger for *lang* (default 'es').

    Same contract as tag(): returns ``{"POS": [(token, tag), ...]}`` as JSON.
    """
    if not lang:
        lang = "es"
    if lang not in languages:
        # FIX: report unsupported languages instead of raising a KeyError (500).
        return json.dumps({"error": "unsupported language", "language": lang}), 400
    pos = languages[lang]
    pos_ = []
    temp = None  # FIX: defined before try so the finally clause cannot NameError
    try:
        text = request.data
        temp = NamedTemporaryFile(delete=False)
        temp.close()
        # FIX: `with` guarantees the handle is closed even if write() fails.
        with codecs.open(temp.name, "w", "utf-8") as infile:
            infile.write(text.decode('utf-8'))
        cmd = pos['cmd'].format(temp.name).split()
        p = Popen(cmd, stdin=None, stdout=PIPE, stderr=None)
        output, err = p.communicate()
        for line in output.decode('utf-8').split(u'\n'):
            line = line.strip()
            if not line:
                continue
            fields = line.split()
            # FIX: guard against malformed tagger lines (fewer than 2 fields).
            if len(fields) >= 2:
                pos_.append((fields[0], fields[1]))
    finally:
        if temp is not None:
            remove(temp.name)
    return json.dumps({"POS": pos_}, ensure_ascii=False)
if __name__ == '__main__':
    # Command-line options for the development server.
    # FIX: description said "Author identification" (copy-paste from another
    # project); this is the POS tagging web service.
    p = argparse.ArgumentParser("POS tagging web service")
    p.add_argument("--host", default="127.0.0.1",
                   action="store", dest="host",
                   help="Root url [127.0.0.1]")
    p.add_argument("--port", default=5000, type=int,
                   action="store", dest="port",
                   help="Port url [5000]")  # FIX: help said [500], default is 5000
    p.add_argument("--debug", default=False,
                   action="store_true", dest="debug",
                   help="Use debug deployment [False]")  # FIX: typo "Flase"
    p.add_argument("-v", "--verbose",
                   action="store_true", dest="verbose",
                   help="Verbose mode [Off]")
    opts = p.parse_args()
    # Start the Flask development server with the parsed options.
    app.run(debug=opts.debug,
            host=opts.host,
            port=opts.port)
| jakaton/pos_flask | src/poswebservice.py | Python | gpl-2.0 | 3,506 |
#!/usr/bin/env python3
#
# db.py by Bill Weinman <http://bw.org/contact/>
# This is part of jurl - Jump to URL (a private short URL service)
# Copyright (c) 2010-2017 The BearHeart Group, LLC
# update 2017-09-29 - for Python 3 EssT
#
import sys, os
import sqlite3
from hashlib import md5
from bwCGI import bwCGI
from bwDB import bwDB
from bwTL import tlFile
from bwConfig import configFile
__version__ = "2.0.1"

# namespace container for global variables
g = dict(
    VERSION = 'db.py {} bwDB {}'.format(__version__, bwDB.version()),
    config_file = 'db.conf',      # runtime configuration file name
    template_ext = '.html',       # extension of the HTML template files
    table_name = 'jurl',          # database table holding the short URLs
    # per-request accumulators rendered into the page by set_stack_vars()
    stacks = dict(
        messages = [],
        errors = [],
        hiddens = []
    )
)
def main():
    """CGI entry point: set up globals, dispatch the requested action,
    then fall through to the main page (page() exits the process)."""
    init()
    if 'a' in g['vars']: dispatch()
    main_page()

def init():
    """Populate the global namespace `g` with the CGI helpers, request
    variables, configuration and database handle."""
    g['cgi'] = bwCGI()
    g['cgi'].send_header()
    g['vars'] = g['cgi'].vars()
    g['linkback'] = g['cgi'].linkback()
    g['config'] = configFile(g['config_file']).recs()
    g['tl'] = tlFile(None, showUnknowns = True)
    g['db'] = bwDB( filename = g['config']['db'], table = g['table_name'] )
def dispatch():
    """Route the request to a handler based on the 'a' (action) CGI variable.

    Handlers render a page and exit(); falling through to the final
    main_page() only happens for unknown actions.
    """
    v = g['vars']
    a = v.getfirst('a')
    if a == 'add':
        add()
    elif a == 'edit_del':
        # one form with two submit buttons: Edit or Delete
        if 'edit' in v: edit()
        elif 'delete' in v: delete_confirm()
        else: error("invalid edit_del")
    elif a == 'update':
        if 'cancel' in v:
            message('Edit canceled')
            main_page()
        else: update()
    elif a == 'delete_do':
        if 'cancel' in v:
            message('Delete canceled')
            main_page()
        else: delete_do()
    else:
        error("unhandled jump: ", a)
    main_page()
def main_page():
    """Render the main page: record listing plus the 'add' form."""
    # save values -- read current form fields without triggering the
    # template engine's unknown-variable reporting
    unkflag = g['tl'].flags['showUnknowns']
    g['tl'].flags['showUnknowns'] = False;
    tURL = var('targetURL')
    sURL = var('shortURL')
    g['tl'].flags['showUnknowns'] = unkflag;
    listrecs()
    # restore any user-entered values so they survive a page redisplay
    if tURL is not None: var('targetURL', tURL)
    if sURL is not None: var('shortURL', sURL)
    hidden('a', 'add')
    page('main', 'Enter a new short URL')
def listrecs():
    ''' display the database content '''
    db = g['db']
    v = g['vars']
    # page size, configurable via 'sql_limit' in the config file
    sql_limit = int(g['config'].get('sql_limit', 25))
    # how many records do we have?
    count = db.countrecs()
    message('There are {} records in the database. Add some more!'.format(count or 'no'))
    # how many pages do we have?
    numpages = count // int(sql_limit)
    if count % int(sql_limit): numpages += 1
    # what page is this? (jump / next / prev navigation variables)
    curpage = 0
    if 'jumppage' in v:
        curpage = int(v.getfirst('jumppage'))
    elif 'nextpage' in v:
        curpage = int(v.getfirst('pageno')) + 1
    elif 'prevpage' in v:
        curpage = int(v.getfirst('pageno')) - 1
    pagebar = list_pagebar(curpage, numpages)
    a = ''
    q = '''
        SELECT * FROM {}
        ORDER BY shortURL
        LIMIT ?
        OFFSET ?
    '''.format(g['table_name'])
    # render one 'recline' template per record on the current page
    for r in db.sql_query(q, [sql_limit, (curpage * sql_limit)]):
        set_form_vars(**r)
        a += getpage('recline')
    set_form_vars()
    var('CONTENT', pagebar + a + pagebar )
def list_pagebar(pageno, numpages):
    ''' return the html for the pager line

    `pageno` is the zero-based current page, `numpages` the page count.
    '''
    prevlink = '<span class="n"><<</span>'
    nextlink = '<span class="n">>></span>'
    linkback = g['linkback']
    if pageno > 0:
        prevlink = '<a href="{}?pageno={}&prevpage=1"><<</a>'.format(linkback, pageno)
    if pageno < ( numpages - 1 ):
        nextlink = '<a href="{}?pageno={}&nextpage=1">>></a>'.format(linkback, pageno)
    pagebar = ''
    for n in range(0, numpages):
        # FIX: was `n is pageno` -- identity comparison only works for
        # CPython's interned small ints and breaks for larger page numbers.
        if n == pageno: pagebar += '<span class="n">{}</span>'.format(n + 1)
        else: pagebar += '<a href="{}?jumppage={}">{}</a>'.format(linkback, n, n + 1)
    var('prevlink', prevlink)
    var('nextlink', nextlink)
    var('pagebar', pagebar)
    p = getpage('nextprev')
    return p
def page(pagename, title = ''):
    ''' display a page from html template

    Renders header + pagename + footer templates to stdout, then exits
    the process (CGI model: one page per request).
    '''
    tl = g['tl']
    htmldir = g['config']['htmlDir']
    file_ext = g['template_ext']
    var('pageTitle', title)
    var('VERSION', g['VERSION'])
    set_stack_vars()
    for p in ( 'header', pagename, 'footer' ):
        try:
            tl.file(os.path.join(htmldir, p + file_ext))
            for line in tl.readlines(): print(line, end='') # lines are already terminated
        except IOError as e:
            errorexit('Cannot open file ({})'.format(e))
    exit()

def getpage(p):
    ''' return a page as text from an html template

    Like page(), but returns the rendered template as a string instead
    of printing it, and does not exit.
    '''
    tl = g['tl']
    htmldir = g['config']['htmlDir']
    file_ext = g['template_ext']
    a = ''
    try:
        tl.file(os.path.join(htmldir, p + file_ext))
        for line in tl.readlines(): a += line # lines are already terminated
    except IOError as e:
        errorexit('Cannot open file ({})'.format(e))
    return(a)
### actions
def add():
    """Handle the 'add' action: insert a new short URL record.

    Every main_page() call below renders and exits, so the later code
    only runs when the earlier branches were not taken.
    """
    db = g['db']
    v = g['vars']
    cgi = g['cgi']
    sURL = tURL = ''
    if 'shortURL' in v: sURL = v.getfirst('shortURL')
    else: sURL = ''
    if 'targetURL' in v: tURL = v.getfirst('targetURL')
    else: main_page()
    # entity-encode user input before it is stored / redisplayed
    rec = dict(
        shortURL = cgi.entity_encode(sURL),
        targetURL = cgi.entity_encode(tURL)
    )
    if 'generate' in v:
        # "generate" button: derive the short URL from the target, redisplay
        rec['shortURL'] = shorten(tURL)
        set_form_vars(**rec)
        hidden('a', 'add')
        main_page()
    if 'shortURL' in v:
        try:
            db.insert(rec)
        except (sqlite3.IntegrityError) as e:
            # shortURL column is unique; redisplay the form with an error
            error('Duplicate Short URL is not allowed')
            set_form_vars(**rec)
            hidden('a', 'add')
            main_page()
    message('Record ({}) added'.format(rec['shortURL']))
    main_page()
def edit():
    """Show the edit form pre-filled with the selected record."""
    id = g['vars'].getfirst('id')
    rec = g['db'].getrec(id)
    set_form_vars(**rec)
    hidden('a', 'update')
    hidden('id', id)
    hidden('sURL', rec['shortURL'])
    page('edit', 'Edit this short URL')

def delete_confirm():
    """Show the are-you-sure page before deleting a record."""
    id = g['vars'].getfirst('id')
    rec = g['db'].getrec(id)
    set_form_vars(**rec)
    hidden('a', 'delete_do')
    hidden('id', id)
    hidden('shortURL', rec['shortURL'])
    page('delconfirm', 'Delete this short URL?')

def delete_do():
    """Actually delete the record (after confirmation)."""
    db = g['db']
    v = g['vars']
    id = v.getfirst('id')
    shortURL = v.getfirst('shortURL')
    db.delete(id)
    message('Record ({}) deleted'.format(shortURL))
    main_page()
def update():
    """Apply an edit: update the record's target URL.

    Note: only targetURL is updated; the short URL itself is immutable
    here ('sURL' is used only for the status message).
    """
    db = g['db']
    v = g['vars']
    cgi = g['cgi']
    sURL = cgi.entity_encode(v.getfirst('sURL'))
    id = v.getfirst('id')
    rec = dict(
        id = id,
        targetURL = cgi.entity_encode(v.getfirst('targetURL'))
    )
    db.update(id, rec)
    message('Record ({}) updated'.format(sURL))
    main_page()
### manage template variables
def var(n, v = None):
    ''' shortcut for setting a variable '''
    # with v=None this is a read: returns the current template value
    return g['tl'].var(n, v)

def set_form_vars(**kwargs):
    """Set (or, with no arguments, clear) the form's template variables."""
    s = kwargs.get('shortURL', '')
    t = kwargs.get('targetURL', '')
    id = kwargs.get('id', '')
    var('shortURL', s)
    var('targetURL', t)
    var('id', id)
    var('SELF', g['linkback'])

def stackmessage(stack, *list, **kwargs):
    """Join *list* into one string and push it on the named stack."""
    sep = kwargs.get('sep', ' ')
    m = sep.join(str(i) for i in list)
    g['stacks'][stack].append(m)

def message(*list, **kwargs):
    """Queue a status message for the next rendered page."""
    stackmessage('messages', *list, **kwargs)
def error(*list, **kwargs):
    """Queue an error message; before CGI init, print it and exit instead."""
    if 'cgi' in g:
        stackmessage('errors', *list, **kwargs)
    else:
        # FIX: stackmessage() str()-converts each item, but this path
        # joined them raw and raised TypeError for non-string arguments
        # (e.g. error("unhandled jump: ", a) with a is None).
        errorexit(' '.join(str(i) for i in list))
def hidden(n, v):
    """Queue a hidden form field (name, value) for the next page."""
    g['stacks']['hiddens'].append([n, v])

def set_stack_vars():
    """Render the queued messages, errors and hidden fields into the
    MESSAGES / ERRORS / hiddens template variables."""
    a = ''
    for m in g['stacks']['messages']:
        a += '<p class="message">{}</p>\n'.format(m)
    var('MESSAGES', a)
    a = ''
    for m in g['stacks']['errors']:
        a += '<p class="error">{}</p>\n'.format(m)
    var('ERRORS', a)
    a = ''
    for m in g['stacks']['hiddens']:
        a += '<input type="hidden" name="{}" value="{}" />\n'.format(*m)
    var('hiddens', a)
### utilities
def shorten(s):
lookup = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789'
lsz = len(lookup)
m = md5(s.encode('utf-8')) # md5 because it's short - doesn't need to be secure or reversible
out = m.digest();
return ''.join('{}'.format(lookup[x % lsz]) for x in out)
def errorexit(e):
    """Print *e* as a red HTML paragraph and terminate the CGI process."""
    me = os.path.basename(sys.argv[0])
    print('<p style="color:red">')
    print('{}: {}'.format(me, e))
    print('</p>')
    # exit 0 so the web server still serves the (error) page normally
    exit(0)

def message_page(*list):
    """Queue a message and immediately render the main page."""
    message(*list)
    main_page()

def debug(*args):
    """Write debug output to stderr (stdout is the HTTP response)."""
    print(*args, file=sys.stderr)
if __name__ == "__main__": main()
| shucommon/little-routine | python/ex_files_python_esst/exercise_files/Chap15/jurl/db.py | Python | gpl-3.0 | 8,552 |
#!/usr/bin/python
import os,sys,datetime,re
import shlex
import subprocess
from experiments import *
from helper import *
from run_config import *
import glob
# Timestamp used to tag this batch of builds/results uniquely.
now = datetime.datetime.now()
strnow=now.strftime("%Y%m%d-%H%M%S")

# The script lives in scripts/; work from the repository root.
os.chdir('..')
PATH=os.getcwd()
result_dir = PATH + "/results/"
test_dir = PATH + "/tests-" + strnow
test_dir_name = "tests-" + strnow

cfgs = configs  # baseline configuration imported from run_config

# Command-line flag defaults: compile and execute locally.
execute = True
remote = False
cluster = None
skip = False
exps=[]
arg_cluster = False  # True while the next argument is the cluster name for -c

if len(sys.argv) < 2:
    sys.exit("Usage: %s [-exec/-e/-noexec/-ne] [-c cluster] experiments\n \
    -exec/-e: compile and execute locally (default)\n \
    -noexec/-ne: compile first target only \
    -c: run remote on cluster; possible values: istc, vcloud\n \
    " % sys.argv[0])

# Hand-rolled argument parsing: flags plus a list of experiment names.
for arg in sys.argv[1:]:
    if arg == "-help" or arg == "-h":
        sys.exit("Usage: %s [-exec/-e/-noexec/-ne] [-skip] [-c cluster] experiments\n \
    -exec/-e: compile and execute locally (default)\n \
    -noexec/-ne: compile first target only \
    -skip: skip any experiments already in results folder\n \
    -c: run remote on cluster; possible values: istc, vcloud\n \
    " % sys.argv[0])
    if arg == "-exec" or arg == "-e":
        execute = True
    elif arg == "-noexec" or arg == "-ne":
        execute = False
    elif arg == "-skip":
        skip = True
    elif arg == "-c":
        remote = True
        arg_cluster = True
    elif arg_cluster:
        cluster = arg
        arg_cluster = False
    else:
        exps.append(arg)
# Main driver: for every configuration of every requested experiment,
# rewrite config.h, rebuild, then run either locally or on a remote cluster.
for exp in exps:
    fmt,experiments = experiment_map[exp]()
    for e in experiments:
        cfgs = get_cfgs(fmt,e)
        if remote:
            # remote runs always use TCP transport on port 7000
            cfgs["TPORT_TYPE"],cfgs["TPORT_TYPE_IPC"],cfgs["TPORT_PORT"]="\"tcp\"","false",7000
        output_f = get_outfile_name(cfgs)
        # Check whether experiment has been already been run in this batch
        if skip:
            if len(glob.glob('{}*{}*.out'.format(result_dir,output_f))) > 0:
                print "Experiment exists in results folder... skipping"
                continue
        output_dir = output_f + "/"
        output_f = output_f + strnow
        print output_f
        # Rewrite config.h: replace each "#define <KEY>" line that matches a
        # key in cfgs with the experiment's value; keep all other lines.
        f = open("config.h",'r');
        lines = f.readlines()
        f.close()
        with open("config.h",'w') as f_cfg:
            for line in lines:
                found_cfg = False
                for c in cfgs:
                    found_cfg = re.search("#define "+c + "\t",line) or re.search("#define "+c + " ",line);
                    if found_cfg:
                        f_cfg.write("#define " + c + " " + str(cfgs[c]) + "\n")
                        break
                if not found_cfg: f_cfg.write(line)
        cmd = "make clean; make -j"
        os.system(cmd)
        # -noexec: build the first configuration only, then stop
        if not execute:
            exit()
        if execute:
            cmd = "mkdir -p {}".format(result_dir)
            os.system(cmd)
            # archive the config used for this run next to the results
            cmd = "cp config.h {}{}.cfg".format(result_dir,output_f)
            os.system(cmd)
            if remote:
                # select cluster-specific machine list and credentials
                if cluster == 'istc':
                    machines_ = istc_machines
                    uname = istc_uname
                    cfg_fname = "istc_ifconfig.txt"
                elif cluster == 'vcloud':
                    machines_ = vcloud_machines
                    uname = vcloud_uname
                    cfg_fname = "vcloud_ifconfig.txt"
                else:
                    assert(False)
                machines = sorted(machines_[:(cfgs["NODE_CNT"] + cfgs["CLIENT_NODE_CNT"])])
                # TODO: ensure that machine order and node order is the same for ifconfig
                f = open(cfg_fname,'r');
                lines = f.readlines()
                f.close()
                # write ifconfig.txt with one IP per selected machine
                with open("ifconfig.txt",'w') as f_ifcfg:
                    for line in lines:
                        line = line.rstrip('\n')
                        if cluster == 'istc':
                            line = re.split(' ',line)
                            if line[0] in machines:
                                f_ifcfg.write(line[1] + "\n")
                        elif cluster == 'vcloud':
                            if line in machines:
                                f_ifcfg.write("172.19.153." + line + "\n")
                if cfgs["WORKLOAD"] == "TPCC":
                    files = ["rundb","runcl","ifconfig.txt","./benchmarks/TPCC_short_schema.txt"]
                elif cfgs["WORKLOAD"] == "YCSB":
                    files = ["rundb","runcl","ifconfig.txt","./benchmarks/YCSB_schema.txt"]
                # NOTE(review): `itertools` is not imported by name in this file --
                # presumably it comes in via one of the wildcard imports; verify.
                for m,f in itertools.product(machines,files):
                    if cluster == 'istc':
                        cmd = 'scp {}/{} {}.csail.mit.edu:/home/{}/'.format(PATH,f,m,uname)
                    elif cluster == 'vcloud':
                        cmd = 'scp -i {} {}/{} root@172.19.153.{}:/{}/'.format(identity,PATH,f,m,uname)
                    print(cmd)
                    os.system(cmd)
                # Be sure all rundb/runcl are killed
                if cluster == 'vcloud':
                    cmd = './scripts/vcloud_cmd.sh \"{}\" \"pkill -f \'rundb\'\"'.format(' '.join(machines))
                    print(cmd)
                    os.system(cmd)
                    cmd = './scripts/vcloud_cmd.sh \"{}\" \"pkill -f \'runcl\'\"'.format(' '.join(machines))
                    print(cmd)
                    os.system(cmd)
                # Sync clocks before each experiment
                if cluster == 'vcloud':
                    print("Syncing Clocks...")
                    cmd = './scripts/vcloud_cmd.sh \'{}\' \'ntpdate -b clock-1.cs.cmu.edu\''.format(' '.join(machines))
                    print(cmd)
                    os.system(cmd)
                print("Deploying: {}".format(output_f))
                if cluster == 'istc':
                    cmd = './scripts/deploy.sh \'{}\' /home/{}/ {}'.format(' '.join(machines),uname,cfgs["NODE_CNT"])
                elif cluster == 'vcloud':
                    cmd = './scripts/vcloud_deploy.sh \'{}\' /{}/ {}'.format(' '.join(machines),uname,cfgs["NODE_CNT"])
                print(cmd)
                os.system(cmd)
                # collect per-node result files back from the cluster
                for m,n in zip(machines,range(len(machines))):
                    if cluster == 'istc':
                        cmd = 'scp {}.csail.mit.edu:/home/{}/results.out {}{}_{}.out'.format(m,uname,result_dir,n,output_f)
                        print(cmd)
                        os.system(cmd)
                    elif cluster == 'vcloud':
                        cmd = 'scp -i {} root@172.19.153.{}:/{}/results.out {}{}_{}.out'.format(identity,m,uname,result_dir,n,output_f)
                        print(cmd)
                        os.system(cmd)
                        cmd = 'ssh -i {} root@172.19.153.{} \"rm /{}/results.out\"'.format(identity,m,uname)
                        print(cmd)
                        os.system(cmd)
            else:
                # local execution: launch all server (rundb) and client (runcl)
                # processes, redirect each to its own .out file, then wait.
                nnodes = cfgs["NODE_CNT"]
                nclnodes = cfgs["CLIENT_NODE_CNT"]
                pids = []
                print("Deploying: {}".format(output_f))
                for n in range(nnodes+nclnodes):
                    if n < nnodes:
                        cmd = "./rundb -nid{}".format(n)
                    else:
                        cmd = "./runcl -nid{}".format(n)
                    print(cmd)
                    cmd = shlex.split(cmd)
                    ofile_n = "{}{}_{}.out".format(result_dir,n,output_f)
                    ofile = open(ofile_n,'w')
                    p = subprocess.Popen(cmd,stdout=ofile,stderr=ofile)
                    pids.insert(0,p)
                for n in range(nnodes + nclnodes):
                    pids[n].wait()
| rharding6373/ddbms | scripts/run_experiments.py | Python | apache-2.0 | 7,751 |
# coding: utf-8
from PoliticalOrientation import Orientation
class Agent:
    """An agent with a political orientation distribution and a tolerance."""

    def __init__(self, generateInterests):
        """On class creation, randomly generate attributes if
        generateInterests is True. Else all attributes are 0."""
        from random import uniform
        from Configuration import MAX_TOLERANCE
        from HelperMethods import createEmptyOrientationDictionary
        self.orientation = {}
        # tolerance is a uniform float in [0, MAX_TOLERANCE]
        self.tolerance = uniform(0, MAX_TOLERANCE)
        if generateInterests:
            self.initialiseOrientation()
        else:
            self.orientation = createEmptyOrientationDictionary()

    def initialiseOrientation(self):
        """Set orientations randomly as probabilities.
        max(sum(orientation(node))) = 1
        min(sum(orientation(node))) = 0"""
        from random import randint, sample
        # shuffle the orientation names so no orientation is systematically
        # favoured by the shrinking percentage budget below
        orientationNames = sample([name for name, member in Orientation.__members__.items()], len(Orientation))
        percentageLeft = 100
        for name in orientationNames:
            percentage = randint(0, percentageLeft)
            percentageLeft -= percentage
            # FIX: divide by 100.0 -- plain `/ 100` is integer division under
            # Python 2 and would truncate every probability to 0.
            self.orientation[name] = percentage / 100.0  # make it a probability
| 0nse/PolOrSim | Agent.py | Python | gpl-3.0 | 1,102 |
# -*- coding: utf-8 -*-
# flake8: noqa
# Generated by Django 1.9.9 on 2016-09-21 18:00
from django.db import migrations, models
def copy_name(apps, schema_editor):
    """
    Copies the exercise name to the original name field
    """
    # use the historical model state, as required inside migrations
    Excercise = apps.get_model("exercises", "Exercise")
    for exercise in Excercise.objects.all():
        exercise.name_original = exercise.name
        exercise.save()

def capitalize_name(apps, schema_editor):
    """
    Capitalizes the name of the exercises

    The algorithm is copied here as it was implemented on the day the migration
    was written.
    """
    def capitalize(input):
        # capitalize each word longer than 2 chars, unless it starts with ß
        # (uppercasing 'ß' would expand to 'SS' and change the word)
        out = []
        for word in input.split(' '):
            if len(word) > 2 and word[0] != 'ß':
                out.append(word[:1].upper() + word[1:])
            else:
                out.append(word)
        return ' '.join(out)

    Excercise = apps.get_model("exercises", "Exercise")
    for exercise in Excercise.objects.all():
        exercise.name = capitalize(exercise.name_original)
        exercise.save()
class Migration(migrations.Migration):
    """Add name_original, back-fill it, then capitalize the display name."""

    dependencies = [
        ('exercises', '0002_auto_20150307_1841'),
    ]

    operations = [
        migrations.AddField(
            model_name='exercise',
            name='name_original',
            field=models.CharField(default='', max_length=200, verbose_name='Name'),
        ),
        # data migrations: no-op on reverse so the migration stays reversible
        migrations.RunPython(copy_name, reverse_code=migrations.RunPython.noop),
        migrations.RunPython(capitalize_name, reverse_code=migrations.RunPython.noop),
    ]
| rolandgeider/wger | wger/exercises/migrations/0003_auto_20160921_2000.py | Python | agpl-3.0 | 1,569 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
bin/verticalScrolledFrame TEST
Autor: PABLO PIZARRO @ github.com/ppizarror
Fecha: AGOSTO 2016
Licencia: GPLv2
"""
__author__ = "ppizarror"
# Importación de librerías
# noinspection PyUnresolvedReferences
from _testpath import * # @UnusedWildImport
from bin.errors import ERROR_TKINTER_NOT_INSTALLED # @UnusedImport
import unittest
# Constantes de los test
# Test constants (may be overridden from the command line below).
DISABLE_HEAVY_TESTS = True
DISABLE_HEAVY_TESTS_MSG = "Se desactivaron los tests pesados"

# Load arguments from the console when run as a script.
if __name__ == '__main__':
    from bin.arguments import argument_parser_factory
    argparser = argument_parser_factory("VerticalScrolledFrame Test", verbose=True, version=True,
                                        enable_skipped_test=True).parse_args()
    DISABLE_HEAVY_TESTS = argparser.enableHeavyTest
    VERBOSE = argparser.verbose
# UnitTest class (Python 2 syntax: `except Exception, e`)
# noinspection PyUnusedLocal
class VerticalScrolledFrameTest(unittest.TestCase):
    def setUp(self):
        """
        Test setup: verify that the widget module can be imported.
        :return: void
        :rtype: None
        """
        sucess = False
        try:
            from bin.verticalscrolledframe import VerticalScrolledFrame  # @UnusedWildImport @UnusedImport
            sucess = True
        except Exception, e:  # @UnusedVariable
            sucess = False
        assert sucess is True, ERROR_TKINTER_NOT_INSTALLED

    @staticmethod
    def testImportTkinter():
        """
        Test the import of the python-tk (Tkinter) library.
        :return: void
        :rtype: None
        """
        sucess = False
        try:
            import Tkinter  # @UnusedImport
            sucess = True
        except Exception, e:  # @UnusedVariable
            sucess = False
        assert sucess is True, ERROR_TKINTER_NOT_INSTALLED

# Run the test suite when executed directly.
if __name__ == '__main__':
    runner = unittest.TextTestRunner()
    itersuite = unittest.TestLoader().loadTestsFromTestCase(VerticalScrolledFrameTest)
    runner.run(itersuite)
| ppizarror/korektor | test/verticalscrolledframeTest.py | Python | gpl-2.0 | 2,092 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# pylint: disable=invalid-name
# pylint: enable=invalid-name
"""
simulates running a program through a processor architecture
Usage: processorSim.py --processor PROCESSORFILE PROGRAMFILE
"""
############################################################
#
# Copyright 2017, 2019, 2020, 2021 Mohammed El-Afifi
# This file is part of processorSim.
#
# processorSim is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# processorSim is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with processorSim. If not, see
# <http://www.gnu.org/licenses/>.
#
# program: processor simulator
#
# file: processorSim.py
#
# function: assembly program execution simulator
#
# description: runs an assembly program on a processor
#
# author: Mohammed El-Afifi (ME)
#
# environment: Visual Studdio Code 1.61.1, python 3.9.7, Fedora release
# 34 (Thirty Four)
#
# notes: This is a private program.
#
############################################################
import csv
import itertools
import logging
import operator
import sys
import argparse
import typing
from typing import Collection, Dict, IO, Iterable, List, Mapping, Optional, \
Sequence, Sized, TextIO, Tuple
import attr
import fastcore.foundation
import more_itertools
from more_itertools import prepend
from container_utils import BagValDict
import hw_loading
import program_utils
import sim_services
from sim_services.sim_defs import InstrState, StallState
# command-line option variables
# variable to receive the processor architecture file
_PROC_OPT_VAR = "processor_file"
_PROG_OPT_VAR = "prog_file"  # variable to receive the program file
_T = typing.TypeVar("_T")  # generic functional-unit type

def get_in_files(argv: Optional[Sequence[str]]) -> Tuple[TextIO, TextIO]:
    """Create input file objects from the given arguments.

    `argv` is the list of arguments.
    Returns the (processor, program) open file objects.
    """
    args = process_command_line(argv)
    return typing.cast(Tuple[TextIO, TextIO],
                       operator.attrgetter(_PROC_OPT_VAR, _PROG_OPT_VAR)(args))

def get_sim_res(processor_file: IO[str],
                program_file: Iterable[str]) -> List[List[str]]:
    """Calculate the simulation result table.

    `processor_file` is the file containing the processor architecture.
    `program_file` is the file containing the program to simulate.
    The function reads the program file and simulates its execution on
    the processor defined by the architecture provided in the given
    processor description file.
    """
    proc_desc = hw_loading.read_processor(processor_file)
    prog = program_utils.read_program(program_file)
    compiled_prog = program_utils.compile_program(prog, proc_desc.isa)
    proc_spec = sim_services.HwSpec(proc_desc.processor)
    return _get_sim_rows(
        enumerate(sim_services.simulate(compiled_prog, proc_spec)), len(prog))
def process_command_line(argv: Optional[Sequence[str]]) -> argparse.Namespace:
    """
    Return args object.

    `argv` is a list of arguments, or `None` for ``sys.argv[1:]``.
    """
    parser = argparse.ArgumentParser(add_help=False)
    # processor architecture file
    parser.add_argument(
        '--processor', dest=_PROC_OPT_VAR, type=open, required=True,
        metavar="PROCESSORFILE",
        help='Read the processor architecture from this file.')
    # program to simulate
    parser.add_argument(
        _PROG_OPT_VAR, type=open, metavar="PROGRAMFILE",
        help='Simulate this program file.')
    # customized help; defined last so it appears after the others
    parser.add_argument(
        '-h', '--help', action='help',
        help='Show this help message and exit.')
    return parser.parse_args(sys.argv[1:] if argv is None else argv)
def main(argv: Optional[Sequence[str]] = None) -> int:
    """Run the program.

    `argv` is the command-line arguments, defaulting to None.
    The function returns the program exit code.
    """
    processor_file, program_file = get_in_files(argv)
    logging.basicConfig(level=logging.INFO)
    run(processor_file, program_file)
    return 0  # success

def run(processor_file: IO[str], program_file: IO[str]) -> None:
    """Simulate the program on the given processor.

    `processor_file` is the file containing the processor architecture.
    `program_file` is the file containing the program to simulate.
    The function reads the program file and simulates its execution on
    the processor defined by the architecture provided in the given
    processor description file.
    """
    # `with` closes both input files even if the simulation raises
    with processor_file, program_file:
        _ResultWriter.print_sim_res(get_sim_res(processor_file, program_file))
@attr.s(auto_attribs=True, frozen=True)
class _InstrPosition:
    """Instruction position: which unit holds the instruction and whether
    it is stalled at a given clock cycle."""

    def __str__(self) -> str:
        """Return the printable string of this instruction position.

        `self` is this instruction position.
        Format is "<stall-code>:<unit>" with U=unstalled, S=structural
        stall, D=data stall.
        """
        stall_map = {StallState.NO_STALL: 'U', StallState.STRUCTURAL: 'S',
                     StallState.DATA: 'D'}
        return f"{stall_map[self._stalled]}:{self._unit}"

    _unit: object
    _stalled: StallState

@attr.s(auto_attribs=True, frozen=True)
class _InstrFlight:
    """Instruction flight: the cycle an instruction enters the pipeline
    and the sequence of positions it occupies on consecutive cycles."""

    start_time: int
    stops: Iterable[_InstrPosition]
class _ResultWriter:
"""Simulation result writer"""
@classmethod
def print_sim_res(cls, sim_res: Collection[Collection[object]]) -> None:
"""Print the simulation result.
`cls` is the writer class.
`sim_res` is the simulation result to print.
"""
cls._print_tbl_hdr(sim_res)
cls._print_tbl_data(enumerate(sim_res, 1))
@staticmethod
def _get_last_tick(sim_res: Iterable[Sized]) -> int:
"""Calculate the last clock cycle in the simulation.
`sim_res` is the simulation result.
"""
return max(map(len, sim_res), default=0)
@classmethod
def _get_ticks(cls, sim_res: Iterable[Sized]) -> range:
"""Retrieve the clock cycles.
`cls` is the writer class.
`sim_res` is the simulation result.
The method calculates the clock cycles necessary to run the
whole simulation and returns an iterator over them.
"""
return range(1, cls._get_last_tick(sim_res) + 1)
@classmethod
def _print_res_row(cls, row_key: str, res_row: Iterable[object]) -> None:
"""Print the given simulation row.
`cls` is the writer class.
`row_key` is the row key.
`res_row` is the simulation row.
"""
cls._writer.writerow(prepend(row_key, res_row))
@classmethod
def _print_tbl_data(
cls, sim_res: Iterable[Tuple[int, Iterable[object]]]) -> None:
"""Print the simulation table rows.
`cls` is the writer class.
`sim_res` is the simulation result.
"""
for row_idx, fields in sim_res:
cls._print_res_row('I' + str(row_idx), fields)
@classmethod
def _print_tbl_hdr(cls, sim_res: Iterable[Sized]) -> None:
"""Print the simulation table header.
`cls` is the writer class.
`sim_res` is the simulation result.
"""
cls._print_res_row("", cls._get_ticks(sim_res))
_writer = csv.writer(sys.stdout, "excel-tab")
def _create_flight(instr_util: Mapping[int, _InstrPosition]) -> _InstrFlight:
    """Create an instruction flight from its utilization.

    `instr_util` is the instruction utilization information, a map from
    clock cycle to instruction position (assumed contiguous in time).
    """
    start_time = min(instr_util.keys())
    time_span = len(instr_util)
    # look up the position for each consecutive cycle of the flight
    return _InstrFlight(start_time, fastcore.foundation.map_ex(
        range(start_time, start_time + time_span), instr_util, gen=True))

def _cui_to_flights(cxuxi: Iterable[Tuple[int, BagValDict[_T, InstrState]]],
                    instructions: int) -> "map[_InstrFlight]":
    """Convert a CxUxI utilization map to instruction flights.

    `cxuxi` is the ClockxUnitxInstruction utilization map to convert.
    `instructions` are the total number of instructions.
    """
    return _icu_to_flights(_cui_to_icu(cxuxi, instructions))

def _cui_to_icu(cxuxi: Iterable[Tuple[int, BagValDict[_T, InstrState]]],
                instructions: int) -> List[Dict[int, _InstrPosition]]:
    """Convert a CxUxI utilization map to IxCxU format.

    `cxuxi` is the ClockxUnitxInstruction utilization map to convert.
    `instructions` are the total number of instructions.
    """
    # one (initially empty) clock->position map per instruction
    ixcxu: List[Dict[int, _InstrPosition]] = list(
        more_itertools.repeatfunc(dict, instructions))
    for cur_cp, uxi_util in cxuxi:
        _fill_cp_util(cur_cp, uxi_util.items(), ixcxu)
    return ixcxu

def _fill_cp_util(clock_pulse: int, cp_util: Iterable[
        Tuple[object, Iterable[InstrState]]], ixcxu: Sequence[
            typing.MutableMapping[int, _InstrPosition]]) -> None:
    """Fill the given clock utilization into the IxCxU map.

    `clock_pulse` is the clock pulse.
    `cp_util` is the clock pulse utilization information.
    `ixcxu` is the InstructionxClockxUnit utilization map to fill.
    """
    for unit, instr_lst in cp_util:
        for instr in instr_lst:
            # record where (and in what stall state) this instruction sits
            # at this clock pulse
            ixcxu[instr.instr][clock_pulse] = _InstrPosition(
                unit, instr.stalled)
def _get_flight_row(flight: _InstrFlight) -> List[str]:
"""Convert the given flight to a row.
`flight` is the flight to convert.
"""
return [*(itertools.repeat("", flight.start_time)),
*(str(stop) for stop in flight.stops)]
def _get_sim_rows(sim_res: Iterable[Tuple[int, BagValDict[_T, InstrState]]],
                  instructions: int) -> List[List[str]]:
    """Calculate the simulation rows.

    `sim_res` is the simulation result.
    `instructions` are the total number of instructions.
    Returns one printable row per instruction.
    """
    flights = _cui_to_flights(sim_res, instructions)
    return [_get_flight_row(flight) for flight in flights]

def _icu_to_flights(
        ixcxu: Iterable[Mapping[int, _InstrPosition]]) -> "map[_InstrFlight]":
    """Convert a IxCxU utilization map to instruction flights.

    `ixcxu` is the InstructionxClockxUnit utilization map to convert.
    """
    return map(_create_flight, ixcxu)

if __name__ == '__main__':
    sys.exit(main())
| MSK61/processorsim | src/processorSim.py | Python | lgpl-3.0 | 10,796 |
import json
from collections import defaultdict
from pathlib import Path
from typing import Dict, List
from unittest import mock
from unittest.mock import MagicMock
import pytest
from ereuse_utils import DeviceHubJSONEncoder
from ereuse_workbench.computer import Computer
def fixture(file_name: str):
    """Return the raw text of a file from the tests' fixtures directory."""
    with Path(__file__).parent.joinpath('fixtures').joinpath(file_name).open() as file:
        return file.read()

def jsonf(file_name: str) -> dict:
    """Gets a json fixture and parses it to a dict."""
    with Path(__file__).parent.joinpath('fixtures').joinpath(file_name + '.json').open() as file:
        return json.load(file)
@pytest.fixture()
def lshw() -> MagicMock:
    """
    Mocks the call to LSHW from Computer.

    Set ``mocked.return_value.json`` with a JSON string, where
    ``mocked`` is the injected parameter you receive in your test.
    """
    class Run:
        # Callable standing in for computer.run(); answers differently
        # depending on which external tool the command invokes.
        def __init__(self) -> None:
            self.json = ''  # lshw output the test wants to simulate
            super().__init__()

        def __call__(self, cmd, **kwargs):
            cmd = str(cmd)
            if 'lshw' in cmd:
                return Result(self.json)
            elif 'dmidecode' in cmd:
                return Result(1)
            else:
                return Result('')

    class Result:
        # Minimal stand-in for a completed process: only .stdout is used.
        def __init__(self, stdout) -> None:
            self.stdout = stdout

    with mock.patch('ereuse_workbench.computer.run') as run:
        run.side_effect = Run()
        yield run
def computer(lshw: MagicMock, json_name: str) -> (dict, Dict[str, List[dict]]):
    """Given a LSHW output and a LSHW mock, runs Computer.

    Returns the detected PC dict and its components grouped by '@type'.
    """
    lshw.side_effect.json = fixture(json_name + '.json')
    computer_getter = Computer()
    assert lshw.called
    pc, components = computer_getter.run()
    # round-trip through JSON to normalise values via DeviceHubJSONEncoder
    components = json.dumps(components, skipkeys=True, cls=DeviceHubJSONEncoder, indent=2)
    # Group components in a dictionary by their @type
    grouped = defaultdict(list)
    for component in json.loads(components):
        grouped[component['@type']].append(component)
    return pc, grouped

@pytest.fixture()
def subprocess_os_installer() -> MagicMock:
    """Fixture that replaces the OS installer's subprocess module."""
    with mock.patch('ereuse_workbench.os_installer.subprocess') as subprocess:
        subprocess.run = MagicMock()
        yield subprocess
| eReuse/device-inventory | tests/conftest.py | Python | agpl-3.0 | 2,261 |
# Minimal test helper: exits with status code 1 so the surrounding harness
# can verify that a failing exit status is detected.
import sys
sys.exit(1)
| rmbar/acceptpy | tool_files/bash_tester/tests/exit_1.py | Python | mit | 24 |
# Copyright (C) 2012 Equinor ASA, Norway.
#
# The file 'field_config.py' is part of ERT - Ensemble based Reservoir Tool.
#
# ERT is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ERT is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE.
#
# See the GNU General Public License at <http://www.gnu.org/licenses/gpl.html>
# for more details.
from cwrap import BaseCClass
from res import ResPrototype
from res.enkf import LoadFailTypeEnum
class SummaryConfig(BaseCClass):
    """Python wrapper around the native ``summary_config`` C structure."""
    TYPE_NAME = "summary_config"
    # C function prototypes resolved through ResPrototype.
    _alloc = ResPrototype("void* summary_config_alloc(char*, load_fail_type)", bind=False)
    _free = ResPrototype("void summary_config_free(summary_config)")
    _get_var = ResPrototype("char* summary_config_get_var(summary_config)")
    def __init__(self, key, load_fail = LoadFailTypeEnum.LOAD_FAIL_WARN):
        # Allocate the native object and hand ownership to BaseCClass.
        c_ptr = self._alloc(key, load_fail)
        super(SummaryConfig, self).__init__(c_ptr)
    def __repr__(self):
        return 'SummaryConfig() %s' % self._ad_str()
    def free(self):
        # Release the underlying C allocation; invoked by BaseCClass machinery.
        self._free( )
    @property
    def key(self):
        # The summary variable key this config refers to.
        return self._get_var()
| andreabrambilla/libres | python/res/enkf/config/summary_config.py | Python | gpl-3.0 | 1,414 |
import sys
import uuid
import stratus
PROMPT = "Method to call: "
CLI_NAME = str(uuid.uuid4())
def main():
    # Entry point (Python 2): connect to a stratus cluster and issue remote
    # calls, either from argv or interactively.
    # Create the client
    client = stratus.client()
    # Connect to the cluster
    client.connect(host=sys.argv[1], name=CLI_NAME)
    # Use other arguments for call
    if len(sys.argv) > 2:
        line = sys.argv[2:]
        print client.call(*line).result()
    # Ask function to call
    else:
        # Interactive mode: keep prompting until the user types "exit".
        line = raw_input(PROMPT)
        while line != "exit":
            line = line.split()
            print client.call(*line).result()
            line = raw_input(PROMPT)
    # Disconnect client
    client.disconnect()
if __name__ == '__main__':
    main()
| pdxjohnny/stratus | examples/cli.py | Python | mit | 677 |
__author__ = 'keyvan'
from globals import * | rodsol/opencog | opencog/python/utility/numeric/__init__.py | Python | agpl-3.0 | 44 |
#
# This file is part of CasADi.
#
# CasADi -- A symbolic framework for dynamic optimization.
# Copyright (C) 2010-2014 Joel Andersson, Joris Gillis, Moritz Diehl,
# K.U. Leuven. All rights reserved.
# Copyright (C) 2011-2014 Greg Horn
#
# CasADi is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 3 of the License, or (at your option) any later version.
#
# CasADi is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with CasADi; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
#
# updates the copyright information for all .cs files
# usage: call recursive_traversal, with the following parameters
# parent directory, old copyright text content, new copyright text content
import os
excludedir = ["..\\Lib"]
def update_source(filename, oldcopyright, copyright):
    # Replace ``oldcopyright`` at the top of ``filename`` with ``copyright``,
    # preserving a UTF-8 BOM if present.  Python 2 only (uses the ``file``
    # builtin; handles are left to the GC to close).
    utfstr = chr(0xef)+chr(0xbb)+chr(0xbf)
    fdata = file(filename,"r+").read()
    isUTF = False
    if (fdata.startswith(utfstr)):
        isUTF = True
        fdata = fdata[3:]
    if (oldcopyright != None):
        if (fdata.startswith(oldcopyright)):
            fdata = fdata[len(oldcopyright):]
    if not (fdata.startswith(copyright)):
        print "updating "+filename
        fdata = copyright + fdata
        if (isUTF):
            file(filename,"w").write(utfstr+fdata)
        else:
            file(filename,"w").write(fdata)
def recursive_traversal(dir, oldcopyright, copyright):
    # Walk ``dir`` depth-first and rewrite the license header of every
    # C/C++/SWIG source file found (see update_source).  Directories listed
    # in the module-level ``excludedir`` are skipped.
    global excludedir
    fns = os.listdir(dir)
    print "listing "+dir
    for fn in fns:
        fullfn = os.path.join(dir,fn)
        if (fullfn in excludedir):
            continue
        if (os.path.isdir(fullfn)):
            recursive_traversal(fullfn, oldcopyright, copyright)
        else:
            if (fullfn.endswith(".cpp") or fullfn.endswith(".hpp") or fullfn.endswith(".h") or fullfn.endswith(".i")):
                update_source(fullfn, oldcopyright, copyright)
# Script body: read the old and new header text, then rewrite every matching
# source file under the parent directory.
oldcright = file("old_license_header.txt","r+").read()
#oldcright = None
cright = file("license_header.txt","r+").read()
recursive_traversal("..", oldcright, cright)
exit()
| andrescodas/casadi | misc/update_license.py | Python | lgpl-3.0 | 2,584 |
"""
mbed SDK
Copyright (c) 2011-2013 ARM Limited
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import sys
import argparse
import xml.etree.ElementTree as ET
import requests
import urlparse
def create_headers(args):
    """Build the HTTP headers carrying the CI service API key."""
    return {'X-Api-Key': args.api_key}
def finish_command(command, response):
    """Report the outcome of *command* and exit: 0 on success, 2 on HTTP error."""
    print(command, response.status_code, response.reason)
    print(response.text)
    exit_code = 0 if response.status_code < 400 else 2
    sys.exit(exit_code)
def create_build(args):
    # Register a new running build with the CI service and print its id.
    # Exits 0 on success, 2 on HTTP error.
    build = {}
    build['buildType'] = args.build_type
    build['number'] = args.build_number
    build['source'] = args.build_source
    build['status'] = 'running'
    r = requests.post(urlparse.urljoin(args.url, "api/builds"), headers=create_headers(args), json=build)
    if r.status_code < 400:
        if args.property_file_format:
            # KEY=VALUE form so CI jobs can source the output as properties.
            print("MBED_BUILD_ID=" + r.text)
        else:
            print(r.text)
        sys.exit(0)
    else:
        sys.exit(2)
def finish_build(args):
    """Mark the given build as completed on the CI service."""
    payload = {'status': 'completed'}
    r = requests.put(urlparse.urljoin(args.url, "api/builds/" + args.build_id),
                     headers=create_headers(args), json=payload)
    finish_command('finish-build', r)
def promote_build(args):
    """Promote the given build to a Release build."""
    payload = {'buildType': 'Release'}
    r = requests.put(urlparse.urljoin(args.url, "api/builds/" + args.build_id),
                     headers=create_headers(args), json=payload)
    finish_command('promote-build', r)
def abort_build(args):
    """Mark the given build as aborted on the CI service."""
    payload = {'status': 'aborted'}
    r = requests.put(urlparse.urljoin(args.url, "api/builds/" + args.build_id),
                     headers=create_headers(args), json=payload)
    finish_command('abort-build', r)
def add_project_runs(args):
    '''
    Upload build (and optionally test) results to the CI service as
    "project runs".

    Notes on the 'project_run_data' structure built here:

    'projectRuns' - tree keyed hostOs -> platform -> toolchain -> project,
    tracking which projects have been logged by the parsed report files.
    Leaf values are the per-project result dictionaries.

    'platforms_set', 'vendors_set', 'toolchains_set' and 'names_set' collect
    every distinct value mentioned in the given report files.
    'hostOses_set' only ever contains the --host-os command-line argument.
    '''
    project_run_data = {}
    project_run_data['projectRuns'] = {}
    project_run_data['platforms_set'] = set()
    project_run_data['vendors_set'] = set()
    project_run_data['toolchains_set'] = set()
    project_run_data['names_set'] = set()
    project_run_data['hostOses_set'] = set()
    project_run_data['hostOses_set'].add(args.host_os)
    # Build report is mandatory; the test report is optional.
    add_report(project_run_data, args.build_report, True, args.build_id, args.host_os)
    if (args.test_report):
        add_report(project_run_data, args.test_report, False, args.build_id, args.host_os)
    ts_data = format_project_run_data(project_run_data)
    r = requests.post(urlparse.urljoin(args.url, "api/projectRuns"), headers=create_headers(args), json=ts_data)
    finish_command('add-project-runs', r)
def format_project_run_data(project_run_data):
    """Flatten the projectRuns tree and convert the sets into JSON-friendly lists."""
    runs = [
        project
        for host_os in project_run_data['projectRuns'].values()
        for platform in host_os.values()
        for toolchain in platform.values()
        for project in toolchain.values()
    ]
    return {
        'projectRuns': runs,
        'platforms': list(project_run_data['platforms_set']),
        'vendors': list(project_run_data['vendors_set']),
        'toolchains': list(project_run_data['toolchains_set']),
        'names': list(project_run_data['names_set']),
        'hostOses': list(project_run_data['hostOses_set']),
    }
def find_project_run(projectRuns, project):
    """Return the stored run matching *project*'s tree path, or None if absent."""
    node = projectRuns
    for key in ('hostOs', 'platform', 'toolchain', 'project'):
        if project[key] not in node:
            return None
        node = node[project[key]]
    return node
def add_project_run(projectRuns, project):
    """Insert *project* into the nested hostOs/platform/toolchain tree."""
    node = projectRuns
    for key in ('hostOs', 'platform', 'toolchain'):
        node = node.setdefault(project[key], {})
    node[project['project']] = project
def update_project_run_results(project_to_update, project, is_build):
    """Copy build or test result fields from *project* onto *project_to_update*."""
    prefix = 'build' if is_build else 'test'
    for suffix in ('Pass', 'Result', 'Output'):
        field = prefix + suffix
        project_to_update[field] = project[field]
def update_project_run(projectRuns, project, is_build):
    """Merge *project* into the tree, updating its results if it already exists."""
    existing = find_project_run(projectRuns, project)
    if existing is None:
        add_project_run(projectRuns, project)
    else:
        update_project_run_results(existing, project, is_build)
def add_report(project_run_data, report_file, is_build, build_id, host_os):
    # Parse one JUnit XML report and fold every test case into
    # project_run_data (see add_project_runs for the structure).
    tree = None
    try:
        tree = ET.parse(report_file)
    except:
        # NOTE(review): bare except — any parse/IO failure aborts the script.
        print(sys.exc_info()[0])
        print('Invalid path to report: %s', report_file)
        sys.exit(1)
    test_suites = tree.getroot()
    for test_suite in test_suites:
        platform = ""
        toolchain = ""
        vendor = ""
        # Suite-level properties identify the target/toolchain/vendor.
        for properties in test_suite.findall('properties'):
            for property in properties.findall('property'):
                if property.attrib['name'] == 'target':
                    platform = property.attrib['value']
                    project_run_data['platforms_set'].add(platform)
                elif property.attrib['name'] == 'toolchain':
                    toolchain = property.attrib['value']
                    project_run_data['toolchains_set'].add(toolchain)
                elif property.attrib['name'] == 'vendor':
                    vendor = property.attrib['value']
                    project_run_data['vendors_set'].add(vendor)
        for test_case in test_suite.findall('testcase'):
            projectRun = {}
            projectRun['build'] = build_id
            projectRun['hostOs'] = host_os
            projectRun['platform'] = platform
            projectRun['toolchain'] = toolchain
            # Project id is the last dotted component of the classname.
            projectRun['project'] = test_case.attrib['classname'].split('.')[-1]
            projectRun['vendor'] = vendor
            project_run_data['names_set'].add(projectRun['project'])
            skipped = test_case.findall('skipped')
            if not skipped:
                system_outs = test_case.findall('system-out')
                output = ""
                if system_outs:
                    output = system_outs[0].text
                if is_build:
                    projectRun['buildOutput'] = output
                else:
                    projectRun['testOutput'] = output
                errors = test_case.findall('error')
                failures = test_case.findall('failure')
                projectRunPass = None
                result = None
                if errors:
                    projectRunPass = False
                    result = errors[0].attrib['message']
                elif failures:
                    projectRunPass = False
                    result = failures[0].attrib['message']
                else:
                    projectRunPass = True
                    result = 'OK'
                if is_build:
                    projectRun['buildPass'] = projectRunPass
                    projectRun['buildResult'] = result
                else:
                    projectRun['testPass'] = projectRunPass
                    projectRun['testResult'] = result
                update_project_run(project_run_data['projectRuns'], projectRun, is_build)
def main(arguments):
    """Parse the command line and dispatch to the selected subcommand."""
    # Register and parse command line arguments
    parser = argparse.ArgumentParser()
    parser.add_argument('-u', '--url', required=True, help='url to ci site')
    parser.add_argument('-k', '--api-key', required=True, help='api-key for posting data')
    subparsers = parser.add_subparsers(help='subcommand help')
    create_build_parser = subparsers.add_parser('create-build', help='create a new build')
    create_build_parser.add_argument('-b', '--build-number', required=True, help='build number')
    create_build_parser.add_argument('-T', '--build-type', choices=['Nightly', 'Limited', 'Pull_Request', 'Release_Candidate'], required=True, help='type of build')
    create_build_parser.add_argument('-s', '--build-source', required=True, help='url to source of build')
    create_build_parser.add_argument('-p', '--property-file-format', action='store_true', help='print result in the property file format')
    create_build_parser.set_defaults(func=create_build)
    finish_build_parser = subparsers.add_parser('finish-build', help='finish a running build')
    finish_build_parser.add_argument('-b', '--build-id', required=True, help='build id')
    finish_build_parser.set_defaults(func=finish_build)
    # NOTE(review): the promote-build parser reuses the finish_build_parser
    # variable; harmless, but easy to misread.
    finish_build_parser = subparsers.add_parser('promote-build', help='promote a build to a release')
    finish_build_parser.add_argument('-b', '--build-id', required=True, help='build id')
    finish_build_parser.set_defaults(func=promote_build)
    abort_build_parser = subparsers.add_parser('abort-build', help='abort a running build')
    abort_build_parser.add_argument('-b', '--build-id', required=True, help='build id')
    abort_build_parser.set_defaults(func=abort_build)
    add_project_runs_parser = subparsers.add_parser('add-project-runs', help='add project runs to a build')
    add_project_runs_parser.add_argument('-b', '--build-id', required=True, help='build id')
    add_project_runs_parser.add_argument('-r', '--build-report', required=True, help='path to junit xml build report')
    add_project_runs_parser.add_argument('-t', '--test-report', required=False, help='path to junit xml test report')
    add_project_runs_parser.add_argument('-o', '--host-os', required=True, help='host os on which test was run')
    add_project_runs_parser.set_defaults(func=add_project_runs)
    args = parser.parse_args(arguments)
    args.func(args)
if __name__ == '__main__':
    main(sys.argv[1:])
| jrjang/mbed | workspace_tools/upload_results.py | Python | apache-2.0 | 12,273 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations
from corehq.form_processor.models import XFormInstanceSQL
from corehq.sql_db.operations import RawSQLMigration, HqRunSQL
migrator = RawSQLMigration(('corehq', 'sql_accessors', 'sql_templates'), {
'FORM_STATE_DELETED': XFormInstanceSQL.DELETED
})
class Migration(migrations.Migration):
    # Drops the obsolete save_ledger_values() SQL function and installs
    # get_ledger_transactions_for_case.sql from the sql_templates directory.
    dependencies = [
        ('sql_accessors', '0013_merge'),
    ]
    operations = [
        HqRunSQL(
            "DROP FUNCTION IF EXISTS save_ledger_values(TEXT[], form_processor_ledgervalue[]);",
            "SELECT 1"
        ),
        migrator.get_migration('get_ledger_transactions_for_case.sql'),
    ]
| qedsoftware/commcare-hq | corehq/sql_accessors/migrations/0014_ledger_transactions.py | Python | bsd-3-clause | 707 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2015 Glencoe Software, Inc. All rights reserved.
#
# This software is distributed under the terms described by the LICENCE file
# you can find at the root of the distribution bundle.
# If the file is missing please request a copy by contacting
# jason@glencoesoftware.com.
#
from .. import Decoder
from omero.model import PermissionsI
class PermissionsDecoder(Decoder):
    """Decodes a marshalled permissions dict back into an OMERO PermissionsI."""
    TYPE = 'TBD#Permissions'
    OMERO_CLASS = PermissionsI
    def decode(self, data):
        o = PermissionsI()
        o.from_string(data['perm'])
        # Restrictions are the inverse of the "can*" capability flags, in the
        # order: link, edit, delete, annotate.
        o._restrictions = [
            not data['canLink'], not data['canEdit'],
            not data['canDelete'], not data['canAnnotate']
        ]
        return o
decoder = (PermissionsDecoder.TYPE, PermissionsDecoder)
import csv
import itertools
import random
import ast
import sys
#usage
# python parseResults.py
# Script setup (Python 2): read the raw model-results file and the list of
# experiment items (one per line, whitespace-normalized).
fname = '../results/model_results/results_betabinomial.txt'
file_names = [fname]
itemfile = open("items.txt")
items = [" ".join(l.rstrip().split()) for l in itemfile.readlines()]
itemfile.close()
print items
lines = []
results = []
wresults = []
files = [open(fn) for fn in file_names]
for f in files:
    lines.extend([l.rstrip() for l in f.readlines()])
#print lines
def getReducedAlternatives(alts):
    """Encode which alternative sets appear in *alts* as an ordered tag string."""
    # (needle, tag) pairs in the exact concatenation order of the original.
    markers = (
        ("some,all,none", "0_basic"),
        ("one,two,three", "1_lownum"),
        ("many", "2_extra"),
        ("eleven", "3_highnum"),
        ("almostall", "4_twowords"),
        ("lessthanhalf", "5_threewords"),
    )
    return "".join(tag for needle, tag in markers if needle in alts)
# Main parse loop (Python 2).  The results file is a flat sequence of lines;
# an "alternatives" line starts a new condition block, while "quantifier",
# "speaker-opt" and "qud" lines re-use the previously parsed fields.
# ``priorcnt`` indexes into ``items`` (90 items, 5 wonky-world priors each,
# tracked by ``wwcnt``).
headers = ["Item","QUD","Wonky","State","Alternatives","WonkyWorldPrior","Quantifier","SpeakerOptimality","PosteriorProbability"]
k = 0
wwcnt = -1
condcnt = 0
priorcnt = 0
while k < len(lines):
    if lines[k] == "alternatives":
        # Advance the (item, wonky-world-prior) counters; wraps after item 89.
        if priorcnt < 89:
            if wwcnt < 4:
                wwcnt = wwcnt + 1
            else:
                priorcnt = priorcnt+1
                wwcnt = 0
        else:
            if wwcnt < 4:
                wwcnt = wwcnt + 1
            else:
                priorcnt = 0
                wwcnt = 0
        print items[priorcnt]
        print priorcnt
        print wwcnt
        k = k + 1
        alts = getReducedAlternatives(lines[k])
        k = k + 4
        wonkyworldprior = lines[k]
        k = k + 1
        quantifier = lines[k].split(",")[1]
        k = k + 1
        qud = lines[k].split(",")[1]
        k = k + 1
        spopt = lines[k].split(",")[1]
        k = k + 1
        # Distribution line: "(wonky,state) pairs ,,, probabilities".
        combs = lines[k].split(",,,")
        pairs = combs[0].split(",,")
        probs = combs[1].split(",")
        # print pairs
        # print k
        for j,pa in enumerate(pairs):
            pasplit = pa.split(",")
            wonky = pasplit[0]
            ssize = pasplit[1]
            prob = probs[j]
            results.append([items[priorcnt],qud, wonky, ssize, alts, wonkyworldprior, quantifier, spopt, prob])
        k = k + 1
    elif lines[k].startswith("quantifier"):
        quantifier = lines[k].split(",")[1]
        k = k + 1
        qud = lines[k].split(",")[1]
        k = k + 1
        spopt = lines[k].split(",")[1]
        k = k + 1
        combs = lines[k].split(",,,")
        pairs = combs[0].split(",,")
        # print combs
        probs = combs[1].split(",")
        # print pairs
        # print k
        for j,pa in enumerate(pairs):
            pasplit = pa.split(",")
            wonky = pasplit[0]
            ssize = pasplit[1]
            prob = probs[j]
            results.append([items[priorcnt],qud, wonky, ssize, alts, wonkyworldprior, quantifier, spopt, prob])
        k = k + 1
    elif lines[k].startswith("speaker-opt"):
        spopt = lines[k].split(",")[1]
        k = k + 1
        combs = lines[k].split(",,,")
        pairs = combs[0].split(",,")
        probs = combs[1].split(",")
        # print pairs
        # print k
        for j,pa in enumerate(pairs):
            pasplit = pa.split(",")
            wonky = pasplit[0]
            ssize = pasplit[1]
            prob = probs[j]
            results.append([items[priorcnt],qud, wonky, ssize, alts, wonkyworldprior, quantifier, spopt, prob])
        k = k + 1
    elif lines[k].startswith("qud"):
        qud = lines[k].split(",")[1]
        k = k + 1
        spopt = lines[k].split(",")[1]
        k = k + 1
        combs = lines[k].split(",,,")
        pairs = combs[0].split(",,")
        probs = combs[1].split(",")
        # print pairs
        # print k
        for j,pa in enumerate(pairs):
            pasplit = pa.split(",")
            wonky = pasplit[0]
            ssize = pasplit[1]
            prob = probs[j]
            results.append([items[priorcnt],qud, wonky, ssize, alts, wonkyworldprior, quantifier, spopt, prob])
        k = k + 1
    else:
        # NOTE(review): k is NOT advanced here, so an unrecognized line makes
        # this loop spin forever printing the message below.
        #print lines[k]
        print "this shouldn't be happening"
#print results
# Write the flattened rows as a tab-separated file.
for r in results:
    inner_dict = dict(zip(headers,r))
    wresults.append(inner_dict)
oname = '../results/data/parsed_betabinomial_results.tsv'
w = csv.DictWriter(open(oname, 'wb'),fieldnames=headers,restval="NA",delimiter="\t")
w.writeheader()
w.writerows(wresults)
| thegricean/sinking-marbles | models/wonky_world/scripts/parseBetaBinomialResults.py | Python | mit | 3,898 |
import pytest
from webdriver.bidi.error import InvalidArgumentException
@pytest.mark.asyncio
async def test_params_empty(bidi_session, send_blocking_command):
    """Subscribing without an "events" entry must be rejected."""
    with pytest.raises(InvalidArgumentException):
        # The return value is irrelevant; only the raised error matters,
        # so the previously unused `response` binding was removed.
        await send_blocking_command("session.subscribe", {})
@pytest.mark.asyncio
@pytest.mark.parametrize("value", [None, True, "foo", 42, {}])
async def test_params_events_invalid_type(bidi_session, send_blocking_command, value):
    """A non-array "events" value must be rejected."""
    with pytest.raises(InvalidArgumentException):
        # Unused `response` binding removed; only the error matters.
        await send_blocking_command("session.subscribe", {"events": value})
@pytest.mark.asyncio
async def test_params_events_empty(bidi_session):
    """An empty events list is valid and yields an empty result."""
    result = await bidi_session.session.subscribe(events=[])
    assert result == {}
@pytest.mark.asyncio
@pytest.mark.parametrize("value", [None, True, 42, [], {}])
async def test_params_events_value_invalid_type(send_blocking_command, value):
    """A non-string entry inside "events" must be rejected."""
    with pytest.raises(InvalidArgumentException):
        # Unused `response` binding removed; only the error matters.
        await send_blocking_command("session.subscribe", {"events": [value]})
@pytest.mark.asyncio
@pytest.mark.parametrize("value", ["", "foo", "foo.bar", "log.invalidEvent"])
async def test_params_events_value_invalid_event_name(send_blocking_command, value):
    """An unknown or malformed event name must be rejected."""
    with pytest.raises(InvalidArgumentException):
        # Unused `response` binding removed; only the error matters.
        await send_blocking_command("session.subscribe", {"events": [value]})
@pytest.mark.asyncio
@pytest.mark.parametrize("value", [None, True, "foo", 42, {}])
async def test_params_contexts_invalid_type(bidi_session, send_blocking_command, value):
    """A non-array "contexts" value must be rejected."""
    with pytest.raises(InvalidArgumentException):
        # Unused `response` binding removed; only the error matters.
        await send_blocking_command(
            "session.subscribe",
            {
                "events": [],
                "contexts": value,
            }
        )
@pytest.mark.asyncio
async def test_params_contexts_empty(bidi_session):
    """An empty contexts list is valid and yields an empty result."""
    result = await bidi_session.session.subscribe(events=[], contexts=[])
    assert result == {}
@pytest.mark.asyncio
@pytest.mark.parametrize("value", [None, True, 42, [], {}])
async def test_params_contexts_value_invalid_type(send_blocking_command, value):
    """A non-string entry inside "contexts" must be rejected."""
    with pytest.raises(InvalidArgumentException):
        # Unused `response` binding removed; only the error matters.
        await send_blocking_command(
            "session.subscribe",
            {
                "events": [],
                "contexts": [value],
            }
        )
| servo/servo | tests/wpt/web-platform-tests/webdriver/tests/bidi/session_subscribe/subscribe.py | Python | mpl-2.0 | 2,368 |
"""Database models used by django-reversion."""
from __future__ import unicode_literals
from django.contrib.contenttypes.models import ContentType
from django.contrib.contenttypes import generic
from django.conf import settings
from django.core import serializers
from django.core.exceptions import ObjectDoesNotExist
from django.db import models, IntegrityError
from django.dispatch.dispatcher import Signal
from django.utils.translation import ugettext_lazy as _
from django.utils.encoding import force_text, python_2_unicode_compatible
def safe_revert(versions):
    """
    Attempts to revert the given models contained in the give versions.

    This method will attempt to resolve dependencies between the versions to revert
    them in the correct order to avoid database integrity errors.
    """
    failed = []
    for version in versions:
        try:
            version.revert()
        except (IntegrityError, ObjectDoesNotExist):
            failed.append(version)
    # If nothing reverted at all, no ordering can fix it — give up.
    if len(failed) == len(versions):
        raise RevertError("Could not revert revision, due to database integrity errors.")
    if failed:
        safe_revert(failed)
class RevertError(Exception):
    """Raised when a revision or version cannot be reverted."""
# Honour a custom AUTH_USER_MODEL setting, defaulting to Django's auth.User.
UserModel = getattr(settings, 'AUTH_USER_MODEL', 'auth.User')
@python_2_unicode_compatible
class Revision(models.Model):
    """A group of related object versions."""
    # Which RevisionManager created this revision.
    manager_slug = models.CharField(
        max_length = 200,
        db_index = True,
        default = "default",
    )
    date_created = models.DateTimeField(auto_now_add=True,
                                        db_index=True,
                                        verbose_name=_("date created"),
                                        help_text="The date and time this revision was created.")
    user = models.ForeignKey(UserModel,
                             blank=True,
                             null=True,
                             on_delete=models.SET_NULL,
                             verbose_name=_("user"),
                             help_text="The user who created this revision.")
    comment = models.TextField(blank=True,
                               verbose_name=_("comment"),
                               help_text="A text comment on this revision.")
    def revert(self, delete=False):
        """Reverts all objects in this revision."""
        version_set = self.version_set.all()
        # Optionally delete objects no longer in the current revision.
        if delete:
            # Get a dict of all objects in this revision.
            old_revision = {}
            for version in version_set:
                try:
                    obj = version.object
                except ContentType.objects.get_for_id(version.content_type_id).model_class().DoesNotExist:
                    pass
                else:
                    old_revision[obj] = version
            # Calculate the set of all objects that are in the revision now.
            from reversion.revisions import RevisionManager
            current_revision = RevisionManager.get_manager(self.manager_slug)._follow_relationships(obj for obj in old_revision.keys() if obj is not None)
            # Delete objects that are no longer in the current revision.
            for item in current_revision:
                if item not in old_revision:
                    item.delete()
        # Attempt to revert all revisions.
        safe_revert(version_set)
    def __str__(self):
        """Returns a unicode representation."""
        return ", ".join(force_text(version) for version in self.version_set.all())
    #Meta
    class Meta:
        app_label = 'reversion'
def has_int_pk(model):
    """Tests whether the given model has an integer primary key."""
    pk = model._meta.pk
    # Follow foreign-key primary keys to the related model's own pk.
    if isinstance(pk, models.ForeignKey):
        return has_int_pk(pk.rel.to)
    # BigIntegerField is excluded even though it subclasses IntegerField.
    return (
        isinstance(pk, (models.IntegerField, models.AutoField)) and
        not isinstance(pk, models.BigIntegerField)
    )
@python_2_unicode_compatible
class Version(models.Model):
    """A saved version of a database model."""
    revision = models.ForeignKey(Revision,
                                 help_text="The revision that contains this version.")
    object_id = models.TextField(help_text="Primary key of the model under version control.")
    object_id_int = models.IntegerField(
        blank = True,
        null = True,
        db_index = True,
        help_text = "An indexed, integer version of the stored model's primary key, used for faster lookups.",
    )
    content_type = models.ForeignKey(ContentType,
                                     help_text="Content type of the model under version control.")
    # A link to the current instance, not the version stored in this Version!
    object = generic.GenericForeignKey()
    format = models.CharField(max_length=255,
                              help_text="The serialization format used by this model.")
    serialized_data = models.TextField(help_text="The serialized form of this version of the model.")
    object_repr = models.TextField(help_text="A string representation of the object.")
    @property
    def object_version(self):
        """The stored version of the model."""
        data = self.serialized_data
        data = force_text(data.encode("utf8"))
        return list(serializers.deserialize(self.format, data, ignorenonexistent=True))[0]
    @property
    def field_dict(self):
        """
        A dictionary mapping field names to field values in this version
        of the model.

        This method will follow parent links, if present.  The result is
        cached on the instance after the first access.
        """
        if not hasattr(self, "_field_dict_cache"):
            object_version = self.object_version
            obj = object_version.object
            result = {}
            for field in obj._meta.fields:
                result[field.name] = field.value_from_object(obj)
            result.update(object_version.m2m_data)
            # Add parent data.
            for parent_class, field in obj._meta.concrete_model._meta.parents.items():
                if obj._meta.proxy and parent_class == obj._meta.concrete_model:
                    continue
                content_type = ContentType.objects.get_for_model(parent_class)
                if field:
                    parent_id = force_text(getattr(obj, field.attname))
                else:
                    parent_id = obj.pk
                try:
                    parent_version = Version.objects.get(revision__id=self.revision_id,
                                                         content_type=content_type,
                                                         object_id=parent_id)
                except Version.DoesNotExist:
                    # Parent was not captured in this revision; skip it.
                    pass
                else:
                    result.update(parent_version.field_dict)
            setattr(self, "_field_dict_cache", result)
        return getattr(self, "_field_dict_cache")
    def revert(self):
        """Recovers the model in this version."""
        self.object_version.save()
    def __str__(self):
        """Returns a unicode representation."""
        return self.object_repr
    #Meta
    class Meta:
        app_label = 'reversion'
# Version management signals.
# Fired immediately before/after a revision and its versions are committed.
pre_revision_commit = Signal(providing_args=["instances", "revision", "versions"])
post_revision_commit = Signal(providing_args=["instances", "revision", "versions"])
| adonm/django-reversion | src/reversion/models.py | Python | bsd-3-clause | 7,592 |
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Generated code. DO NOT EDIT!
#
# Snippet for GetHyperparameterTuningJob
# NOTE: This snippet has been automatically generated for illustrative purposes only.
# It may require modifications to work in your environment.
# To install the latest published package dependency, execute the following:
# python3 -m pip install google-cloud-aiplatform
# [START aiplatform_v1beta1_generated_JobService_GetHyperparameterTuningJob_sync]
from google.cloud import aiplatform_v1beta1
def sample_get_hyperparameter_tuning_job():
    """Fetch a single HyperparameterTuningJob by resource name and print it."""
    # Create a client
    client = aiplatform_v1beta1.JobServiceClient()
    # Initialize request argument(s)
    request = aiplatform_v1beta1.GetHyperparameterTuningJobRequest(
        name="name_value",
    )
    # Make the request
    response = client.get_hyperparameter_tuning_job(request=request)
    # Handle the response
    print(response)
# [END aiplatform_v1beta1_generated_JobService_GetHyperparameterTuningJob_sync]
| googleapis/python-aiplatform | samples/generated_samples/aiplatform_v1beta1_generated_job_service_get_hyperparameter_tuning_job_sync.py | Python | apache-2.0 | 1,556 |
#
# Copyright (c) 2017 Juniper Networks, Inc. All rights reserved.
#
import sys
import os
import time
import socket
import select
import eventlet
import json
import requests
from cStringIO import StringIO
from cfgm_common.utils import cgitb_hook
class KubeMonitor(object):
    """Base class for monitors that watch a Kubernetes/OpenShift API resource."""
    def __init__(self, args=None, logger=None, q=None, db=None,
                 resource_name='KubeMonitor', beta=False):
        # Raises if the api server cannot be reached at construction time.
        self.name = type(self).__name__
        self.args = args
        self.logger = logger
        self.q = q
        self.cloud_orchestrator = self.args.orchestrator
        self.token = self.args.token # valid only for OpenShift
        self.headers = {'Connection': 'Keep-Alive'}
        self.verify = False
        self.timeout = 60
        # Per-monitor stream handle to api server.
        self.kube_api_resp = None
        self.kube_api_stream_handle = None
        # Resource name corresponding to this monitor.
        self.resource_name = resource_name
        self.resource_beta = beta
        # Use Kube DB if kube object caching is enabled in config.
        if args.kube_object_cache == 'True':
            self.db = db
        else:
            self.db = None
        self.kubernetes_api_server = self.args.kubernetes_api_server
        # With a token we talk HTTPS to the secure port; otherwise plain HTTP.
        if self.token:
            protocol = "https"
            header = {'Authorization': "Bearer " + self.token}
            self.headers.update(header)
            self.verify = False
            self.kubernetes_api_server_port = self.args.kubernetes_api_secure_port
        else: # kubernetes
            protocol = "http"
            self.kubernetes_api_server_port = self.args.kubernetes_api_port
        # URL to the api server.
        self.url = "%s://%s:%s" % (protocol,
                                   self.kubernetes_api_server,
                                   self.kubernetes_api_server_port)
        # URL to the v1-components in api server.
        self.v1_url = "%s/api/v1" % (self.url)
        # URL to v1-beta1 components to api server.
        self.beta_url = "%s/apis/extensions/v1beta1" % (self.url)
        if not self._is_kube_api_server_alive():
            msg = "kube_api_service is not available"
            self.logger.error("%s - %s" %(self.name, msg))
            raise Exception(msg)
        self.logger.info("%s - KubeMonitor init done." %self.name)
def _is_kube_api_server_alive(self):
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
result = sock.connect_ex((self.kubernetes_api_server, \
self.kubernetes_api_server_port))
if result == 0:
return True
else:
return False
def _get_component_url(self):
"""URL to a component.
This method return the URL for the component represented by this
monitor instance.
"""
if self.resource_beta == False:
base_url = self.v1_url
else:
base_url = self.beta_url
url = "%s/%s" % (base_url, self.resource_name)
return url
def get_entry_url(self, base_url, entry):
"""URL to an entry of this component.
This method returns a URL to a specific entry of this component.
"""
return base_url + entry['metadata']['selfLink']
def init_monitor(self):
"""Initialize/sync a monitor component.
This method will initialize a monitor component.
As a part of this init, this method will read existing entries in api
server and populate the local db.
"""
# Get the URL to this component.
url = self._get_component_url()
try:
resp = requests.get(url, headers=self.headers, verify=self.verify)
if resp.status_code != 200:
resp.close()
return
except requests.exceptions.RequestException as e:
self.logger.error("%s - %s" % (self.name, e))
return
initial_entries = resp.json()['items']
resp.close()
if initial_entries:
for entry in initial_entries:
entry_url = self.get_entry_url(self.url, entry)
try:
resp = requests.get(entry_url, headers=self.headers, \
verify=self.verify)
if resp.status_code != 200:
resp.close()
continue
except requests.exceptions.RequestException as e:
self.logger.error("%s - %s" % (self.name, e))
continue
try:
# Construct the event and initiate processing.
event = {'object':resp.json(), 'type':'ADDED'}
self.process_event(event)
except ValueError:
self.logger.error("Invalid data read from kube api server:"
" %s" % (entry))
except Exception as e:
string_buf = StringIO()
cgitb_hook(file=string_buf, format="text")
err_msg = string_buf.getvalue()
self.logger.error("%s - %s" %(self.name, err_msg))
resp.close()
def register_monitor(self):
"""Register this component for notifications from api server.
"""
if self.kube_api_resp:
self.kube_api_resp.close()
if not self._is_kube_api_server_alive():
msg = "kube_api_service is not available"
self.logger.error("%s - %s" %(self.name, msg))
time.sleep(self.timeout)
return
url = self._get_component_url()
try:
resp = requests.get(url, params={'watch': 'true'}, \
stream=True, headers=self.headers, \
verify=self.verify)
if resp.status_code != 200:
resp.close()
return
# Get handle to events for this monitor.
self.kube_api_resp = resp
self.kube_api_stream_handle = resp.iter_lines(chunk_size=256,
delimiter='\n')
self.logger.info("%s - Watches %s" %(self.name, url))
except requests.exceptions.RequestException as e:
self.logger.error("%s - %s" % (self.name, e))
def get_resource(self, resource_type, resource_name, \
namespace=None, beta=False):
json_data = {}
if beta == False:
base_url = self.v1_url
else:
base_url = self.beta_url
if resource_type == "namespaces":
url = "%s/%s" % (base_url, resource_type)
else:
url = "%s/namespaces/%s/%s/%s" % (base_url, namespace,
resource_type, resource_name)
try:
resp = requests.get(url, stream=True, \
headers=self.headers, verify=self.verify)
if resp.status_code == 200:
json_data = json.loads(resp.raw.read())
resp.close()
except requests.exceptions.RequestException as e:
self.logger.error("%s - %s" % (self.name, e))
return json_data
def patch_resource(self, resource_type, resource_name, \
merge_patch, namespace=None, beta=False, sub_resource_name=None):
if beta == False:
base_url = self.v1_url
else:
base_url = self.beta_url
if resource_type == "namespaces":
url = "%s/%s" % (base_url, resource_type)
else:
url = "%s/namespaces/%s/%s/%s" % (base_url, namespace,
resource_type, resource_name)
if sub_resource_name:
url = "%s/%s" %(url, sub_resource_name)
headers = {'Accept': 'application/json', \
'Content-Type': 'application/strategic-merge-patch+json'}
headers.update(self.headers)
try:
resp = requests.patch(url, headers=headers, \
data=json.dumps(merge_patch), \
verify=self.verify)
if resp.status_code != 200:
resp.close()
return
except requests.exceptions.RequestException as e:
self.logger.error("%s - %s" % (self.name, e))
return resp.iter_lines(chunk_size=10, delimiter='\n')
def process(self):
"""Process available events."""
if not self.kube_api_stream_handle:
self.logger.error("%s - Event handler not found. "
"Cannot process its events." % (self.name))
return
resp = self.kube_api_resp
fp = resp.raw._fp.fp
if fp is None:
self.register_monitor()
return
try:
line = next(self.kube_api_stream_handle)
if not line:
return
except StopIteration:
return
except requests.exceptions.ChunkedEncodingError as e:
self.logger.error("%s - %s" %(self.name, e))
return
try:
self.process_event(json.loads(line))
except ValueError:
self.logger.error("Invalid JSON data from response stream:%s" % line)
except Exception as e:
string_buf = StringIO()
cgitb_hook(file=string_buf, format="text")
err_msg = string_buf.getvalue()
self.logger.error("%s - %s" %(self.name, err_msg))
def process_event(self, event):
"""Process an event."""
pass
| nischalsheth/contrail-controller | src/container/kube-manager/kube_manager/kube/kube_monitor.py | Python | apache-2.0 | 9,729 |
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the Apache License.
from datetime import datetime, timedelta
import zipfile
import os
import shutil
import tempfile
import azurelinuxagent.common.logger as logger
from azurelinuxagent.common.utils import fileutil
from azurelinuxagent.common.utils.archive import StateFlusher, StateArchiver, MAX_ARCHIVED_STATES
from tests.tools import AgentTestCase
# Exporting DEBUG=1 in the environment enables verbose logging to stdout
# (and, in tearDown below, preserves each test's temp directory).
debug = os.environ.get('DEBUG') == '1'
if debug:
    logger.add_logger_appender(logger.AppenderType.STDOUT,
                               logger.LogLevel.VERBOSE)
class TestArchive(AgentTestCase):
    """Tests for goal-state history archiving (StateFlusher/StateArchiver)."""

    def setUp(self):
        # Unique per-test scratch directory that stands in for
        # /var/lib/waagent.
        prefix = "{0}_".format(self.__class__.__name__)
        self.tmp_dir = tempfile.mkdtemp(prefix=prefix)

    def tearDown(self):
        # Keep the scratch directory around for inspection when DEBUG=1.
        if not debug and self.tmp_dir is not None:
            shutil.rmtree(self.tmp_dir)

    def _write_file(self, fn, contents=None):
        """Create `fn` under tmp_dir (creating parent directories), default
        its contents to its own relative name, and return the full path."""
        full_name = os.path.join(self.tmp_dir, fn)
        fileutil.mkdir(os.path.dirname(full_name))
        with open(full_name, 'w') as fh:
            data = contents if contents is not None else fn
            fh.write(data)
        return full_name

    @property
    def history_dir(self):
        """Directory into which goal-state files are flushed/archived."""
        return os.path.join(self.tmp_dir, 'history')

    def test_archive00(self):
        """
        StateFlusher should move all 'goal state' files to a new directory
        under the history folder that is timestamped.
        """
        temp_files = [
            'Prod.0.manifest.xml',
            'Prod.0.agentsManifest',
            'Microsoft.Azure.Extensions.CustomScript.0.xml'
        ]
        for f in temp_files:
            self._write_file(f)

        test_subject = StateFlusher(self.tmp_dir)
        test_subject.flush(datetime.utcnow())

        self.assertTrue(os.path.exists(self.history_dir))
        self.assertTrue(os.path.isdir(self.history_dir))

        timestamp_dirs = os.listdir(self.history_dir)
        self.assertEqual(1, len(timestamp_dirs))

        self.assertIsIso8601(timestamp_dirs[0])
        ts = self.parse_isoformat(timestamp_dirs[0])
        self.assertDateTimeCloseTo(ts, datetime.utcnow(), timedelta(seconds=30))

        for f in temp_files:
            history_path = os.path.join(self.history_dir, timestamp_dirs[0], f)
            msg = "expected the temp file {0} to exist".format(history_path)
            self.assertTrue(os.path.exists(history_path), msg)

    def test_archive01(self):
        """
        StateArchiver should archive all history directories by
        1. Creating a .zip of a timestamped directory's files
        2. Saving the .zip to /var/lib/waagent/history/
        3. Deleting the timestamped directory
        """
        temp_files = [
            'Prod.0.manifest.xml',
            'Prod.0.agentsManifest',
            'Microsoft.Azure.Extensions.CustomScript.0.xml'
        ]
        for f in temp_files:
            self._write_file(f)

        flusher = StateFlusher(self.tmp_dir)
        flusher.flush(datetime.utcnow())

        test_subject = StateArchiver(self.tmp_dir)
        test_subject.archive()

        timestamp_zips = os.listdir(self.history_dir)
        self.assertEqual(1, len(timestamp_zips))

        zip_fn = timestamp_zips[0]              # 2000-01-01T00:00:00.000000.zip
        ts_s = os.path.splitext(zip_fn)[0]      # 2000-01-01T00:00:00.000000

        self.assertIsIso8601(ts_s)
        ts = self.parse_isoformat(ts_s)
        self.assertDateTimeCloseTo(ts, datetime.utcnow(), timedelta(seconds=30))

        zip_full = os.path.join(self.history_dir, zip_fn)
        self.assertZipContains(zip_full, temp_files)

    def test_archive02(self):
        """
        StateArchiver should purge the MAX_ARCHIVED_STATES oldest files
        or directories. The oldest timestamps are purged first.

        This test case creates a mixture of archive files and directories.
        It creates 6 more values than MAX_ARCHIVED_STATES to ensure that
        6 archives are cleaned up. It asserts that the files and
        directories are properly deleted from the disk.
        """
        count = 6
        total = MAX_ARCHIVED_STATES + count

        start = datetime.now()
        timestamps = []

        for i in range(0, total):
            ts = start + timedelta(seconds=i)
            timestamps.append(ts)

            if i % 2 == 0:
                fn = os.path.join('history', ts.isoformat(), 'Prod.0.manifest.xml')
            else:
                fn = os.path.join('history', "{0}.zip".format(ts.isoformat()))

            self._write_file(fn)

        self.assertEqual(total, len(os.listdir(self.history_dir)))

        test_subject = StateArchiver(self.tmp_dir)
        test_subject.purge()

        archived_entries = os.listdir(self.history_dir)
        self.assertEqual(MAX_ARCHIVED_STATES, len(archived_entries))

        archived_entries.sort()

        for i in range(0, MAX_ARCHIVED_STATES):
            # Only the newest MAX_ARCHIVED_STATES timestamps should survive.
            ts = timestamps[i + count].isoformat()
            if i % 2 == 0:
                fn = ts
            else:
                fn = "{0}.zip".format(ts)
            self.assertTrue(fn in archived_entries, "'{0}' is not in the list of unpurged entries".format(fn))

    def test_archive03(self):
        """
        If the StateFlusher has to flush the same file, it should
        overwrite the existing one.
        """
        temp_files = [
            'Prod.0.manifest.xml',
            'Prod.0.agentsManifest',
            'Microsoft.Azure.Extensions.CustomScript.0.xml'
        ]

        def _write_goal_state_files(temp_files, content=None):
            for f in temp_files:
                self._write_file(f, content)

        def _check_history_files(timestamp_dir, files, content=None):
            for f in files:
                history_path = os.path.join(self.history_dir, timestamp_dir, f)
                msg = "expected the temp file {0} to exist".format(history_path)
                self.assertTrue(os.path.exists(history_path), msg)
                expected_content = f if content is None else content
                actual_content = fileutil.read_file(history_path)
                self.assertEqual(expected_content, actual_content)

        timestamp = datetime.utcnow()

        _write_goal_state_files(temp_files)
        test_subject = StateFlusher(self.tmp_dir)
        test_subject.flush(timestamp)

        # Ensure history directory exists, has proper timestamped-based name,
        self.assertTrue(os.path.exists(self.history_dir))
        self.assertTrue(os.path.isdir(self.history_dir))

        timestamp_dirs = os.listdir(self.history_dir)
        self.assertEqual(1, len(timestamp_dirs))

        self.assertIsIso8601(timestamp_dirs[0])
        ts = self.parse_isoformat(timestamp_dirs[0])
        self.assertDateTimeCloseTo(ts, datetime.utcnow(), timedelta(seconds=30))

        # Ensure saved files contain the right content
        _check_history_files(timestamp_dirs[0], temp_files)

        # re-write all of the same files with different content, and flush again.
        # .flush() should overwrite the existing ones
        _write_goal_state_files(temp_files, "--this-has-been-changed--")
        test_subject.flush(timestamp)

        # The contents of the saved files were overwritten as a result of the flush.
        _check_history_files(timestamp_dirs[0], temp_files, "--this-has-been-changed--")

    def test_archive04(self):
        """
        The archive directory is created if it does not exist.

        This failure was caught when .purge() was called before .archive().
        """
        test_subject = StateArchiver(os.path.join(self.tmp_dir, 'does-not-exist'))
        test_subject.purge()

    def parse_isoformat(self, s):
        """Parse a timestamp string in the agent's ISO8601 format."""
        return datetime.strptime(s, '%Y-%m-%dT%H:%M:%S.%f')

    def assertIsIso8601(self, s):
        """Assert that `s` parses as an ISO8601 timestamp."""
        try:
            self.parse_isoformat(s)
        except (TypeError, ValueError):
            # BUGFIX: catch only parse failures; the previous bare `except:`
            # also swallowed KeyboardInterrupt/SystemExit.
            raise AssertionError("the value '{0}' is not an ISO8601 formatted timestamp".format(s))

    def _total_seconds(self, td):
        """
        Compute the total_seconds for a timedelta because 2.6 does not have total_seconds.
        """
        return (0.0 + td.microseconds + (td.seconds + td.days * 24 * 60 * 60) * 10 ** 6) / 10 ** 6

    def assertDateTimeCloseTo(self, t1, t2, within):
        """Assert that t1 and t2 differ by no more than `within`."""
        if t1 <= t2:
            diff = t2 - t1
        else:
            diff = t1 - t2

        secs = self._total_seconds(within - diff)
        if secs < 0:
            self.fail("the timestamps are outside of the tolerance by {0} seconds".format(secs))

    def assertZipContains(self, zip_fn, files):
        """Assert that every name in `files` exists inside the zip archive."""
        ziph = zipfile.ZipFile(zip_fn, 'r')
        try:
            zip_files = [x.filename for x in ziph.filelist]
            for f in files:
                self.assertTrue(f in zip_files, "'{0}' was not found in {1}".format(f, zip_fn))
        finally:
            # BUGFIX: close the archive even when an assertion above fails;
            # the previous code leaked the handle on the failure path.
            ziph.close()
| rjschwei/WALinuxAgent | tests/utils/test_archive.py | Python | apache-2.0 | 8,920 |
# This file is a part of MediaDrop (http://www.mediadrop.net),
# Copyright 2009-2015 MediaDrop contributors
# For the exact contribution history, see the git revision log.
# The source code contained in this file is licensed under the GPLv3 or
# (at your option) any later version.
# See LICENSE.txt in the main project directory, for more information.
"""add custom head tags
add setting for custom tags (HTML) in <head> section
added: 2012-02-13 (v0.10dev)
previously migrate script v054
Revision ID: 280565a54124
Revises: 4d27ff5680e5
Create Date: 2013-05-14 22:38:02.552230
"""
# revision identifiers, used by Alembic.
# Unique identifier of this migration revision.
revision = '280565a54124'
# Identifier of the parent revision this migration upgrades from.
down_revision = '4d27ff5680e5'
from alembic.op import execute, inline_literal
from sqlalchemy import Integer, Unicode, UnicodeText
from sqlalchemy import Column, MetaData, Table
# -- table definition ---------------------------------------------------------
metadata = MetaData()
# Minimal, self-contained definition of the `settings` key/value table.
# Migration scripts declare their own table objects (instead of importing
# the application's models) so the migration stays stable as the app evolves.
settings = Table('settings', metadata,
    Column('id', Integer, autoincrement=True, primary_key=True),
    Column('key', Unicode(255), nullable=False, unique=True),
    Column('value', UnicodeText),
    mysql_engine='InnoDB',
    mysql_charset='utf8',
)
# -- helpers ------------------------------------------------------------------
def insert_setting(key, value):
    """Insert one key/value row into the settings table."""
    stmt = settings.insert().values({
        'key': inline_literal(key),
        'value': inline_literal(value),
    })
    execute(stmt)
def delete_setting(key):
    """Delete the settings row whose key equals `key`."""
    stmt = settings.delete().where(
        settings.c.key == inline_literal(key))
    execute(stmt)
# -----------------------------------------------------------------------------
# (key, default value) pairs added by upgrade() and removed by downgrade().
SETTINGS = [
    (u'appearance_custom_head_tags', u''),
]
def upgrade():
    """Create every setting introduced by this revision."""
    for entry in SETTINGS:
        insert_setting(*entry)
def downgrade():
    """Remove every setting introduced by this revision."""
    for key, _value in SETTINGS:
        delete_setting(key)
| rbu/mediadrop | mediadrop/migrations/versions/004-280565a54124-add_custom_head_tags.py | Python | gpl-3.0 | 1,908 |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import numpy as np
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.linalg import linear_operator_util
from tensorflow.python.platform import test
rng = np.random.RandomState(0)
class AssertZeroImagPartTest(test.TestCase):
  """Tests for linear_operator_util.assert_zero_imag_part."""

  def test_real_tensor_doesnt_raise(self):
    real = ops.convert_to_tensor([0., 2, 3])
    # A purely real tensor trivially has zero imaginary part.
    self.evaluate(
        linear_operator_util.assert_zero_imag_part(real, message="ABC123"))

  def test_complex_tensor_with_imag_zero_doesnt_raise(self):
    re_part = ops.convert_to_tensor([1., 0, 3])
    im_part = ops.convert_to_tensor([0., 0, 0])
    value = math_ops.complex(re_part, im_part)
    # Complex dtype, but every imaginary component is exactly zero.
    self.evaluate(
        linear_operator_util.assert_zero_imag_part(value, message="ABC123"))

  def test_complex_tensor_with_nonzero_imag_raises(self):
    re_part = ops.convert_to_tensor([1., 2, 0])
    im_part = ops.convert_to_tensor([1., 2, 0])
    value = math_ops.complex(re_part, im_part)
    # Nonzero imaginary entries must trigger the assert op's message.
    with self.assertRaisesOpError("ABC123"):
      self.evaluate(
          linear_operator_util.assert_zero_imag_part(value, message="ABC123"))
class AssertNoEntriesWithModulusZeroTest(test.TestCase):
  """Tests for linear_operator_util.assert_no_entries_with_modulus_zero."""

  def test_nonzero_real_tensor_doesnt_raise(self):
    values = ops.convert_to_tensor([1., 2, 3])
    # All entries are nonzero, so the assert op should pass.
    self.evaluate(
        linear_operator_util.assert_no_entries_with_modulus_zero(
            values, message="ABC123"))

  def test_nonzero_complex_tensor_doesnt_raise(self):
    re_part = ops.convert_to_tensor([1., 0, 3])
    im_part = ops.convert_to_tensor([1., 2, 0])
    values = math_ops.complex(re_part, im_part)
    # Every entry has a nonzero real or imaginary part, so nonzero modulus.
    self.evaluate(
        linear_operator_util.assert_no_entries_with_modulus_zero(
            values, message="ABC123"))

  def test_zero_real_tensor_raises(self):
    values = ops.convert_to_tensor([1., 0, 3])
    # The middle entry is exactly zero.
    with self.assertRaisesOpError("ABC123"):
      self.evaluate(
          linear_operator_util.assert_no_entries_with_modulus_zero(
              values, message="ABC123"))

  def test_zero_complex_tensor_raises(self):
    re_part = ops.convert_to_tensor([1., 2, 0])
    im_part = ops.convert_to_tensor([1., 2, 0])
    values = math_ops.complex(re_part, im_part)
    # The last entry is 0+0j, i.e. modulus zero.
    with self.assertRaisesOpError("ABC123"):
      self.evaluate(
          linear_operator_util.assert_no_entries_with_modulus_zero(
              values, message="ABC123"))
class BroadcastMatrixBatchDimsTest(test.TestCase):
  """Tests for linear_operator_util.broadcast_matrix_batch_dims.

  The function broadcasts the *batch* (leading) dimensions of a list of
  matrices against each other while leaving the trailing two (matrix)
  dimensions of each argument untouched.
  """
  def test_zero_batch_matrices_returned_as_empty_list(self):
    self.assertAllEqual([],
                        linear_operator_util.broadcast_matrix_batch_dims([]))
  def test_one_batch_matrix_returned_after_tensor_conversion(self):
    # A single argument is returned unchanged, but converted to a Tensor.
    arr = rng.rand(2, 3, 4)
    tensor, = linear_operator_util.broadcast_matrix_batch_dims([arr])
    self.assertTrue(isinstance(tensor, ops.Tensor))
    self.assertAllClose(arr, self.evaluate(tensor))
  def test_static_dims_broadcast(self):
    # x.batch_shape = [3, 1, 2]
    # y.batch_shape = [4, 1]
    # broadcast batch shape = [3, 4, 2]
    x = rng.rand(3, 1, 2, 1, 5)
    y = rng.rand(4, 1, 3, 7)
    batch_of_zeros = np.zeros((3, 4, 2, 1, 1))
    x_bc_expected = x + batch_of_zeros
    y_bc_expected = y + batch_of_zeros
    x_bc, y_bc = linear_operator_util.broadcast_matrix_batch_dims([x, y])
    self.assertAllEqual(x_bc_expected.shape, x_bc.shape)
    self.assertAllEqual(y_bc_expected.shape, y_bc.shape)
    x_bc_, y_bc_ = self.evaluate([x_bc, y_bc])
    self.assertAllClose(x_bc_expected, x_bc_)
    self.assertAllClose(y_bc_expected, y_bc_)
  def test_static_dims_broadcast_second_arg_higher_rank(self):
    # x.batch_shape = [1, 2]
    # y.batch_shape = [1, 3, 1]
    # broadcast batch shape = [1, 3, 2]
    x = rng.rand(1, 2, 1, 5)
    y = rng.rand(1, 3, 2, 3, 7)
    batch_of_zeros = np.zeros((1, 3, 2, 1, 1))
    x_bc_expected = x + batch_of_zeros
    y_bc_expected = y + batch_of_zeros
    x_bc, y_bc = linear_operator_util.broadcast_matrix_batch_dims([x, y])
    self.assertAllEqual(x_bc_expected.shape, x_bc.shape)
    self.assertAllEqual(y_bc_expected.shape, y_bc.shape)
    x_bc_, y_bc_ = self.evaluate([x_bc, y_bc])
    self.assertAllClose(x_bc_expected, x_bc_)
    self.assertAllClose(y_bc_expected, y_bc_)
  def test_dynamic_dims_broadcast_32bit(self):
    # x.batch_shape = [3, 1, 2]
    # y.batch_shape = [4, 1]
    # broadcast batch shape = [3, 4, 2]
    x = rng.rand(3, 1, 2, 1, 5).astype(np.float32)
    y = rng.rand(4, 1, 3, 7).astype(np.float32)
    batch_of_zeros = np.zeros((3, 4, 2, 1, 1)).astype(np.float32)
    x_bc_expected = x + batch_of_zeros
    y_bc_expected = y + batch_of_zeros
    # Placeholders with shape=None hide the static shapes from the function.
    x_ph = array_ops.placeholder_with_default(x, shape=None)
    y_ph = array_ops.placeholder_with_default(y, shape=None)
    x_bc, y_bc = linear_operator_util.broadcast_matrix_batch_dims([x_ph, y_ph])
    x_bc_, y_bc_ = self.evaluate([x_bc, y_bc])
    self.assertAllClose(x_bc_expected, x_bc_)
    self.assertAllClose(y_bc_expected, y_bc_)
  def test_dynamic_dims_broadcast_32bit_second_arg_higher_rank(self):
    # x.batch_shape = [1, 2]
    # y.batch_shape = [3, 4, 1]
    # broadcast batch shape = [3, 4, 2]
    x = rng.rand(1, 2, 1, 5).astype(np.float32)
    y = rng.rand(3, 4, 1, 3, 7).astype(np.float32)
    batch_of_zeros = np.zeros((3, 4, 2, 1, 1)).astype(np.float32)
    x_bc_expected = x + batch_of_zeros
    y_bc_expected = y + batch_of_zeros
    x_ph = array_ops.placeholder_with_default(x, shape=None)
    y_ph = array_ops.placeholder_with_default(y, shape=None)
    x_bc, y_bc = linear_operator_util.broadcast_matrix_batch_dims([x_ph, y_ph])
    x_bc_, y_bc_ = self.evaluate([x_bc, y_bc])
    self.assertAllClose(x_bc_expected, x_bc_)
    self.assertAllClose(y_bc_expected, y_bc_)
  def test_less_than_two_dims_raises_static(self):
    # Arguments must be (batched) matrices, i.e. have rank >= 2.
    x = rng.rand(3)
    y = rng.rand(1, 1)
    with self.assertRaisesRegex(ValueError, "at least two dimensions"):
      linear_operator_util.broadcast_matrix_batch_dims([x, y])
    with self.assertRaisesRegex(ValueError, "at least two dimensions"):
      linear_operator_util.broadcast_matrix_batch_dims([y, x])
class MatrixSolveWithBroadcastTest(test.TestCase):
  """Tests for linear_operator_util.matrix_solve_with_broadcast.

  Each test compares the broadcasting solve against a plain
  linalg_ops.matrix_solve applied to explicitly broadcast operands.
  """
  def test_static_dims_broadcast_matrix_has_extra_dims(self):
    # batch_shape = [2]
    matrix = rng.rand(2, 3, 3)
    rhs = rng.rand(3, 7)
    rhs_broadcast = rhs + np.zeros((2, 1, 1))
    result = linear_operator_util.matrix_solve_with_broadcast(matrix, rhs)
    self.assertAllEqual((2, 3, 7), result.shape)
    expected = linalg_ops.matrix_solve(matrix, rhs_broadcast)
    self.assertAllClose(*self.evaluate([expected, result]))
  def test_static_dims_broadcast_rhs_has_extra_dims(self):
    # Since the second arg has extra dims, and the domain dim of the first arg
    # is larger than the number of linear equations, code will "flip" the extra
    # dims of the first arg to the far right, making extra linear equations
    # (then call the matrix function, then flip back).
    # We have verified that this optimization indeed happens. How? We stepped
    # through with a debugger.
    # batch_shape = [2]
    matrix = rng.rand(3, 3)
    rhs = rng.rand(2, 3, 2)
    matrix_broadcast = matrix + np.zeros((2, 1, 1))
    result = linear_operator_util.matrix_solve_with_broadcast(matrix, rhs)
    self.assertAllEqual((2, 3, 2), result.shape)
    expected = linalg_ops.matrix_solve(matrix_broadcast, rhs)
    self.assertAllClose(*self.evaluate([expected, result]))
  def test_static_dims_broadcast_rhs_has_extra_dims_dynamic(self):
    # Same as above, but with all static shape information hidden behind
    # placeholders so only the dynamic-shape path is exercised.
    # Since the second arg has extra dims, and the domain dim of the first arg
    # is larger than the number of linear equations, code will "flip" the extra
    # dims of the first arg to the far right, making extra linear equations
    # (then call the matrix function, then flip back).
    # We have verified that this optimization indeed happens. How? We stepped
    # through with a debugger.
    # batch_shape = [2]
    matrix = rng.rand(3, 3)
    rhs = rng.rand(2, 3, 2)
    matrix_broadcast = matrix + np.zeros((2, 1, 1))
    matrix_ph = array_ops.placeholder_with_default(matrix, shape=[None, None])
    rhs_ph = array_ops.placeholder_with_default(rhs, shape=[None, None, None])
    result = linear_operator_util.matrix_solve_with_broadcast(matrix_ph, rhs_ph)
    self.assertAllEqual(3, result.shape.ndims)
    expected = linalg_ops.matrix_solve(matrix_broadcast, rhs)
    self.assertAllClose(*self.evaluate([expected, result]))
  def test_static_dims_broadcast_rhs_has_extra_dims_and_adjoint(self):
    # Same as the non-adjoint case, plus adjoint=True must be forwarded.
    # Since the second arg has extra dims, and the domain dim of the first arg
    # is larger than the number of linear equations, code will "flip" the extra
    # dims of the first arg to the far right, making extra linear equations
    # (then call the matrix function, then flip back).
    # We have verified that this optimization indeed happens. How? We stepped
    # through with a debugger.
    # batch_shape = [2]
    matrix = rng.rand(3, 3)
    rhs = rng.rand(2, 3, 2)
    matrix_broadcast = matrix + np.zeros((2, 1, 1))
    result = linear_operator_util.matrix_solve_with_broadcast(
        matrix, rhs, adjoint=True)
    self.assertAllEqual((2, 3, 2), result.shape)
    expected = linalg_ops.matrix_solve(matrix_broadcast, rhs, adjoint=True)
    self.assertAllClose(*self.evaluate([expected, result]))
  def test_dynamic_dims_broadcast_64bit(self):
    # batch_shape = [2, 2]
    matrix = rng.rand(2, 3, 3)
    rhs = rng.rand(2, 1, 3, 7)
    matrix_broadcast = matrix + np.zeros((2, 2, 1, 1))
    rhs_broadcast = rhs + np.zeros((2, 2, 1, 1))
    matrix_ph = array_ops.placeholder_with_default(matrix, shape=None)
    rhs_ph = array_ops.placeholder_with_default(rhs, shape=None)
    result, expected = self.evaluate([
        linear_operator_util.matrix_solve_with_broadcast(matrix_ph, rhs_ph),
        linalg_ops.matrix_solve(matrix_broadcast, rhs_broadcast)
    ])
    self.assertAllClose(expected, result)
class DomainDimensionStubOperator(object):
  """Minimal operator stand-in that only exposes domain_dimension_tensor()."""

  def __init__(self, domain_dimension):
    # Stored as a tensor, mimicking a real LinearOperator attribute.
    self._dim = ops.convert_to_tensor(domain_dimension)

  def domain_dimension_tensor(self):
    return self._dim
class AssertCompatibleMatrixDimensionsTest(test.TestCase):
  """Tests for linear_operator_util.assert_compatible_matrix_dimensions."""

  def test_compatible_dimensions_do_not_raise(self):
    # Operator domain dimension (3) matches the argument's row dimension.
    batch_matrix = ops.convert_to_tensor(rng.rand(2, 3, 4))
    operator = DomainDimensionStubOperator(3)
    self.evaluate(
        linear_operator_util.assert_compatible_matrix_dimensions(
            operator, batch_matrix))

  def test_incompatible_dimensions_raise(self):
    # Operator domain dimension (3) conflicts with the argument's 4.
    batch_matrix = ops.convert_to_tensor(rng.rand(2, 4, 4))
    operator = DomainDimensionStubOperator(3)
    # pylint: disable=g-error-prone-assert-raises
    with self.assertRaisesOpError("Dimensions are not compatible"):
      self.evaluate(
          linear_operator_util.assert_compatible_matrix_dimensions(
              operator, batch_matrix))
    # pylint: enable=g-error-prone-assert-raises
class DummyOperatorWithHint(object):
  """Bare object whose attributes are exactly the keyword args it was given."""

  def __init__(self, **kwargs):
    for attr_name, attr_value in kwargs.items():
      setattr(self, attr_name, attr_value)
class UseOperatorOrProvidedHintUnlessContradictingTest(test.TestCase,
                                                       parameterized.TestCase):
  """Tests for use_operator_or_provided_hint_unless_contradicting.

  The function merges a boolean hint stored on an operator with a hint
  provided by the caller: it returns the non-None value (an "or" over
  {None, True} / {None, False}) and raises when the two disagree.
  """
  @parameterized.named_parameters(
      ("none_none", None, None, None),
      ("none_true", None, True, True),
      ("true_none", True, None, True),
      ("true_true", True, True, True),
      ("none_false", None, False, False),
      ("false_none", False, None, False),
      ("false_false", False, False, False),
  )
  def test_computes_an_or_if_non_contradicting(self, operator_hint_value,
                                               provided_hint_value,
                                               expected_result):
    self.assertEqual(
        expected_result,
        linear_operator_util.use_operator_or_provided_hint_unless_contradicting(
            operator=DummyOperatorWithHint(my_hint=operator_hint_value),
            hint_attr_name="my_hint",
            provided_hint_value=provided_hint_value,
            message="should not be needed here"))
  @parameterized.named_parameters(
      ("true_false", True, False),
      ("false_true", False, True),
  )
  def test_raises_if_contradicting(self, operator_hint_value,
                                   provided_hint_value):
    # Contradicting hints must raise ValueError with the caller's message.
    with self.assertRaisesRegex(ValueError, "my error message"):
      linear_operator_util.use_operator_or_provided_hint_unless_contradicting(
          operator=DummyOperatorWithHint(my_hint=operator_hint_value),
          hint_attr_name="my_hint",
          provided_hint_value=provided_hint_value,
          message="my error message")
class BlockwiseTest(test.TestCase, parameterized.TestCase):
  """Tests for linear_operator_util.arg_is_blockwise.

  arg_is_blockwise decides whether an argument passed to a blockwise
  operator should be interpreted as a list of per-block pieces (one per
  operator block, sized along `split_dim`) or as a single dense argument.
  """
  @parameterized.named_parameters(
      ("split_dim_1", [3, 3, 4], -1),
      ("split_dim_2", [2, 5], -2),
  )
  def test_blockwise_input(self, op_dimension_values, split_dim):
    op_dimensions = [
        tensor_shape.Dimension(v) for v in op_dimension_values]
    unknown_op_dimensions = [
        tensor_shape.Dimension(None) for _ in op_dimension_values]
    batch_shape = [2, 1]
    arg_dim = 5
    # Build one block per operator dimension, sized along split_dim.
    if split_dim == -1:
      blockwise_arrays = [np.zeros(batch_shape + [arg_dim, d])
                          for d in op_dimension_values]
    else:
      blockwise_arrays = [np.zeros(batch_shape + [d, arg_dim])
                          for d in op_dimension_values]
    blockwise_list = [block.tolist() for block in blockwise_arrays]
    blockwise_tensors = [ops.convert_to_tensor(block)
                         for block in blockwise_arrays]
    blockwise_placeholders = [
        array_ops.placeholder_with_default(block, shape=None)
        for block in blockwise_arrays]
    # Iterables of non-nested structures are always interpreted as blockwise.
    # The list of lists is interpreted as blockwise as well, regardless of
    # whether the operator dimensions are known, since the sizes of its elements
    # along `split_dim` are non-identical.
    for op_dims in [op_dimensions, unknown_op_dimensions]:
      for blockwise_inputs in [
          blockwise_arrays, blockwise_list,
          blockwise_tensors, blockwise_placeholders]:
        self.assertTrue(linear_operator_util.arg_is_blockwise(
            op_dims, blockwise_inputs, split_dim))
  def test_non_blockwise_input(self):
    x = np.zeros((2, 3, 4, 6))
    x_tensor = ops.convert_to_tensor(x)
    x_placeholder = array_ops.placeholder_with_default(x, shape=None)
    x_list = x.tolist()
    # For known and matching operator dimensions, interpret all as non-blockwise
    op_dimension_values = [2, 1, 3]
    op_dimensions = [tensor_shape.Dimension(d) for d in op_dimension_values]
    for inputs in [x, x_tensor, x_placeholder, x_list]:
      self.assertFalse(linear_operator_util.arg_is_blockwise(
          op_dimensions, inputs, -1))
    # The input is still interpreted as non-blockwise for unknown operator
    # dimensions (`x_list` has an outermost dimension that does not match the
    # number of blocks, and the other inputs are not iterables).
    unknown_op_dimensions = [
        tensor_shape.Dimension(None) for _ in op_dimension_values]
    for inputs in [x, x_tensor, x_placeholder, x_list]:
      self.assertFalse(linear_operator_util.arg_is_blockwise(
          unknown_op_dimensions, inputs, -1))
  def test_ambiguous_input_raises(self):
    x = np.zeros((3, 4, 2)).tolist()
    op_dimensions = [tensor_shape.Dimension(None) for _ in range(3)]
    # Since the leftmost dimension of `x` is equal to the number of blocks, and
    # the operators have unknown dimension, the input is ambiguous.
    with self.assertRaisesRegex(ValueError, "structure is ambiguous"):
      linear_operator_util.arg_is_blockwise(op_dimensions, x, -2)
  def test_mismatched_input_raises(self):
    x = np.zeros((2, 3, 4, 6)).tolist()
    op_dimension_values = [4, 3]
    op_dimensions = [tensor_shape.Dimension(v) for v in op_dimension_values]
    # The dimensions of the two operator-blocks sum to 7. `x` is a
    # two-element list; if interpreted blockwise, its corresponding dimensions
    # sum to 12 (=6*2). If not interpreted blockwise, its corresponding
    # dimension is 6. This is a mismatch.
    with self.assertRaisesRegex(ValueError, "dimension does not match"):
      linear_operator_util.arg_is_blockwise(op_dimensions, x, -1)
if __name__ == "__main__":
  # Run all test cases in this file via the TensorFlow test runner.
  test.main()
| karllessard/tensorflow | tensorflow/python/kernel_tests/linalg/linear_operator_util_test.py | Python | apache-2.0 | 17,220 |
from __future__ import (absolute_import, division,
print_function, unicode_literals)
import pandas as pd
from mousestyles import path_diversity
def test_smooth_noise():
    """Check path_diversity.smooth_noise on a small 4-point track.

    Builds one movement DataFrame (4 observations sampled 0.02 s apart),
    indexes its paths with thresholds (1, 1) and smooths with parameters
    (120, 1).  The previous version of this test rebuilt the identical
    inputs and recomputed the identical result four times over — once per
    assertion; a single computation with all four assertions is equivalent
    and much cheaper.
    """
    movement = {'t': pd.Series([0., 0.02, 0.04, 0.06], index=[0, 1, 2, 3]),
                'x': pd.Series([0., 0., 0.1, 0.2], index=[0, 1, 2, 3]),
                'y': pd.Series([0., 1., 0., 1.], index=[0, 1, 2, 3]),
                'isHB': pd.Series(['No', 'No', 'No', 'No'],
                                  index=[0, 1, 2, 3])}
    movement = pd.DataFrame(movement)
    paths = path_diversity.path_index(movement, 1, 1)

    smoothed = path_diversity.smooth_noise(movement, paths, 120, 1)

    # Smoothing collapses the 4 raw points into 3 ...
    assert len(smoothed) == 3
    # ... with the merged point's time and coordinates being the average
    # of the two observations it replaced.
    assert smoothed['y'][1] == 0.5
    assert smoothed['x'][1] == 0.05
    assert smoothed['t'][1] == 0.03
| changsiyao/mousestyles | mousestyles/path_diversity/tests/test_smooth_noise.py | Python | bsd-2-clause | 2,526 |
'''
Copyright 2016, 2017 Hop and Fork.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
import yaml
class Configuration:
    """Cluster settings loaded from ``config.yaml``.

    Every configuration key is exposed as an attribute on the instance
    (via ``setattr``).  Required keys abort the program when missing;
    optional keys fall back to a default, which may itself be another,
    previously-loaded attribute (e.g. ``nimbus_ami_id`` defaults to
    ``default_ami_id``).

    Exit codes: 1 = config file unreadable, 2 = invalid/empty YAML,
    3 = required parameter missing.
    """

    def __init__(self):
        conf = self.parse_configuration_file('config.yaml')
        # Credentials, networking and placement must be supplied explicitly.
        self.get_required_parameter(conf, 'aws_access_key_id')
        self.get_required_parameter(conf, 'aws_secret_access_key')
        self.get_required_parameter(conf, 'region_name')
        self.get_required_parameter(conf, 'key_pair')
        self.get_required_parameter(conf, 'security_groups_zk')
        self.get_required_parameter(conf, 'security_groups_ui')
        self.get_required_parameter(conf, 'security_groups_ni')
        self.get_required_parameter(conf, 'security_groups_sv')
        self.get_required_parameter(conf, 'default_vpc')
        self.get_required_parameter(conf, 'default_vpc_security_group')
        self.get_required_parameter(conf, 'default_ami_id')
        self.get_required_parameter(conf, 'subnet_id')
        # Per-role AMIs default to the cluster-wide AMI.
        self.get_parameter(conf, 'nimbus_ami_id', self.default_ami_id)
        self.get_parameter(conf, 'supervisor_ami_id', self.default_ami_id)
        self.get_parameter(conf, 'ui_ami_id', self.default_ami_id)
        self.get_parameter(conf, 'zookeeper_ami_id', self.default_ami_id)
        # Cluster sizing.
        self.get_parameter(conf, 'zk_instances', 1)
        self.get_parameter(conf, 'supervisors', 1)
        self.get_parameter(conf, 'slots', 4)
        # Per-role instance types default to the cluster-wide type.
        self.get_parameter(conf, 'default_instance_type', 't2.micro')
        self.get_parameter(conf, 'nimbus_instance_type', self.default_instance_type)
        self.get_parameter(conf, 'ui_instance_type', self.default_instance_type)
        self.get_parameter(conf, 'supervisor_instance_type', self.default_instance_type)
        self.get_parameter(conf, 'zookeeper_instance_type', self.default_instance_type)
        self.get_parameter(conf, 'volume_size', 8)

    def parse_configuration_file(self, config_file):
        """Parse *config_file* as YAML and return the resulting mapping.

        Exits with status 1 if the file cannot be opened and status 2 if
        it contains invalid YAML or is empty.
        """
        try:
            # ``with`` guarantees the stream is closed on every path; the
            # original code leaked the handle when exit() was reached.
            with open(config_file) as stream:
                # safe_load avoids arbitrary Python object construction;
                # yaml.load without an explicit Loader is unsafe/deprecated.
                conf = yaml.safe_load(stream)
            if conf is None:
                # An empty file parses to None; treat it as a config error.
                raise yaml.YAMLError
        except IOError:
            print("Failed to open configuration file '{}'".format(config_file))
            exit(1)
        except yaml.YAMLError:
            print("Error in configuration file '{}'".format(config_file))
            exit(2)
        else:
            return conf

    def get_required_parameter(self, conf, key):
        """Copy *key* from *conf* onto the instance or exit(3) if absent."""
        if key in conf:
            setattr(self, key, conf[key])
        else:
            print("Missing required parameter '{}' in configuration."
                  .format(key))
            exit(3)

    def get_parameter(self, conf, key, default_value):
        """Copy *key* from *conf* onto the instance, using *default_value*
        when the key is not present."""
        setattr(self, key, conf.get(key, default_value) if hasattr(conf, 'get')
                else (conf[key] if key in conf else default_value))
| hopandfork/teacup-storm | configuration.py | Python | apache-2.0 | 3,317 |
# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
from gaecookie.middleware import CSRFMiddleware, CSRFInputToDependency
from locale_app.middleware import LocaleMiddleware
from tekton.gae.middleware.json_middleware import JsonResponseMiddleware
from config.template_middleware import TemplateMiddleware, TemplateWriteMiddleware
from tekton.gae.middleware.email_errors import EmailMiddleware
from tekton.gae.middleware.parameter import RequestParamsMiddleware
from tekton.gae.middleware.redirect import RedirectMiddleware
from tekton.gae.middleware.router_middleware import RouterMiddleware, ExecutionMiddleware
from tekton.gae.middleware.webapp2_dependencies import Webapp2Dependencies
from gaepermission.middleware import LoggedUserMiddleware, PermissionMiddleware
# Public base URL of the deployed Google App Engine application.
APP_URL = 'https://conheca-o-mundo.appspot.com'
# Sender address for outgoing application emails (e.g. error reports).
SENDER_EMAIL = 'elenildoms@gmail.com'
# Localization defaults; LOCALES lists every locale the app can serve.
DEFAULT_LOCALE = 'pt_BR'
DEFAULT_TIMEZONE = 'America/Sao_Paulo'
LOCALES = ['en_US', 'pt_BR']
# Templates rendered for HTTP 404/400 error responses.
TEMPLATE_404_ERROR = 'base/404.html'
TEMPLATE_400_ERROR = 'base/400.html'
# Middleware pipeline; presumably executed in list order for each request
# (order defined by the tekton framework — confirm against its docs).
MIDDLEWARE_LIST = [LoggedUserMiddleware,
                   TemplateMiddleware,
                   EmailMiddleware,
                   Webapp2Dependencies,
                   RequestParamsMiddleware,
                   CSRFInputToDependency,
                   LocaleMiddleware,
                   RouterMiddleware,
                   CSRFMiddleware,
                   PermissionMiddleware,
                   ExecutionMiddleware,
                   TemplateWriteMiddleware,
                   JsonResponseMiddleware,
                   RedirectMiddleware]
| elenildo/conheca-o-mundo | backend/appengine/settings.py | Python | mit | 1,611 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# || ____ _ __
# +------+ / __ )(_) /_______________ _____ ___
# | 0xBC | / __ / / __/ ___/ ___/ __ `/_ / / _ \
# +------+ / /_/ / / /_/ /__/ / / /_/ / / /_/ __/
# || || /_____/_/\__/\___/_/ \__,_/ /___/\___/
#
# Copyright (C) 2014 Bitcraze AB
#
# Crazyflie Nano Quadcopter Client
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.
"""
"""
__author__ = 'Bitcraze AB'
__all__ = ['TakeOverSelectiveMux']
import os
import glob
import logging
from . import InputMux
logger = logging.getLogger(__name__)
class TakeOverSelectiveMux(InputMux):
    """Input mux for two devices where a "master" device can take over.

    Normally the first device contributes thrust/yaw/buttons and the second
    contributes roll/pitch (see ``add_device``).  While the master's "alt1"
    control is active, the master's readings are used for everything.

    NOTE(review): relies on the parent ``InputMux`` for ``self._devs``,
    ``_check_toggle`` and the ``_scale_*``/``_trim_*``/``_update_*``/
    ``_limit_thrust`` helpers — semantics not visible here.
    """

    def __init__(self, *args):
        super(TakeOverSelectiveMux, self).__init__(*args)
        self.name = "TakeOverSelective"

    def add_device(self, dev, parameters):
        """Register a device; the *parameters* argument is ignored and
        replaced by a fixed role-based parameter set."""
        logger.info("Adding device {} to {}".format(dev.name, self.name))
        logger.info("Device has mapping {}".format(dev.input_map_name))
        # First device added becomes the "master" (thrust/yaw/buttons);
        # the second device only supplies roll/pitch.
        if len(self._devs) == 0:
            parameters = ("thrust", "yaw", "estop", "alt1", "alt2", "althold", "exit")
        else:
            parameters = ("roll", "pitch")
        self._devs.append((dev, parameters))
        #logger.info("First has mapping {}".format(self._devs[0][0].input_map["Input.AXIS-3"]["key"]))
        #if len(self._devs) > 1:
        #    logger.info("Second has mapping {}".format(self._devs[1][0].input_map["Input.AXIS-3"]["key"]))

    def get_supported_dev_count(self):
        """This mux always works with exactly two devices."""
        return 2

    def read(self):
        """Read both devices and return [roll, pitch, yaw, thrust].

        Any exception while reading (e.g. fewer than two devices attached)
        is logged and neutral values are returned.
        """
        try:
            use_master = False
            dm = self._devs[0][0].read()
            ds = self._devs[1][0].read()
            # When the master's alt1 toggle fires and is pressed, the master
            # takes over all axes for this read.
            if self._check_toggle("alt1", dm):
                if dm["alt1"]:
                    use_master = True
            if use_master:
                data = dm
            else:
                # Mux the two together
                data = {}
                for mk in dm:
                    if mk in self._devs[0][1]:
                        data[mk] = dm[mk]
                for sk in ds:
                    if sk in self._devs[1][1]:
                        data[sk] = ds[sk]
                # Now res contains the mix of the two
            # Scale/trim and forward the button states to the parent helpers.
            [roll, pitch] = self._scale_rp(data["roll"], data["pitch"])
            [roll, pitch] = self._trim_rp(roll, pitch)
            self._update_alt_hold(data["althold"])
            self._update_em_stop(data["estop"])
            # NOTE(review): alt1 is updated from the "alt2" reading here —
            # looks intentional (alt1 is consumed above as the take-over
            # switch) but worth confirming.
            self._update_alt1(data["alt2"])
            #self._update_alt2(data["alt2"])
            thrust = self._limit_thrust(data["thrust"],
                                        data["althold"],
                                        data["estop"])
            yaw = self._scale_and_deadband_yaw(data["yaw"])
            return [roll, pitch, yaw, thrust]
        except Exception as e:
            logger.info("Could not read devices: {}".format(e))
            return [0.0, 0.0, 0.0, 0.0]
| Venris/crazyflie-multilink | lib/cfclient/utils/mux/takeoverselectivemux.py | Python | gpl-2.0 | 3,564 |
"""
========================================
Ammonia inversion transition TKIN fitter
========================================
Ammonia inversion transition TKIN fitter translated from Erik Rosolowsky's
http://svn.ok.ubc.ca/svn/signals/nh3fit/
.. moduleauthor:: Adam Ginsburg <adam.g.ginsburg@gmail.com>
Module API
^^^^^^^^^^
"""
import numpy as np
from pyspeckit.mpfit import mpfit
from pyspeckit.spectrum.parinfo import ParinfoList,Parinfo
import fitter
import matplotlib.cbook as mpcb
import copy
import model
from astropy import log
import astropy.units as u
from . import mpfit_messages
from ammonia_constants import (line_names, freq_dict, aval_dict, ortho_dict,
voff_lines_dict, tau_wts_dict)
def ammonia(xarr, tkin=20, tex=None, ntot=1e14, width=1, xoff_v=0.0,
            fortho=0.0, tau=None, fillingfraction=None, return_tau=False,
            background_tb=2.7315,
            thin=False, verbose=False, return_components=False, debug=False):
    """
    Generate a model Ammonia spectrum based on input temperatures, column, and
    gaussian parameters

    Parameters
    ----------
    xarr: `pyspeckit.spectrum.units.SpectroscopicAxis`
        Array of wavelength/frequency values
    ntot: float
        can be specified as a column density (e.g., 10^15) or a
        log-column-density (e.g., 15)
    tex: float or None
        Excitation temperature. Assumed LTE if unspecified (``None``), if
        tex>tkin, or if ``thin`` is specified.
    ntot: float
        Total column density of NH3.  Can be specified as a float in the range
        5-25 or an exponential (1e5-1e25)
    width: float
        Line width in km/s
    xoff_v: float
        Line offset in km/s
    fortho: float
        Fraction of NH3 molecules in ortho state.  Default assumes all para
        (fortho=0).
    tau: None or float
        If tau (optical depth in the 1-1 line) is specified, ntot is NOT fit
        but is set to a fixed value.  The optical depths of the other lines are
        fixed relative to tau_oneone
    fillingfraction: None or float
        fillingfraction is an arbitrary scaling factor to apply to the model
    return_tau: bool
        Return a dictionary of the optical depths in each line instead of a
        synthetic spectrum
    thin: bool
        uses a different parametetrization and requires only the optical depth,
        width, offset, and tkin to be specified.  In the 'thin' approximation,
        tex is not used in computation of the partition function - LTE is
        implicitly assumed
    return_components: bool
        Return a list of arrays, one for each hyperfine component, instead of
        just one array
    background_tb : float
        The background brightness temperature.  Defaults to TCMB.
    verbose: bool
        More messages
    debug: bool
        For debugging.

    Returns
    -------
    spectrum: `numpy.ndarray`
        Synthetic spectrum with same shape as ``xarr``
    component_list: list
        List of `numpy.ndarray`'s, one for each hyperfine component
        (if ``return_components`` is set)
    tau_dict: dict
        Dictionary of optical depth values for the various lines
        (if ``return_tau`` is set)
    """

    # Convert X-units to frequency in GHz
    xarr = xarr.as_unit('GHz')

    # tex falls back to tkin (LTE) when unspecified or when 'thin' is set.
    if tex is not None:
        # Yes, you certainly can have nonthermal excitation, tex>tkin.
        #if tex > tkin:  # cannot have Tex > Tkin
        #    tex = tkin
        if thin:  # tex is not used in this case
            tex = tkin
    else:
        tex = tkin

    if thin:
        ntot = 1e15
    elif 5 < ntot < 25:
        # allow ntot to be specified as a logarithm.  This is
        # safe because ntot < 1e10 gives a spectrum of all zeros, and the
        # plausible range of columns is not outside the specified range
        ntot = 10**ntot
    elif (25 < ntot < 1e5) or (ntot < 5):
        # these are totally invalid for log/non-log
        return 0

    # fillingfraction is an arbitrary scaling for the data
    # The model will be (normal model) * fillingfraction
    if fillingfraction is None:
        fillingfraction = 1.0

    # Physical constants in cgs units.
    ckms = 2.99792458e5
    ccms = ckms*1e5
    g1 = 1
    g2 = 1
    h = 6.6260693e-27
    kb = 1.3806505e-16
    mu0 = 1.476e-18  # Dipole Moment in cgs (1.476 Debeye)

    # Generate Partition Functions
    nlevs = 51
    jv=np.arange(nlevs)
    # NH3 states with J divisible by 3 are ortho; the rest are para.
    ortho = jv % 3 == 0
    para = True-ortho
    Jpara = jv[para]
    Jortho = jv[ortho]
    # NH3 rotational constants (Hz).
    Brot = 298117.06e6
    Crot = 186726.36e6

    runspec = np.zeros(len(xarr))

    tau_dict = {}
    para_count = 0
    ortho_count = 1  # ignore 0-0

    if tau is not None and thin:
        """
        Use optical depth in the 1-1 line as a free parameter
        The optical depths of the other lines are then set by the kinetic temperature
        Tex is still a free parameter in the final spectrum calculation at the bottom
        (technically, I think this process assumes LTE; Tex should come into play in
        these equations, not just the final one)
        """
        dT0 = 41.5  # Energy diff between (2,2) and (1,1) in K
        trot = tkin/(1+tkin/dT0*np.log(1+0.6*np.exp(-15.7/tkin)))
        tau_dict['oneone'] = tau
        tau_dict['twotwo'] = tau*(23.722/23.694)**2*4/3.*5/3.*np.exp(-41.5/trot)
        tau_dict['threethree'] = tau*(23.8701279/23.694)**2*3/2.*14./3.*np.exp(-101.1/trot)
        tau_dict['fourfour'] = tau*(24.1394169/23.694)**2*8/5.*9/3.*np.exp(-177.34/trot)
    else:
        """
        Column density is the free parameter.  It is used in conjunction with
        the full partition function to compute the optical depth in each band
        Given the complexity of these equations, it would be worth my while to
        comment each step carefully.
        """
        # Boltzmann-weighted partition sums over the para / ortho ladders;
        # ortho carries a factor 2 statistical weight.
        Zpara = (2*Jpara+1)*np.exp(-h*(Brot*Jpara*(Jpara+1)+
                                       (Crot-Brot)*Jpara**2)/(kb*tkin))
        Zortho = 2*(2*Jortho+1)*np.exp(-h*(Brot*Jortho*(Jortho+1)+
                                           (Crot-Brot)*Jortho**2)/(kb*tkin))
        for linename in line_names:
            if ortho_dict[linename]:
                orthoparafrac = fortho
                Z = Zortho
                count = ortho_count
                ortho_count += 1
            else:
                orthoparafrac = 1.0-fortho
                Z = Zpara
                count = para_count  # need to treat partition function separately
                para_count += 1
            # short variable names for readability
            frq = freq_dict[linename]
            partition = Z[count]
            aval = aval_dict[linename]

            # Friesen 2009 eqn A4 points out that the partition function actually says
            # how many molecules are in the NH3(1-1) state, both upper *and* lower.
            # population_upperlower = ntot * orthoparafrac * partition/(Z.sum())
            # population_upperstate = population_upperlower / (1+np.exp(h*frq/(kb*tex)))
            #
            # Note Jan 1, 2015: This is accounted for in the eqn below.  The
            # only difference is that I have used Tkin where Friesen et al 2009
            # use Tex.  Since Tex describes which states are populated, that may
            # be the correct one to use.

            # Total population of the higher energy inversion transition
            population_upperstate = ntot * orthoparafrac * partition/(Z.sum())

            # Optical depth from the upper-state population (Gaussian line,
            # hence the sqrt(2*pi) normalization of the profile).
            tau_dict[linename] = (population_upperstate /
                                  (1. + np.exp(-h*frq/(kb*tkin) ))*ccms**2 /
                                  (8*np.pi*frq**2) * aval *
                                  (1-np.exp(-h*frq/(kb*tex))) /
                                  (width/ckms*frq*np.sqrt(2*np.pi)) )

    # allow tau(11) to be specified instead of ntot
    # in the thin case, this is not needed: ntot plays no role
    # this process allows you to specify tau without using the approximate equations specified
    # above.  It should remove ntot from the calculations anyway...
    if tau is not None and not thin:
        tau11_temp = tau_dict['oneone']
        # re-scale all optical depths so that tau is as specified, but the relative taus
        # are sest by the kinetic temperature and partition functions
        # NOTE(review): dict.iteritems is Python 2-only; this module predates py3.
        for linename,t in tau_dict.iteritems():
            tau_dict[linename] = t * tau/tau11_temp

    components =[]
    for linename in line_names:
        # Hyperfine component offsets (km/s) and relative weights for this line.
        voff_lines = np.array(voff_lines_dict[linename])
        tau_wts = np.array(tau_wts_dict[linename])

        lines = (1-voff_lines/ckms)*freq_dict[linename]/1e9
        tau_wts = tau_wts / (tau_wts).sum()
        nuwidth = np.abs(width/ckms*lines)
        nuoff = xoff_v/ckms*lines

        # tau array
        tauprof = np.zeros(len(xarr))
        for kk,nuo in enumerate(nuoff):
            tauprof += (tau_dict[linename] * tau_wts[kk] *
                        np.exp(-(xarr.value+nuo-lines[kk])**2 / (2.0*nuwidth[kk]**2)) *
                        fillingfraction)
        components.append( tauprof )

        T0 = (h*xarr.value*1e9/kb)  # "temperature" of wavelength
        if tau is not None and thin:
            #runspec = tauprof+runspec
            # is there ever a case where you want to ignore the optical depth function?  I think no
            runspec = (T0/(np.exp(T0/tex)-1)-T0/(np.exp(T0/background_tb)-1))*(1-np.exp(-tauprof))+runspec
        else:
            runspec = (T0/(np.exp(T0/tex)-1)-T0/(np.exp(T0/background_tb)-1))*(1-np.exp(-tauprof))+runspec

    # Sanity check: with the default CMB background the radiative-transfer
    # expression above cannot go negative unless the inputs are pathological.
    if runspec.min() < 0 and background_tb == 2.7315:
        raise ValueError("Model dropped below zero. That is not possible normally. Here are the input values: "+
                ("tex: %f " % tex) +
                ("tkin: %f " % tkin) +
                ("ntot: %f " % ntot) +
                ("width: %f " % width) +
                ("xoff_v: %f " % xoff_v) +
                ("fortho: %f " % fortho)
                )

    if verbose or debug:
        log.info("tkin: %g tex: %g ntot: %g width: %g xoff_v: %g fortho: %g fillingfraction: %g" % (tkin,tex,ntot,width,xoff_v,fortho,fillingfraction))

    if return_components:
        return (T0/(np.exp(T0/tex)-1)-T0/(np.exp(T0/background_tb)-1))*(1-np.exp(-1*np.array(components)))

    if return_tau:
        return tau_dict

    return runspec
class ammonia_model(model.SpectralModel):
    """Multi-component ammonia spectral-line fitter.

    Wraps the :func:`ammonia` model so that ``npeaks`` velocity components,
    each with ``npars`` parameters (by default tkin, tex, ntot, width,
    xoff_v, fortho), can be fit simultaneously with mpfit or lmfit.

    NOTE(review): relies on the parent ``model.SpectralModel`` for
    ``_make_parinfo`` and ``lmfitter`` — not visible in this file.
    """

    def __init__(self,npeaks=1,npars=6,
                 parnames=['tkin','tex','ntot','width','xoff_v','fortho'],
                 **kwargs):
        # number of velocity components and parameters per component
        self.npeaks = npeaks
        self.npars = npars
        self._default_parnames = parnames
        self.parnames = copy.copy(self._default_parnames)

        # all fitters must have declared modelfuncs, which should take the fitted pars...
        self.modelfunc = ammonia
        self.n_modelfunc = self.n_ammonia

        # for fitting ammonia simultaneously with a flat background
        self.onepeakammonia = fitter.vheightmodel(ammonia)
        #self.onepeakammoniafit = self._fourparfitter(self.onepeakammonia)

        self.default_parinfo = None
        self.default_parinfo, kwargs = self._make_parinfo(**kwargs)

        # Remove keywords parsed by parinfo and ignored by the fitter
        for kw in ('tied','partied'):
            if kw in kwargs:
                kwargs.pop(kw)

        # enforce ammonia-specific parameter limits:
        # temperatures bounded below by TCMB, width/ntot non-negative,
        # ortho fraction restricted to [0, 1].
        for par in self.default_parinfo:
            if 'tex' in par.parname.lower():
                par.limited = (True,par.limited[1])
                par.limits = (max(par.limits[0],2.73), par.limits[1])
            if 'tkin' in par.parname.lower():
                par.limited = (True,par.limited[1])
                par.limits = (max(par.limits[0],2.73), par.limits[1])
            if 'width' in par.parname.lower():
                par.limited = (True,par.limited[1])
                par.limits = (max(par.limits[0],0), par.limits[1])
            if 'fortho' in par.parname.lower():
                par.limited = (True,True)
                if par.limits[1] != 0:
                    par.limits = (max(par.limits[0],0), min(par.limits[1],1))
                else:
                    par.limits = (max(par.limits[0],0), 1)
            if 'ntot' in par.parname.lower():
                par.limited = (True,par.limited[1])
                par.limits = (max(par.limits[0],0), par.limits[1])

        self.parinfo = copy.copy(self.default_parinfo)

        self.modelfunc_kwargs = kwargs
        # lower case? self.modelfunc_kwargs.update({'parnames':self.parinfo.parnames})
        self.use_lmfit = kwargs.pop('use_lmfit') if 'use_lmfit' in kwargs else False
        self.fitunits = 'GHz'

    def __call__(self,*args,**kwargs):
        # Calling the model performs a fit.
        return self.multinh3fit(*args,**kwargs)

    def n_ammonia(self, pars=None, parnames=None, **kwargs):
        """
        Returns a function that sums over N ammonia line profiles, where N is the length of
        tkin,tex,ntot,width,xoff_v,fortho *OR* N = len(pars) / 6

        The background "height" is assumed to be zero (you must "baseline" your
        spectrum before fitting)

        *pars* [ list ]
            a list with len(pars) = (6-nfixed)n, assuming
            tkin,tex,ntot,width,xoff_v,fortho repeated

        *parnames* [ list ]
            len(parnames) must = len(pars).  parnames determine how the ammonia
            function parses the arguments
        """
        if hasattr(pars,'values'):
            # important to treat as Dictionary, since lmfit params & parinfo both have .items
            parnames,parvals = zip(*pars.items())
            parnames = [p.lower() for p in parnames]
            parvals = [p.value for p in parvals]
        elif parnames is None:
            parvals = pars
            parnames = self.parnames
        else:
            parvals = pars
        if len(pars) != len(parnames):
            # this should only be needed when other codes are changing the number of peaks
            # during a copy, as opposed to letting them be set by a __call__
            # (n_modelfuncs = n_ammonia can be called directly)
            # n_modelfuncs doesn't care how many peaks there are
            if len(pars) % len(parnames) == 0:
                parnames = [p for ii in range(len(pars)/len(parnames)) for p in parnames]
                npars = len(parvals) / self.npeaks
            else:
                raise ValueError("Wrong array lengths passed to n_ammonia!")
        else:
            npars = len(parvals) / self.npeaks

        self._components = []
        def L(x):
            # Sum the model over all peaks; parameter names carry a trailing
            # peak index which is stripped before being passed to ammonia().
            v = np.zeros(len(x))
            for jj in xrange(self.npeaks):
                modelkwargs = kwargs.copy()
                for ii in xrange(npars):
                    name = parnames[ii+jj*npars].strip('0123456789').lower()
                    modelkwargs.update({name:parvals[ii+jj*npars]})
                v += ammonia(x,**modelkwargs)
            return v
        return L

    def components(self, xarr, pars, hyperfine=False, **kwargs):
        """
        Ammonia components don't follow the default, since in Galactic astronomy the hyperfine components should be well-separated.
        If you want to see the individual components overlaid, you'll need to pass hyperfine to the plot_fit call
        """
        comps=[]
        for ii in xrange(self.npeaks):
            if hyperfine:
                modelkwargs = dict(zip(self.parnames[ii*self.npars:(ii+1)*self.npars],pars[ii*self.npars:(ii+1)*self.npars]))
                comps.append( ammonia(xarr,return_components=True,**modelkwargs) )
            else:
                modelkwargs = dict(zip(self.parnames[ii*self.npars:(ii+1)*self.npars],pars[ii*self.npars:(ii+1)*self.npars]))
                comps.append( [ammonia(xarr,return_components=False,**modelkwargs)] )

        modelcomponents = np.concatenate(comps)

        return modelcomponents

    def multinh3fit(self, xax, data, err=None,
                    parinfo=None,
                    quiet=True, shh=True,
                    debug=False,
                    maxiter=200,
                    use_lmfit=False,
                    veryverbose=False, **kwargs):
        """
        Fit multiple nh3 profiles (multiple can be 1)

        Inputs:
           xax - x axis
           data - y axis
           npeaks - How many nh3 profiles to fit?  Default 1 (this could supersede onedgaussfit)
           err - error corresponding to data

           These parameters need to have length = 6*npeaks.  If npeaks > 1 and length = 6, they will
           be replicated npeaks times, otherwise they will be reset to defaults:
           params - Fit parameters: [tkin, tex, ntot (or tau), width, offset, ortho fraction] * npeaks
                  If len(params) % 6 == 0, npeaks will be set to len(params) / 6
           fixed - Is parameter fixed?
           limitedmin/minpars - set lower limits on each parameter (default: width>0, Tex and Tkin > Tcmb)
           limitedmax/maxpars - set upper limits on each parameter
           parnames - default parameter names, important for setting kwargs in model ['tkin','tex','ntot','width','xoff_v','fortho']

           quiet - should MPFIT output each iteration?
           shh - output final parameters?

        Returns:
           Fit parameters
           Model
           Fit errors
           chi2
        """
        # Build or normalize the parinfo describing all fit parameters.
        if parinfo is None:
            parinfo = self.parinfo = self.make_parinfo(**kwargs)
        else:
            if isinstance(parinfo, ParinfoList):
                if not quiet:
                    log.info("Using user-specified parinfo.")
                self.parinfo = parinfo
            else:
                if not quiet:
                    log.info("Using something like a user-specified parinfo, but not.")
                self.parinfo = ParinfoList([p if isinstance(p,Parinfo) else Parinfo(p)
                                            for p in parinfo],
                                           preserve_order=True)

        # Strip parinfo-construction keywords before forwarding to the model.
        fitfun_kwargs = dict((x,y) for (x,y) in kwargs.items()
                             if x not in ('npeaks', 'params', 'parnames',
                                          'fixed', 'limitedmin', 'limitedmax',
                                          'minpars', 'maxpars', 'tied',
                                          'max_tem_step'))

        if 'use_lmfit' in fitfun_kwargs:
            raise KeyError("use_lmfit was specified in a location where it "
                           "is unacceptable")

        npars = len(parinfo)/self.npeaks

        def mpfitfun(x,y,err):
            # Residual function in the form mpfit expects.
            if err is None:
                def f(p,fjac=None): return [0,(y-self.n_ammonia(pars=p,
                                                                parnames=parinfo.parnames,
                                                                **fitfun_kwargs)(x))]
            else:
                def f(p,fjac=None): return [0,(y-self.n_ammonia(pars=p,
                                                                parnames=parinfo.parnames,
                                                                **fitfun_kwargs)(x))/err]
            return f

        if veryverbose:
            log.info("GUESSES: ")
            log.info(str(parinfo))
            #log.info "\n".join(["%s: %s" % (p['parname'],p['value']) for p in parinfo])

        if use_lmfit:
            return self.lmfitter(xax, data, err=err,
                                 parinfo=parinfo,
                                 quiet=quiet,
                                 debug=debug)
        else:
            mp = mpfit(mpfitfun(xax,data,err),
                       parinfo=parinfo,
                       maxiter=maxiter,
                       quiet=quiet,
                       debug=debug)
            mpp = mp.params
            if mp.perror is not None: mpperr = mp.perror
            else: mpperr = mpp*0
            chi2 = mp.fnorm

        if mp.status == 0:
            raise Exception(mp.errmsg)

        # Copy the fitted values and errors back into the parinfo.
        for i,p in enumerate(mpp):
            parinfo[i]['value'] = p
            parinfo[i]['error'] = mpperr[i]

        if not shh:
            log.info("Fit status: {0}".format(mp.status))
            log.info("Fit message: {0}".format(mpfit_messages[mp.status]))
            log.info("Fit error message: {0}".format(mp.errmsg))
            log.info("Final fit values: ")
            for i,p in enumerate(mpp):
                log.info(" ".join((parinfo[i]['parname'], str(p), " +/- ",
                                   str(mpperr[i]))))
            log.info(" ".join(("Chi2: ", str(mp.fnorm)," Reduced Chi2: ",
                               str(mp.fnorm/len(data)), " DOF:",
                               str(len(data)-len(mpp)))))

        self.mp = mp
        self.parinfo = parinfo
        self.mpp = self.parinfo.values
        self.mpperr = self.parinfo.errors
        self.mppnames = self.parinfo.names
        self.model = self.n_ammonia(pars=self.mpp, parnames=self.mppnames,
                                    **fitfun_kwargs)(xax)

        # Per-peak optical-depth dictionaries for later inspection.
        indiv_parinfo = [self.parinfo[jj*self.npars:(jj+1)*self.npars]
                         for jj in xrange(len(self.parinfo)/self.npars)]
        modelkwargs = [
            dict([(p['parname'].strip("0123456789").lower(),p['value']) for p in pi])
            for pi in indiv_parinfo]
        self.tau_list = [ammonia(xax,return_tau=True,**mk) for mk in modelkwargs]

        return self.mpp,self.model,self.mpperr,chi2

    def moments(self, Xax, data, negamp=None, veryverbose=False, **kwargs):
        """
        Returns a very simple and likely incorrect guess
        """
        # TKIN, TEX, ntot, width, center, ortho fraction
        return [20,10, 1e15, 1.0, 0.0, 1.0]

    def annotations(self):
        """Build LaTeX-formatted legend labels, one per fitted parameter."""
        from decimal import Decimal  # for formatting
        tex_key = {'tkin':'T_K', 'tex':'T_{ex}', 'ntot':'N', 'fortho':'F_o',
                   'width':'\\sigma', 'xoff_v':'v', 'fillingfraction':'FF',
                   'tau':'\\tau_{1-1}'}
        # small hack below: don't quantize if error > value.  We want to see the values.
        label_list = []
        for pinfo in self.parinfo:
            parname = tex_key[pinfo['parname'].strip("0123456789").lower()]
            parnum = int(pinfo['parname'][-1])
            if pinfo['fixed']:
                formatted_value = "%s" % pinfo['value']
                pm = ""
                formatted_error=""
            else:
                formatted_value = Decimal("%g" % pinfo['value']).quantize(Decimal("%0.2g" % (min(pinfo['error'],pinfo['value']))))
                pm = "$\\pm$"
                formatted_error = Decimal("%g" % pinfo['error']).quantize(Decimal("%0.2g" % pinfo['error']))
            label = "$%s(%i)$=%8s %s %8s" % (parname, parnum, formatted_value, pm, formatted_error)
            label_list.append(label)
        labels = tuple(mpcb.flatten(label_list))
        return labels

    def make_parinfo(self, quiet=True,
                     npeaks=1,
                     params=(20,20,14,1.0,0.0,0.5), parnames=None,
                     fixed=(False,False,False,False,False,False),
                     limitedmin=(True,True,True,True,False,True),
                     limitedmax=(False,False,False,False,False,True),
                     minpars=(2.73,2.73,0,0,0,0),
                     maxpars=(0,0,0,0,0,1),
                     tied=('',)*6,
                     max_tem_step=1.,
                     **kwargs
                     ):
        """Create a ParinfoList from per-parameter guesses/limits, replicating
        (or truncating) the inputs to match npeaks * npars entries."""
        if not quiet:
            log.info("Creating a 'parinfo' from guesses.")
        self.npars = len(params) / npeaks

        if len(params) != npeaks and (len(params) / self.npars) > npeaks:
            npeaks = len(params) / self.npars
        self.npeaks = npeaks

        if isinstance(params,np.ndarray): params=params.tolist()
        # this is actually a hack, even though it's decently elegant
        # somehow, parnames was being changed WITHOUT being passed as a variable
        # this doesn't make sense - at all - but it happened.
        # (it is possible for self.parnames to have npars*npeaks elements where
        # npeaks > 1 coming into this function even though only 6 pars are specified;
        # _default_parnames is the workaround)
        if parnames is None: parnames = copy.copy(self._default_parnames)

        partype_dict = dict(zip(['params', 'parnames', 'fixed',
                                 'limitedmin', 'limitedmax', 'minpars',
                                 'maxpars', 'tied'],
                                [params, parnames, fixed, limitedmin,
                                 limitedmax, minpars, maxpars, tied]))

        # make sure all various things are the right length; if they're
        # not, fix them using the defaults
        # (you can put in guesses of length 12 but leave the rest length 6;
        # this code then doubles the length of everything else)
        for partype,parlist in partype_dict.iteritems():
            if len(parlist) != self.npars*self.npeaks:
                # if you leave the defaults, or enter something that can be
                # multiplied by npars to get to the right number of
                # gaussians, it will just replicate
                if len(parlist) == self.npars:
                    partype_dict[partype] *= npeaks
                elif len(parlist) > self.npars:
                    # DANGER: THIS SHOULD NOT HAPPEN!
                    log.warn("WARNING!  Input parameters were longer than allowed for variable {0}".format(parlist))
                    partype_dict[partype] = partype_dict[partype][:self.npars]
                elif parlist==params:  # this instance shouldn't really be possible
                    partype_dict[partype] = [20,20,1e10,1.0,0.0,0.5] * npeaks
                elif parlist==fixed:
                    partype_dict[partype] = [False] * len(params)
                elif parlist==limitedmax:  # only fortho, fillingfraction have upper limits
                    partype_dict[partype] = (np.array(parnames) == 'fortho') + (np.array(parnames) == 'fillingfraction')
                elif parlist==limitedmin:  # no physical values can be negative except velocity
                    partype_dict[partype] = (np.array(parnames) != 'xoff_v')
                elif parlist==minpars:
                    # all have minima of zero except kinetic temperature, which can't be below CMB.
                    # Excitation temperature technically can be, but not in this model
                    partype_dict[partype] = ((np.array(parnames) == 'tkin') + (np.array(parnames) == 'tex')) * 2.73
                elif parlist==maxpars:  # fractions have upper limits of 1.0
                    partype_dict[partype] = ((np.array(parnames) == 'fortho') + (np.array(parnames) == 'fillingfraction')).astype('float')
                elif parlist==parnames:  # assumes the right number of parnames (essential)
                    partype_dict[partype] = list(parnames) * self.npeaks
                elif parlist==tied:
                    # renumber tied expressions (e.g. 'p[5]') for each extra peak
                    partype_dict[partype] = [_increment_string_number(t, ii*self.npars)
                                             for t in tied
                                             for ii in range(self.npeaks)]

        if len(parnames) != len(partype_dict['params']):
            raise ValueError("Wrong array lengths AFTER fixing them")

        # used in components.  Is this just a hack?
        self.parnames = partype_dict['parnames']

        parinfo = [ {'n':ii, 'value':partype_dict['params'][ii],
                     'limits':[partype_dict['minpars'][ii],partype_dict['maxpars'][ii]],
                     'limited':[partype_dict['limitedmin'][ii],partype_dict['limitedmax'][ii]], 'fixed':partype_dict['fixed'][ii],
                     'parname':partype_dict['parnames'][ii]+str(ii/self.npars),
                     'tied':partype_dict['tied'][ii],
                     'mpmaxstep':max_tem_step*float(partype_dict['parnames'][ii] in ('tex','tkin')), # must force small steps in temperature (True = 1.0)
                     'error': 0}
                    for ii in xrange(len(partype_dict['params'])) ]

        # hack: remove 'fixed' pars
        #parinfo_with_fixed = parinfo
        #parinfo = [p for p in parinfo_with_fixed if not p['fixed']]
        #fixed_kwargs = dict((p['parname'].strip("0123456789").lower(),
        #                     p['value'])
        #                    for p in parinfo_with_fixed if p['fixed'])
        ## don't do this - it breaks the NEXT call because npars != len(parnames) self.parnames = [p['parname'] for p in parinfo]
        ## this is OK - not a permanent change
        #parnames = [p['parname'] for p in parinfo]
        ## not OK self.npars = len(parinfo)/self.npeaks
        parinfo = ParinfoList([Parinfo(p) for p in parinfo], preserve_order=True)
        #import pdb; pdb.set_trace()
        return parinfo
class ammonia_model_vtau(ammonia_model):
    """Ammonia fitter variant that fits the 1-1 optical depth (tau)
    instead of the total column density (ntot).

    Identical to `ammonia_model` except for the third parameter name.
    """

    def __init__(self, **kwargs):
        # Bug fix: **kwargs was previously accepted but silently discarded;
        # forward it so fitter options still reach ammonia_model.__init__.
        super(ammonia_model_vtau, self).__init__(parnames=['tkin', 'tex', 'tau',
                                                           'width', 'xoff_v',
                                                           'fortho'],
                                                 **kwargs)

    def moments(self, Xax, data, negamp=None, veryverbose=False, **kwargs):
        """
        Returns a very simple and likely incorrect guess
        """
        # TKIN, TEX, tau, width, center, ortho fraction
        return [20, 10, 1, 1.0, 0.0, 1.0]

    def __call__(self, *args, **kwargs):
        # Calling the model performs a fit.
        return self.multinh3fit(*args, **kwargs)
class ammonia_model_background(ammonia_model):
    """Ammonia fitter variant with the background brightness temperature
    (``background_tb``) as a seventh, by-default-fixed, fit parameter."""

    def __init__(self,**kwargs):
        super(ammonia_model_background,self).__init__(npars=7,
                                                      parnames=['tkin', 'tex',
                                                                'ntot',
                                                                'width',
                                                                'xoff_v',
                                                                'fortho',
                                                                'background_tb'])

    def moments(self, Xax, data, negamp=None, veryverbose=False, **kwargs):
        """
        Returns a very simple and likely incorrect guess
        """
        # TKIN, TEX, ntot, width, center, ortho fraction, background Tb (TCMB)
        return [20,10, 1, 1.0, 0.0, 1.0, 2.73]

    def __call__(self,*args,**kwargs):
        #if self.multisingle == 'single':
        #    return self.onepeakammoniafit(*args,**kwargs)
        #elif self.multisingle == 'multi':
        #    # Why is tied 6 instead of 7?
        return self.multinh3fit(*args,**kwargs)

    def make_parinfo(self, npeaks=1, err=None,
                     params=(20,20,14,1.0,0.0,0.5,2.73), parnames=None,
                     fixed=(False,False,False,False,False,False,True),
                     limitedmin=(True,True,True,True,False,True,True),
                     limitedmax=(False,False,False,False,False,True,True),
                     minpars=(2.73,2.73,0,0,0,0,2.73), parinfo=None,
                     maxpars=(0,0,0,0,0,1,2.73),
                     tied=('',)*7,
                     quiet=True, shh=True,
                     veryverbose=False, **kwargs):
        """Same as the parent, but with 7-element defaults: background_tb is
        pinned at TCMB (fixed=True, min=max=2.73)."""
        return super(ammonia_model_background,
                     self).make_parinfo(npeaks=npeaks, err=err, params=params,
                                        parnames=parnames, fixed=fixed,
                                        limitedmin=limitedmin,
                                        limitedmax=limitedmax, minpars=minpars,
                                        parinfo=parinfo, maxpars=maxpars,
                                        tied=tied, quiet=quiet, shh=shh,
                                        veryverbose=veryverbose, **kwargs)

    def multinh3fit(self, xax, data, npeaks=1, err=None,
                    params=(20,20,14,1.0,0.0,0.5,2.73), parnames=None,
                    fixed=(False,False,False,False,False,False,True),
                    limitedmin=(True,True,True,True,False,True,True),
                    limitedmax=(False,False,False,False,False,True,True),
                    minpars=(2.73,2.73,0,0,0,0,2.73), parinfo=None,
                    maxpars=(0,0,0,0,0,1,2.73),
                    tied=('',)*7,
                    quiet=True, shh=True,
                    veryverbose=False, **kwargs):
        """Same as the parent fit, but with 7-element defaults including the
        (fixed) background_tb parameter."""
        return super(ammonia_model_background,
                     self).multinh3fit(xax, data, npeaks=npeaks, err=err,
                                       params=params, parnames=parnames,
                                       fixed=fixed, limitedmin=limitedmin,
                                       limitedmax=limitedmax, minpars=minpars,
                                       parinfo=parinfo, maxpars=maxpars,
                                       tied=tied, quiet=quiet, shh=shh,
                                       veryverbose=veryverbose, **kwargs)

    def annotations(self):
        """Build LaTeX legend labels; extends the parent's key table with
        background_tb."""
        from decimal import Decimal  # for formatting
        tex_key = {'tkin':'T_K', 'tex':'T_{ex}', 'ntot':'N', 'fortho':'F_o',
                   'width':'\\sigma', 'xoff_v':'v', 'fillingfraction':'FF',
                   'tau':'\\tau_{1-1}', 'background_tb':'T_{BG}'}
        # small hack below: don't quantize if error > value.  We want to see the values.
        label_list = []
        for pinfo in self.parinfo:
            parname = tex_key[pinfo['parname'].strip("0123456789").lower()]
            parnum = int(pinfo['parname'][-1])
            if pinfo['fixed']:
                formatted_value = "%s" % pinfo['value']
                pm = ""
                formatted_error=""
            else:
                formatted_value = Decimal("%g" % pinfo['value']).quantize(Decimal("%0.2g" % (min(pinfo['error'],pinfo['value']))))
                pm = "$\\pm$"
                formatted_error = Decimal("%g" % pinfo['error']).quantize(Decimal("%0.2g" % pinfo['error']))
            label = "$%s(%i)$=%8s %s %8s" % (parname, parnum, formatted_value, pm, formatted_error)
            label_list.append(label)
        labels = tuple(mpcb.flatten(label_list))
        return labels
def _increment_string_number(st, count):
"""
Increment a number in a string
Expects input of the form: p[6]
"""
import re
dig = re.compile('[0-9]+')
if dig.search(st):
n = int(dig.search(st).group())
result = dig.sub(str(n+count), st)
return result
else:
return st
| bsipocz/pyspeckit | pyspeckit/spectrum/models/ammonia.py | Python | mit | 34,486 |
#!/usr/bin/env python
# coding=utf-8
__author__ = 'yxryab'
import logging
import optparse
import re
import sys
import platform
import os
from collections import OrderedDict
from time import gmtime, strftime
from subprocess import Popen, PIPE, CalledProcessError, call
def checkExcept(results):
    """Scan sqlplus/psql output for error patterns and record the outcome.

    Python 2 code.  *results* is the (stdout, stderr) pair returned by
    ``Popen.communicate``.  Relies on module globals: the compiled regexes
    (``reerror``, ``reunkcom``, ``reoshibka``, ``renofile``, ``reneudalos``),
    the current ``schema`` name, ``resultMain`` (mutated and returned) and
    the ``openfile`` flag (set when anything was written to the log file).
    """
    resultERROR = []
    resultSUCCSES = []
    resultDic = resultMain
    for result in results:
        logging.info(result)
        try:
            # English patterns are matched on the raw bytes; the Russian
            # patterns need the stream decoded to unicode first.
            errore = reerror.findall(str(result))
            unknownCommand = reunkcom.findall(str(result))
            oshibka = reoshibka.findall(result.decode('utf-8'))
            nofile = renofile.findall(result.decode('utf-8'))
            neudalos = reneudalos.findall(result.decode('utf-8'))
        except re.error, e:
            logging.info("Error regular expression %s" % e)
        try:
            if errore or unknownCommand or oshibka or nofile or neudalos:
                # Append the raw output to today's log file.
                f = open('/opt/opflood/log/' + (strftime("%d_%m_%Y", gmtime())) + '.log', 'a')
                global openfile
                openfile = True
                if neudalos:
                    resultERROR.append(neudalos[0] + ", and still found: '" + str(len(neudalos)) + "' errors in stdout, schema: " + schema)
                if errore:
                    resultERROR.append(errore[0] + ", and still found: '" + str(len(errore)) + "' errors in stdout, schema: " + schema)
                if unknownCommand:
                    resultERROR.append(unknownCommand[0] + ", and still found: '" + str(len(unknownCommand)) + "' errors in stdout, schema: " + schema)
                if oshibka:
                    resultERROR.append(oshibka[0] + u", and still found: '" + str(len(oshibka)) + u"' errors in stdout, schema: " + schema)
                if nofile:
                    resultERROR.append(nofile[0] + u", and still found: '" + str(len(nofile)) + u"' errors in stdout, schema: " + schema)
                f.write(str((strftime("%d/%m/%Y %H:%M:%S", gmtime()) + ' - in schema: ' + schema + " errors " + str(result))))
                f.close()
            else:
                resultSUCCSES.append("No errors were found in schema: %s" % schema)
        except TypeError, e:
            logging.info('Errors %s' % e)
    # Record per-schema results keyed by "<schema>", "<schema>errors",
    # "<schema>succses" in the shared resultMain dict.
    if len(resultERROR) == 0:
        resultDic.update({
            schema: schema,
            schema + 'errors': False,
            schema + 'succses': resultSUCCSES
        })
    elif len(resultERROR) >= 1:
        resultDic.update({
            schema: schema,
            schema + 'errors': resultERROR,
            schema + 'succses': resultSUCCSES
        })
    return resultDic
def runSqlQueryOracle(sqlCommand, connectString):
    """Run *sqlCommand* through ``sqlplus -S connectString``.

    Returns the (stdout, stderr) tuple from ``communicate()``.
    """
    try:
        session = Popen(['sqlplus', '-S', connectString], stdin=PIPE, stdout=PIPE, stderr=PIPE)
        session.stdin.write(sqlCommand)
    except CalledProcessError, e:
        # NOTE(review): Popen raises OSError on spawn failure, not
        # CalledProcessError — this handler likely never fires; confirm intent.
        logging.info("Failure: %s" % e)
        sys.exit(1)
    # communicate() closes stdin and waits for sqlplus to finish.
    return session.communicate()
def runSqlQueryPostgresql(connectSring, sqlCommand):
    """Run a psql command line built from two argv lists.

    Note the parameter order is reversed relative to runSqlQueryOracle.
    Side effect: exports the module-global *password* via PGPASSWORD so
    psql can authenticate non-interactively (cleared again by the caller).
    Returns the (stdout, stderr) tuple from ``communicate()``.
    """
    try:
        os.environ['PGPASSWORD'] = password
        session = Popen(connectSring + sqlCommand, stdin=PIPE, stdout=PIPE, stderr=PIPE)
    except CalledProcessError, e:
        # NOTE(review): Popen raises OSError, not CalledProcessError —
        # this handler likely never fires; confirm intent.
        logging.info("Failure: %s" % e)
        sys.exit(1)
    return session.communicate()
def installComponents():
    """Install the PostgreSQL 9.4 and/or Oracle instantclient packages.

    Python 2 code; must run as root.  Which components are installed is
    controlled by the module-global *install* ("postgresql", "oracle" or
    "all").  Uses rpm/yum exit codes (0 == already installed / success)
    and always terminates the process via sys.exit().
    """
    def ErrorMessage(ec):
        # Log the failing exit code and abort the whole script.
        logging.info("Installation ended with error, exit_code: %s" % str(ec))
        sys.exit(1)
    currentPlatform = (platform.dist()[0].lower())
    currentRelease = (platform.dist()[1].split('.')[0])
    postgresURL = ('http://yum.postgresql.org/9.4/redhat/rhel-%s-x86_64/pgdg-%s94-9.4-1.noarch.rpm' % (currentRelease, currentPlatform))
    # rpm -q exit code 0 means the package is already present.
    ExitCodePostgres = call(['rpm', '-q', 'postgresql94'], stdout=PIPE)
    ExitCodeNonVersionPostgres = call(['rpm', '-q', 'postgresql'], stdout=PIPE)
    ExitCodeSqlplus = call(['rpm', '-q', 'oracle-instantclient12.1-sqlplus-12.1*'], stdout=PIPE)
    ExitCodeBasic = call(['rpm', '-q', 'oracle-instantclient12.1-basic-12.1*'], stdout=PIPE)
    ExitCodePostgresInstallRepo = call(['rpm', '-q', 'pgdg-centos94-9.4-1'], stdout=PIPE)
    try:
        if "postgresql" == install or "all" == install:
            # Refuse to proceed if an unversioned (older) PostgreSQL exists.
            if 0 == ExitCodeNonVersionPostgres:
                o = Popen(['rpm', '-q', 'postgresql'], stdout=PIPE)
                ovp, err = o.communicate()
                logging.info("You have installed %s, to work correctly, remove the old version of PostgreSQL" % ovp.strip())
                logging.info("Installation failed because the tasks required under the terms of PostgreSQL 9.4")
                sys.exit(1)
            if 0 == ExitCodePostgres:
                logging.info('PostgreSQL 9.4 already installed')
            else:
                if 0 == ExitCodePostgresInstallRepo:
                    logging.info("PostgreSQL 9.4 repo, already installed")
                else:
                    # Add the pgdg repo first, then install the client.
                    ExitCodePostgresInstallRepo = call(['yum', 'install', postgresURL, '-y'])
                    if 0 == ExitCodePostgresInstallRepo:
                        ExitCodePostgresInstallPostgres = call(['yum', 'install', 'postgresql94', '-y'])
                        if 0 != ExitCodePostgresInstallPostgres:
                            ErrorMessage(ExitCodePostgresInstallPostgres)
                    else:
                        ErrorMessage(ExitCodePostgresInstallRepo)
        if "oracle" == install or "all" == install:
            if 0 == ExitCodeBasic:
                logging.info('oracle instantclient 12.1 basic already installed')
            else:
                # Local RPMs are expected to be shipped in /opt/opflood/archive.
                ExitCodeBasicInstall = call(['yum', 'localinstall', '--nogpgcheck', '-y', '/opt/opflood/archive/oracle-instantclient12.1-basic-12.1.0.2.0-1.x86_64.rpm'])
                if 0 != ExitCodeBasicInstall:
                    ErrorMessage(ExitCodeBasicInstall)
            if 0 == ExitCodeSqlplus:
                logging.info('oracle instantclient 12.1 sqlplus already installed')
            else:
                ExitCodeSqlplusInstall = call(['yum', 'localinstall', '--nogpgcheck', '-y', '/opt/opflood/archive/oracle-instantclient12.1-sqlplus-12.1.0.2.0-1.x86_64.rpm'])
                if 0 == ExitCodeSqlplusInstall:
                    call(['cp', '/opt/opflood/archive/sqlplusenv.sh', '/etc/profile.d/sqlplusenv.sh'])
                    logging.info('------------------------------------------------------------------------------------------')
                    logging.info('| In order to successfully work with the Oracle database , you must supply a valid Hosts.|')
                    logging.info('| Example /etc/hosts: |')
                    logging.info('| 127.0.0.1 localhost localhost.localdomain localhost4 localhost4.localdomain4 |')
                    logging.info('| ::1 localhost localhost.localdomain localhost6 localhost6.localdomain6 |')
                    logging.info('| 127.0.0.1 hostname |')
                    logging.info('------------------------------------------------------------------------------------------')
                    call(['su', '-c', 'source /etc/profile.d/sqlplusenv.sh'], shell=True, executable="/bin/bash")
                else:
                    ErrorMessage(ExitCodeSqlplusInstall)
    except OSError, e:
        logging.info(e)
        sys.exit(1)
    sys.exit(0)
if __name__ == '__main__':
    # Command-line interface.  The usage/description strings are kept
    # verbatim (they are user-visible output).
    parser = optparse.OptionParser(usage="usage: %prog [-h] [-a ADDRESS] \n\
[-p PORT] [-u USER] [-w PASSWORD] [--install]\n\
[-i DIALECT] [-d DATABASE] [-s SCHEMA] [-v VERBOSE] [-q QUIET] [--version]",
                                   version="%prog 1.2",
                                   description='The script contains a minimum set of customers for \
                    PostgreSQL and Oracle. Designed for use SQL injection. \
                    Example: \
                    \
                    Oracle: ./%prog -i oracle -a 10.0.0.1 -u system -w 123456 -d dbt -s 1.sql -s /schem/2.sql \
                    \
                    PostgreSQL: ./%prog -i postgresql -a 10.0.0.2 -u system -w 123456 -d dbt -s 1.sql -s /shem/2.sql \
                    ')
    parser.add_option("-a", "--address", dest="address", metavar="Server address",
                      default='localhost', type="string",
                      help="Server address, default 'localhost'")
    parser.add_option("-p", "--port", dest="port", type="string",
                      help="Server port")
    parser.add_option("-u", "--user", dest="user", type="string",
                      help="username")
    parser.add_option("-w", "--password", dest="password", type="string",
                      help="Password")
    parser.add_option("-i", "--dialect", metavar="{oracle or postgresql}", dest="dialect",
                      choices=("oracle", "postgresql"), help="Dialect of SQL {oracle, postgresql}")
    parser.add_option("-d", "--db", metavar="DB name", dest="db", type="string",
                      help="Name the connected database")
    parser.add_option("-s", "--schema", dest="schema", action="append",
                      help="Database schema name(s)")
    parser.add_option("-v", "--verbose", action="store_true", dest="verbose")
    parser.add_option("-q", "--quiet", action="store_false", dest="verbose")
    parser.add_option("--install", dest="install", choices=("oracle", "postgresql", "all"), metavar="{oracle or postgresql or all}", help="Installing components")
    (options, args) = parser.parse_args()
    address = options.address
    port = options.port
    user = options.user
    password = options.password
    dialect = options.dialect
    db = options.db
    schemas = options.schema
    install = options.install
    # Shared state used by checkExcept().
    resultMain = {}
    openfile = False
    # Logging: default -> INFO to stdout, -v -> DEBUG to stdout,
    # -q -> DEBUG to the daily log file.
    if options.verbose is None:
        logging.basicConfig(format='%(asctime)s : %(message)s',
                            datefmt='%m/%d/%Y %H:%M:%S',
                            level=logging.INFO,
                            stream=sys.stdout)
    elif options.verbose:
        logging.basicConfig(format='%(asctime)s : %(message)s',
                            datefmt='%m/%d/%Y %H:%M:%S',
                            level=logging.DEBUG,
                            stream=sys.stdout)
    else:
        logging.basicConfig(format='%(asctime)s : %(message)s',
                            datefmt='%m/%d/%Y %H:%M:%S',
                            filename=("/opt/opflood/log/" + strftime("%d_%m_%Y", gmtime()) + ".log"),
                            level=logging.DEBUG)
    def parsExcept():
        # Print usage and abort — used for every missing-argument case below.
        parser.print_help()
        sys.exit(1)
    resultFinish = []
    # Error patterns searched for in the SQL client output (see checkExcept).
    reerror = re.compile('.*ERROR|error|FATAL|fatal|FAILED|failed|exception|EXCEPTION|critical|CRITICAL.*')
    reunkcom = re.compile('.*unknown|unable\ to\ open.*')
    uni1 = u"ОШИБКА"
    reoshibka = re.compile(".*" + uni1 + ".*")
    uni2 = u"Нет такого файла"
    renofile = re.compile('.*' + uni2 + '.*')
    uni3 = u"не удалось"
    reneudalos = re.compile('.*' + uni3 + '.*')
    if install:
        # NOTE(review): os.getegid() checks the effective *group* id; a root
        # check is conventionally os.geteuid() == 0 — confirm intent.
        if os.getegid() == 0:
            installComponents()
        else:
            logging.info("You need to have root privileges to run installation. Please try again, this time using 'sudo'.")
            sys.exit(1)
    if not schemas:
        logging.info("To work correctly, need SQL schema")
        parsExcept()
    if 'localhost' == address:
        logging.info("The script will run on localhost")
    if not db:
        logging.info("To work correctly, need database name")
        parsExcept()
    if not dialect:
        logging.info("Looking for information about the database server")
        parsExcept()
    logging.info('Connect and SQL script execution may take time')
    for schema in schemas:
        if 'oracle' == dialect:
            if not port:
                port = '1521'
            connectSring = ('%s/%s@%s:%s/%s' % (user, password, address, port, db))
            sqlCommand = ('@%s' % schema)
            results = runSqlQueryOracle(sqlCommand, connectSring)
            resultMain.update(checkExcept(results))
        if 'postgresql' == dialect:
            if not port:
                port = '5432'
            sqlCommand = ['-f', schema]
            if 'localhost' == address:
                connectSring = ['psql', '-U', user, '-d', db]
            else:
                connectSring = ['psql', '-h', address, '-p', port, '-U', user, '-d', db]
            results = runSqlQueryPostgresql(connectSring, sqlCommand)
            # Scrub the password exported by runSqlQueryPostgresql.
            os.environ['PGPASSWORD'] = ''
            resultMain.update(checkExcept(results))
    if openfile:
        logging.info("---in total------- Please refer to the log in: /opt/opflood/log/" + str((strftime("%d_%m_%Y", gmtime()))) + ".log----------")
    else:
        logging.info("---in total-------------------------------------------------------")
    for schema in schemas:
        if resultMain[schema + 'errors']:
            # Loop variable renamed from `re`, which shadowed the `re`
            # module imported at the top of the file.
            for err_msg in resultMain[schema + 'errors']:
                logging.info(err_msg)
        else:
            for rs in list(OrderedDict.fromkeys(resultMain[schema + 'succses'])):
                logging.info(rs)
from django.conf.urls import patterns, include, url
from django.contrib import admin
# URL routing for the project.  NOTE(review): django.conf.urls.patterns()
# is deprecated since Django 1.8 and removed in 1.10 — on upgrade, replace
# with a plain list of url() entries.
urlpatterns = patterns('',
    # Examples:
    # url(r'^$', 'iradio.views.home', name='home'),
    # url(r'^blog/', include('blog.urls')),

    url(r'^admin/', include(admin.site.urls)),
    # Catch-all prefix: must stay last, or it would shadow the routes above.
    url(r'^', include("iradioapp.urls",namespace="iradioapp")),
)
| Northcode/FunkyRadio | iradio/urls.py | Python | gpl-2.0 | 339 |
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: service.proto
"""Generated protocol buffer code."""
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='service.proto',
package='pb',
syntax='proto3',
serialized_options=b'ZFgithub.com/egustafson/sandbox/_hybrid/grpc-error-handling/server-go/pb',
create_key=_descriptor._internal_create_key,
serialized_pb=b'\n\rservice.proto\x12\x02pb\"\x1e\n\nSvcRequest\x12\x10\n\x08req_text\x18\x01 \x01(\t\" \n\x0bSvcResponse\x12\x11\n\tresp_text\x18\x01 \x01(\t25\n\x03Svc\x12.\n\tDoService\x12\x0e.pb.SvcRequest\x1a\x0f.pb.SvcResponse\"\x00\x42HZFgithub.com/egustafson/sandbox/_hybrid/grpc-error-handling/server-go/pbb\x06proto3'
)
_SVCREQUEST = _descriptor.Descriptor(
name='SvcRequest',
full_name='pb.SvcRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='req_text', full_name='pb.SvcRequest.req_text', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=21,
serialized_end=51,
)
_SVCRESPONSE = _descriptor.Descriptor(
name='SvcResponse',
full_name='pb.SvcResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='resp_text', full_name='pb.SvcResponse.resp_text', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=53,
serialized_end=85,
)
DESCRIPTOR.message_types_by_name['SvcRequest'] = _SVCREQUEST
DESCRIPTOR.message_types_by_name['SvcResponse'] = _SVCRESPONSE
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
SvcRequest = _reflection.GeneratedProtocolMessageType('SvcRequest', (_message.Message,), {
'DESCRIPTOR' : _SVCREQUEST,
'__module__' : 'service_pb2'
# @@protoc_insertion_point(class_scope:pb.SvcRequest)
})
_sym_db.RegisterMessage(SvcRequest)
SvcResponse = _reflection.GeneratedProtocolMessageType('SvcResponse', (_message.Message,), {
'DESCRIPTOR' : _SVCRESPONSE,
'__module__' : 'service_pb2'
# @@protoc_insertion_point(class_scope:pb.SvcResponse)
})
_sym_db.RegisterMessage(SvcResponse)
DESCRIPTOR._options = None
_SVC = _descriptor.ServiceDescriptor(
name='Svc',
full_name='pb.Svc',
file=DESCRIPTOR,
index=0,
serialized_options=None,
create_key=_descriptor._internal_create_key,
serialized_start=87,
serialized_end=140,
methods=[
_descriptor.MethodDescriptor(
name='DoService',
full_name='pb.Svc.DoService',
index=0,
containing_service=None,
input_type=_SVCREQUEST,
output_type=_SVCRESPONSE,
serialized_options=None,
create_key=_descriptor._internal_create_key,
),
])
_sym_db.RegisterServiceDescriptor(_SVC)
DESCRIPTOR.services_by_name['Svc'] = _SVC
# @@protoc_insertion_point(module_scope)
| egustafson/sandbox | _hybrid/grpc-error-handling/client-py/service_pb2.py | Python | apache-2.0 | 4,053 |
import unittest
import charm.schemes.abenc.abenc_maabe_yj14 as abenc_maabe_yj14
debug = False
# unit test for scheme contributed by artjomb
class MAabe_YJ14Test(unittest.TestCase):
    """Smoke test for the YJ14 multi-authority ABE scheme module."""
    def testMAabe_YJ14(self):
        # The scheme's self-test helpers raise on failure, so simply
        # running them to completion is the assertion.
        abenc_maabe_yj14.basicTest()
        abenc_maabe_yj14.revokedTest()
if __name__ == "__main__":
    unittest.main()
| JHUISI/charm | charm/test/schemes/abenc/abenc_maabe_yj14_test.py | Python | lgpl-3.0 | 340 |
import os
import urllib.request
""" Order 3: Download iamge with image url
For internet crawl spider, donwload image is necessary. So Write a function to deel it.
"""
class DownloadImageWithUrl(object):
    """Download an image from a URL into a local directory."""
    @staticmethod
    def donwload(image_url, save_path, save_name):
        """Fetch *image_url* and save it as save_path/save_name.

        Kept under its original (misspelled) name for backward
        compatibility; prefer the `download` alias below.
        """
        urllib.request.urlretrieve(image_url, os.path.join(save_path, save_name))
    # Correctly spelled alias for the historical `donwload` name.
    download = donwload
# Order 3 testing:
# i_image_url = 'http://img.hb.aicdn.com/576fe24099dd9481d52ebeb503b0e17cd95183d5341e6-VUYSZv_fw658'
# i_save_path = 'D:\\test'
# i_save_name = '576fe24099dd9481d52ebeb503b0e17cd95183d5341e6.jpg'
# DownloadImageWithUrl.donwload(i_image_url, i_save_path, i_save_name)
| flyingSprite/spinelle | task_inventory/order_1_to_30/order_3_donwload_image_with_url.py | Python | mit | 648 |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for training routines."""
import itertools
from absl.testing import parameterized
import numpy as np
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.data.ops import iterator_ops
from tensorflow.python.eager import context
from tensorflow.python.keras import combinations
from tensorflow.python.keras import keras_parameterized
from tensorflow.python.keras import layers as layers_module
from tensorflow.python.keras import losses
from tensorflow.python.keras import metrics as metrics_module
from tensorflow.python.keras import testing_utils
from tensorflow.python.keras.engine import input_layer
from tensorflow.python.keras.engine import training
from tensorflow.python.keras.engine import training_generator_v1
from tensorflow.python.keras.optimizer_v2 import rmsprop
from tensorflow.python.keras.utils import data_utils
from tensorflow.python.platform import test
from tensorflow.python.util import nest
def custom_generator(mode=2):
  """Yield endless batches of random data, cycling through 50 samples.

  mode=1 yields inputs only, mode=2 yields (inputs, targets), and any
  other mode yields (inputs, targets, sample_weights).
  """
  n_samples = 50
  n_batch = 10
  features = np.random.random((n_samples, 2))
  targets = np.random.random((n_samples, 4))
  weights = np.random.random((n_samples,))
  for step in itertools.count():
    lo = (step * n_batch) % n_samples
    hi = lo + n_batch
    batch = (features[lo:hi], targets[lo:hi], weights[lo:hi])
    if mode == 1:
      yield batch[0]
    elif mode == 2:
      yield batch[:2]
    else:
      yield batch
def custom_generator_changing_batch_size(mode=2):
  """Like `custom_generator`, but the batch size shrinks 10, 9, ..., 2, 1
  over the first ten batches and then stays at 1 forever.

  mode=1 yields inputs only, mode=2 yields (inputs, targets), and any
  other mode yields (inputs, targets, sample_weights).
  """
  n_samples = 50
  features = np.random.random((n_samples, 2))
  targets = np.random.random((n_samples, 4))
  weights = np.random.random((n_samples,))
  for step in itertools.count():
    # Closed form of the original counter: 10, 9, ..., 1, then 1 forever.
    size = max(1, 10 - step)
    lo = (step * 10) % n_samples
    hi = lo + size
    if mode == 1:
      yield features[lo:hi]
    elif mode == 2:
      yield features[lo:hi], targets[lo:hi]
    else:
      yield features[lo:hi], targets[lo:hi], weights[lo:hi]
# Thread-safe wrapper so multiple worker threads can pull from one generator.
custom_generator_threads = data_utils.threadsafe_generator(custom_generator)
class TestGeneratorMethods(keras_parameterized.TestCase):
  """Exercises fit/evaluate/predict with generator inputs across all Keras
  model types and execution modes (each call asserts by not raising)."""
  @keras_parameterized.run_with_all_model_types
  @keras_parameterized.run_all_keras_modes
  @data_utils.dont_use_multiprocessing_pool
  def test_fit_generator_method(self):
    """fit_generator with/without multiprocessing, validation, workers=0."""
    model = testing_utils.get_small_mlp(
        num_hidden=3, num_classes=4, input_dim=2)
    model.compile(
        loss='mse',
        optimizer=rmsprop.RMSprop(1e-3),
        metrics=['mae', metrics_module.CategoricalAccuracy()])
    model.fit_generator(custom_generator_threads(),
                        steps_per_epoch=5,
                        epochs=1,
                        verbose=1,
                        max_queue_size=10,
                        workers=4,
                        use_multiprocessing=True)
    model.fit_generator(custom_generator(),
                        steps_per_epoch=5,
                        epochs=1,
                        verbose=1,
                        max_queue_size=10,
                        use_multiprocessing=False)
    model.fit_generator(custom_generator(),
                        steps_per_epoch=5,
                        epochs=1,
                        verbose=1,
                        max_queue_size=10,
                        use_multiprocessing=False,
                        validation_data=custom_generator(),
                        validation_steps=10)
    model.fit_generator(custom_generator(),
                        steps_per_epoch=5,
                        validation_data=custom_generator(),
                        validation_steps=1,
                        workers=0)
  @keras_parameterized.run_with_all_model_types
  @keras_parameterized.run_all_keras_modes
  @data_utils.dont_use_multiprocessing_pool
  def test_evaluate_generator_method(self):
    """evaluate_generator with threads, single-process, and workers=0."""
    model = testing_utils.get_small_mlp(
        num_hidden=3, num_classes=4, input_dim=2)
    model.compile(
        loss='mse',
        optimizer=rmsprop.RMSprop(1e-3),
        metrics=['mae', metrics_module.CategoricalAccuracy()],
        run_eagerly=testing_utils.should_run_eagerly())
    model.evaluate_generator(custom_generator_threads(),
                             steps=5,
                             max_queue_size=10,
                             workers=2,
                             verbose=1,
                             use_multiprocessing=True)
    model.evaluate_generator(custom_generator(),
                             steps=5,
                             max_queue_size=10,
                             use_multiprocessing=False)
    model.evaluate_generator(custom_generator(),
                             steps=5,
                             max_queue_size=10,
                             use_multiprocessing=False,
                             workers=0)
  @keras_parameterized.run_with_all_model_types
  @keras_parameterized.run_all_keras_modes
  @data_utils.dont_use_multiprocessing_pool
  def test_predict_generator_method(self):
    """predict_generator, including generators that yield inputs only."""
    model = testing_utils.get_small_mlp(
        num_hidden=3, num_classes=4, input_dim=2)
    model.run_eagerly = testing_utils.should_run_eagerly()
    model.predict_generator(custom_generator_threads(),
                            steps=5,
                            max_queue_size=10,
                            workers=2,
                            use_multiprocessing=True)
    model.predict_generator(custom_generator(),
                            steps=5,
                            max_queue_size=10,
                            use_multiprocessing=False)
    model.predict_generator(custom_generator(),
                            steps=5,
                            max_queue_size=10,
                            workers=0)
    # Test generator with just inputs (no targets)
    model.predict_generator(custom_generator_threads(mode=1),
                            steps=5,
                            max_queue_size=10,
                            workers=2,
                            use_multiprocessing=True)
    model.predict_generator(custom_generator(mode=1),
                            steps=5,
                            max_queue_size=10,
                            use_multiprocessing=False)
    model.predict_generator(custom_generator(mode=1),
                            steps=5,
                            max_queue_size=10,
                            workers=0)
  @keras_parameterized.run_with_all_model_types
  @keras_parameterized.run_all_keras_modes
  def test_generator_methods_with_sample_weights(self):
    """Generators yielding (x, y, sample_weight) triples (mode=3)."""
    model = testing_utils.get_small_mlp(
        num_hidden=3, num_classes=4, input_dim=2)
    model.compile(
        loss='mse',
        optimizer=rmsprop.RMSprop(1e-3),
        metrics=['mae', metrics_module.CategoricalAccuracy()],
        run_eagerly=testing_utils.should_run_eagerly())
    model.fit_generator(custom_generator(mode=3),
                        steps_per_epoch=5,
                        epochs=1,
                        verbose=1,
                        max_queue_size=10,
                        use_multiprocessing=False)
    model.fit_generator(custom_generator(mode=3),
                        steps_per_epoch=5,
                        epochs=1,
                        verbose=1,
                        max_queue_size=10,
                        use_multiprocessing=False,
                        validation_data=custom_generator(mode=3),
                        validation_steps=10)
    model.predict_generator(custom_generator(mode=3),
                            steps=5,
                            max_queue_size=10,
                            use_multiprocessing=False)
    model.evaluate_generator(custom_generator(mode=3),
                             steps=5,
                             max_queue_size=10,
                             use_multiprocessing=False)
  @keras_parameterized.run_with_all_model_types
  @keras_parameterized.run_all_keras_modes
  def test_generator_methods_invalid_use_case(self):
    """Generators yielding 4-tuples must raise ValueError everywhere."""
    def invalid_generator():
      while 1:
        yield (0, 0, 0, 0)
    model = testing_utils.get_small_mlp(
        num_hidden=3, num_classes=4, input_dim=2)
    model.compile(
        loss='mse',
        optimizer=rmsprop.RMSprop(1e-3),
        run_eagerly=testing_utils.should_run_eagerly())
    with self.assertRaises(ValueError):
      model.fit_generator(invalid_generator(),
                          steps_per_epoch=5,
                          epochs=1,
                          verbose=1,
                          max_queue_size=10,
                          use_multiprocessing=False)
    with self.assertRaises(ValueError):
      model.fit_generator(custom_generator(),
                          steps_per_epoch=5,
                          epochs=1,
                          verbose=1,
                          max_queue_size=10,
                          use_multiprocessing=False,
                          validation_data=invalid_generator(),
                          validation_steps=10)
    with self.assertRaises(ValueError):
      model.predict_generator(invalid_generator(),
                              steps=5,
                              max_queue_size=10,
                              use_multiprocessing=False)
    with self.assertRaises(ValueError):
      model.evaluate_generator(invalid_generator(),
                               steps=5,
                               max_queue_size=10,
                               use_multiprocessing=False)
  @keras_parameterized.run_with_all_model_types
  @keras_parameterized.run_all_keras_modes
  def test_generator_input_to_fit_eval_predict(self):
    """Plain generators passed directly to fit/evaluate/predict, including
    a generator whose batch size changes between steps."""
    val_data = np.ones([10, 10], np.float32), np.ones([10, 1], np.float32)
    def ones_generator():
      while True:
        yield np.ones([10, 10], np.float32), np.ones([10, 1], np.float32)
    model = testing_utils.get_small_mlp(
        num_hidden=10, num_classes=1, input_dim=10)
    model.compile(
        rmsprop.RMSprop(0.001),
        'binary_crossentropy',
        run_eagerly=testing_utils.should_run_eagerly())
    model.fit(
        ones_generator(),
        steps_per_epoch=2,
        validation_data=val_data,
        epochs=2)
    model.evaluate(ones_generator(), steps=2)
    model.predict(ones_generator(), steps=2)
    # Test with a changing batch size
    model = testing_utils.get_small_mlp(
        num_hidden=3, num_classes=4, input_dim=2)
    model.compile(
        loss='mse',
        optimizer=rmsprop.RMSprop(1e-3),
        metrics=['mae', metrics_module.CategoricalAccuracy()])
    model.fit_generator(custom_generator_changing_batch_size(),
                        steps_per_epoch=5,
                        epochs=1,
                        verbose=1,
                        max_queue_size=10,
                        use_multiprocessing=False)
    model.fit_generator(custom_generator_changing_batch_size(),
                        steps_per_epoch=5,
                        epochs=1,
                        verbose=1,
                        max_queue_size=10,
                        use_multiprocessing=False,
                        validation_data=custom_generator_changing_batch_size(),
                        validation_steps=10)
    model.fit(
        custom_generator_changing_batch_size(),
        steps_per_epoch=5,
        validation_data=custom_generator_changing_batch_size(),
        validation_steps=10,
        epochs=2)
    model.evaluate(custom_generator_changing_batch_size(), steps=5)
    model.predict(custom_generator_changing_batch_size(), steps=5)
  @keras_parameterized.run_with_all_model_types
  @keras_parameterized.run_all_keras_modes
  @data_utils.dont_use_multiprocessing_pool
  def test_generator_dynamic_shapes(self):
    """Batches whose padded sequence length varies from step to step."""
    x = [
        'I think juice is great',
        'unknown is the best language since slicedbread',
        'a a a a a a a',
        'matmul'
        'Yaks are also quite nice',
    ]
    y = [1, 0, 0, 1, 1]
    vocab = {
        word: i + 1 for i, word in
        enumerate(
            sorted(set(itertools.chain(*[i.split() for i in x]))))
    }
    def data_gen(batch_size=2):
      np.random.seed(0)
      data = list(zip(x, y)) * 10
      np.random.shuffle(data)
      def pack_and_pad(queue):
        # Pad every sentence in the queue to the longest one, then clear it.
        x = [[vocab[j] for j in i[0].split()] for i in queue]
        pad_len = max(len(i) for i in x)
        x = np.array([i + [0] * (pad_len - len(i)) for i in x])
        y = np.array([i[1] for i in queue])
        del queue[:]
        return x, y[:, np.newaxis]
      queue = []
      for i, element in enumerate(data):
        queue.append(element)
        if not (i + 1) % batch_size:
          yield pack_and_pad(queue)
      if queue:
        # Last partial batch
        yield pack_and_pad(queue)
    model = testing_utils.get_model_from_layers([
        layers_module.Embedding(input_dim=len(vocab) + 1, output_dim=4),
        layers_module.SimpleRNN(units=1),
        layers_module.Activation('sigmoid')
    ],
                                                input_shape=(None,))
    model.compile(loss=losses.binary_crossentropy, optimizer='sgd')
    model.fit(data_gen(), epochs=1, steps_per_epoch=5)
class TestGeneratorMethodsWithSequences(keras_parameterized.TestCase):
  """Tests for `keras.utils.Sequence` inputs to fit/evaluate/predict."""
  @keras_parameterized.run_with_all_model_types
  @keras_parameterized.run_all_keras_modes
  @data_utils.dont_use_multiprocessing_pool
  def test_training_with_sequences(self):
    """fit_generator accepts a Sequence with and without multiprocessing."""
    class DummySequence(data_utils.Sequence):
      def __getitem__(self, idx):
        return np.zeros([10, 2]), np.ones([10, 4])
      def __len__(self):
        return 10
    model = testing_utils.get_small_mlp(
        num_hidden=3, num_classes=4, input_dim=2)
    model.compile(loss='mse', optimizer=rmsprop.RMSprop(1e-3))
    model.fit_generator(DummySequence(),
                        steps_per_epoch=10,
                        validation_data=custom_generator(),
                        validation_steps=1,
                        max_queue_size=10,
                        workers=0,
                        use_multiprocessing=True)
    model.fit_generator(DummySequence(),
                        steps_per_epoch=10,
                        validation_data=custom_generator(),
                        validation_steps=1,
                        max_queue_size=10,
                        workers=0,
                        use_multiprocessing=False)
  @keras_parameterized.run_with_all_model_types
  @keras_parameterized.run_all_keras_modes
  @data_utils.dont_use_multiprocessing_pool
  def test_sequence_input_to_fit_eval_predict(self):
    """Sequences passed directly to fit/evaluate/predict; `y` and
    `sample_weight` kwargs must be rejected for Sequence inputs."""
    val_data = np.ones([10, 10], np.float32), np.ones([10, 1], np.float32)
    class CustomSequence(data_utils.Sequence):
      def __getitem__(self, idx):
        return np.ones([10, 10], np.float32), np.ones([10, 1], np.float32)
      def __len__(self):
        return 2
    class CustomSequenceChangingBatchSize(data_utils.Sequence):
      def __getitem__(self, idx):
        batch_size = 10 - idx
        return (np.ones([batch_size, 10], np.float32),
                np.ones([batch_size, 1], np.float32))
      def __len__(self):
        return 2
    model = testing_utils.get_small_mlp(
        num_hidden=10, num_classes=1, input_dim=10)
    model.compile(rmsprop.RMSprop(0.001), 'binary_crossentropy')
    model.fit(CustomSequence(), validation_data=val_data, epochs=2)
    model.evaluate(CustomSequence())
    model.predict(CustomSequence())
    with self.assertRaisesRegex(ValueError, '`y` argument is not supported'):
      model.fit(CustomSequence(), y=np.ones([10, 1]))
    with self.assertRaisesRegex(ValueError,
                                '`sample_weight` argument is not supported'):
      model.fit(CustomSequence(), sample_weight=np.ones([10, 1]))
    model.compile(rmsprop.RMSprop(0.001), 'binary_crossentropy')
    model.fit(CustomSequenceChangingBatchSize(),
              validation_data=val_data, epochs=2)
    model.evaluate(CustomSequenceChangingBatchSize())
    model.predict(CustomSequenceChangingBatchSize())
  @keras_parameterized.run_all_keras_modes(always_skip_v1=True)
  def test_sequence_on_epoch_end(self):
    """on_epoch_end must be invoked once per epoch."""
    class MySequence(data_utils.Sequence):
      def __init__(self):
        self.epochs = 0
      def __getitem__(self, idx):
        return np.ones([10, 10], np.float32), np.ones([10, 1], np.float32)
      def __len__(self):
        return 2
      def on_epoch_end(self):
        self.epochs += 1
    inputs = input_layer.Input(10)
    outputs = layers_module.Dense(1)(inputs)
    model = training.Model(inputs, outputs)
    model.compile('sgd', 'mse')
    my_seq = MySequence()
    model.fit(my_seq, epochs=2)
    self.assertEqual(my_seq.epochs, 2)
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
class TestConvertToGeneratorLike(test.TestCase, parameterized.TestCase):
  """Checks `convert_to_generator_like` on datasets, iterators, generators
  and numpy arrays, with simple and nested input structures."""
  simple_inputs = (np.ones((10, 10)), np.ones((10, 1)))
  nested_inputs = ((np.ones((10, 10)), np.ones((10, 20))), (np.ones((10, 1)),
                                                            np.ones((10, 3))))
  # Input factories — each turns `inputs` into one of the supported types.
  def _make_dataset(self, inputs, batches):
    return dataset_ops.DatasetV2.from_tensors(inputs).repeat(batches)
  def _make_iterator(self, inputs, batches):
    return dataset_ops.make_one_shot_iterator(
        self._make_dataset(inputs, batches))
  def _make_generator(self, inputs, batches):
    def _gen():
      for _ in range(batches):
        yield inputs
    return _gen()
  def _make_numpy(self, inputs, _):
    return inputs
  @parameterized.named_parameters(
      ('simple_dataset', _make_dataset, simple_inputs),
      ('simple_iterator', _make_iterator, simple_inputs),
      ('simple_generator', _make_generator, simple_inputs),
      ('simple_numpy', _make_numpy, simple_inputs),
      ('nested_dataset', _make_dataset, nested_inputs),
      ('nested_iterator', _make_iterator, nested_inputs),
      ('nested_generator', _make_generator, nested_inputs),
      ('nested_numpy', _make_numpy, nested_inputs))
  def test_convert_to_generator_like(self, input_fn, inputs):
    """The returned generator must yield `steps` batches matching the
    structure of the original inputs."""
    expected_batches = 5
    data = input_fn(self, inputs, expected_batches)
    # Dataset and Iterator not supported in Legacy Graph mode.
    if (not context.executing_eagerly() and
        isinstance(data, (dataset_ops.DatasetV2, iterator_ops.Iterator))):
      return
    generator, steps = training_generator_v1.convert_to_generator_like(
        data, batch_size=2, steps_per_epoch=expected_batches)
    self.assertEqual(steps, expected_batches)
    for _ in range(expected_batches):
      outputs = next(generator)
      nest.assert_same_structure(outputs, inputs)
# Allow running this test file directly with `python`.
if __name__ == '__main__':
  test.main()
| tensorflow/tensorflow | tensorflow/python/keras/engine/training_generator_test.py | Python | apache-2.0 | 19,479 |
import sys
import matplotlib
matplotlib.use("Agg")
from pylab import *
base = '../'
sys.path.append(base+"utils/Continuum")
sys.path.append(base+"utils/Correlation")
sys.path.append(base+"utils/GLOBALutils")
baryc_dir= base+'utils/SSEphem/'
sys.path.append(baryc_dir)
ephemeris='DEc403'
import matplotlib.pyplot as plt
# ecpipe modules
import continuum
import correlation
import fiesutils
import GLOBALutils
# other useful modules
from astropy.io import fits as pyfits
import pickle
import os
import numpy as np
import scipy
import scipy.interpolate
from math import radians as rad
import argparse
import warnings
warnings.filterwarnings("ignore")
import ephem
import jplephem
from matplotlib.backends.backend_pdf import PdfPages
import statsmodels.api as sm
lowess = sm.nonparametric.lowess
# ---------------- Command-line interface ----------------
parser = argparse.ArgumentParser()
parser.add_argument('directorio')  # directory containing the raw FIES frames
parser.add_argument('-o2do',default='all')  # object to reduce ('all' = every science frame)
parser.add_argument('-just_extract', action="store_true", default=False)  # stop after extraction
parser.add_argument('-do_class', action="store_true", default=False)  # run spectral classification
parser.add_argument('-avoid_plot', action="store_true", default=False)  # skip plotting
parser.add_argument('-npools', default=1)  # parallel workers for the extraction steps
parser.add_argument('-reffile',default='default')  # reference file (defaults to <dirin>/reffile.txt)
parser.add_argument('-dirout',default='default')  # output dir (defaults to <dirin>_red_<mode>_B<binning>/)
parser.add_argument('-binning', default=1)  # CCD binning factor
parser.add_argument('-fibre',default='default')  # FIES fibre mode (F1/F3/F4)
args = parser.parse_args()

DoClass = args.do_class
avoid_plot = args.avoid_plot
dirin = args.directorio
object2do = args.o2do
JustExtract = args.just_extract
npools = int(args.npools)
reffile = args.reffile
dirout = args.dirout
binning = int(args.binning)
mode = args.fibre

# Normalise the input path and create/clean the output directories.
if dirin[-1] != '/':
    dirin = dirin + '/'
if dirout == 'default':
    dirout = dirin[:-1]+'_red_'+mode+'_B'+str(int(binning))+'/'
if not os.access(dirout,os.F_OK):
    os.system('mkdir '+dirout)  # NOTE(review): os.makedirs/shutil would avoid shelling out
if os.access(dirout+'proc',os.F_OK):
    os.system('rm -r '+dirout+'proc')  # wipe products of a previous run
os.system('mkdir '+dirout+'proc')
f_res = open(dirout+'proc/'+'results.txt','w')  # summary file for the reduction results
if reffile == 'default':
    reffile = dirin+'reffile.txt'
####### GLOBAL VARIABLES #####
# Flags that force individual reduction stages to be redone even when
# products from a previous run already exist on disk.
force_pre_process = False
force_flat_extract = False
force_sci_extract = False
force_thar_extract = False
force_tharxc = False
force_thar_wavcal = False
force_spectral_file_build = True
force_stellar_pars = True
dumpargon = False  # drop Argon lines from the ThAr fits if True
dark_corr = True  # subtract master darks when dark frames are available
minlines_glob = 300  # minimum ThAr lines for the global wavelength fit
Inverse_m = True
use_cheby = True  # use Chebyshev polynomials for the wavelength solution
MRMS = 90  # max rms in m/s, global wav solution
trace_degree = 4  # polynomial degree of the echelle-order traces
Marsh_alg = 0
ext_aperture = 4  # extraction aperture half-width (pixels)
NSigma_Marsh = 5
NCosmic_Marsh = 5
S_Marsh = 0.4
N_Marsh = 3  # polynomial degree (Marsh optimal extraction)
min_extract_col = 5
max_extract_col = 2048
oii = 50  # first column kept after overscan trimming
off = 2098  # last column kept after overscan trimming
ncoef_x = 4  # Chebyshev coefficients along the pixel direction
ncoef_m = 6  # Chebyshev coefficients along the order direction
# Number of free parameters of the joint 2D wavelength solution.
# NOTE: relies on Python-2 integer division ('/ 2'); under Python 3 this
# expression would yield a float.
npar_wsol = (min(ncoef_x,ncoef_m) + 1) * (2*max(ncoef_x,ncoef_m) - min(ncoef_x,ncoef_m) + 2) / 2
sufix = '.iwdat'  # suffix of the per-order ThAr reference line lists
models_path = base+"data/COELHO_MODELS/R_40000b/"  # synthetic spectra for classification
order_dir = base+"fies/wavcals/"  # directory with the per-order line lists
n_useful = 80  # number of echelle orders considered useful
ro0 = 79  # absolute echelle-order number of the first order
bacap = 8  # span (pixels) used for the scattered-light estimation
# Fibre-mode dependent settings: spectral resolution and aperture width.
if mode == 'F1':
    resol = 25000
    MRMS = 200.
    ext_aperture = 6
elif mode == 'F3':
    resol = 50000
elif mode == 'F4':
    resol = 67000

# With 2x binning, halve the pixel-based quantities.
if binning == 2:
    ext_aperture /= 2  # Python-2 integer division keeps this an int
    min_extract_col = int(np.around(min_extract_col/2.))
    max_extract_col = int(np.around(max_extract_col/2.))
    bacap /= bacap  # NOTE(review): this always yields 1; 'bacap /= 2' was probably intended -- confirm
#############################
# file containing the log
log = dirout+'night.log'

print "\n\n\tFIES NOT2.5m PIPELINE\n"
print "\tRAW data is in ",dirin
print "\tProducts of reduction will be in",dirout
print '\n'

# Classify every raw frame in the input directory (biases, flats, ThAr
# exposures per fibre, science frames, darks) and write the night log.
biases, flats, ThAr_ref, sim_sci, ThAr_ref_dates, obnames, exptimes, darks, flats_co, flats_co_dates, ThAr_sim, ThAr_sim_dates, ThAr_co, ThAr_co_dates = fiesutils.FileClassify(dirin,log,binning=binning, mode=mode, dark_corr=dark_corr)

# Sort the reference ThAr exposures chronologically.
IS = np.argsort(ThAr_ref_dates)
ThAr_ref_dates = ThAr_ref_dates[IS]
ThAr_ref = ThAr_ref[IS]

# After MJD 57661 the instrument setup changed: switch to the newer
# line-list suffix and overscan window limits.
h = pyfits.open(flats[0])
mjd, mjd0 = fiesutils.mjd_fromheader2( h )
if mjd > 2457661.5 - 2400000.5:
    sufix = '.new.iwdat'
    oii = 100
    off = 2048

print '\tThese are all the images to proccess:'
f = open(log)
flines = f.readlines()
for line in flines:
    print '\t'+line[:-1]
print '\n'
if ( (os.access(dirout+'Flat.fits',os.F_OK) == False) or \
(os.access(dirout+'trace.pkl',os.F_OK) == False) or \
(os.access(dirout+'MasterBias.fits',os.F_OK) == False) or \
(force_pre_process) ):
print "\tNo previous pre-processing files or found"
pre_process = 1
else:
print "\tPre-processing files found, going straight to extraction"
pre_process = 0
if (pre_process == 1):
# median combine Biases
print "\tGenerating Master calibration frames..."
if len(biases)>0:
MasterBias, RO_bias, GA_bias = fiesutils.MedianCombine(biases, binning=binning, off=off, oii=oii)
else:
MasterBias = np.zeros(fiesutils.OverscanTrim(pyfits.getdata(flats[0]),binning=binning).shape,ii=oii,ff=off)
RO_bias = 0.
GA_bias = 1.
print "\t\t-> Masterbias: done!"
hdu = pyfits.PrimaryHDU( MasterBias )
if (os.access(dirout+'MasterBias.fits',os.F_OK)):
os.remove(dirout+'MasterBias.fits')
hdu.writeto(dirout+'MasterBias.fits')
dark_names = []
dark_utimes = []
if dark_corr and len(darks)>0:
dark_utimes, dark_times = fiesutils.get_darktimes(darks)
for tim in dark_utimes:
I = np.where(dark_times == tim)[0]
dark,ron_d,gain_d = fiesutils.MedianCombine(darks[I], zero=dirout+'MasterBias.fits',binning=binning, off=off, oii=oii)
hdu = pyfits.PrimaryHDU(dark)
dark_names.append(dirout+'Dark_'+str(int(tim))+'.fits')
if (os.access(dark_names[-1],os.F_OK)):
os.remove(dark_names[-1])
hdu.writeto(dark_names[-1])
print "\t\t-> MasterDarks: done!"
dark_names, dark_utimes = np.array(dark_names), np.array(dark_utimes)
# median combine list of flats
Flat, RO_fl, GA_fl = fiesutils.MedianCombine(flats, zero=dirout+'MasterBias.fits',binning=binning, off=off, oii=oii)
hdu = pyfits.PrimaryHDU(Flat)
if (os.access(dirout+'Flat.fits',os.F_OK)):
os.remove(dirout+'Flat.fits')
hdu.writeto(dirout+'Flat.fits')
if len(flats_co)>0:
Flat_co, RO_fl_co, GA_fl_co = fiesutils.MedianCombine(flats_co, zero=dirout+'MasterBias.fits',binning=binning, off=off, oii=oii)
hdu = pyfits.PrimaryHDU(Flat_co)
if (os.access(dirout+'Flat_co.fits',os.F_OK)):
os.remove(dirout+'Flat_co.fits')
hdu.writeto(dirout+'Flat_co.fits')
else:
c_co = []
nord_co = 0
print "\t\t-> Masterflats: done!"
print "\tTracing echelle orders..."
c_all,nord = GLOBALutils.get_them(Flat.T, ext_aperture, trace_degree, maxords=-1, mode=1,nsigmas=5.)
c_all,nord = GLOBALutils.good_orders(c_all,nord,Flat.shape[1],Flat.shape[0],ext_aperture)
print '\t\t'+ str(nord)+' orders found.'
if len(flats_co)>0:
c_co, nord_co = GLOBALutils.get_them(Flat_co.T, ext_aperture, trace_degree, maxords=-1, mode=1,nsigmas=5.)
c_co, nord_co = GLOBALutils.good_orders(c_co,nord_co,Flat_co.shape[1],Flat_co.shape[0],ext_aperture)
print '\t\t'+ str(nord_co)+' comparison orders found.'
# pickle traces
trace_dict = {'c_all':c_all, 'c_co':c_co,
'nord':nord, 'nord_co':nord_co,
'GA_bias': GA_bias, 'RO_bias' : RO_bias,
'GA_fl': GA_fl, 'RO_fl': RO_fl,
'dark_names':dark_names, 'dark_utimes':dark_utimes}
pickle.dump( trace_dict, open( dirout+"trace.pkl", 'w' ) )
else:
    # Reload everything produced by a previous pre-processing run.
    trace_dict = pickle.load( open( dirout+"trace.pkl", 'r' ) )
    c_all = trace_dict['c_all']
    nord = trace_dict['nord']
    c_co = trace_dict['c_co']
    nord_co = trace_dict['nord_co']
    # recover GA*, RO* (detector gain and readout noise values)
    GA_bias = trace_dict['GA_bias']
    RO_bias = trace_dict['RO_bias']
    GA_fl = trace_dict['GA_fl']
    RO_fl = trace_dict['RO_fl']
    if dark_corr and len(darks)>0:
        dark_utimes = trace_dict['dark_utimes']
        dark_names = trace_dict['dark_names']

# recover flats & master bias (re-read in both branches)
h = pyfits.open(dirout+'Flat.fits')
Flat = h[0].data
if len(c_co)>0:
    h = pyfits.open(dirout+'Flat_co.fits')
    Flat_co = h[0].data
h = pyfits.open(dirout+'MasterBias.fits')
MasterBias = h[0].data
print '\n\tExtraction of Flat calibration frames:'
# Merged trace set (object + comparison) used later for the simultaneous frames.
if len(c_co)>0:
    c_tot = GLOBALutils.Mesh(c_all,c_co)

P_fits = dirout + 'P.fits'
S_flat_fits = dirout +'flat.fits'
S_flat = np.zeros((nord, 3, Flat.shape[1]) )
if ( os.access(P_fits,os.F_OK) == False ) or \
   ( os.access(S_flat_fits,os.F_OK) == False ) or \
   (force_flat_extract):
    print "\t\tNo extracted flat object spectra found or extraction forced, extracting and saving..."
    # Evaluate the trace polynomials and estimate the scattered-light
    # background, which is subtracted before extraction.
    Centers = np.zeros((len(c_all),Flat.shape[0]))
    for i in range(nord):
        Centers[i,:]=scipy.polyval(c_all[i,:],np.arange(len(Centers[i,:])))
    bac = GLOBALutils.get_scat(Flat.T,Centers,span=bacap)
    fl = Flat.T - bac
    bacfile = dirout + 'BAC_FLAT.fits'
    if (os.access(bacfile,os.F_OK)):
        os.remove( bacfile )
    hdbac = pyfits.PrimaryHDU( bac )
    hdbac.writeto(bacfile)
    print "\t\tWill extract",nord,"orders for object fibre..."
    # Marsh optimal-extraction: spatial profile P, then the spectra.
    P = GLOBALutils.obtain_P(fl,c_all,ext_aperture,RO_fl,\
        GA_fl,NSigma_Marsh, S_Marsh, \
        N_Marsh, Marsh_alg, min_extract_col,\
        max_extract_col, npools)
    if (os.access(P_fits,os.F_OK)):
        os.remove( P_fits )
    hdu = pyfits.PrimaryHDU( P )
    hdu.writeto( P_fits )
    S_flat = GLOBALutils.optimal_extraction(fl,P,c_all,ext_aperture,RO_fl,GA_fl,\
        S_Marsh,10*NCosmic_Marsh,min_extract_col,max_extract_col,npools)
    # Reverse the order stacking and the pixel direction of each order.
    S_flat = S_flat[::-1]
    S_flat = GLOBALutils.invert(S_flat)
    if (os.access(S_flat_fits,os.F_OK)):
        os.remove( S_flat_fits )
    hdu = pyfits.PrimaryHDU( S_flat )
    hdu.writeto( S_flat_fits )
else:
    print "\t\tExtracted flat object spectra found, loading..."
    P = pyfits.getdata( P_fits )
    S_flat = pyfits.getdata( S_flat_fits )
if nord_co>0:
P_co_fits = dirout + 'P_co.fits'
S_flat_co_fits = dirout +'flat_co.fits'
S_flat_co = np.zeros((nord_co, 3, Flat_co.shape[1]) )
if ( os.access(P_co_fits,os.F_OK) == False ) or \
( os.access(S_flat_co_fits,os.F_OK) == False ) or \
(force_flat_extract):
print "\t\tNo extracted flat comparison spectra found or extraction forced, extracting and saving..."
Centers = np.zeros((len(c_co),Flat_co.shape[0]))
for i in range(nord_co):
Centers[i,:]=scipy.polyval(c_co[i,:],np.arange(len(Centers[i,:])))
bac = GLOBALutils.get_scat(Flat_co.T,Centers,span=bacap)
fl = Flat_co.T - bac
bacfile = dirout + 'BAC_FLAT_CO.fits'
if (os.access(bacfile,os.F_OK)):
os.remove( bacfile )
hdbac = pyfits.PrimaryHDU( bac )
hdbac.writeto(bacfile)
print "\t\tWill extract",nord_co,"orders for comparison fibre..."
P_co = GLOBALutils.obtain_P(fl,c_co,ext_aperture,RO_fl,\
GA_fl,NSigma_Marsh, S_Marsh, \
N_Marsh, Marsh_alg, min_extract_col,\
max_extract_col, npools)
if (os.access(P_co_fits,os.F_OK)):
os.remove( P_co_fits )
hdu = pyfits.PrimaryHDU( P_co )
hdu.writeto( P_co_fits )
S_flat_co = GLOBALutils.optimal_extraction(fl,P_co,c_co,ext_aperture,RO_fl,GA_fl,\
S_Marsh,10*NCosmic_Marsh,min_extract_col,max_extract_col,npools)
S_flat_co = S_flat_co[::-1]
S_flat_co = GLOBALutils.invert(S_flat_co)
if (os.access(S_flat_co_fits,os.F_OK)):
os.remove( S_flat_co_fits )
hdu = pyfits.PrimaryHDU( S_flat_co )
hdu.writeto( S_flat_co_fits )
else:
print "\t\tExtracted flat object spectra found, loading..."
P_co = pyfits.getdata( P_co_fits )
S_flat_co = pyfits.getdata( S_flat_co_fits )
# Normalize flat field spectra, using the central column as reference.
S_flat_n, Snorms = GLOBALutils.FlatNormalize_single( S_flat, mid = int(.5*S_flat.shape[2]) )
if nord_co>0:
    S_flat_co_n, Snorms_co = GLOBALutils.FlatNormalize_single( S_flat_co, mid = int(.5*S_flat_co.shape[2]) )
print '\n\tExtraction of ThAr calibration frames:'
# Extract all ThAr files
for fsim in ThAr_ref:
    thar_fits = dirout + fsim.split('/')[-1][:-4]+'spec.fits.S'
    thar_simple_fits = dirout + fsim.split('/')[-1][:-4]+'spec.simple.fits.S'
    if ( os.access(thar_simple_fits,os.F_OK) == False ) or ( force_thar_extract ):
        hthar = pyfits.open( fsim )
        dthar = fiesutils.OverscanTrim( hthar[1].data, binning=binning, ii=oii, ff=off ) - MasterBias
        # Evaluate the traces and subtract the scattered-light background.
        Centers = np.zeros((len(c_all),dthar.shape[0]))
        for i in range(nord):
            Centers[i,:]=scipy.polyval(c_all[i,:],np.arange(len(Centers[i,:])))
        bac = GLOBALutils.get_scat(dthar.T,Centers,span=ext_aperture,option=1,allow_neg=True)
        sdthar = dthar.T - bac
        print "\t\tNo previous extraction or extraction forced for ThAr file", fsim, "extracting..."
        # NOTE(review): thar_S is written to 'thar_fits' below but is never
        # filled in -- it stays all zeros; only thar_Ss holds the extraction.
        thar_S = np.zeros( (nord,3,dthar.shape[0]) )
        thar_Ss = np.zeros( (nord,dthar.shape[0]) )
        tR,tG = fiesutils.get_RONGAIN(hthar[1].header)
        thar_Ss = GLOBALutils.simple_extraction(sdthar,c_all,ext_aperture,\
            min_extract_col,max_extract_col,npools)
        # Reverse the order stacking and the pixel direction of each order.
        thar_Ss = thar_Ss[::-1]
        thar_Ss = GLOBALutils.invert(thar_Ss)
        if (os.access(thar_fits,os.F_OK)):
            os.remove( thar_fits )
        if (os.access(thar_simple_fits,os.F_OK)):
            os.remove( thar_simple_fits )
        hdu = pyfits.PrimaryHDU( thar_S )
        hdu.writeto( thar_fits )
        hdu = pyfits.PrimaryHDU( thar_Ss )
        hdu.writeto( thar_simple_fits )
    else:
        print "\t\tThAr file", fsim, "all ready extracted, loading..."
# Extract all ThAr files of the comparison fibre.
for fsim in ThAr_co:
    thar_co_fits = dirout + fsim.split('/')[-1][:-4]+'spec.co.fits.S'
    thar_co_simple_fits = dirout + fsim.split('/')[-1][:-4]+'spec.co.simple.fits.S'
    if ( os.access(thar_co_simple_fits,os.F_OK) == False ) or ( force_thar_extract ):
        hthar = pyfits.open( fsim )
        dthar = fiesutils.OverscanTrim( hthar[1].data, binning=binning, ii=oii, ff=off ) - MasterBias
        # Evaluate the comparison-fibre traces and subtract the background.
        Centers = np.zeros((len(c_co),dthar.shape[0]))
        for i in range(nord_co):
            Centers[i,:]=scipy.polyval(c_co[i,:],np.arange(len(Centers[i,:])))
        bac = GLOBALutils.get_scat(dthar.T,Centers,span=ext_aperture+2)
        sdthar = dthar.T - bac
        print "\t\tNo previous extraction or extraction forced for ThAr file", fsim, "extracting..."
        # NOTE(review): thar_S is written out below but never filled (zeros);
        # only thar_Ss holds the actual extraction.
        thar_S = np.zeros( (nord_co,3,dthar.shape[0]) )
        thar_Ss = np.zeros( (nord_co,dthar.shape[0]) )
        tR,tG = fiesutils.get_RONGAIN(hthar[1].header)
        #print tR,tG
        thar_Ss = GLOBALutils.simple_extraction(sdthar,c_co,ext_aperture,\
            min_extract_col,max_extract_col,npools)
        thar_Ss = thar_Ss[::-1]
        thar_Ss = GLOBALutils.invert(thar_Ss)
        if (os.access(thar_co_fits,os.F_OK)):
            os.remove( thar_co_fits )
        if (os.access(thar_co_simple_fits,os.F_OK)):
            os.remove( thar_co_simple_fits )
        hdu = pyfits.PrimaryHDU( thar_S )
        hdu.writeto( thar_co_fits )
        hdu = pyfits.PrimaryHDU( thar_Ss )
        hdu.writeto( thar_co_simple_fits )
    else:
        print "\t\tThAr file", fsim, "all ready extracted, loading..."
for fsim in ThAr_sim:
thar_fits = dirout + fsim.split('/')[-1][:-4]+'spec.fits.S'
thar_simple_fits = dirout + fsim.split('/')[-1][:-4]+'spec.simple.fits.S'
thar_co_fits = dirout + fsim.split('/')[-1][:-4]+'spec.co.fits.S'
thar_co_simple_fits = dirout + fsim.split('/')[-1][:-4]+'spec.co.simple.fits.S'
if ( os.access(thar_simple_fits,os.F_OK) == False ) or ( force_thar_extract ):
hthar = pyfits.open( fsim )
dthar = fiesutils.OverscanTrim( hthar[1].data, binning=binning, ii=oii, ff=off ) - MasterBias
Centers = np.zeros((len(c_tot),dthar.shape[0]))
ccc = c_tot.copy()
for i in range(Centers.shape[0]):
Centers[i,:]=scipy.polyval(ccc[i,:],np.arange(len(Centers[i,:])))
bac = GLOBALutils.get_scat(dthar.T,Centers,span=ext_aperture,option=1)
sdthar = dthar.T - bac
print "\t\tNo previous extraction or extraction forced for simultaneous ThAr file", fsim, "extracting..."
thar_S = np.zeros( (nord,3,dthar.shape[0]) )
thar_Ss = np.zeros( (nord,dthar.shape[0]) )
thar_S_co = np.zeros( (nord_co,3,dthar.shape[0]) )
thar_S_co = np.zeros( (nord_co,3,dthar.shape[0]) )
tR,tG = fiesutils.get_RONGAIN(hthar[1].header)
#print tR,tG
thar_Ss = GLOBALutils.simple_extraction(sdthar,c_all,ext_aperture,\
min_extract_col,max_extract_col,npools)
thar_Ss_co = GLOBALutils.simple_extraction(sdthar,c_co,ext_aperture,\
min_extract_col,max_extract_col,npools)
thar_Ss = thar_Ss[::-1]
thar_Ss = GLOBALutils.invert(thar_Ss)
if (os.access(thar_fits,os.F_OK)):
os.remove( thar_fits )
if (os.access(thar_simple_fits,os.F_OK)):
os.remove( thar_simple_fits )
thar_Ss_co = thar_Ss_co[::-1]
thar_Ss_co = GLOBALutils.invert(thar_Ss_co)
if (os.access(thar_co_fits,os.F_OK)):
os.remove( thar_co_fits )
if (os.access(thar_co_simple_fits,os.F_OK)):
os.remove( thar_co_simple_fits )
hdu = pyfits.PrimaryHDU( thar_S )
hdu.writeto( thar_fits )
hdu = pyfits.PrimaryHDU( thar_Ss )
hdu.writeto( thar_simple_fits )
hdu = pyfits.PrimaryHDU( thar_S_co )
hdu.writeto( thar_co_fits )
hdu = pyfits.PrimaryHDU( thar_Ss_co )
hdu.writeto( thar_co_simple_fits )
else:
print "\t\tThAr file", fsim, "all ready extracted, loading..."
"""
p0 = np.zeros( npar_wsol )
#p0[0] = (int(.5*n_useful)+ro0) * Global_ZP
dat = np.loadtxt('wavcals/initial.txt')
p1, G_pix, G_ord, G_wav, II, rms_ms, G_res = GLOBALutils.Fit_Global_Wav_Solution(dat[:,2], dat[:,1],\
dat[:,0], np.ones(len(dat[:,0])), p0, Cheby=use_cheby, \
order0=79, ntotal=nord, maxrms=100000, Inv=Inverse_m, minlines=10, \
npix=S_flat_n.shape[2],nx=3,nm=4)
#f = open('thar_list.txt','r')
f = open('lovis.txt','r')
lines = f.readlines()
wv,tp = [],[]
for line in lines:
cos = line.split()
wv.append(float(cos[0]))
if len(cos)==4:
tp.append(cos[3])
else:
tp.append(' ')
wv,tp = np.array(wv),np.array(tp)
sc =pyfits.getdata(dirout + 'FIyh230033.spec.simple.fits.S')
for i in range(sc.shape[0]):
print i
sorder = str(i)
if i <10:
sorder = '0'+sorder
fout = open('wavcals/order_'+sorder+'.iwdat','w')
m = i + 79
ejx = np.arange(sc.shape[1])
ejy = sc[i]
chebs = GLOBALutils.Calculate_chebs(ejx, np.zeros(len(ejx))+i+79,Inverse=True,order0=79,ntotal=nord,npix=len(ejx),nx=ncoef_x,nm=ncoef_m)
wavsol = GLOBALutils.ToVacuum(1.0/float(m) * GLOBALutils.Joint_Polynomial_Cheby(p1,chebs,ncoef_x,ncoef_m))
print wavsol
tck = scipy.interpolate.splrep(wavsol,ejx,k=3)
I = np.where((wv>wavsol[50])&(wv<wavsol[-50]))[0]
wlist,plist,tlist = [],[],[]
write= False
if len(I)>0:
px = np.around(scipy.interpolate.splev(wv[I],tck)).astype('int')
tpx,twv = [],[]
for o in range(len(px)):
if ejy[px[o]]> 100:
if len(tlist) == 0:
wlist.append(wv[I][o])
plist.append(px[o])
tlist.append(tp[I][o])
else:
if px[o] - plist[-1] < 8:
wlist.append(wv[I][o])
plist.append(px[o])
tlist.append(tp[I][o])
else:
write = True
if write:
lout = str(len(tlist))
for ix in range(len(wlist)):
lout += '\t'+str(plist[ix])+'\t'+str(wlist[ix])
for ix in range(len(wlist)):
lout += '\t'+str(tlist[ix])
lout+='\n'
fout.write(lout)
wlist,plist,tlist = [],[],[]
write=False
#plot(ejx,ejy)
#plot(tpx,ejy[tpx],'ro')
#show()
#print ghj
fout.close()
plot(G_wav,G_res,'r.')
show()
print p1
"""
print "\n\tWavelength solution of ThAr calibration spectra of object fibre:"
for fsim in ThAr_ref:
wavsol_pkl = dirout + fsim.split('/')[-1][:-4]+'wavsolpars.pkl'
h = pyfits.open(fsim)
hd = pyfits.getheader(fsim)
ron,gain = fiesutils.get_RONGAIN(h[1].header)
if ( os.access(wavsol_pkl,os.F_OK) == False ) or (force_thar_wavcal):
print "\t\tWorking on simple ThAr file", fsim
hthar = pyfits.open( fsim )
mjd, mjd0 = fiesutils.mjd_fromheader2( hthar )
thar_fits = dirout + fsim.split('/')[-1][:-4]+'spec.simple.fits.S'
thar_S = pyfits.getdata( thar_fits )
lines_thar = thar_S.copy()
All_Pixel_Centers = np.array([])
All_Wavelengths = np.array([])
All_Orders = np.array([])
All_Centroids = np.array([])
All_Sigmas = np.array([])
All_Intensities = np.array([])
All_residuals = np.array([])
orders_offset, rough_shift = fiesutils.get_thar_offsets(lines_thar,binning=binning, delt_or=25,suf=sufix)
print 'orders_ofset:',orders_offset
print 'rough_shift:',rough_shift
orderi = 0
if orders_offset < 0:
orderi = - orders_offset
orderf = nord - 1
if orderf + orders_offset >= n_useful:
orderf = n_useful - orders_offset - 1
for order in range(orderi, orderf+1):
order_s = str(order+orders_offset)
if (order + orders_offset < 10):
order_s = '0' + str(order+orders_offset)
f = open(order_dir+'order_'+order_s+sufix,'r')
llins = f.readlines()
if len(llins)>5:
thar_order_orig = lines_thar[order]
#IV = iv_thar_ob_R[order,:]
L = np.where(thar_order_orig != 0)[0]
IV = 1. / (thar_order_orig / gain + (ron/gain)**2 )
IV[L] = 0.
wei = np.ones(len(thar_order_orig)) #np.sqrt( IV )
bkg = scipy.signal.medfilt(thar_order_orig,101)
thar_order = thar_order_orig - bkg
coeffs_pix2wav, coeffs_pix2sigma, pixel_centers, wavelengths, rms_ms, residuals, centroids,sigmas, intensities \
= GLOBALutils.Initial_Wav_Calibration( order_dir+'order_'+order_s+sufix, thar_order, order, wei, \
rmsmax=500, minlines=10, FixEnds=False,Dump_Argon=dumpargon, Dump_AllLines=True, line_width=6, Cheby=use_cheby,porder=3,rough_shift=rough_shift,binning=binning,del_width=5.,do_xc=False)
if (order == int(.5*n_useful)):
if (use_cheby):
Global_ZP = GLOBALutils.Cheby_eval( coeffs_pix2wav, 0.5*len(thar_order), len(thar_order) )
else:
Global_ZP = scipy.polyval( coeffs_pix2wav, 0.0 )
All_Pixel_Centers = np.append( All_Pixel_Centers, pixel_centers )
All_Wavelengths = np.append( All_Wavelengths, wavelengths )
All_Orders = np.append( All_Orders, np.zeros( len(pixel_centers) ) + order)
All_Centroids = np.append( All_Centroids, centroids)
All_Sigmas = np.append( All_Sigmas, sigmas)
All_Intensities = np.append( All_Intensities, intensities )
All_residuals = np.append( All_residuals, residuals )
p0 = np.zeros( npar_wsol )
p0[0] = int(.5*n_useful) * Global_ZP
p1, G_pix, G_ord, G_wav, II, rms_ms, G_res = GLOBALutils.Fit_Global_Wav_Solution(All_Pixel_Centers, All_Wavelengths,\
All_Orders, np.ones(All_Intensities.shape), p0, Cheby=use_cheby, \
order0=ro0+orders_offset, ntotal=n_useful, maxrms=MRMS, Inv=Inverse_m, minlines=minlines_glob, \
npix=len(thar_order),nx=ncoef_x,nm=ncoef_m)
pdict = {'orders_offset':orders_offset,'rough_shift':rough_shift,'p1':p1,'mjd':mjd, 'G_pix':G_pix, 'G_ord':G_ord, 'G_wav':G_wav, 'II':II, 'rms_ms':rms_ms,\
'G_res':G_res, 'All_Centroids':All_Centroids, 'All_Wavelengths':All_Wavelengths, \
'All_Orders':All_Orders, 'All_Pixel_Centers':All_Pixel_Centers, 'All_Sigmas':All_Sigmas}
pickle.dump( pdict, open(wavsol_pkl, 'w' ) )
else:
print "\t\tUsing previously computed wavelength solution in file",wavsol_pkl
pdict = pickle.load(open(wavsol_pkl,'r'))
print "\n\tWavelength solution of ThAr calibration spectra of comparison fibre:"
for fsim in ThAr_co:
wavsol_pkl = dirout + fsim.split('/')[-1][:-4]+'wavsolpars.pkl'
h = pyfits.open(fsim)
mjd, mjd0 = fiesutils.mjd_fromheader2( h )
hd = pyfits.getheader(fsim)
ron,gain = fiesutils.get_RONGAIN(h[1].header)
if ( os.access(wavsol_pkl,os.F_OK) == False ) or (force_thar_wavcal):
print "\t\tWorking on simple ThAr file", fsim
hthar = pyfits.open( fsim )
thar_fits = dirout + fsim.split('/')[-1][:-4]+'spec.co.simple.fits.S'
thar_S = pyfits.getdata( thar_fits )
lines_thar = thar_S.copy()
All_Pixel_Centers = np.array([])
All_Wavelengths = np.array([])
All_Orders = np.array([])
All_Centroids = np.array([])
All_Sigmas = np.array([])
All_Intensities = np.array([])
All_residuals = np.array([])
orders_offset, rough_shift = fiesutils.get_thar_offsets(lines_thar,binning=binning,suf=sufix,delt_or=25)
print 'orders_ofset:',orders_offset
print 'rough_shift:',rough_shift
orderi = 0
if orders_offset < 0:
orderi = - orders_offset
orderf = nord_co - 1
if orderf + orders_offset >= n_useful:
orderf = n_useful - orders_offset - 1
for order in range(orderi,orderf+1):
order_s = str(order+orders_offset)
if (order + orders_offset < 10):
order_s = '0' + str(order+orders_offset)
f = open(order_dir+'order_'+order_s+sufix,'r')
llins = f.readlines()
if len(llins)>5:
thar_order_orig = lines_thar[order]
L = np.where(thar_order_orig != 0)[0]
IV = 1. / (thar_order_orig / gain + (ron/gain)**2 )
IV[L] = 0.
wei = np.ones(len(thar_order_orig)) #np.sqrt( IV )
bkg = scipy.signal.medfilt(thar_order_orig,101)
thar_order = thar_order_orig - bkg
coeffs_pix2wav, coeffs_pix2sigma, pixel_centers, wavelengths, rms_ms, residuals, centroids,sigmas, intensities \
= GLOBALutils.Initial_Wav_Calibration( order_dir+'order_'+order_s+sufix, thar_order, order, wei, \
rmsmax=500, minlines=10, FixEnds=False,Dump_Argon=dumpargon, Dump_AllLines=True, line_width=6, Cheby=use_cheby,porder=3,rough_shift=rough_shift,binning=binning,del_width=5.,do_xc=False)
if (order == int(.5*n_useful)):
if (use_cheby):
Global_ZP = GLOBALutils.Cheby_eval( coeffs_pix2wav, 0.5*len(thar_order), len(thar_order) )
else:
Global_ZP = scipy.polyval( coeffs_pix2wav, 0.0 )
All_Pixel_Centers = np.append( All_Pixel_Centers, pixel_centers )
All_Wavelengths = np.append( All_Wavelengths, wavelengths )
All_Orders = np.append( All_Orders, np.zeros( len(pixel_centers) ) + order)
All_Centroids = np.append( All_Centroids, centroids)
All_Sigmas = np.append( All_Sigmas, sigmas)
All_Intensities = np.append( All_Intensities, intensities )
All_residuals = np.append( All_residuals, residuals )
p0 = np.zeros( npar_wsol )
p0[0] = int(.5*n_useful) * Global_ZP
p1, G_pix, G_ord, G_wav, II, rms_ms, G_res = GLOBALutils.Fit_Global_Wav_Solution(All_Pixel_Centers, All_Wavelengths,\
All_Orders, np.ones(All_Intensities.shape), p0, Cheby=use_cheby, \
order0=ro0+orders_offset, ntotal=n_useful, maxrms=MRMS, Inv=Inverse_m, minlines=minlines_glob, \
npix=len(thar_order),nx=ncoef_x,nm=ncoef_m)
I = np.argmin((ThAr_ref_dates - mjd)**2)
pdict = {'orders_offset_co':orders_offset,'rough_shift_co':rough_shift,'p1_co':p1,'mjd_co':mjd,\
'G_pix_co':G_pix, 'G_ord_co':G_ord, 'G_wav_co':G_wav, 'II_co':II, 'rms_ms_co':rms_ms,\
'G_res_co':G_res, 'All_Centroids_co':All_Centroids, 'All_Wavelengths_co':All_Wavelengths,\
'All_Orders_co':All_Orders, 'All_Pixel_Centers_co':All_Pixel_Centers, 'All_Sigmas_co':All_Sigmas,\
'ref_thar_ob':ThAr_ref[I]}
pickle.dump( pdict, open(wavsol_pkl, 'w' ) )
else:
print "\t\tUsing previously computed wavelength solution in file",wavsol_pkl
pdict = pickle.load(open(wavsol_pkl,'r'))
for fsim in ThAr_sim:
wavsol_pkl = dirout + fsim.split('/')[-1][:-4]+'wavsolpars.pkl'
h = pyfits.open(fsim)
hd = pyfits.getheader(fsim)
ron,gain = fiesutils.get_RONGAIN(h[1].header)
if ( os.access(wavsol_pkl,os.F_OK) == False ) or (force_thar_wavcal):
print "\t\tWorking on sim ThAr file", fsim
hthar = pyfits.open( fsim )
mjd, mjd0 = fiesutils.mjd_fromheader2( hthar )
thar_fits = dirout + fsim.split('/')[-1][:-4]+'spec.simple.fits.S'
thar_S = pyfits.getdata( thar_fits )
lines_thar = thar_S.copy()
All_Pixel_Centers = np.array([])
All_Wavelengths = np.array([])
All_Orders = np.array([])
All_Centroids = np.array([])
All_Sigmas = np.array([])
All_Intensities = np.array([])
All_residuals = np.array([])
xcs = []
orders_offset, rough_shift = fiesutils.get_thar_offsets(lines_thar,binning=binning,suf=sufix,delt_or=25)
print 'orders_ofset:',orders_offset
print 'rough_shift:',rough_shift
orderi = 0
if orders_offset < 0:
orderi = - orders_offset
orderf = nord - 1
if orderf + orders_offset >= n_useful:
orderf = n_useful - orders_offset - 1
for order in range(orderi,orderf+1):
order_s = str(order+orders_offset)
if (order + orders_offset < 10):
order_s = '0' + str(order+orders_offset)
f = open(order_dir+'order_'+order_s+sufix,'r')
llins = f.readlines()
if len(llins)>5:
thar_order_orig = lines_thar[order]
L = np.where(thar_order_orig != 0)[0]
IV = 1. / (thar_order_orig / gain + (ron/gain)**2 )
IV[L] = 0.
wei = np.ones(len(thar_order_orig)) #np.sqrt( IV )
bkg = scipy.signal.medfilt(thar_order_orig,101)
thar_order = thar_order_orig - bkg
coeffs_pix2wav, coeffs_pix2sigma, pixel_centers, wavelengths, rms_ms, residuals, centroids,sigmas, intensities \
= GLOBALutils.Initial_Wav_Calibration( order_dir+'order_'+order_s+sufix, thar_order, order, wei, \
rmsmax=500, minlines=10, FixEnds=False,Dump_Argon=dumpargon, Dump_AllLines=True, line_width=6, Cheby=use_cheby,porder=3,rough_shift=rough_shift,binning=binning,del_width=5.,do_xc=False)
if (order == int(.5*n_useful)):
if (use_cheby):
Global_ZP = GLOBALutils.Cheby_eval( coeffs_pix2wav, 0.5*len(thar_order), len(thar_order) )
else:
Global_ZP = scipy.polyval( coeffs_pix2wav, 0.0 )
All_Pixel_Centers = np.append( All_Pixel_Centers, pixel_centers )
All_Wavelengths = np.append( All_Wavelengths, wavelengths )
All_Orders = np.append( All_Orders, np.zeros( len(pixel_centers) ) + order)
All_Centroids = np.append( All_Centroids, centroids)
All_Sigmas = np.append( All_Sigmas, sigmas)
All_Intensities = np.append( All_Intensities, intensities )
All_residuals = np.append( All_residuals, residuals )
p0 = np.zeros( npar_wsol )
p0[0] = int(.5*n_useful) * Global_ZP
p1, G_pix, G_ord, G_wav, II, rms_ms, G_res = GLOBALutils.Fit_Global_Wav_Solution(All_Pixel_Centers, All_Wavelengths,\
All_Orders, np.ones(All_Intensities.shape), p0, Cheby=use_cheby, \
order0=ro0+orders_offset, ntotal=n_useful, maxrms=MRMS, Inv=Inverse_m, minlines=minlines_glob, \
npix=len(thar_order),nx=ncoef_x,nm=ncoef_m)
thar_co_fits = dirout + fsim.split('/')[-1][:-4]+'spec.co.simple.fits.S'
thar_S = pyfits.getdata( thar_co_fits )
lines_thar = thar_S.copy()
All_Pixel_Centers_co = np.array([])
All_Wavelengths_co = np.array([])
All_Orders_co = np.array([])
All_Centroids_co = np.array([])
All_Sigmas_co = np.array([])
All_Intensities_co = np.array([])
All_residuals_co = np.array([])
orders_offset_co, rough_shift_co = fiesutils.get_thar_offsets(lines_thar,binning=binning,suf=sufix)
print 'orders_ofset_co:',orders_offset_co
print 'rough_shift_co:',rough_shift_co
orderi = 0
if orders_offset_co < 0:
orderi = - orders_offset_co
orderf = nord_co - 1
if orderf + orders_offset_co >= n_useful:
orderf = n_useful - orders_offset_co - 1
for order in range(orderi,orderf+1):
#print order
order_s = str(order+orders_offset_co)
if (order + orders_offset_co < 10):
order_s = '0' + str(order+orders_offset_co)
f = open(order_dir+'order_'+order_s+sufix,'r')
llins = f.readlines()
if len(llins)>5:
thar_order_orig = lines_thar[order]
#IV = iv_thar_ob_R[order,:]
L = np.where(thar_order_orig != 0)[0]
IV = 1. / (thar_order_orig / gain + (ron/gain)**2 )
IV[L] = 0.
wei = np.ones(len(thar_order_orig)) #np.sqrt( IV )
bkg = scipy.signal.medfilt(thar_order_orig,101)
thar_order = thar_order_orig - bkg
coeffs_pix2wav, coeffs_pix2sigma, pixel_centers, wavelengths, rms_ms, residuals, centroids,sigmas, intensities \
= GLOBALutils.Initial_Wav_Calibration( order_dir+'order_'+order_s+sufix, thar_order, order, wei, \
rmsmax=500, minlines=10, FixEnds=False,Dump_Argon=dumpargon, Dump_AllLines=True, line_width=6, Cheby=use_cheby,porder=3,rough_shift=rough_shift,binning=binning,del_width=5.,do_xc=False)
if (order == int(.5*n_useful)):
if (use_cheby):
Global_ZP = GLOBALutils.Cheby_eval( coeffs_pix2wav, 0.5*len(thar_order), len(thar_order) )
else:
Global_ZP = scipy.polyval( coeffs_pix2wav, 0.0 )
All_Pixel_Centers_co = np.append( All_Pixel_Centers_co, pixel_centers )
All_Wavelengths_co = np.append( All_Wavelengths_co, wavelengths )
All_Orders_co = np.append( All_Orders_co, np.zeros( len(pixel_centers) ) + order)
All_Centroids_co = np.append( All_Centroids_co, centroids)
All_Sigmas_co = np.append( All_Sigmas_co, sigmas)
All_Intensities_co = np.append( All_Intensities_co, intensities )
All_residuals_co = np.append( All_residuals_co, residuals )
p0 = np.zeros( npar_wsol )
p0[0] = int(.5*n_useful) * Global_ZP
p1_co, G_pix_co, G_ord_co, G_wav_co, II_co, rms_ms_co, G_res_co = \
GLOBALutils.Fit_Global_Wav_Solution(All_Pixel_Centers_co, All_Wavelengths_co,\
All_Orders_co, np.ones(All_Intensities_co.shape), p0, Cheby=use_cheby,\
order0=ro0+orders_offset_co, ntotal=n_useful, maxrms=MRMS, Inv=Inverse_m,\
minlines=minlines_glob, npix=len(thar_order),nx=ncoef_x,nm=ncoef_m)
pdict = {'orders_offset':orders_offset,'orders_offset_co':orders_offset_co, 'rough_shift':rough_shift,\
'rough_shift_co':rough_shift_co,'p1':p1,'mjd':mjd, 'G_pix':G_pix, 'G_ord':G_ord, 'G_wav':G_wav, 'II':II, 'rms_ms':rms_ms,\
'G_res':G_res, 'All_Centroids':All_Centroids, 'All_Wavelengths':All_Wavelengths, \
'All_Orders':All_Orders, 'All_Pixel_Centers':All_Pixel_Centers, 'All_Sigmas':All_Sigmas,\
'p1_co':p1_co, 'G_pix_co':G_pix_co, 'G_ord_co':G_ord_co, 'G_wav_co':G_wav_co, 'II_co':II_co,\
'rms_ms_co':rms_ms_co, 'G_res':G_res, 'All_Centroids':All_Centroids, 'All_Wavelengths':All_Wavelengths,\
'All_Orders_co':All_Orders_co, 'All_Pixel_Centers_co':All_Pixel_Centers_co, 'All_Sigmas_co':All_Sigmas_co}
pickle.dump( pdict, open(wavsol_pkl, 'w' ) )
else:
print "\t\tUsing previously computed wavelength solution in file",wavsol_pkl
pdict = pickle.load(open(wavsol_pkl,'r'))
if len(ThAr_sim)>0:
wavsol_pkl = dirout + ThAr_sim[0].split('/')[-1][:-4]+'wavsolpars.pkl'
dct = pickle.load(open(wavsol_pkl,'r'))
p_ref = dct['p1']
p_ref_co = dct['p1_co']
orders_offset = dct['orders_offset']
orders_offset_co = dct['orders_offset_co']
rough_shift = dct['rough_shift']
rough_shift_co = dct['rough_shift_co']
else:
if len(ThAr_ref)>0 and len(ThAr_co)>0:
wavsol_pkl = dirout + ThAr_co[0].split('/')[-1][:-4]+'wavsolpars.pkl'
dct = pickle.load(open(wavsol_pkl,'r'))
wavsol_ob_pkl = dirout + dct['ref_thar_ob'].split('/')[-1][:-4]+'wavsolpars.pkl'
dct_ob = pickle.load(open(wavsol_ob_pkl,'r'))
p_ref_co = dct['p1_co']
p_ref = dct_ob['p1']
orders_offset = dct_ob['orders_offset']
orders_offset_co = dct['orders_offset_co']
rough_shift = dct_ob['rough_shift']
rough_shift_co = dct['rough_shift_co']
elif len(ThAr_ref)>0:
wavsol_pkl = dirout + ThAr_ref[0].split('/')[-1][:-4]+'wavsolpars.pkl'
dct = pickle.load(open(wavsol_pkl,'r'))
p_ref = dct['p1']
orders_offset = dct['orders_offset']
rough_shift = dct['rough_shift']
mjds_thar,shifts,shifts_co = [],[],[]
print '\n\tDetermination of instrumental drift through the night...'
for i in range(len(ThAr_sim)):
hthar = pyfits.open( ThAr_sim[i] )
mjd, mjd0 = fiesutils.mjd_fromheader2( hthar )
wavsol_pkl = dirout + ThAr_sim[i].split('/')[-1][:-4]+'wavsolpars.pkl'
dthar = pyfits.getdata( ThAr_sim[i] )
npix = dthar.shape[0]
dct = pickle.load(open(wavsol_pkl,'r'))
p_shift, pix_centers, orders, wavelengths, I, rms_ms, residuals = \
GLOBALutils.Global_Wav_Solution_vel_shift(dct['G_pix'], dct['G_wav'], dct['G_ord'],\
np.ones(len(dct['G_ord'])), p_ref, order0=ro0 + orders_offset, npix=npix,\
Cheby=use_cheby, ntotal=n_useful, maxrms=MRMS, Inv=Inverse_m, minlines=minlines_glob, nx=ncoef_x,nm=ncoef_m)
p_shift_co, pix_centers_co, orders_co, wavelengths_co, I_co, rms_ms_co, residuals_co = \
GLOBALutils.Global_Wav_Solution_vel_shift(dct['G_pix_co'], dct['G_wav_co'], dct['G_ord_co'],\
np.ones(len(dct['G_ord_co'])), p_ref_co, order0=ro0 + orders_offset_co, npix=npix,\
Cheby=use_cheby, ntotal=n_useful, maxrms=MRMS, Inv=Inverse_m, minlines=minlines_glob, nx=ncoef_x,nm=ncoef_m)
if rms_ms / np.sqrt(float(len(orders))) < 10. and rms_ms_co / np.sqrt(float(len(orders_co))) < 10.:
shifts.append(p_shift[0])
shifts_co.append(p_shift_co[0])
mjds_thar.append(mjd)
used_thars = []
for i in range(len(ThAr_co)):
hthar = pyfits.open( ThAr_co[i] )
mjd, mjd0 = fiesutils.mjd_fromheader2( hthar )
wavsol_pkl = dirout + ThAr_co[i].split('/')[-1][:-4]+'wavsolpars.pkl'
dthar = pyfits.getdata( ThAr_co[i] )
npix = dthar.shape[0]
dct = pickle.load(open(wavsol_pkl,'r'))
wavsol_ob_pkl = dirout + dct['ref_thar_ob'].split('/')[-1][:-4]+'wavsolpars.pkl'
dct_ob = pickle.load(open(wavsol_ob_pkl,'r'))
used_thars.append(dct['ref_thar_ob'])
p_shift, pix_centers, orders, wavelengths, I, rms_ms, residuals = \
GLOBALutils.Global_Wav_Solution_vel_shift(dct_ob['G_pix'], dct_ob['G_wav'], dct_ob['G_ord'],\
np.ones(len(dct_ob['G_ord'])), p_ref, order0=ro0 + orders_offset, npix=npix,\
Cheby=use_cheby, ntotal=n_useful, maxrms=MRMS, Inv=Inverse_m, minlines=minlines_glob, nx=ncoef_x,nm=ncoef_m)
p_shift_co, pix_centers_co, orders_co, wavelengths_co, I_co, rms_ms_co, residuals_co = \
GLOBALutils.Global_Wav_Solution_vel_shift(dct['G_pix_co'], dct['G_wav_co'], dct['G_ord_co'],\
np.ones(len(dct['G_ord_co'])), p_ref_co, order0=ro0 + orders_offset_co, npix=npix,\
Cheby=use_cheby, ntotal=n_useful, maxrms=MRMS, Inv=Inverse_m, minlines=minlines_glob, nx=ncoef_x,nm=ncoef_m)
if rms_ms / np.sqrt(float(len(orders))) < 10. and rms_ms_co / np.sqrt(float(len(orders_co))) < 10.:
shifts.append(p_shift[0])
shifts_co.append(p_shift_co[0])
mjds_thar.append(mjd)
used_thars = np.array(used_thars)
for i in range(len(ThAr_ref_dates)):
if not ThAr_ref[i] in used_thars:
hthar = pyfits.open( ThAr_ref[i] )
mjd, mjd0 = fiesutils.mjd_fromheader2( hthar )
wavsol_pkl = dirout + ThAr_ref[i].split('/')[-1][:-4]+'wavsolpars.pkl'
dthar = pyfits.getdata( ThAr_ref[i] )
npix = dthar.shape[0]
dct = pickle.load(open(wavsol_pkl,'r'))
p_shift, pix_centers, orders, wavelengths, I, rms_ms, residuals = \
GLOBALutils.Global_Wav_Solution_vel_shift(dct['G_pix'], dct['G_wav'], dct['G_ord'],\
np.ones(len(dct['G_ord'])), p_ref, order0=ro0 + orders_offset, npix=npix,\
Cheby=use_cheby, ntotal=n_useful, maxrms=MRMS, Inv=Inverse_m, minlines=minlines_glob, nx=ncoef_x,nm=ncoef_m)
if rms_ms / np.sqrt(float(len(orders))) < 10.:
shifts.append(p_shift[0])
shifts_co.append(p_shift[0])
mjds_thar.append(mjd)
mjds_thar,shifts = np.array(mjds_thar),np.array(shifts)
I = np.argsort(mjds_thar)
mjds_thar = mjds_thar[I]
shifts = shifts[I]
shv = (1e-6*shifts)*299792458.0
if len(shifts)>1:
tck_v = scipy.interpolate.splrep(mjds_thar,shv,k=1)
tck_shift = scipy.interpolate.splrep(mjds_thar,shifts,k=1)
# Does any image have a special requirement for dealing with the moonlight?
if os.access(dirin + 'moon_corr.txt', os.F_OK):
fmoon = open(dirin + 'moon_corr.txt','r')
moon_lns = fmoon.readlines()
spec_moon = []
use_moon = []
for line in moon_lns:
spec_moon.append(line.split()[0])
if line.split()[1] == '0':
use_moon.append(False)
else:
use_moon.append(True)
else:
spec_moon = []
use_moon = []
spec_moon = np.array(spec_moon)
use_moon = np.array(use_moon)
new_list = []
new_times = []
for fsim in sim_sci:
h = pyfits.open(fsim)
if object2do in h[0].header['OBJECT'] or object2do == 'all':
new_list.append(fsim)
mjd, mjd0 = fiesutils.mjd_fromheader2( h )
new_times.append(mjd)
new_list = np.array(new_list)
new_times = np.array(new_times)
IS = np.argsort(new_times)
new_list = new_list[IS]
new_times = new_times[IS]
for fsim in new_list:
know_moon = False
if fsim.split('/')[-1] in spec_moon:
I = np.where(fsim.split('/')[-1] == spec_moon)[0]
know_moon = True
here_moon = use_moon[I]
print '\n'
print "\t--> Working on image: ", fsim
h = pyfits.open(fsim)
mjd,mjd0 = fiesutils.mjd_fromheader2(h)
ronoise, gain = fiesutils.get_RONGAIN(h[1].header)
# Object name
obname = h[0].header['OBJECT']
print "\t\tObject name:",obname
# Open file, trim, overscan subtract and MasterBias subtract
data = h[1].data
data = fiesutils.OverscanTrim( data, binning=binning, ii=oii, ff=off ) - MasterBias
if dark_corr and len(darks)>0 and int(h[0].header['EXPTIME']) in dark_utimes.astype('int'):
I = np.where(dark_utimes.astype('int') == int(h[0].header['EXPTIME']))[0]
data = data - pyfits.getdata(dark_names[I][0])
simult = False
if h[0].header['FILMP4'] == 1:
simult = True
bacfile = dirout + 'BAC_' + fsim.split('/')[-1][:-4]+'fits'
if os.access(bacfile,os.F_OK)==False:
if simult:
Centers = np.zeros((len(c_tot),dthar.shape[0]))
ccc = c_tot.copy()
else:
Centers = np.zeros((len(c_all),dthar.shape[0]))
ccc = c_all.copy()
for i in range(Centers.shape[0]):
Centers[i,:]=scipy.polyval(ccc[i,:],np.arange(len(Centers[i,:])))
if simult:
bac = GLOBALutils.get_scat(data.T,Centers,span=ext_aperture,option=1)
else:
bac = GLOBALutils.get_scat(data.T,Centers,span=bacap)
if (os.access(bacfile,os.F_OK)):
os.remove( bacfile )
hdbac = pyfits.PrimaryHDU( bac )
hdbac.writeto(bacfile)
else:
bac = pyfits.getdata(bacfile)
data = data.T - bac
ra = float(h[0].header['RA'])
dec = float(h[0].header['DEC'])
altitude = 2382.
latitude = 28.75722
longitude = -17.885
epoch = 2000.
ra2,dec2 = GLOBALutils.getcoords(obname,mjd,filen=reffile)
if ra2 !=0 and dec2 != 0:
ra = ra2
dec = dec2
else:
print '\t\tUsing the coordinates found in the image header.'
iers = GLOBALutils.JPLiers( baryc_dir, mjd-999.0, mjd+999.0 )
obsradius, R0 = GLOBALutils.JPLR0( latitude, altitude)
obpos = GLOBALutils.obspos( longitude, obsradius, R0 )
jplephem.set_ephemeris_dir( baryc_dir , ephemeris )
jplephem.set_observer_coordinates( float(obpos[0]), float(obpos[1]), float(obpos[2]) )
res = jplephem.doppler_fraction(float(ra/15.0), float(dec), long(mjd), float(mjd%1), 1, 0.0)
lbary_ltopo = 1.0 + res['frac'][0]
bcvel_baryc = ( lbary_ltopo - 1.0 ) * 2.99792458E5
print "\t\tBarycentric velocity:", bcvel_baryc
res = jplephem.pulse_delay(ra/15.0, dec, int(mjd), mjd%1, 1, 0.0)
mbjd = mjd + res['delay'][0] / (3600.0 * 24.0)
gobs = ephem.Observer()
gobs.name = 'La Palma'
gobs.lat = rad(latitude) # lat/long in decimal degrees
gobs.long = rad(longitude)
gobs.date = h[0].header['DATE-OBS'][:10] + ' ' + h[0].header['DATE-OBS'][11:]
mephem = ephem.Moon()
mephem.compute(gobs)
Mcoo = jplephem.object_track("Moon", int(mjd), float(mjd%1), 1, 0.0)
Mp = jplephem.barycentric_object_track("Moon", int(mjd), float(mjd%1), 1, 0.0)
Sp = jplephem.barycentric_object_track("Sun", int(mjd), float(mjd%1), 1, 0.0)
res = jplephem.object_doppler("Moon", int(mjd), mjd%1, 1, 0.0)
lunation,moon_state,moonsep,moonvel = GLOBALutils.get_lunar_props(ephem,gobs,Mcoo,Mp,Sp,res,ra,dec)
refvel = bcvel_baryc + moonvel
print '\t\tRadial Velocity of sacttered moonlight:',refvel
print '\t\tExtraction:'
sci_fits = dirout + fsim.split('/')[-1][:-4]+'spec.fits.S'
sci_fits_simple = dirout + fsim.split('/')[-1][:-4]+'spec.simple.fits.S'
if ( os.access(sci_fits,os.F_OK) == False ) or ( os.access(sci_fits_simple,os.F_OK) == False ) or \
( force_sci_extract ):
print "\t\t\tNo previous extraction or extraction forced for science file", fsim, "extracting..."
sci_Ss = GLOBALutils.simple_extraction(data,c_all,ext_aperture,\
min_extract_col,max_extract_col,npools)
sci_S = GLOBALutils.optimal_extraction(data,P,c_all,ext_aperture,\
ronoise,gain,S_Marsh,NCosmic_Marsh,\
min_extract_col,max_extract_col,npools)
sci_Ss = sci_Ss[::-1]
sci_Ss = GLOBALutils.invert(sci_Ss)
for iii in range(3):
sci_St = sci_S[:,iii].copy()
sci_St = sci_St[::-1]
sci_S[:,iii] = sci_St
sci_S = GLOBALutils.invert(sci_S)
if (os.access(sci_fits,os.F_OK)):
os.remove( sci_fits )
if (os.access(sci_fits_simple,os.F_OK)):
os.remove( sci_fits_simple )
hdu = pyfits.PrimaryHDU( sci_S )
hdu.writeto( sci_fits )
hdu = pyfits.PrimaryHDU( sci_Ss )
hdu.writeto( sci_fits_simple )
else:
print '\t\t\t '+fsim+" has already been extracted, reading in product fits files..."
sci_S = pyfits.getdata( sci_fits )
sci_Ss = pyfits.getdata( sci_fits_simple )
if simult:
sci_co_fits = dirout + fsim.split('/')[-1][:-4]+'spec.co.fits.S'
sci_co_simple_fits = dirout + fsim.split('/')[-1][:-4]+'spec.co.simple.fits.S'
if ( os.access(sci_co_fits,os.F_OK) == False ) or ( os.access(sci_co_simple_fits,os.F_OK) == False ) or \
( force_sci_extract ):
print "\t\t\tNo previous extraction or extraction forced for comparison orders of science file", fsim, "extracting..."
sci_co_Ss = GLOBALutils.simple_extraction(data,c_co,ext_aperture,\
min_extract_col,max_extract_col,npools)
sci_co_S = GLOBALutils.optimal_extraction(data,P_co,c_co,ext_aperture,\
ronoise,gain,S_Marsh,NCosmic_Marsh,\
min_extract_col,max_extract_col,npools)
sci_co_Ss = sci_co_Ss[::-1]
sci_co_Ss = GLOBALutils.invert(sci_co_Ss)
for iii in range(3):
sci_co_St = sci_co_S[:,iii].copy()
sci_co_St = sci_co_St[::-1]
sci_co_S[:,iii] = sci_co_St
sci_co_S = GLOBALutils.invert(sci_co_S)
if (os.access(sci_co_fits,os.F_OK)):
os.remove( sci_co_fits )
if (os.access(sci_co_simple_fits,os.F_OK)):
os.remove( sci_co_simple_fits )
hdu = pyfits.PrimaryHDU( sci_co_S )
hdu.writeto( sci_co_fits )
hdu = pyfits.PrimaryHDU( sci_co_Ss )
hdu.writeto( sci_co_simple_fits )
else:
print '\t\t\t '+fsim+" has already been extracted, reading in product fits files..."
sci_co_S = pyfits.getdata( sci_co_fits )
sci_co_Ss = pyfits.getdata( sci_co_simple_fits )
fout = 'proc/' + obname + '_' + h[0].header['DATE-OBS'] + '_sp.fits'
if ( os.access(dirout+fout ,os.F_OK) == False ) or (force_spectral_file_build):
orderi = 0
if orders_offset < 0:
orderi = - orders_offset
orderf = nord - 1
if orderf + orders_offset >= n_useful:
orderf = n_useful - orders_offset - 1
spec = np.zeros((11, orderf - orderi + 1, data.shape[1]))
hdu = pyfits.PrimaryHDU( spec )
hdu = GLOBALutils.update_header(hdu,'HIERARCH MJD', mjd)
hdu = GLOBALutils.update_header(hdu,'HIERARCH MBJD', mbjd)
hdu = GLOBALutils.update_header(hdu,'HIERARCH SHUTTER START DATE', h[0].header['DATE-OBS'][:10] )
hdu = GLOBALutils.update_header(hdu,'HIERARCH SHUTTER START UT', h[0].header['DATE-OBS'][11:])
hdu = GLOBALutils.update_header(hdu,'HIERARCH TEXP (S)',h[0].header['EXPTIME'])
hdu = GLOBALutils.update_header(hdu,'HIERARCH BARYCENTRIC CORRECTION (KM/S)', bcvel_baryc)
hdu = GLOBALutils.update_header(hdu,'HIERARCH (LAMBDA_BARY / LAMBDA_TOPO)', lbary_ltopo)
hdu = GLOBALutils.update_header(hdu,'HIERARCH TARGET NAME', obname)
hdu = GLOBALutils.update_header(hdu,'HIERARCH RA',h[0].header['RA'])
hdu = GLOBALutils.update_header(hdu,'HIERARCH DEC',h[0].header['DEC'])
hdu = GLOBALutils.update_header(hdu,'HIERARCH RA BARY',ra)
hdu = GLOBALutils.update_header(hdu,'HIERARCH DEC BARY',dec)
hdu = GLOBALutils.update_header(hdu,'HIERARCH EQUINOX',2000.)
hdu = GLOBALutils.update_header(hdu,'HIERARCH OBS LATITUDE',latitude)
hdu = GLOBALutils.update_header(hdu,'HIERARCH OBS LONGITUDE',longitude)
hdu = GLOBALutils.update_header(hdu,'HIERARCH OBS ALTITUDE',altitude)
hdu = GLOBALutils.update_header(hdu,'HIERARCH TARG AIRMASS START',h[0].header['AIRMASS'])
hdu = GLOBALutils.update_header(hdu,'HIERARCH MOON VEL',refvel)
hdu = GLOBALutils.update_header(hdu,'HIERARCH SIMULT',simult)
print '\t\tWavelength calibration:'
if simult and (len(ThAr_co)>0 or len(ThAr_sim)>0):
lines_thar = sci_co_S[:,1,:].copy()
All_Pixel_Centers_co = np.array([])
All_Wavelengths_co = np.array([])
All_Orders_co = np.array([])
All_Centroids_co = np.array([])
All_Sigmas_co = np.array([])
All_Intensities_co = np.array([])
All_residuals_co = np.array([])
orderi = 0
if orders_offset_co < 0:
orderi = - orders_offset_co
orderf = nord_co - 1
if orderf + orders_offset_co >= n_useful:
orderf = n_useful - orders_offset_co - 1
for order in range(orderi,orderf+1):
order_s = str(order+orders_offset_co)
if (order + orders_offset_co < 10):
order_s = '0' + str(order+orders_offset_co)
f = open(order_dir+'order_'+order_s+sufix,'r')
llins = f.readlines()
if len(llins)>5:
thar_order_orig = lines_thar[order]
L = np.where(thar_order_orig != 0)[0]
IV = 1. / (thar_order_orig / gain + (ron/gain)**2 )
IV[L] = 0.
wei = np.ones(len(thar_order_orig)) #np.sqrt( IV )
bkg = scipy.signal.medfilt(thar_order_orig,101)
thar_order = thar_order_orig - bkg
coeffs_pix2wav, coeffs_pix2sigma, pixel_centers, wavelengths, rms_ms, residuals, centroids,sigmas, intensities \
= GLOBALutils.Initial_Wav_Calibration( order_dir+'order_'+order_s+sufix, thar_order, order, wei, \
rmsmax=500, minlines=10, FixEnds=False,Dump_Argon=dumpargon, Dump_AllLines=True, line_width=6, Cheby=use_cheby,porder=3,rough_shift=rough_shift,binning=binning,del_width=5.,do_xc=False)
if (order == int(.5*n_useful)):
if (use_cheby):
Global_ZP = GLOBALutils.Cheby_eval( coeffs_pix2wav, 0.5*len(thar_order), len(thar_order) )
else:
Global_ZP = scipy.polyval( coeffs_pix2wav, 0.0 )
All_Pixel_Centers_co = np.append( All_Pixel_Centers_co, pixel_centers )
All_Wavelengths_co = np.append( All_Wavelengths_co, wavelengths )
All_Orders_co = np.append( All_Orders_co, np.zeros( len(pixel_centers) ) + order)
All_Centroids_co = np.append( All_Centroids_co, centroids)
All_Sigmas_co = np.append( All_Sigmas_co, sigmas)
All_Intensities_co = np.append( All_Intensities_co, intensities )
All_residuals_co = np.append( All_residuals_co, residuals )
p0 = np.zeros( npar_wsol )
p0[0] = int(.5*n_useful) * Global_ZP
p1_co, G_pix_co, G_ord_co, G_wav_co, II_co, rms_ms_co, G_res_co = \
GLOBALutils.Fit_Global_Wav_Solution(All_Pixel_Centers_co, All_Wavelengths_co,\
All_Orders_co, np.ones(All_Intensities_co.shape), p0, Cheby=use_cheby,\
order0=ro0+orders_offset_co, ntotal=n_useful, maxrms=MRMS, Inv=Inverse_m,\
minlines=minlines_glob, npix=len(thar_order),nx=ncoef_x,nm=ncoef_m)
p_shift, pix_centers_co, orders_co, wavelengths_co, I_co, rms_ms_co, residuals_co = \
GLOBALutils.Global_Wav_Solution_vel_shift(G_pix_co, G_wav_co, G_ord_co,\
np.ones(len(G_ord_co)), p_ref_co, order0=ro0 + orders_offset_co, npix=len(thar_order),\
Cheby=use_cheby, ntotal=n_useful, maxrms=MRMS, Inv=Inverse_m, minlines=minlines_glob, nx=ncoef_x,nm=ncoef_m)
else:
if len(shifts)>1:
p_shift = scipy.interpolate.splev(mjd,tck_shift)
else:
p_shift = 0.
orderi = 0
if orders_offset < 0:
orderi = - orders_offset
orderf = nord - 1
if orderf + orders_offset >= n_useful:
orderf = n_useful - orders_offset - 1
print "\t\t\tInstrumental drift:",(1e-6*p_shift)*299792458.0, 'm/s'
# Apply new wavelength solution including barycentric correction
equis = np.arange( data.shape[1] )
order = orderi
torder = 0
while order < orderf+1:
m = order + ro0 + orders_offset
chebs = GLOBALutils.Calculate_chebs(equis, m, npix=sci_S.shape[2], order0=ro0 + orders_offset, ntotal=n_useful, Inverse=Inverse_m,nx=ncoef_x,nm=ncoef_m)
WavSol = lbary_ltopo * (1.0 + 1.0e-6*p_shift) * (1.0/float(m)) * \
GLOBALutils.Joint_Polynomial_Cheby(p_ref,chebs,ncoef_x,ncoef_m)
spec[0,torder,:] = WavSol
spec[1,torder,:] = sci_S[order,1]
spec[2,torder,:] = sci_S[order,2]
fn = S_flat[order,1,:]
L = np.where( fn == 0 )[0]
spec[3,torder,:] = spec[1,torder,:] / S_flat[order,1,:]
spec[4,torder,:] = spec[2,torder] * ( S_flat_n[order,1,:] ** 2 )
spec[3,torder,L] = 0.
spec[4,torder,L] = 0.
ccoef = GLOBALutils.get_cont_single(spec[0,torder],spec[3,torder],spec[4,torder],ll=1.5,lu=5,nc=3)
L = np.where( spec[1,torder] != 0 )
spec[5,torder,:][L] = spec[3,torder][L] / np.polyval(ccoef,spec[0,torder][L])
ratio = np.polyval(ccoef,spec[0,torder][L]) * Snorms[order]
spec[6,torder,:][L] = spec[4,torder][L] * (ratio ** 2 )
spec[7,torder,:][L] = ratio
spec[8,torder,:][L] = ratio * S_flat_n[order,1][L] / np.sqrt( ratio * S_flat_n[order,1][L] / gain + (ronoise/gain)**2 )
spl = scipy.interpolate.splrep(np.arange(WavSol.shape[0]), WavSol,k=3)
dlambda_dx = scipy.interpolate.splev(np.arange(WavSol.shape[0]), spl, der=1)
NN = np.average(dlambda_dx)
dlambda_dx /= NN
LL = np.where(spec[5,torder] > 1 + 10. / scipy.signal.medfilt(spec[8,torder],21))[0]
spec[5,torder,LL] = 1.
spec[9,torder][L] = spec[5,torder][L] * (dlambda_dx[L] ** 1)
spec[10,torder][L] = spec[6,torder][L] / (dlambda_dx[L] ** 2)
order +=1
torder += 1
#show()
if os.access(dirout + fout, os.F_OK):
os.remove(dirout + fout)
hdu.writeto(dirout + fout)
if (not JustExtract):
if DoClass:
print '\t\tSpectral Analysis:'
# spectral analysis
# First, query SIMBAD with the object name
query_success = False
sp_type_query = 'None'
#query_success,sp_type_query = GLOBALutils.simbad_query_obname(obname)
# Now, query SIMBAD by coordinates if above not successful
#if (not query_success):
# query_success,sp_type_query = GLOBALutils.simbad_query_coords('12:00:00','00:00:00')
print "\t\t\tSpectral type returned by SIMBAD query:",sp_type_query
hdu = GLOBALutils.update_header(hdu,'HIERARCH SIMBAD SPTYP', sp_type_query)
pars_file = dirout + fsim.split('/')[-1][:-8]+'_stellar_pars.txt'
if os.access(pars_file,os.F_OK) == False or force_stellar_pars:
print "\t\t\tEstimating atmospheric parameters:"
spec2 = spec.copy()
if resol > 44000:
Rx = np.around(1./np.sqrt(1./40000.**2 - 1./resol**2))
for i in range(spec.shape[1]):
IJ = np.where(spec[5,i]!=0.)[0]
spec2[5,i,IJ] = GLOBALutils.convolve(spec[0,i,IJ],spec[5,i,IJ],Rx)
T_eff, logg, Z, vsini, vel0, ccf = correlation.CCF(spec2,model_path=models_path,npools=npools)
line = "%6d %4.1f %4.1f %8.1f %8.1f\n" % (T_eff,logg, Z, vsini, vel0)
f = open(pars_file,'w')
f.write(line)
f.close()
else:
print "\t\t\tAtmospheric parameters loaded from file:"
T_eff, logg, Z, vsini, vel0 = np.loadtxt(pars_file,unpack=True)
print "\t\t\t\tT_eff=",T_eff,"log(g)=",logg,"Z=",Z,"vsin(i)=",vsini,"vel0",vel0
else:
T_eff, logg, Z, vsini, vel0 = -999,-999,-999,-999,-999
T_eff_epoch = T_eff
logg_epoch = logg
Z_epoch = Z
vsini_epoch = vsini
vel0_epoch = vel0
hdu = GLOBALutils.update_header(hdu,'HIERARCH TEFF', float(T_eff))
hdu = GLOBALutils.update_header(hdu,'HIERARCH LOGG', float(logg))
hdu = GLOBALutils.update_header(hdu,'HIERARCH Z', Z)
hdu = GLOBALutils.update_header(hdu,'HIERARCH VSINI', vsini)
hdu = GLOBALutils.update_header(hdu,'HIERARCH VEL0', vel0)
print "\t\tRadial Velocity analysis:"
# assign mask
sp_type, mask = GLOBALutils.get_mask_reffile(obname,reffile=reffile,base='../data/xc_masks/')
print "\t\t\tWill use",sp_type,"mask for CCF."
# Read in mask
ml, mh, weight = np.loadtxt(mask,unpack=True)
ml_v = GLOBALutils.ToVacuum( ml )
mh_v = GLOBALutils.ToVacuum( mh )
# make mask larger accounting for factor ~2 lower res in CORALIE w/r to HARPS
av_m = 0.5*( ml_v + mh_v )
ml_v -= (av_m - ml_v)
mh_v += (mh_v - av_m)
mask_hw_kms = (GLOBALutils.Constants.c/1e3) * 0.5*(mh_v - ml_v) / av_m
#sigma_fout = stellar_pars_dir + obname + '_' +'sigma.txt'
disp = GLOBALutils.get_disp(obname, reffile=reffile)
if disp == 0:
known_sigma = False
if vsini != -999 and vsini != 0.:
disp = vsini
else:
disp = 3.
else:
known_sigma = True
mask_hw_wide = av_m * disp / (GLOBALutils.Constants.c/1.0e3)
ml_v = av_m - mask_hw_wide
mh_v = av_m + mask_hw_wide
print '\t\t\tComputing the CCF...'
cond = True
while (cond):
# first rough correlation to find the minimum
vels, xc_full, sn, nlines_ccf, W_ccf = \
GLOBALutils.XCor(spec, ml_v, mh_v, weight, 0, lbary_ltopo, vel_width=300,vel_step=3,\
spec_order=9,iv_order=10,sn_order=8,max_vel_rough=300)
xc_av = GLOBALutils.Average_CCF(xc_full, sn, sn_min=3.0, Simple=True, W=W_ccf)
# Normalize the continuum of the CCF robustly with lowess
yy = scipy.signal.medfilt(xc_av,11)
pred = lowess(yy, vels,frac=0.4,it=10,return_sorted=False)
tck1 = scipy.interpolate.splrep(vels,pred,k=1)
xc_av_orig = xc_av.copy()
xc_av /= pred
vel0_xc = vels[ np.argmin( xc_av ) ]
rvels, rxc_av, rpred, rxc_av_orig, rvel0_xc = vels.copy(), \
xc_av.copy(), pred.copy(), xc_av_orig.copy(), vel0_xc
xc_av_rough = xc_av
vels_rough = vels
if disp > 30:
disp = 30.
vel_width = np.maximum( 20.0, 6*disp )
vels, xc_full, sn, nlines_ccf, W_ccf =\
GLOBALutils.XCor(spec, ml_v, mh_v, weight, vel0_xc, lbary_ltopo, vel_width=vel_width,vel_step=0.1,\
spec_order=9,iv_order=10,sn_order=8,max_vel_rough=300)
xc_av = GLOBALutils.Average_CCF(xc_full, sn, sn_min=3.0, Simple=True, W=W_ccf)
pred = scipy.interpolate.splev(vels,tck1)
xc_av /= pred
if sp_type == 'M5':
moon_sig = 2.5
elif sp_type == 'K5':
moon_sig = 3.3
else:
moon_sig = 4.5
p1,XCmodel,p1gau,XCmodelgau,Ls2 = GLOBALutils.XC_Final_Fit( vels, xc_av ,\
sigma_res = 4, horder=8, moonv = refvel, moons = moon_sig, moon = False)
moonmatters = False
if (know_moon and here_moon):
moonmatters = True
ismoon = True
confused = False
p1_m,XCmodel_m,p1gau_m,XCmodelgau_m,Ls2_m = GLOBALutils.XC_Final_Fit( vels, xc_av , sigma_res = 4, horder=8, moonv = refvel, moons = moon_sig, moon = True)
moon_flag = 1
else:
confused = False
ismoon = False
p1_m,XCmodel_m,p1gau_m,XCmodelgau_m,Ls2_m = p1,XCmodel,p1gau,XCmodelgau,Ls2
moon_flag = 0
bspan = GLOBALutils.calc_bss(vels,xc_av)
SP = bspan[0]
if (not known_sigma):
disp = np.floor(p1gau[2])
if (disp < 3.0):
disp = 3.0
mask_hw_wide = av_m * disp / (GLOBALutils.Constants.c/1.0e3)
ml_v = av_m - mask_hw_wide
mh_v = av_m + mask_hw_wide
known_sigma = True
else:
cond = False
xc_dict = {'vels':vels,'xc_av':xc_av,'XCmodelgau':XCmodelgau,'Ls2':Ls2,'refvel':refvel,\
'rvels':rvels,'rxc_av':rxc_av,'rpred':rpred,'rxc_av_orig':rxc_av_orig,\
'rvel0_xc':rvel0_xc,'xc_full':xc_full, 'p1':p1, 'sn':sn, 'p1gau':p1gau,\
'p1_m':p1_m,'XCmodel_m':XCmodel_m,'p1gau_m':p1gau_m,'Ls2_m':Ls2_m,\
'XCmodelgau_m':XCmodelgau_m}
moon_dict = {'moonmatters':moonmatters,'moon_state':moon_state,'moonsep':moonsep,\
'lunation':lunation,'mephem':mephem,'texp':float(h[0].header['EXPTIME'])}
pkl_xc = dirout + fsim.split('/')[-1][:-8]+obname+'_XC_'+sp_type+'.pkl'
pickle.dump( xc_dict, open( pkl_xc, 'w' ) )
ccf_pdf = dirout + 'proc/' + fsim.split('/')[-1][:-4] + obname + '_XCs_' + sp_type + '.pdf'
if not avoid_plot:
GLOBALutils.plot_CCF(xc_dict,moon_dict,path=ccf_pdf)
SNR_5130 = np.median(spec[8,32,900:1101] )
airmass = float(h[0].header['AIRMASS'])
seeing = -999
TEXP = float(h[0].header['EXPTIME'])
if sp_type == 'G2':
if T_eff < 6000:
A = 0.06544
B = 0.00146
D = 0.24416
C = 0.00181
else:
A = 0.09821
B = 0.00014
D = 0.33491
C = 0.00113
elif sp_type == 'K5':
A = 0.05348
B = 0.00147
D = 0.20695
C = 0.00321
else:
A = 0.05348
B = 0.00147
D = 0.20695
C = 0.00321
RVerr = B + ( 1.6 + 0.2 * p1gau[2] ) * A / np.round(SNR_5130)
depth_fact = 1. + p1gau[0]/(p1gau[2]*np.sqrt(2*np.pi))
if depth_fact >= 1.:
RVerr2 = -999.000
else:
if sp_type == 'G2':
depth_fact = (1 - 0.62) / (1 - depth_fact)
else:
depth_fact = (1 - 0.59) / (1 - depth_fact)
RVerr2 = RVerr * depth_fact
if (RVerr2 <= 0.009):
RVerr2 = 0.009
BSerr = D / float(np.round(SNR_5130)) + C
RV = np.around(p1gau_m[1],4)
BS = np.around(SP,4)
RVerr2 = np.around(RVerr2,4)
BSerr = np.around(BSerr,4)
print '\t\t\tRV = '+str(RV)+' +- '+str(RVerr2)
print '\t\t\tBS = '+str(BS)+' +- '+str(BSerr)
bjd_out = 2400000.5 + mbjd
T_eff_err = 100
logg_err = 0.5
Z_err = 0.5
vsini_err = 2
XC_min = np.abs(np.around(np.min(XCmodel),2))
SNR_5130 = np.around(SNR_5130)
SNR_5130_R = np.around(SNR_5130*np.sqrt(2.9))
disp_epoch = np.around(p1gau_m[2],1)
hdu = GLOBALutils.update_header(hdu,'RV', RV)
hdu = GLOBALutils.update_header(hdu,'RV_E', RVerr2)
hdu = GLOBALutils.update_header(hdu,'BS', BS)
hdu = GLOBALutils.update_header(hdu,'BS_E', BSerr)
hdu = GLOBALutils.update_header(hdu,'DISP', disp_epoch)
hdu = GLOBALutils.update_header(hdu,'SNR', SNR_5130)
hdu = GLOBALutils.update_header(hdu,'SNR_R', SNR_5130_R)
hdu = GLOBALutils.update_header(hdu,'INST', 'FIES')
hdu = GLOBALutils.update_header(hdu,'RESOL', str(resol))
hdu = GLOBALutils.update_header(hdu,'PIPELINE', 'CERES')
hdu = GLOBALutils.update_header(hdu,'XC_MIN', XC_min)
hdu = GLOBALutils.update_header(hdu,'BJD_OUT', bjd_out)
# write to output
line_out = "%-15s %18.8f %9.4f %7.4f %9.3f %5.3f fies ceres %8d %6d %5.2f %5.2f %5.1f %4.2f %5.2f %6.1f %4d %s\n"%\
(obname, bjd_out, RV, RVerr2, BS, BSerr, resol, T_eff_epoch, logg_epoch, Z_epoch, vsini_epoch, XC_min, disp_epoch,\
TEXP, SNR_5130_R, ccf_pdf)
f_res.write(line_out)
if (os.access( dirout + fout,os.F_OK)):
os.remove( dirout + fout)
hdu.writeto( dirout + fout )
else:
print "Reading spectral file from", fout
spec = pyfits.getdata( fout )
f_res.close()
| rabrahm/ceres | fies/fiespipe.py | Python | mit | 63,269 |
from settings import *
import dj_database_url
import sys
import os
from json import load
DEBUG = False
print >> sys.stderr, "Using Heroku Settings"
try:
APP_INFO = load(open(BASE_DIR + "/app_info.json"))['production']
except:
print "Failed to load app_info.json"
APP_INFO = {}
print "using appinfo: ", APP_INFO
if APP_INFO.get('project_name') and APP_INFO.get('branch_name'):
STATIC_PREPEND_PATH = '/{}/{}'.format(APP_INFO.get('project_name'), APP_INFO.get('branch_name'))
else:
STATIC_PREPEND_PATH = ''
DATABASES = {
'default': dj_database_url.config(default='postgres://localhost'),
}
# this setting can be set to False after setting up a static file serve through a cdn
SERVE_STATIC = True
# AWS settings
AWS_ACCESS_KEY_ID = os.environ.get('HAUS_AWS_ACCESS_KEY_ID','')
AWS_SECRET_ACCESS_KEY = os.environ.get('HAUS_AWS_SECRET_ACCESS_KEY','')
AWS_BUCKET_NAME = AWS_STORAGE_BUCKET_NAME = '__BUCKET_NAME__'
# suppress bucket auth via accesskeys
AWS_QUERYSTRING_AUTH = False
ASSET_PROTOCOL = 'https' if USE_HTTPS_FOR_ASSETS else 'http'
USE_RELATIVE_STATIC_URL = os.environ.get('USE_RELATIVE_STATIC_URL', False)
if USE_RELATIVE_STATIC_URL:
STATIC_URL = '/'
MEDIA_URL = '/uploads/'
else:
STATIC_URL = '{}://s3.amazonaws.com/{}/'.format(ASSET_PROTOCOL, AWS_STORAGE_BUCKET_NAME)
MEDIA_URL = '{}://s3.amazonaws.com/{}/uploads/'.format(ASSET_PROTOCOL, AWS_STORAGE_BUCKET_NAME)
STATICFILES_STORAGE = 'utils.storage.OptimizedS3BotoStorage'
DEFAULT_FILE_STORAGE = "utils.storage.MediaRootS3BotoStorage"
INSTALLED_APPS += ('storages',)
ALLOWED_HOSTS += ('{}.herokuapp.com'.format(APP_INFO.get('heroku_app_name','')), ) | MadeInHaus/django-template | backend/settings/hosts/production.py | Python | mit | 1,678 |
<<<<<<< HEAD
<<<<<<< HEAD
#
# test_codecencodings_cn.py
# Codec encoding tests for PRC encodings.
#
from test import support
from test import multibytecodec_support
import unittest
class Test_GB2312(multibytecodec_support.TestBase, unittest.TestCase):
    """Encode/decode tests for the PRC gb2312 codec."""
    encoding = 'gb2312'
    # Canned round-trip sample text shipped with the test data.
    tstring = multibytecodec_support.load_teststring('gb2312')
    # (input, error-handler, expected-output) triples; an expected value of
    # None means the codec must raise under the given error handler.
    codectests = (
        # invalid bytes
        (b"abc\x81\x81\xc1\xc4", "strict", None),
        (b"abc\xc8", "strict", None),
        (b"abc\x81\x81\xc1\xc4", "replace", "abc\ufffd\ufffd\u804a"),
        (b"abc\x81\x81\xc1\xc4\xc8", "replace", "abc\ufffd\ufffd\u804a\ufffd"),
        (b"abc\x81\x81\xc1\xc4", "ignore", "abc\u804a"),
        (b"\xc1\x64", "strict", None),
    )
class Test_GBK(multibytecodec_support.TestBase, unittest.TestCase):
    """Encode/decode tests for the gbk codec (GB2312 superset)."""
    encoding = 'gbk'
    # Canned round-trip sample text shipped with the test data.
    tstring = multibytecodec_support.load_teststring('gbk')
    # (input, error-handler, expected-output) triples; an expected value of
    # None means the codec must raise under the given error handler.
    codectests = (
        # invalid bytes
        (b"abc\x80\x80\xc1\xc4", "strict", None),
        (b"abc\xc8", "strict", None),
        (b"abc\x80\x80\xc1\xc4", "replace", "abc\ufffd\ufffd\u804a"),
        (b"abc\x80\x80\xc1\xc4\xc8", "replace", "abc\ufffd\ufffd\u804a\ufffd"),
        (b"abc\x80\x80\xc1\xc4", "ignore", "abc\u804a"),
        (b"\x83\x34\x83\x31", "strict", None),
        # U+30FB (katakana middle dot) is not encodable in plain GBK.
        ("\u30fb", "strict", None),
    )
class Test_GB18030(multibytecodec_support.TestBase, unittest.TestCase):
    """Encode/decode tests for the gb18030 codec (full Unicode mapping)."""
    encoding = 'gb18030'
    # Canned round-trip sample text shipped with the test data.
    tstring = multibytecodec_support.load_teststring('gb18030')
    # (input, error-handler, expected-output) triples; an expected value of
    # None means the codec must raise under the given error handler.
    codectests = (
        # invalid bytes
        (b"abc\x80\x80\xc1\xc4", "strict", None),
        (b"abc\xc8", "strict", None),
        (b"abc\x80\x80\xc1\xc4", "replace", "abc\ufffd\ufffd\u804a"),
        (b"abc\x80\x80\xc1\xc4\xc8", "replace", "abc\ufffd\ufffd\u804a\ufffd"),
        (b"abc\x80\x80\xc1\xc4", "ignore", "abc\u804a"),
        (b"abc\x84\x39\x84\x39\xc1\xc4", "replace", "abc\ufffd9\ufffd9\u804a"),
        # Unlike GBK, gb18030 can encode U+30FB (as a 4-byte sequence).
        ("\u30fb", "strict", b"\x819\xa79"),
        (b"abc\x84\x32\x80\x80def", "replace", 'abc\ufffd2\ufffd\ufffddef'),
        (b"abc\x81\x30\x81\x30def", "strict", 'abc\x80def'),
        (b"abc\x86\x30\x81\x30def", "replace", 'abc\ufffd0\ufffd0def'),
    )
    # gb18030 maps all of ISO 10646 (Unicode), so run the extended checks.
    has_iso10646 = True
class Test_HZ(multibytecodec_support.TestBase, unittest.TestCase):
encoding = 'hz'
tstring = multibytecodec_support.load_teststring('hz')
codectests = (
# test '~\n' (3 lines)
(b'This sentence is in ASCII.\n'
b'The next sentence is in GB.~{<:Ky2;S{#,~}~\n'
b'~{NpJ)l6HK!#~}Bye.\n',
'strict',
'This sentence is in ASCII.\n'
'The next sentence is in GB.'
'\u5df1\u6240\u4e0d\u6b32\uff0c\u52ff\u65bd\u65bc\u4eba\u3002'
'Bye.\n'),
# test '~\n' (4 lines)
(b'This sentence is in ASCII.\n'
b'The next sentence is in GB.~\n'
b'~{<:Ky2;S{#,NpJ)l6HK!#~}~\n'
b'Bye.\n',
'strict',
'This sentence is in ASCII.\n'
'The next sentence is in GB.'
'\u5df1\u6240\u4e0d\u6b32\uff0c\u52ff\u65bd\u65bc\u4eba\u3002'
'Bye.\n'),
# invalid bytes
(b'ab~cd', 'replace', 'ab\uFFFDcd'),
(b'ab\xffcd', 'replace', 'ab\uFFFDcd'),
(b'ab~{\x81\x81\x41\x44~}cd', 'replace', 'ab\uFFFD\uFFFD\u804Acd'),
(b'ab~{\x41\x44~}cd', 'replace', 'ab\u804Acd'),
(b"ab~{\x79\x79\x41\x44~}cd", "replace", "ab\ufffd\ufffd\u804acd"),
)
def test_main():
support.run_unittest(__name__)
if __name__ == "__main__":
test_main()
=======
#
# test_codecencodings_cn.py
# Codec encoding tests for PRC encodings.
#
from test import support
from test import multibytecodec_support
import unittest
class Test_GB2312(multibytecodec_support.TestBase, unittest.TestCase):
encoding = 'gb2312'
tstring = multibytecodec_support.load_teststring('gb2312')
codectests = (
# invalid bytes
(b"abc\x81\x81\xc1\xc4", "strict", None),
(b"abc\xc8", "strict", None),
(b"abc\x81\x81\xc1\xc4", "replace", "abc\ufffd\ufffd\u804a"),
(b"abc\x81\x81\xc1\xc4\xc8", "replace", "abc\ufffd\ufffd\u804a\ufffd"),
(b"abc\x81\x81\xc1\xc4", "ignore", "abc\u804a"),
(b"\xc1\x64", "strict", None),
)
class Test_GBK(multibytecodec_support.TestBase, unittest.TestCase):
encoding = 'gbk'
tstring = multibytecodec_support.load_teststring('gbk')
codectests = (
# invalid bytes
(b"abc\x80\x80\xc1\xc4", "strict", None),
(b"abc\xc8", "strict", None),
(b"abc\x80\x80\xc1\xc4", "replace", "abc\ufffd\ufffd\u804a"),
(b"abc\x80\x80\xc1\xc4\xc8", "replace", "abc\ufffd\ufffd\u804a\ufffd"),
(b"abc\x80\x80\xc1\xc4", "ignore", "abc\u804a"),
(b"\x83\x34\x83\x31", "strict", None),
("\u30fb", "strict", None),
)
class Test_GB18030(multibytecodec_support.TestBase, unittest.TestCase):
encoding = 'gb18030'
tstring = multibytecodec_support.load_teststring('gb18030')
codectests = (
# invalid bytes
(b"abc\x80\x80\xc1\xc4", "strict", None),
(b"abc\xc8", "strict", None),
(b"abc\x80\x80\xc1\xc4", "replace", "abc\ufffd\ufffd\u804a"),
(b"abc\x80\x80\xc1\xc4\xc8", "replace", "abc\ufffd\ufffd\u804a\ufffd"),
(b"abc\x80\x80\xc1\xc4", "ignore", "abc\u804a"),
(b"abc\x84\x39\x84\x39\xc1\xc4", "replace", "abc\ufffd9\ufffd9\u804a"),
("\u30fb", "strict", b"\x819\xa79"),
(b"abc\x84\x32\x80\x80def", "replace", 'abc\ufffd2\ufffd\ufffddef'),
(b"abc\x81\x30\x81\x30def", "strict", 'abc\x80def'),
(b"abc\x86\x30\x81\x30def", "replace", 'abc\ufffd0\ufffd0def'),
)
has_iso10646 = True
class Test_HZ(multibytecodec_support.TestBase, unittest.TestCase):
encoding = 'hz'
tstring = multibytecodec_support.load_teststring('hz')
codectests = (
# test '~\n' (3 lines)
(b'This sentence is in ASCII.\n'
b'The next sentence is in GB.~{<:Ky2;S{#,~}~\n'
b'~{NpJ)l6HK!#~}Bye.\n',
'strict',
'This sentence is in ASCII.\n'
'The next sentence is in GB.'
'\u5df1\u6240\u4e0d\u6b32\uff0c\u52ff\u65bd\u65bc\u4eba\u3002'
'Bye.\n'),
# test '~\n' (4 lines)
(b'This sentence is in ASCII.\n'
b'The next sentence is in GB.~\n'
b'~{<:Ky2;S{#,NpJ)l6HK!#~}~\n'
b'Bye.\n',
'strict',
'This sentence is in ASCII.\n'
'The next sentence is in GB.'
'\u5df1\u6240\u4e0d\u6b32\uff0c\u52ff\u65bd\u65bc\u4eba\u3002'
'Bye.\n'),
# invalid bytes
(b'ab~cd', 'replace', 'ab\uFFFDcd'),
(b'ab\xffcd', 'replace', 'ab\uFFFDcd'),
(b'ab~{\x81\x81\x41\x44~}cd', 'replace', 'ab\uFFFD\uFFFD\u804Acd'),
(b'ab~{\x41\x44~}cd', 'replace', 'ab\u804Acd'),
(b"ab~{\x79\x79\x41\x44~}cd", "replace", "ab\ufffd\ufffd\u804acd"),
)
def test_main():
support.run_unittest(__name__)
if __name__ == "__main__":
test_main()
>>>>>>> b875702c9c06ab5012e52ff4337439b03918f453
=======
#
# test_codecencodings_cn.py
# Codec encoding tests for PRC encodings.
#
from test import support
from test import multibytecodec_support
import unittest
class Test_GB2312(multibytecodec_support.TestBase, unittest.TestCase):
encoding = 'gb2312'
tstring = multibytecodec_support.load_teststring('gb2312')
codectests = (
# invalid bytes
(b"abc\x81\x81\xc1\xc4", "strict", None),
(b"abc\xc8", "strict", None),
(b"abc\x81\x81\xc1\xc4", "replace", "abc\ufffd\ufffd\u804a"),
(b"abc\x81\x81\xc1\xc4\xc8", "replace", "abc\ufffd\ufffd\u804a\ufffd"),
(b"abc\x81\x81\xc1\xc4", "ignore", "abc\u804a"),
(b"\xc1\x64", "strict", None),
)
class Test_GBK(multibytecodec_support.TestBase, unittest.TestCase):
encoding = 'gbk'
tstring = multibytecodec_support.load_teststring('gbk')
codectests = (
# invalid bytes
(b"abc\x80\x80\xc1\xc4", "strict", None),
(b"abc\xc8", "strict", None),
(b"abc\x80\x80\xc1\xc4", "replace", "abc\ufffd\ufffd\u804a"),
(b"abc\x80\x80\xc1\xc4\xc8", "replace", "abc\ufffd\ufffd\u804a\ufffd"),
(b"abc\x80\x80\xc1\xc4", "ignore", "abc\u804a"),
(b"\x83\x34\x83\x31", "strict", None),
("\u30fb", "strict", None),
)
class Test_GB18030(multibytecodec_support.TestBase, unittest.TestCase):
encoding = 'gb18030'
tstring = multibytecodec_support.load_teststring('gb18030')
codectests = (
# invalid bytes
(b"abc\x80\x80\xc1\xc4", "strict", None),
(b"abc\xc8", "strict", None),
(b"abc\x80\x80\xc1\xc4", "replace", "abc\ufffd\ufffd\u804a"),
(b"abc\x80\x80\xc1\xc4\xc8", "replace", "abc\ufffd\ufffd\u804a\ufffd"),
(b"abc\x80\x80\xc1\xc4", "ignore", "abc\u804a"),
(b"abc\x84\x39\x84\x39\xc1\xc4", "replace", "abc\ufffd9\ufffd9\u804a"),
("\u30fb", "strict", b"\x819\xa79"),
(b"abc\x84\x32\x80\x80def", "replace", 'abc\ufffd2\ufffd\ufffddef'),
(b"abc\x81\x30\x81\x30def", "strict", 'abc\x80def'),
(b"abc\x86\x30\x81\x30def", "replace", 'abc\ufffd0\ufffd0def'),
)
has_iso10646 = True
class Test_HZ(multibytecodec_support.TestBase, unittest.TestCase):
encoding = 'hz'
tstring = multibytecodec_support.load_teststring('hz')
codectests = (
# test '~\n' (3 lines)
(b'This sentence is in ASCII.\n'
b'The next sentence is in GB.~{<:Ky2;S{#,~}~\n'
b'~{NpJ)l6HK!#~}Bye.\n',
'strict',
'This sentence is in ASCII.\n'
'The next sentence is in GB.'
'\u5df1\u6240\u4e0d\u6b32\uff0c\u52ff\u65bd\u65bc\u4eba\u3002'
'Bye.\n'),
# test '~\n' (4 lines)
(b'This sentence is in ASCII.\n'
b'The next sentence is in GB.~\n'
b'~{<:Ky2;S{#,NpJ)l6HK!#~}~\n'
b'Bye.\n',
'strict',
'This sentence is in ASCII.\n'
'The next sentence is in GB.'
'\u5df1\u6240\u4e0d\u6b32\uff0c\u52ff\u65bd\u65bc\u4eba\u3002'
'Bye.\n'),
# invalid bytes
(b'ab~cd', 'replace', 'ab\uFFFDcd'),
(b'ab\xffcd', 'replace', 'ab\uFFFDcd'),
(b'ab~{\x81\x81\x41\x44~}cd', 'replace', 'ab\uFFFD\uFFFD\u804Acd'),
(b'ab~{\x41\x44~}cd', 'replace', 'ab\u804Acd'),
(b"ab~{\x79\x79\x41\x44~}cd", "replace", "ab\ufffd\ufffd\u804acd"),
)
def test_main():
support.run_unittest(__name__)
if __name__ == "__main__":
test_main()
>>>>>>> b875702c9c06ab5012e52ff4337439b03918f453
| ArcherSys/ArcherSys | Lib/test/test_codecencodings_cn.py | Python | mit | 10,529 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import io
import unittest
from contextlib import redirect_stdout
import airflow.cli.commands.version_command
from airflow.cli import cli_parser
from airflow.version import version
class TestCliVersion(unittest.TestCase):
    """Tests for the ``airflow version`` CLI command."""

    @classmethod
    def setUpClass(cls):
        # Build the argument parser once for all tests in this class.
        cls.parser = cli_parser.get_parser()

    def test_cli_version(self):
        """The command must print the current Airflow version to stdout."""
        captured = io.StringIO()
        with redirect_stdout(captured):
            airflow.cli.commands.version_command.version(
                self.parser.parse_args(['version'])
            )
        self.assertIn(version, captured.getvalue())
| DinoCow/airflow | tests/cli/commands/test_version_command.py | Python | apache-2.0 | 1,329 |
from __future__ import with_statement
import six
if six.PY3:
import unittest
else:
import unittest2 as unittest
from mock import Mock, patch
from twilio.rest.resources import AuthorizedConnectApps
from twilio.rest.resources import AuthorizedConnectApp
class AuthorizedConnectAppTest(unittest.TestCase):
    """Unit tests for the AuthorizedConnectApps REST resources.

    Uses ``assertEqual`` instead of the deprecated ``assertEquals`` alias,
    which was removed in Python 3.12.
    """
    def setUp(self):
        self.parent = Mock()
        self.uri = "/base"
        self.auth = ("AC123", "token")
        self.resource = AuthorizedConnectApps(self.uri, self.auth)

    @patch("twilio.rest.resources.base.make_twilio_request")
    def test_get(self, mock):
        """Fetching one app issues GET /AuthorizedConnectApps/<sid>."""
        mock.return_value = Mock()
        mock.return_value.content = '{"connect_app_sid": "SID"}'
        self.resource.get("SID")
        mock.assert_called_with("GET", "/base/AuthorizedConnectApps/SID",
                                auth=self.auth)

    @patch("twilio.rest.resources.base.make_twilio_request")
    def test_list(self, mock):
        """Listing apps issues GET /AuthorizedConnectApps with no params."""
        mock.return_value = Mock()
        mock.return_value.content = '{"authorized_connect_apps": []}'
        self.resource.list()
        mock.assert_called_with("GET", "/base/AuthorizedConnectApps",
                                params={}, auth=self.auth)

    def test_load(self):
        """load() maps API payload keys onto instance attributes."""
        instance = AuthorizedConnectApp(Mock(), "sid")
        instance.load({
            "connect_app_sid": "SID",
            "account_sid": "AC8dfe2f2358cf421cb6134cf6f217c6a3",
            "permissions": ["get-all"],
            "connect_app_friendly_name": "foo",
            "connect_app_description": "bat",
            "connect_app_company_name": "bar",
            "connect_app_homepage_url": "http://www.google.com",
            "uri": "/2010-04-01/Accounts/",
        })
        self.assertEqual(instance.permissions, ["get-all"])
        self.assertEqual(instance.sid, "SID")
        self.assertEqual(instance.friendly_name, "foo")
        self.assertEqual(instance.description, "bat")
        self.assertEqual(instance.homepage_url, "http://www.google.com")
        self.assertEqual(instance.company_name, "bar")

    def test_delete(self):
        # The resource is read-only; mutating operations must not exist.
        with self.assertRaises(AttributeError):
            self.resource.delete()

    def test_create(self):
        with self.assertRaises(AttributeError):
            self.resource.create()

    def test_update(self):
        with self.assertRaises(AttributeError):
            self.resource.update()
| clearcare/twilio-python | tests/test_authorized_connect_apps.py | Python | mit | 2,396 |
__author__ = 'sathley'
class ValidationError(Exception):
    """Raised when a client-side validation check fails.

    The offending value is kept on ``parameter``.
    """

    def __init__(self, value):
        self.parameter = value

    def __str__(self):
        return "{0!r}".format(self.parameter)
class UserAuthError(Exception):
    """Raised when user authentication fails.

    The failing value is kept on ``parameter``.
    """

    def __init__(self, value):
        self.parameter = value

    def __str__(self):
        return "{0!r}".format(self.parameter)
class AppacitiveError(Exception):
    """Raised when the Appacitive API reports a failure status.

    Attributes mirror the fields of the API ``status`` dict.  When a field
    is absent the attribute falls back to a default (0 for ``code``, [] for
    ``additional_messages``, None otherwise); with no ``status`` at all,
    every attribute is None.
    """
    def __init__(self, status=None):
        self.code = None
        self.message = None
        self.additional_messages = None
        self.reference_id = None
        self.version = None
        if status is not None:
            self.code = status.get('code', 0)
            self.message = status.get('message', None)
            self.additional_messages = status.get('additionalmessages', [])
            # Use .get() like the other fields so a status dict missing
            # 'referenceid' or 'version' no longer raises KeyError.
            self.reference_id = status.get('referenceid')
            self.version = status.get('version')
from __future__ import print_function
import numpy as np
import paralution_wrapper
import scipy.sparse
# Solve a symmetric 8 x 8 sparse system A x = b with Paralution, once in
# double and once in single precision.  The matrix is part of the examples
# which come with Intel's MKL library and is used here for historical
# reasons.  The two precision runs previously existed as two nearly
# identical copy-pasted sections; they are factored into one helper here.
#
# Upper triangle of A:
#  7.0, 1.0,           2.0, 7.0,
#      -4.0, 8.0,      2.0,
#       1.0,                     5.0,
#            7.0,      9.0,
#                 5.0, 1.0, 5.0,
#                     -1.0,      5.0,
#                          11.0,
#                                5.0


def _build_upper_triangle(dtype):
    """Return the dense 8 x 8 test matrix (upper triangle only) as *dtype*."""
    A = np.zeros((8, 8), dtype=dtype)
    A[0, 0] = 7.0
    A[0, 2] = 1.0
    A[0, 5] = 2.0
    A[0, 6] = 7.0
    A[1, 1] = -4.0
    A[1, 2] = 8.0
    A[1, 4] = 2.0
    A[2, 2] = 1.0
    A[2, 7] = 5.0
    A[3, 3] = 7.0
    A[3, 6] = 9.0
    A[4, 4] = 5.0
    A[4, 5] = 1.0
    A[4, 6] = 5.0
    A[5, 5] = -1.0
    A[5, 7] = 5.0
    A[6, 6] = 11.0
    A[7, 7] = 5.0
    return A


def _run_solver_test(label, dtype, max_iter):
    """Build the test system, solve it with Paralution and verify A x == b.

    label    -- precision label used in printed messages ("double"/"single")
    dtype    -- numpy element type (np.float64 or np.float32)
    max_iter -- maximum number of solver iterations
    """
    print("Test with %s precision" % label)
    A = _build_upper_triangle(dtype)
    print(A)

    # Dense matrix to sparse matrix in CSR format
    Acsr = scipy.sparse.csr_matrix(A)
    print("Sparse upper triangular CSR matrix:")
    print("values: ", Acsr.data)
    # Indices are 0 based
    print("index: ", Acsr.indices)
    print("pointer: ", Acsr.indptr)

    # Convert the upper triangular CSR matrix Acsr to 'full' CSR matrix
    Acsr_full = Acsr + Acsr.T - scipy.sparse.diags(Acsr.diagonal())
    print()
    print("Sparse 'full' CSR matrix:")
    print("values: ", Acsr_full.data)
    print("index: ", Acsr_full.indices)
    print("pointer: ", Acsr_full.indptr)

    x = np.zeros(8, dtype=dtype)  # initial guess for solution x
    b = np.ones(8, dtype=dtype)   # right hand side
    print("dtype x: ", x.dtype)
    print("dtype b: ", b.dtype)
    print("dtype A: ", Acsr_full.data.dtype)

    info = 1        # make Paralution more verbose
    abstol = 1e-6   # absolute convergence tolerance
    reltol = 5e-4   # relative convergence tolerance
    divtol = 1e5    # divergence tolerance
    paralution_wrapper.solution(Acsr_full.data, Acsr_full.indices,
                                Acsr_full.indptr, x, b, info, abstol,
                                reltol, divtol, max_iter)

    # Check solution x with the original dense matrix first: expand the
    # upper triangle to the full symmetric matrix before multiplying.
    y = (A + A.T - np.eye(A.shape[0]) * A.diagonal()).dot(x)
    assert (np.allclose(b, y, rtol=reltol))
    # ... and check it against the sparse 'full' matrix as well.
    y = Acsr_full.dot(x)
    assert (np.allclose(b, y, rtol=reltol))

    print("Solution %s x:" % label)
    print(x)
    print()
    print("A * x:")
    print(y)
    print("b:")
    print(b)
    print()


# Run at module level (as before) so the checks execute when the script
# is imported as well as when it is run directly.
_run_solver_test("double", np.float64, 10000)
_run_solver_test("single", np.float32, 11111)
| Kalle0x12/Test4 | csr_test.py | Python | gpl-3.0 | 4,420 |
import os
import re
__author__ = "Thierry Schellenbach"
__copyright__ = "Copyright 2014, Stream.io, Inc"
__credits__ = ["Thierry Schellenbach, mellowmorning.com, @tschellenbach"]
__license__ = "BSD-3-Clause"
__version__ = "5.1.1"
__maintainer__ = "Thierry Schellenbach"
__email__ = "support@getstream.io"
__status__ = "Production"
def connect(
    api_key=None,
    api_secret=None,
    app_id=None,
    version="v1.0",
    timeout=3.0,
    location=None,
    base_url=None,
):
    """
    Return a StreamClient.

    :param api_key: your api key or heroku url
    :param api_secret: the api secret
    :param app_id: the app id (used for listening to feed changes)
    """
    from stream.client import StreamClient

    heroku_url = os.environ.get("STREAM_URL")
    # support for the heroku STREAM_URL syntax
    if heroku_url and not api_key:
        heroku_pattern = re.compile(
            r"https\:\/\/(\w+)\:(\w+)\@([\w-]*).*\?app_id=(\d+)", re.IGNORECASE
        )
        match = heroku_pattern.match(heroku_url)
        if not (match and len(match.groups()) == 4):
            raise ValueError("Invalid api key or heroku url")
        api_key, api_secret, location, app_id = match.groups()
        # default regions map to "no explicit location"
        if location in ("getstream", "stream-io-api"):
            location = None
    return StreamClient(
        api_key,
        api_secret,
        app_id,
        version,
        timeout,
        location=location,
        base_url=base_url,
    )
| GetStream/stream-python | stream/__init__.py | Python | bsd-3-clause | 1,467 |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.6 on 2018-01-02 16:47
from __future__ import unicode_literals
from django.db import migrations
import oscar.models.fields
class Migration(migrations.Migration):
    """Auto-generated migration: make the cell-phone and employer-phone
    fields on all four credit-app models nullable/blank, and (re)declare
    the home-phone fields as PhoneNumberField.

    NOTE: auto-generated by ``makemigrations``; do not hand-edit the
    operations below.
    """
    dependencies = [
        ("wellsfargo", "0011_auto_20171204_1506"),
    ]
    # preserve_default=False: the blank/null default is used only for this
    # migration, not kept on the model field afterwards.
    operations = [
        migrations.AlterField(
            model_name="cacreditapp",
            name="main_cell_phone",
            field=oscar.models.fields.PhoneNumberField(
                blank=True, null=True, verbose_name="Cell Phone"
            ),
            preserve_default=False,
        ),
        migrations.AlterField(
            model_name="cacreditapp",
            name="main_employer_phone",
            field=oscar.models.fields.PhoneNumberField(
                blank=True, null=True, verbose_name="Employer Phone Number"
            ),
            preserve_default=False,
        ),
        migrations.AlterField(
            model_name="cacreditapp",
            name="main_home_phone",
            field=oscar.models.fields.PhoneNumberField(verbose_name="Home Phone"),
        ),
        migrations.AlterField(
            model_name="cajointcreditapp",
            name="joint_cell_phone",
            field=oscar.models.fields.PhoneNumberField(
                blank=True, null=True, verbose_name="Cell Phone"
            ),
            preserve_default=False,
        ),
        migrations.AlterField(
            model_name="cajointcreditapp",
            name="joint_employer_phone",
            field=oscar.models.fields.PhoneNumberField(
                blank=True, null=True, verbose_name="Employer Phone Number"
            ),
            preserve_default=False,
        ),
        migrations.AlterField(
            model_name="cajointcreditapp",
            name="main_cell_phone",
            field=oscar.models.fields.PhoneNumberField(
                blank=True, null=True, verbose_name="Cell Phone"
            ),
            preserve_default=False,
        ),
        migrations.AlterField(
            model_name="cajointcreditapp",
            name="main_employer_phone",
            field=oscar.models.fields.PhoneNumberField(
                blank=True, null=True, verbose_name="Employer Phone Number"
            ),
            preserve_default=False,
        ),
        migrations.AlterField(
            model_name="cajointcreditapp",
            name="main_home_phone",
            field=oscar.models.fields.PhoneNumberField(verbose_name="Home Phone"),
        ),
        migrations.AlterField(
            model_name="uscreditapp",
            name="main_cell_phone",
            field=oscar.models.fields.PhoneNumberField(
                blank=True, null=True, verbose_name="Cell Phone"
            ),
            preserve_default=False,
        ),
        migrations.AlterField(
            model_name="uscreditapp",
            name="main_employer_phone",
            field=oscar.models.fields.PhoneNumberField(
                blank=True, null=True, verbose_name="Employer Phone Number"
            ),
            preserve_default=False,
        ),
        migrations.AlterField(
            model_name="uscreditapp",
            name="main_home_phone",
            field=oscar.models.fields.PhoneNumberField(verbose_name="Home Phone"),
        ),
        migrations.AlterField(
            model_name="usjointcreditapp",
            name="joint_cell_phone",
            field=oscar.models.fields.PhoneNumberField(
                blank=True, null=True, verbose_name="Cell Phone"
            ),
            preserve_default=False,
        ),
        migrations.AlterField(
            model_name="usjointcreditapp",
            name="joint_employer_phone",
            field=oscar.models.fields.PhoneNumberField(
                blank=True, null=True, verbose_name="Employer Phone Number"
            ),
            preserve_default=False,
        ),
        migrations.AlterField(
            model_name="usjointcreditapp",
            name="main_cell_phone",
            field=oscar.models.fields.PhoneNumberField(
                blank=True, null=True, verbose_name="Cell Phone"
            ),
            preserve_default=False,
        ),
        migrations.AlterField(
            model_name="usjointcreditapp",
            name="main_employer_phone",
            field=oscar.models.fields.PhoneNumberField(
                blank=True, null=True, verbose_name="Employer Phone Number"
            ),
            preserve_default=False,
        ),
        migrations.AlterField(
            model_name="usjointcreditapp",
            name="main_home_phone",
            field=oscar.models.fields.PhoneNumberField(verbose_name="Home Phone"),
        ),
    ]
| thelabnyc/django-oscar-wfrs | src/wellsfargo/migrations/0012_auto_20180102_1147.py | Python | isc | 4,716 |
import time
import copy
import logging
from collections import defaultdict
from ..entity import entity_method
from .. import async, events, actions
log = logging.getLogger(__name__)
def setdefaultattr(obj, name, default_value):
    """Like ``dict.setdefault`` but for an object's *instance* attributes.

    Only the instance ``__dict__`` is consulted (class attributes are
    ignored).  Returns the existing value, or stores and returns
    *default_value* when the attribute is not yet set on the instance.
    """
    instance_vars = obj.__dict__
    if name not in instance_vars:
        instance_vars[name] = default_value
    return instance_vars[name]
class AppliedAura:
    """One aura instance currently applied to a target entity.

    Wraps the static aura definition with per-application state: scaled
    modifiers, an optional periodic effect, and the scheduling handle used
    to cancel it.  NOTE(review): the module imports a sibling named
    ``async`` (a reserved word from Python 3.7), so this code appears to
    target Python <= 3.6.
    """
    def __init__(self, actor, target, aura):
        self.target = target
        self.aura = aura
        # (aura path, actor path) uniquely identifies this application so a
        # re-application by the same actor replaces the old instance.
        self.key = aura.path, actor.path
        self.stop_event = None
        aura_props = aura.action.aura
        # Strength scale: actor's boosting modifier minus the target's
        # resisting modifier, around a baseline of 1.0.
        actor_modifier = actor.get_modifier(aura_props.actor_modifier, 0)
        target_modifier = target.get_modifier(aura_props.target_modifier, 0)
        scale = 1. + actor_modifier - target_modifier
        self.modifiers = {k: v * scale for k, v in aura_props.modifiers.items()}
        if aura_props.effect is not None:
            # Periodic ("ticking") aura: copy the effect so scaling does not
            # mutate the shared definition; total duration is the sum of
            # the tick delays.
            self.effect = copy.copy(aura_props.effect)
            self.effect.amount *= scale
            self.duration = sum(delay for delay, _ in self.effect.ticks)
        else:
            # Pure stat-modifier aura with a fixed duration.
            self.effect = None
            self.duration = aura_props.duration
        self.end_time = time.time() + self.duration
    @property
    def display(self):
        # Human-readable name delegated to the aura definition.
        return self.aura.display
    def start(self):
        """Schedule the aura's ticks (or its single expiry callback)."""
        if self.effect:
            self.stop_event = async.call_sequence(
                1,
                [async.delayed(delay, self.tick, tick_index)
                 for tick_index, (delay, _) in enumerate(self.effect.ticks)])
        else:
            self.stop_event = async.call_later(self.duration, self.end)
    def tick(self, tick_index):
        """Fire one periodic tick; the last tick also ends the aura."""
        events.publish('do_tick_aura', target=self.target, aura=self)
        if tick_index == len(self.effect.ticks) - 1:
            self.end()
    def end(self):
        """Ask the action system to remove this aura from the target."""
        actions.do(self.target, 'remove_aura', target=self.target, aura=self)
    def stop(self):
        """Cancel any pending scheduled ticks/expiry."""
        self.stop_event.stop()
@events.subscriber
def do_apply_aura(event):
    """Apply ``event.aura`` from ``event.actor`` to ``event.target``.

    Replaces any existing application with the same (aura, actor) key,
    starts the new aura's schedule and publishes it back on the event.
    """
    actor = event.actor
    target = event.target
    aura = event.aura
    try:
        combatant = target.combatant
    except AttributeError:
        # Narrowed from a bare ``except``: a target without a combatant
        # component simply cannot hold auras; reject the event.
        return False
    auras = setdefaultattr(combatant, '_auras', {})
    applied_aura = AppliedAura(actor, target, aura)
    old_aura = auras.pop(applied_aura.key, None)
    if old_aura is not None:
        # Re-application: cancel the previous instance's schedule first.
        old_aura.stop()
    auras[applied_aura.key] = applied_aura
    applied_aura.start()
    event.aura = applied_aura
@events.subscriber
def do_remove_aura(event):
    """Remove ``event.aura`` from ``event.target`` and cancel its schedule.

    Raises KeyError (via dict.pop) if the aura is not currently applied.
    """
    target = event.target
    aura = event.aura
    try:
        combatant = target.combatant
    except AttributeError:
        # Narrowed from a bare ``except``: a target without a combatant
        # component has nothing to remove.
        return None
    auras = setdefaultattr(combatant, '_auras', {})
    auras.pop(aura.key).stop()
@entity_method
def get_auras(self):
    """Return the auras currently applied to this entity.

    Returns an empty tuple when the entity has no combatant component or
    no auras have been applied yet (narrowed from a bare ``except`` to
    AttributeError, which is what a missing attribute raises).
    """
    try:
        return self.combatant._auras.values()
    except AttributeError:
        return ()
@entity_method
def get_aura_modifiers(self):
    """Combine the modifiers of all active auras.

    For each modifier name, the strongest (maximum) value among the
    entity's auras wins; absent names default to 0.0.
    """
    combined = defaultdict(float)
    for applied in self.get_auras():
        for name, amount in applied.modifiers.items():
            if amount > combined[name]:
                combined[name] = amount
    return combined
# -*- coding: utf-8 -*-
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2003-2007 Donald N. Allingham
# Copyright (C) 2008-2010 Brian G. Matherly
# Copyright (C) 2007-2010 Jerome Rapinat
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# $Id$
#
#-------------------------------------------------------------------------
"""
French-specific classes for relationships.
"""
#-------------------------------------------------------------------------
#
# GRAMPS modules
#
#-------------------------------------------------------------------------
from gramps.gen.lib import Person
import gramps.gen.relationship
#-------------------------------------------------------------------------
# level is used for generation level:
# at %th generation
# 30 feminine French ordinals ("première" .. "trentième"); indexed with
# [level] or [level - 1] by the relationship helpers in this module.
_LEVEL_NAME = [
    u"première",
    u"deuxième",
    u"troisième",
    u"quatrième",
    u"cinquième",
    u"sixième",
    u"septième",
    u"huitième",
    u"neuvième",
    u"dixième",
    u"onzième",
    u"douzième",
    u"treizième",
    u"quatorzième",
    u"quinzième",
    u"seizième",
    u"dix-septième",
    u"dix-huitième",
    u"dix-neuvième",
    u"vingtième",
    u"vingt-et-unième",
    u"vingt-deuxième",
    u"vingt-troisième",
    u"vingt-quatrième",
    u"vingt-cinquième",
    u"vingt-sixième",
    u"vingt-septième",
    u"vingt-huitième",
    u"vingt-neuvième",
    u"trentième",
    ]
# for degree (canon et civil), limitation 20+20 also used for
# the first [premier] cousin
# 41 masculine French ordinals ("premier" .. "quarante-et-unième").
# Fixed typo in the last entry: "quanrante-et-unième" -> "quarante-et-unième".
_REMOVED_LEVEL = [
    u"premier",
    u"deuxième",
    u"troisième",
    u"quatrième",
    u"cinquième",
    u"sixième",
    u"septième",
    u"huitième",
    u"neuvième",
    u"dixième",
    u"onzième",
    u"douzième",
    u"treizième",
    u"quatorzième",
    u"quinzième",
    u"seizième",
    u"dix-septième",
    u"dix-huitième",
    u"dix-neuvième",
    u"vingtième",
    u"vingt-et-unième",
    u"vingt-deuxième",
    u"vingt-troisième",
    u"vingt-quatrième",
    u"vingt-cinquième",
    u"vingt-sixième",
    u"vingt-septième",
    u"vingt-huitième",
    u"vingt-neuvième",
    u"trentième",
    u"trente-et-unième",
    u"trente-deuxième",
    u"trente-troisième",
    u"trente-quatrième",
    u"trente-cinquième",
    u"trente-sixième",
    u"trente-septième",
    u"trente-huitième",
    u"trente-neuvième",
    u"quarantième",
    u"quarante-et-unième",
    ]
# small lists, use generation level if > [5]
# Each "%s" receives the optional in-law suffix (e.g. " (par alliance)").
_FATHER_LEVEL = [u"", u"le père%s", u"le grand-père%s",
                 u"l'arrière-grand-père%s", u"le trisaïeul%s"]
_MOTHER_LEVEL = [u"", u"la mère%s", u"la grand-mère%s",
                 u"l'arrière-grand-mère%s", u"la trisaïeule%s"]
_SON_LEVEL = [u"", u"le fils%s", u"le petit-fils%s",
              u"l'arrière-petit-fils%s"]
_DAUGHTER_LEVEL = [u"", u"la fille%s", u"la petite-fille%s",
                   u"l'arrière-petite-fille%s"]
_SISTER_LEVEL = [u"", u"la sœur%s", u"la tante%s", u"la grand-tante%s",
                 u"l'arrière-grand-tante%s"]
_BROTHER_LEVEL = [u"", u"le frère%s", u"l'oncle%s", u"le grand-oncle%s",
                  u"l'arrière-grand-oncle%s"]
_NEPHEW_LEVEL = [u"", u"le neveu%s", u"le petit-neveu%s",
                 u"l'arrière-petit-neveu%s"]
_NIECE_LEVEL = [u"", u"la nièce%s", u"la petite-nièce%s",
                u"l'arrière-petite-nièce%s"]
# kinship report
# Plural/collective forms used by get_plural_relationship_string().
_PARENTS_LEVEL = [u"", u"les parents", u"les grands-parents",
                  u"les arrières-grands-parents", u"les trisaïeux"]
_CHILDREN_LEVEL = [u"", u"les enfants", u"les petits-enfants",
                   u"les arrières-petits-enfants",
                   u"les arrières-arrières-petits-enfants"]
_SIBLINGS_LEVEL = [u"", u"les frères et les sœurs",
                   u"les oncles et les tantes",
                   u"les grands-oncles et les grands-tantes",
                   u"les arrières-grands-oncles et les arrières-grands-tantes"]
_NEPHEWS_NIECES_LEVEL = [u"", u"les neveux et les nièces",
                         u"les petits-neveux et les petites-nièces",
                         u"les arrière-petits-neveux et les arrières-petites-nièces"]
#-------------------------------------------------------------------------
#
#
#
#-------------------------------------------------------------------------
# from active person to common ancestor Ga=[level]
def get_cousin(level, removed, inlaw=""):
    """
    cousins = same level, gender = male
    """
    if removed == 0 and level < len(_LEVEL_NAME):
        return "le %s cousin%s" % (_REMOVED_LEVEL[level - 1], inlaw)
    elif level < removed:
        # BUGFIX: the original called get_uncle() without returning its
        # result, so this branch implicitly returned None.
        return get_uncle(level - 1, inlaw)
    elif level < 30:
        # limitation gen = 30
        return u"le cousin lointain, relié à la %s génération" % \
            _LEVEL_NAME[removed]
    else:
        # use numerical generation
        return u"le cousin lointain, relié à la %dème génération" % \
            (level + 1)
def get_cousine(level, removed, inlaw=""):
    """
    cousines = same level, gender = female
    """
    if removed == 0 and level < len(_LEVEL_NAME):
        return "la %s cousine%s" % (_LEVEL_NAME[level - 1], inlaw)
    elif level < removed:
        # BUGFIX: the original called get_aunt() without returning its
        # result, so this branch implicitly returned None.
        return get_aunt(level - 1, inlaw)
    elif level < 30:
        # limitation gen = 30
        return u"la cousine lointaine, reliée à la %s génération" % \
            _LEVEL_NAME[removed]
    else:
        # use numerical generation
        return u"la cousine lointaine, reliée à la %dème génération" % \
            (level + 1)
def get_parents(level):
    """Collective noun for the ancestors *level* generations up."""
    if level <= len(_PARENTS_LEVEL) - 1:
        return _PARENTS_LEVEL[level]
    if level < 30:
        # spelled-out generation name (limited to 30 generations)
        return u"les ascendants lointains, à la %s génération" % \
            _LEVEL_NAME[level]
    # fall back to a numerical generation
    return u"les ascendants lointains, à la %dème génération" % level
def get_father(level, inlaw=""):
    """Ancestor *level* generations up, gender = male."""
    if level <= len(_FATHER_LEVEL) - 1:
        return _FATHER_LEVEL[level] % inlaw
    if level < 30:
        # spelled-out generation name (limited to 30 generations)
        return u"l'ascendant lointain, à la %s génération" % \
            _LEVEL_NAME[level]
    # fall back to a numerical generation
    return u"l'ascendant lointain, à la %dème génération" % level
def get_mother(level, inlaw=""):
    """Ancestor *level* generations up, gender = female."""
    if level <= len(_MOTHER_LEVEL) - 1:
        return _MOTHER_LEVEL[level] % inlaw
    if level < 30:
        # spelled-out generation name (limited to 30 generations)
        return u"l'ascendante lointaine, à la %s génération" % \
            _LEVEL_NAME[level]
    # fall back to a numerical generation
    return u"l'ascendante lointaine, à la %dème génération" % level
def get_parent_unknown(level, inlaw=""):
    """
    Parent/ancestor of unknown gender at *level* generations up.
    """
    # NOTE(review): len(_LEVEL_NAME) is 30, so the condition
    # "level > 29 and level < 30" can never hold for an integer level;
    # this first branch appears unreachable -- confirm the intended bound.
    if level > len(_LEVEL_NAME) - 1 and level < 30:
        # limitation gen = 30
        return u"l'ascendant lointain, à la %s génération" % \
            _LEVEL_NAME[level]
    elif level == 1:
        return u"un parent%s" % inlaw
    else:
        return u"un parent lointain%s" % inlaw
def get_son(level, inlaw=""):
    """Descendant *level* generations down, gender = male."""
    if level <= len(_SON_LEVEL) - 1:
        return _SON_LEVEL[level] % inlaw
    if level < 30:
        # spelled-out generation name (limited to 30 generations)
        return u"le descendant lointain, à la %s génération" % \
            _LEVEL_NAME[level + 1]
    # fall back to a numerical generation
    return u"le descendant lointain, à la %dème génération" % level
def get_daughter(level, inlaw=""):
    """Descendant *level* generations down, gender = female."""
    if level <= len(_DAUGHTER_LEVEL) - 1:
        return _DAUGHTER_LEVEL[level] % inlaw
    if level < 30:
        # spelled-out generation name (limited to 30 generations)
        return u"la descendante lointaine, à la %s génération" % \
            _LEVEL_NAME[level + 1]
    # fall back to a numerical generation
    return u"la descendante lointaine, à la %dème génération" % level
def get_child_unknown(level, inlaw=""):
    """
    Descendant of unknown gender at *level* generations down.
    """
    # NOTE(review): len(_LEVEL_NAME) is 30, so the condition
    # "level > 29 and level < 30" can never hold for an integer level;
    # this first branch appears unreachable (and _LEVEL_NAME[level + 1]
    # would be out of range anyway) -- confirm the intended bound.
    if level > len(_LEVEL_NAME) - 1 and level < 30:
        # limitation gen = 30
        return u"le descendant lointain, à la %s génération" % \
            _LEVEL_NAME[level + 1]
    elif level == 1:
        return u"un enfant%s" % inlaw
    else:
        return u"un descendant lointain%s" % inlaw
def get_sibling_unknown(inlaw=""):
    """Sibling of an ancestor, gender unknown (generic distant relative)."""
    return u"un parent lointain" + inlaw
def get_uncle(level, inlaw=""):
    """Sibling of an ancestor, gender = male."""
    if level <= len(_BROTHER_LEVEL) - 1:
        return _BROTHER_LEVEL[level] % inlaw
    if level < 30:
        # spelled-out generation name (limited to 30 generations)
        return u"l'oncle lointain, relié à la %s génération" % \
            _LEVEL_NAME[level]
    # fall back to a numerical generation
    return u"l'oncle lointain, relié à la %dème génération" % \
        (level + 1)
def get_aunt(level, inlaw=""):
    """Sibling of an ancestor, gender = female."""
    if level <= len(_SISTER_LEVEL) - 1:
        return _SISTER_LEVEL[level] % inlaw
    if level < 30:
        # spelled-out generation name (limited to 30 generations)
        return u"la tante lointaine, reliée à la %s génération" % \
            _LEVEL_NAME[level]
    # fall back to a numerical generation
    return u"la tante lointaine, reliée à la %dème génération" % \
        (level + 1)
def get_nephew(level, inlaw=""):
    """Cousin of a descendant, gender = male."""
    if level <= len(_NEPHEW_LEVEL) - 1:
        return _NEPHEW_LEVEL[level] % inlaw
    if level < 30:
        # spelled-out generation name (limited to 30 generations)
        return u"le neveu lointain, à la %s génération" % _LEVEL_NAME[level]
    # fall back to a numerical generation
    return u"le neveu lointain, à la %dème génération" % (level + 1)
def get_niece(level, inlaw=""):
    """
    cousin of a descendant, gender = female

    Female counterpart of get_nephew(): returns the French label for a
    niece-type relation *level* generations down, with *inlaw*
    substituted into the _NIECE_LEVEL templates for close relations.
    """
    if level > len(_NIECE_LEVEL) - 1 and level < 30:
        # limitation gen = 30
        return u"la nièce lointaine, à la %s génération" % \
            _LEVEL_NAME[level]
    elif level > len(_NIECE_LEVEL) - 1:
        # use numerical generation
        return u"la nièce lointaine, à la %dème génération" % (level + 1)
    else:
        return _NIECE_LEVEL[level] % inlaw
class RelationshipCalculator(gramps.gen.relationship.RelationshipCalculator):
    """
    RelationshipCalculator Class

    French localization of the Gramps relationship calculator.  Builds
    French relationship strings from the generation distances ``Ga``
    (person A to the common ancestor) and ``Gb`` (person B to the
    common ancestor), delegating to the module-level get_*() helpers.
    """

    # suffix appended when the relationship goes through a marriage
    INLAW = u' (par alliance)'

    def __init__(self):
        gramps.gen.relationship.RelationshipCalculator.__init__(self)

    # kinship report
    def get_plural_relationship_string(self, Ga, Gb,
                                       reltocommon_a='', reltocommon_b='',
                                       only_birth=True,
                                       in_law_a=False, in_law_b=False):
        """
        voir relationship.py

        Return the plural French label for the whole group of people at
        generation distance (Ga, Gb) from the common ancestor, e.g.
        "les descendants", "les oncles et tantes ...".
        """
        rel_str = u"des parents lointains"
        atgen = u" à la %sème génération"
        bygen = u" par la %sème génération"
        cmt = u" (frères ou sœurs d'un ascendant" + atgen % Ga + ")"
        if Ga == 0:
            # These are descendants
            if Gb < len(_CHILDREN_LEVEL):
                rel_str = _CHILDREN_LEVEL[Gb]
            else:
                rel_str = u"les descendants" + atgen % (Gb + 1)
        elif Gb == 0:
            # These are parents/grand parents
            if Ga < len(_PARENTS_LEVEL):
                rel_str = _PARENTS_LEVEL[Ga]
            else:
                rel_str = u"les ascendants" + atgen % (Ga + 1)
        elif Gb == 1:
            # These are siblings/aunts/uncles
            if Ga < len(_SIBLINGS_LEVEL):
                rel_str = _SIBLINGS_LEVEL[Ga]
            else:
                rel_str = u"les enfants d'un ascendant" + atgen % (Ga + 1) + \
                    cmt
        elif Ga == 1:
            # These are nieces/nephews
            if Gb < len(_NEPHEWS_NIECES_LEVEL):
                rel_str = _NEPHEWS_NIECES_LEVEL[Gb - 1]
            else:
                rel_str = u"les neveux et les nièces" + atgen % Gb
        elif Ga > 1 and Ga == Gb:
            # These are cousins in the same generation
            # use custom level for latin words
            if Ga == 2:
                rel_str = u"les cousins germains et cousines germaines"
            elif Ga <= len(_LEVEL_NAME):
                # %ss for plural
                rel_str = u"les %ss cousins et cousines" % _LEVEL_NAME[Ga - 2]
            else:
                # security
                rel_str = u"les cousins et cousines"
        elif Ga > 1 and Ga > Gb:
            # These are cousins in different generations with the second person
            # being in a higher generation from the common ancestor than the
            # first person.
            # use custom level for latin words and specific relation
            if Ga == 3 and Gb == 2:
                desc = u" (cousins germains d'un parent)"
                rel_str = u"les oncles et tantes à la mode de Bretagne" + \
                    desc
            elif Gb <= len(_LEVEL_NAME) and Ga - Gb < len(_REMOVED_LEVEL) and \
                    Ga + Gb + 1 < len(_REMOVED_LEVEL):
                # canon degree = generations on the longer branch;
                # civil degree = sum of both branches
                can = u" du %s au %s degré (canon)" % (_REMOVED_LEVEL[Gb],
                                                      _REMOVED_LEVEL[Ga])
                civ = u" et au %s degré (civil)" % _REMOVED_LEVEL[Ga + Gb + 1]
                rel_str = u"les oncles et tantes" + can + civ
            elif Ga < len(_LEVEL_NAME):
                rel_str = u"les grands-oncles et grands-tantes" + bygen % \
                    (Ga + 1)
        elif Gb > 1 and Gb > Ga:
            # These are cousins in different generations with the second person
            # being in a lower generation from the common ancestor than the
            # first person.
            # use custom level for latin words and specific relation
            if Ga == 2 and Gb == 3:
                info = u" (cousins issus d'un germain)"
                rel_str = u"les neveux et nièces à la mode de Bretagne" + \
                    info
            elif Ga <= len(_LEVEL_NAME) and Gb - Ga < len(_REMOVED_LEVEL) and \
                    Ga + Gb + 1 < len(_REMOVED_LEVEL):
                can = u" du %s au %s degré (canon)" % (_REMOVED_LEVEL[Gb],
                                                      _REMOVED_LEVEL[Ga])
                civ = u" et au %s degré (civil)" % _REMOVED_LEVEL[Ga + Gb + 1]
                rel_str = u"les neveux et nièces" + can + civ
            elif Ga < len(_LEVEL_NAME):
                rel_str = u"les neveux et nièces" + bygen % Gb
        if in_law_b:
            rel_str = u"les conjoints pour %s" % rel_str
        return rel_str

    # quick report (missing on RelCalc tool - Status Bar)
    def get_single_relationship_string(
        self,
        Ga,
        Gb,
        gender_a,
        gender_b,
        reltocommon_a,
        reltocommon_b,
        only_birth=True,
        in_law_a=False,
        in_law_b=False,
        ):
        """
        voir relationship.py

        Return the singular French label for person B relative to person
        A, given the generation distances (Ga, Gb) and B's gender.
        """
        if only_birth:
            step = ""
        else:
            step = self.STEP
        if in_law_a or in_law_b:
            inlaw = self.INLAW
        else:
            inlaw = u""
        # BUG FIX: was u"un parent lointains%s" -- grammatically wrong
        # ("un" is singular, "lointains" plural) in a user-visible string.
        rel_str = u"un parent lointain%s" % inlaw
        bygen = u" par la %sème génération"
        if Ga == 0:
            # b is descendant of a
            if Gb == 0:
                rel_str = u"le même individu"
            elif gender_b == Person.MALE and Gb < len(_SON_LEVEL):
                # spouse of daughter
                if inlaw and Gb == 1 and not step:
                    rel_str = u"le gendre"
                else:
                    rel_str = get_son(Gb)
            elif gender_b == Person.FEMALE and Gb < len(_DAUGHTER_LEVEL):
                # spouse of son
                if inlaw and Gb == 1 and not step:
                    rel_str = u"la bru"
                else:
                    rel_str = get_daughter(Gb)
            elif Gb < len(_LEVEL_NAME) and gender_b == Person.MALE:
                # don't display inlaw
                rel_str = u"le descendant lointain (%dème génération)" % \
                    (Gb + 1)
            elif Gb < len(_LEVEL_NAME) and gender_b == Person.FEMALE:
                rel_str = u"la descendante lointaine (%dème génération)" % \
                    (Gb + 1)
            else:
                return get_child_unknown(Gb)
        elif Gb == 0:
            # b is parents/grand parent of a
            if gender_b == Person.MALE and Ga < len(_FATHER_LEVEL):
                # other spouse of father (new parent)
                # NOTE(review): self.STEP_SIB is a class constant; if it is
                # always truthy the next elif is unreachable -- confirm
                # against the base class.
                if Ga == 1 and inlaw and self.STEP_SIB:
                    rel_str = u"le beau-père"
                elif Ga == 1 and inlaw:
                    # father of spouse (family of spouse)
                    rel_str = u"le père du conjoint"
                else:
                    rel_str = get_father(Ga, inlaw)
            elif gender_b == Person.FEMALE and Ga < len(_MOTHER_LEVEL):
                # other spouse of mother (new parent)
                if Ga == 1 and inlaw and self.STEP_SIB:
                    rel_str = u"la belle-mère"
                elif Ga == 1 and inlaw:
                    # mother of spouse (family of spouse)
                    rel_str = u"la mère du conjoint"
                else:
                    rel_str = get_mother(Ga, inlaw)
            elif Ga < len(_LEVEL_NAME) and gender_b == Person.MALE:
                rel_str = u"l'ascendant lointain%s (%dème génération)" % \
                    (inlaw, Ga + 1)
            elif Ga < len(_LEVEL_NAME) and gender_b == Person.FEMALE:
                rel_str = u"l'ascendante lointaine%s (%dème génération)" % \
                    (inlaw, Ga + 1)
            else:
                return get_parent_unknown(Ga, inlaw)
        elif Gb == 1:
            # b is sibling/aunt/uncle of a
            if gender_b == Person.MALE and Ga < len(_BROTHER_LEVEL):
                rel_str = get_uncle(Ga, inlaw)
            elif gender_b == Person.FEMALE and Ga < len(_SISTER_LEVEL):
                rel_str = get_aunt(Ga, inlaw)
            else:
                # don't display inlaw
                if gender_b == Person.MALE:
                    rel_str = u"l'oncle lointain" + bygen % (Ga + 1)
                elif gender_b == Person.FEMALE:
                    rel_str = u"la tante lointaine" + bygen % (Ga + 1)
                elif gender_b == Person.UNKNOWN:
                    rel_str = get_sibling_unknown(inlaw)
                else:
                    return rel_str
        elif Ga == 1:
            # b is niece/nephew of a
            if gender_b == Person.MALE and Gb < len(_NEPHEW_LEVEL):
                rel_str = get_nephew(Gb - 1, inlaw)
            elif gender_b == Person.FEMALE and Gb < len(_NIECE_LEVEL):
                rel_str = get_niece(Gb - 1, inlaw)
            else:
                if gender_b == Person.MALE:
                    rel_str = u"le neveu lointain%s (%dème génération)" % \
                        (inlaw, Gb)
                elif gender_b == Person.FEMALE:
                    rel_str = u"la nièce lointaine%s (%dème génération)" % \
                        (inlaw, Gb)
                elif gender_b == Person.UNKNOWN:
                    rel_str = get_sibling_unknown(inlaw)
                else:
                    return rel_str
        elif Ga == Gb:
            # a and b cousins in the same generation
            if gender_b == Person.MALE:
                rel_str = get_cousin(Ga - 1, 0, inlaw=inlaw)
            elif gender_b == Person.FEMALE:
                rel_str = get_cousine(Ga - 1, 0, inlaw=inlaw)
            elif gender_b == Person.UNKNOWN:
                rel_str = get_sibling_unknown(inlaw)
            else:
                return rel_str
        elif Ga > 1 and Ga > Gb:
            # These are cousins in different generations with the second person
            # being in a higher generation from the common ancestor than the
            # first person.
            if Ga == 3 and Gb == 2:
                if gender_b == Person.MALE:
                    desc = u" (cousin germain d'un parent)"
                    rel_str = u"l'oncle à la mode de Bretagne" + desc
                elif gender_b == Person.FEMALE:
                    desc = u" (cousine germaine d'un parent)"
                    rel_str = u"la tante à la mode de Bretagne" + desc
                elif gender_b == Person.UNKNOWN:
                    # BUG FIX: get_sibling_unknown() takes a single inlaw
                    # argument; the extra Ga argument raised TypeError.
                    return get_sibling_unknown(inlaw)
                else:
                    return rel_str
            elif Gb <= len(_LEVEL_NAME) and Ga - Gb < len(_REMOVED_LEVEL) and \
                    Ga + Gb + 1 < len(_REMOVED_LEVEL):
                can = u" du %s au %s degré (canon)" % (_REMOVED_LEVEL[Gb],
                                                      _REMOVED_LEVEL[Ga])
                civ = u" et au %s degré (civil)" % _REMOVED_LEVEL[Ga + Gb + 1]
                if gender_b == Person.MALE:
                    rel_str = u"l'oncle" + can + civ
                elif gender_b == Person.FEMALE:
                    rel_str = u"la tante" + can + civ
                elif gender_b == Person.UNKNOWN:
                    # BUG FIX: drop the spurious Ga argument (TypeError).
                    rel_str = get_sibling_unknown(inlaw)
                else:
                    return rel_str
            else:
                if gender_b == Person.MALE:
                    rel_str = get_uncle(Ga, inlaw)
                elif gender_b == Person.FEMALE:
                    rel_str = get_aunt(Ga, inlaw)
                elif gender_b == Person.UNKNOWN:
                    # BUG FIX: drop the spurious Ga argument (TypeError).
                    rel_str = get_sibling_unknown(inlaw)
                else:
                    return rel_str
        elif Gb > 1 and Gb > Ga:
            # These are cousins in different generations with the second person
            # being in a lower generation from the common ancestor than the
            # first person.
            if Ga == 2 and Gb == 3:
                info = u" (cousins issus d'un germain)"
                if gender_b == Person.MALE:
                    rel_str = u"le neveu à la mode de Bretagne" + info
                elif gender_b == Person.FEMALE:
                    rel_str = u"la nièce à la mode de Bretagne" + info
                elif gender_b == Person.UNKNOWN:
                    # BUG FIX: drop the spurious Ga argument (TypeError).
                    rel_str = get_sibling_unknown(inlaw)
                else:
                    return rel_str
            elif Ga <= len(_LEVEL_NAME) and Gb - Ga < len(_REMOVED_LEVEL) and \
                    Ga + Gb + 1 < len(_REMOVED_LEVEL):
                can = u" du %s au %s degré (canon)" % (_REMOVED_LEVEL[Gb],
                                                      _REMOVED_LEVEL[Ga])
                civ = u" et au %s degré (civil)" % _REMOVED_LEVEL[Ga + Gb + 1]
                if gender_b == Person.MALE:
                    rel_str = u"le neveu" + can + civ
                # BUG FIX: was a bare "if", which broke the elif chain and
                # made the MALE case fall into the final "else: return".
                elif gender_b == Person.FEMALE:
                    rel_str = u"la nièce" + can + civ
                elif gender_b == Person.UNKNOWN:
                    # BUG FIX: drop the spurious Ga argument (TypeError).
                    rel_str = get_sibling_unknown(inlaw)
                else:
                    return rel_str
            elif Ga > len(_LEVEL_NAME):
                return rel_str
            else:
                # NOTE(review): get_nephew/get_niece are called with Ga
                # here while the close-relation branch above uses Gb - 1;
                # left unchanged -- confirm the intended level.
                if gender_b == Person.MALE:
                    rel_str = get_nephew(Ga, inlaw)
                elif gender_b == Person.FEMALE:
                    rel_str = get_niece(Ga, inlaw)
                elif gender_b == Person.UNKNOWN:
                    # BUG FIX: drop the spurious Ga argument (TypeError).
                    rel_str = get_sibling_unknown(inlaw)
                else:
                    return rel_str
        return rel_str

    # RelCalc tool - Status Bar
    def get_sibling_relationship_string(self, sib_type, gender_a,
                                        gender_b, in_law_a=False, in_law_b=False):
        """
        voir relationship.py

        Return the French label for a sibling relationship of the given
        sib_type (full, unknown, half by mother/father, step), taking
        B's gender and the in-law flags into account.
        """
        if in_law_a or in_law_b:
            inlaw = self.INLAW
        else:
            inlaw = u""
        if sib_type == self.NORM_SIB:
            if not inlaw:
                if gender_b == Person.MALE:
                    rel_str = u"le frère (germain)"
                elif gender_b == Person.FEMALE:
                    rel_str = u"la sœur (germaine)"
                else:
                    rel_str = u"le frère ou la sœur germain(e)"
            else:
                if gender_b == Person.MALE:
                    rel_str = u"le beau-frère"
                elif gender_b == Person.FEMALE:
                    rel_str = u"la belle-sœur"
                else:
                    rel_str = u"le beau-frère ou la belle-sœur"
        elif sib_type == self.UNKNOWN_SIB:
            if not inlaw:
                if gender_b == Person.MALE:
                    rel_str = u"le frère"
                elif gender_b == Person.FEMALE:
                    rel_str = u"la sœur"
                else:
                    rel_str = u"le frère ou la sœur"
            else:
                if gender_b == Person.MALE:
                    rel_str = u"le beau-frère"
                elif gender_b == Person.FEMALE:
                    rel_str = u"la belle-sœur"
                else:
                    rel_str = u"le beau-frère ou la belle-sœur"
        elif sib_type == self.HALF_SIB_MOTHER:
            # for descendants the "half" logic is reversed !
            if gender_b == Person.MALE:
                rel_str = u"le demi-frère consanguin"
            elif gender_b == Person.FEMALE:
                rel_str = u"la demi-sœur consanguine"
            else:
                rel_str = u"le demi-frère ou la demi-sœur consanguin(e)"
        elif sib_type == self.HALF_SIB_FATHER:
            # for descendants the "half" logic is reversed !
            if gender_b == Person.MALE:
                rel_str = u"le demi-frère utérin"
            elif gender_b == Person.FEMALE:
                rel_str = u"la demi-sœur utérine"
            else:
                rel_str = u"le demi-frère ou la demi-sœur utérin(e)"
        elif sib_type == self.STEP_SIB:
            if gender_b == Person.MALE:
                rel_str = u"le demi-frère"
            elif gender_b == Person.FEMALE:
                rel_str = u"la demi-sœur"
            else:
                rel_str = u"le demi-frère ou la demi-sœur"
        return rel_str
if __name__ == "__main__":
    # Test function. Call it as follows from the command line (so as to find
    # imported modules):
    # export PYTHONPATH=/path/to/gramps/src
    # python src/plugins/rel/rel_fr.py
    # (Above not needed here)
    """TRANSLATORS, copy this if statement at the bottom of your
    rel_xx.py module, and test your work with:
    python src/plugins/rel/rel_xx.py
    """
    from gramps.gen.relationship import test
    # run the shared relationship self-test harness against the
    # French calculator (True enables the interactive/full test mode)
    RC = RelationshipCalculator()
    test(RC, True)
| arunkgupta/gramps | gramps/plugins/rel/rel_fr.py | Python | gpl-2.0 | 28,046 |
# python standard library
import socket
# third party
import paramiko
# this package
from theape.parts.connections.clientbase import BaseClient,\
ConnectionError
# Default SSH port and connection timeout (seconds) for SimpleClient.
PORT = 22
TIMEOUT = 10
# Commands sent to the remote shell are newline-terminated.
NEWLINE = '\n'
# NOTE(review): SPACE_JOIN appears unused in this module -- confirm before removing.
SPACE_JOIN = "{prefix} {command}"
class SimpleClient(BaseClient):
    """
    A simple wrapper around paramiko's SSHClient.

    The only intended public interface is exec_command; the underlying
    SSHClient is created lazily on first access to ``client``.
    """
    def __init__(self, *args, **kwargs):
        """
        :param:

         - `hostname`: ip address or resolvable hostname.
         - `username`: the login name.
         - `timeout`: Time to give the client to connect
         - `port`: TCP port of the server
         - `args, kwargs`: anything else that the SSHClient.connect can use will be passed in to it
        """
        super(SimpleClient, self).__init__(*args, **kwargs)
        self._client = None
        return

    @property
    def client(self):
        """
        The main reason for this class

        :rtype: paramiko.SSHClient
        :return: An instance of SSHClient connected to remote host.
        :raise: ConnectionError if the connection fails.
        """
        if self._client is None:
            self._client = paramiko.SSHClient()
            self._client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
            self._client.load_system_host_keys()
            try:
                self._client.connect(hostname=self.hostname,
                                     username=self.username,
                                     timeout=self.timeout,
                                     port=self.port,
                                     **self.kwargs)
            # these are fatal exceptions, no one but the main program should catch them
            except paramiko.AuthenticationException as error:
                self.logger.error(error)
                raise ConnectionError("There is a problem with the ssh-keys or password for \n{0}".format(self))
            except paramiko.PasswordRequiredException as error:
                self.logger.error(error)
                self.logger.error("Private Keys Not Set Up, Password Required.")
                raise ConnectionError("SSH Key Error :\n {0}".format(self))
            except socket.timeout as error:
                self.logger.error(error)
                raise ConnectionError("Paramiko is unable to connect to \n{0}".format(self))
            except socket.error as error:
                self.logger.error(error)
                # BUG FIX: `error` is an exception object, so
                # `'Connection refused' in error` raised TypeError;
                # test the stringified message instead.
                if 'Connection refused' in str(error):
                    raise ConnectionError("SSH Server Not responding: check setup:\n {0}".format(self))
                raise ConnectionError("Problem with connection to:\n {0}".format(self))
        return self._client

    @property
    def port(self):
        """
        The TCP port (defaults to the module-level PORT, i.e. 22).
        """
        if self._port is None:
            # use the module constant rather than a repeated magic number
            self._port = PORT
        return self._port

    @port.setter
    def port(self, new_port):
        """
        Sets the port (I tried putting this in the base but you can't split the setter and property definitions)

        :param:

         - `new_port`: integer port number

        :raise: ConnectionError if can't cast to int
        """
        if new_port is not None:
            try:
                self._port = int(new_port)
            except ValueError as error:
                self.logger.error(error)
                raise ConnectionError("Unable to set port to : {0}".format(new_port))
        else:
            self._port = new_port
        return

    def exec_command(self, command, timeout=TIMEOUT):
        """
        A pass-through to the SSHClient's exec_command.

        :param:

         - `command`: A string to send to the client.
         - `timeout`: Set non-blocking timeout.

        :rtype: tuple
        :return: stdin, stdout, stderr
        :raise: ConnectionError for paramiko or socket exceptions
        """
        # the remote shell needs a newline to execute the command
        if not command.endswith(NEWLINE):
            command += NEWLINE
        try:
            self.logger.debug("({0}) Sending to paramiko -- '{1}', timeout={2}".format(self,
                                                                                       command,
                                                                                       timeout))
            return self.client.exec_command(command, timeout=timeout)
        except socket.timeout:
            self.logger.debug("socket timed out")
            raise ConnectionError("Timed out -- Command: {0} Timeout: {1}".format(command,
                                                                                  timeout))
        # this catches other socket errors so it should go after any other socket exceptions
        except (socket.error, paramiko.SSHException, AttributeError) as error:
            # the AttributeError is raised if no connection was actually made (probably the wrong IP address)
            # drop the dead client so the next access re-connects
            self._client = None
            self.logger.error(error)
            raise ConnectionError("Problem with connection to:\n {0}".format(self))
# end class SimpleClient
if __name__ == '__main__':
import pudb;pudb.set_trace()
client = SimpleClient('abc', 'def') | rsnakamura/theape | theape/parts/connections/simpleclient.py | Python | mit | 5,260 |
# Python - 3.6.0
def averages(arr):
    """Return the averages of each pair of consecutive elements of *arr*.

    For ``[a, b, c]`` the result is ``[(a+b)/2, (b+c)/2]`` as floats.
    Returns ``[]`` when *arr* is not a list (e.g. ``None`` or a tuple)
    or has fewer than two elements, matching the original contract.
    """
    # isinstance (instead of type(...) != list) is the idiomatic check
    if not isinstance(arr, list) or len(arr) < 2:
        return []
    # zip pairs each element with its successor -- no manual carry variable
    return [(a + b) / 2.0 for a, b in zip(arr, arr[1:])]
| RevansChen/online-judge | Codewars/7kyu/averages-of-numbers/Python/solution1.py | Python | mit | 243 |
from setuptools import setup, find_packages
from os import path
# NOTE(review): `here` is computed but never used below -- confirm before removing.
here = path.abspath(path.dirname(__file__))
# Package metadata for the behave2cucumber report converter.
setup(
    name='behave2cucumber',
    version='1.0.3',
    description='Behave to Cucumber json converter',
    long_description='This project helps solving the incompatibilty of Behave\'s genereated json reports '
                     'to tools using Cucumber json reports. '
                     'Its done by reformatting the Behave json to Cucumber json.',
    url='https://github.com/behalf-oss/behave2cucumber',
    author='Andrey Goldgamer, Zvika Messing',
    author_email='andrey.goldgamer@behalf.com, zvika@behalf.com ',
    license='MIT',
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'Intended Audience :: Developers',
        'Topic :: Software Development :: Testing',
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python :: 2.7',
    ],
    keywords='behave2cucumber setuptools development cucumber behave automation json',
    packages=find_packages(),
    install_requires=[],
    extras_require={},
    data_files=[],
    # expose the converter as a command-line entry point
    entry_points={
        'console_scripts': [
            'behave2cucumber = behave2cucumber.behave2cucumber.__main__:main'
        ],
    },
)
| behalf-oss/behave2cucumber | setup.py | Python | mit | 1,279 |
#!/usr/bin/env python
#
# Copyright 2012 Facebook
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# pylint: skip-file
from __future__ import absolute_import, division, print_function
import contextlib
import glob
import logging
import os
import re
import subprocess
import sys
import tempfile
import warnings
from salt.ext.tornado.escape import utf8
from salt.ext.tornado.log import LogFormatter, define_logging_options, enable_pretty_logging
from salt.ext.tornado.options import OptionParser
from salt.ext.tornado.test.util import unittest
from salt.ext.tornado.util import basestring_type
@contextlib.contextmanager
def ignore_bytes_warning():
    """Context manager that silences BytesWarning within its body.

    The warning-filter state is saved on entry and restored on exit, so
    the suppression never leaks past the ``with`` block.
    """
    with warnings.catch_warnings():
        warnings.filterwarnings('ignore', category=BytesWarning)
        yield
class LogFormatterTest(unittest.TestCase):
    """Tests for LogFormatter's line format and byte/unicode handling.

    A real Logger writes through a FileHandler to a temp file; each test
    reads the file back and matches it against LINE_RE.
    """
    # Matches the output of a single logging call (which may be multiple lines
    # if a traceback was included, so we use the DOTALL option)
    LINE_RE = re.compile(b"(?s)\x01\\[E [0-9]{6} [0-9]{2}:[0-9]{2}:[0-9]{2} log_test:[0-9]+\\]\x02 (.*)")

    def setUp(self):
        self.formatter = LogFormatter(color=False)
        # Fake color support.  We can't guarantee anything about the $TERM
        # variable when the tests are run, so just patch in some values
        # for testing.  (testing with color off fails to expose some potential
        # encoding issues from the control characters)
        self.formatter._colors = {
            logging.ERROR: u"\u0001",
        }
        self.formatter._normal = u"\u0002"
        # construct a Logger directly to bypass getLogger's caching
        self.logger = logging.Logger('LogFormatterTest')
        self.logger.propagate = False
        self.tempdir = tempfile.mkdtemp()
        self.filename = os.path.join(self.tempdir, 'log.out')
        self.handler = self.make_handler(self.filename)
        self.handler.setFormatter(self.formatter)
        self.logger.addHandler(self.handler)

    def tearDown(self):
        # close the handler before removing its file (required on Windows)
        self.handler.close()
        os.unlink(self.filename)
        os.rmdir(self.tempdir)

    def make_handler(self, filename):
        # Base case: default setup without explicit encoding.
        # In python 2, supports arbitrary byte strings and unicode objects
        # that contain only ascii.  In python 3, supports ascii-only unicode
        # strings (but byte strings will be repr'd automatically).
        return logging.FileHandler(filename)

    def get_output(self):
        """Read back the log file and return the formatted message body."""
        with open(self.filename, "rb") as f:
            line = f.read().strip()
            m = LogFormatterTest.LINE_RE.match(line)
            if m:
                return m.group(1)
            else:
                raise Exception("output didn't match regex: %r" % line)

    def test_basic_logging(self):
        self.logger.error("foo")
        self.assertEqual(self.get_output(), b"foo")

    def test_bytes_logging(self):
        with ignore_bytes_warning():
            # This will be "\xe9" on python 2 or "b'\xe9'" on python 3
            self.logger.error(b"\xe9")
        self.assertEqual(self.get_output(), utf8(repr(b"\xe9")))

    def test_utf8_logging(self):
        with ignore_bytes_warning():
            self.logger.error(u"\u00e9".encode("utf8"))
        if issubclass(bytes, basestring_type):
            # on python 2, utf8 byte strings (and by extension ascii byte
            # strings) are passed through as-is.
            self.assertEqual(self.get_output(), utf8(u"\u00e9"))
        else:
            # on python 3, byte strings always get repr'd even if
            # they're ascii-only, so this degenerates into another
            # copy of test_bytes_logging.
            self.assertEqual(self.get_output(), utf8(repr(utf8(u"\u00e9"))))

    def test_bytes_exception_logging(self):
        try:
            raise Exception(b'\xe9')
        except Exception:
            self.logger.exception('caught exception')
        # This will be "Exception: \xe9" on python 2 or
        # "Exception: b'\xe9'" on python 3.
        output = self.get_output()
        self.assertRegexpMatches(output, br'Exception.*\\xe9')
        # The traceback contains newlines, which should not have been escaped.
        self.assertNotIn(br'\n', output)
class UnicodeLogFormatterTest(LogFormatterTest):
    """Re-runs the LogFormatterTest suite with a UTF-8 FileHandler."""

    def make_handler(self, filename):
        # Adding an explicit encoding configuration allows non-ascii unicode
        # strings in both python 2 and 3, without changing the behavior
        # for byte strings.
        return logging.FileHandler(filename, encoding="utf8")

    def test_unicode_logging(self):
        self.logger.error(u"\u00e9")
        self.assertEqual(self.get_output(), utf8(u"\u00e9"))
class EnablePrettyLoggingTest(unittest.TestCase):
    """Tests for enable_pretty_logging's file-based handler setup."""

    def setUp(self):
        super(EnablePrettyLoggingTest, self).setUp()
        self.options = OptionParser()
        define_logging_options(self.options)
        self.logger = logging.Logger('tornado.test.log_test.EnablePrettyLoggingTest')
        self.logger.propagate = False

    def test_log_file(self):
        # size-based rotation (the default log_rotate_mode)
        tmpdir = tempfile.mkdtemp()
        try:
            self.options.log_file_prefix = tmpdir + '/test_log'
            enable_pretty_logging(options=self.options, logger=self.logger)
            self.assertEqual(1, len(self.logger.handlers))
            self.logger.error('hello')
            self.logger.handlers[0].flush()
            filenames = glob.glob(tmpdir + '/test_log*')
            self.assertEqual(1, len(filenames))
            with open(filenames[0]) as f:
                self.assertRegexpMatches(f.read(), r'^\[E [^]]*\] hello$')
        finally:
            # close handlers before deleting files (required on Windows)
            for handler in self.logger.handlers:
                handler.flush()
                handler.close()
            for filename in glob.glob(tmpdir + '/test_log*'):
                os.unlink(filename)
            os.rmdir(tmpdir)

    def test_log_file_with_timed_rotating(self):
        # time-based rotation via log_rotate_mode='time'
        tmpdir = tempfile.mkdtemp()
        try:
            self.options.log_file_prefix = tmpdir + '/test_log'
            self.options.log_rotate_mode = 'time'
            enable_pretty_logging(options=self.options, logger=self.logger)
            self.logger.error('hello')
            self.logger.handlers[0].flush()
            filenames = glob.glob(tmpdir + '/test_log*')
            self.assertEqual(1, len(filenames))
            with open(filenames[0]) as f:
                self.assertRegexpMatches(
                    f.read(),
                    r'^\[E [^]]*\] hello$')
        finally:
            for handler in self.logger.handlers:
                handler.flush()
                handler.close()
            for filename in glob.glob(tmpdir + '/test_log*'):
                os.unlink(filename)
            os.rmdir(tmpdir)

    def test_wrong_rotate_mode_value(self):
        # an unrecognized log_rotate_mode must raise ValueError
        try:
            self.options.log_file_prefix = 'some_path'
            self.options.log_rotate_mode = 'wrong_mode'
            self.assertRaises(ValueError, enable_pretty_logging,
                              options=self.options, logger=self.logger)
        finally:
            for handler in self.logger.handlers:
                handler.flush()
                handler.close()
class LoggingOptionTest(unittest.TestCase):
    """Test the ability to enable and disable Tornado's logging hooks."""

    def logs_present(self, statement, args=None):
        """Run *statement* in a subprocess and report whether the info-level
        log line made it to the output (i.e. tornado's hooks ran)."""
        # Each test may manipulate and/or parse the options and then logs
        # a line at the 'info' level.  This level is ignored in the
        # logging module by default, but Tornado turns it on by default
        # so it is the easiest way to tell whether tornado's logging hooks
        # ran.
        IMPORT = 'from salt.ext.tornado.options import options, parse_command_line'
        LOG_INFO = 'import logging; logging.info("hello")'
        program = ';'.join([IMPORT, statement, LOG_INFO])
        proc = subprocess.Popen(
            [sys.executable, '-c', program] + (args or []),
            stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
        stdout, stderr = proc.communicate()
        self.assertEqual(proc.returncode, 0, 'process failed: %r' % stdout)
        return b'hello' in stdout

    def test_default(self):
        self.assertFalse(self.logs_present('pass'))

    def test_tornado_default(self):
        self.assertTrue(self.logs_present('parse_command_line()'))

    def test_disable_command_line(self):
        self.assertFalse(self.logs_present('parse_command_line()',
                                           ['--logging=none']))

    def test_disable_command_line_case_insensitive(self):
        self.assertFalse(self.logs_present('parse_command_line()',
                                           ['--logging=None']))

    def test_disable_code_string(self):
        self.assertFalse(self.logs_present(
            'options.logging = "none"; parse_command_line()'))

    def test_disable_code_none(self):
        self.assertFalse(self.logs_present(
            'options.logging = None; parse_command_line()'))

    def test_disable_override(self):
        # command line trumps code defaults
        self.assertTrue(self.logs_present(
            'options.logging = None; parse_command_line()',
            ['--logging=info']))
| saltstack/salt | salt/ext/tornado/test/log_test.py | Python | apache-2.0 | 9,737 |
import geojson
import pony.orm as pny
from flask import Flask
from flask.ext.cache import Cache
from flask_restful import Resource, Api
import logging
from public_transport_analyser.database.database import Origin, Destination, init
from public_transport_analyser.visualiser.utils import get_voronoi_map
# Flask application, simple in-process response cache, and REST API wrapper.
pta = Flask(__name__)
cache = Cache(pta, config={'CACHE_TYPE': 'simple'})
api = Api(pta)
# Module-level logger: DEBUG at the logger, INFO at the console handler.
logger = logging.getLogger('PTA.flask')
logger.setLevel(logging.DEBUG)
ch = logging.StreamHandler()
ch.setLevel(logging.INFO)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
ch.setFormatter(formatter)
logger.addHandler(ch)
@pta.route("/")
def index():
    """Serve the static landing page (origins map)."""
    logger.info("home page")
    return pta.send_static_file("origins.html")
@pta.route("/faq")
def faq():
    """Serve the static FAQ page."""
    logger.info("faq page")
    return pta.send_static_file("faq.html")
class FetchAllOriginsVor(Resource):
    """GET /api/origins: all origins as a Voronoi-tessellated GeoJSON map,
    with each cell colored by the driving/transit duration ratio."""

    @cache.cached(timeout=300)
    def get(self):
        """Return a GeoJSON FeatureCollection of Voronoi polygons,
        one per origin, carrying the avg driving/transit ratio."""
        logger = logging.getLogger('PTA.flask.get_all_origins_voronoi')
        logger.info("Start")
        # Get info from DB
        retrycount = 3
        for _ in range(retrycount):
            try:
                logger.info("Fetch from DB")
                with pny.db_session:
                    # per origin: location, avg driving duration, avg transit duration
                    origins = pny.select((
                                            o.location,
                                            pny.avg(t.duration for d in o.destinations for t in d.trips if
                                                    t.mode == "driving"),
                                            pny.avg(t.duration for d in o.destinations for t in d.trips if
                                                    t.mode == "transit")
                                          )
                                          for o in Origin)[:]
                logger.info("DB access went OK.")
                break
            except ValueError as ve:
                # fall back to a single marker telling the user to reload
                properties = {"isOrigin": True,
                              "location": "error! reload page."}
                f = geojson.Feature(geometry=geojson.Point((151.2, -33.9)), properties=properties)
                logger.info("DB fetch failed, returning error point.")
                return geojson.FeatureCollection([f, ])
            except pny.core.RollbackException as re:
                logger.error("Bad DB hit. Retrying:\n{}".format(re))
        else:
            logger.error("DB failed bigtime.")
            # TODO: deal with this error
        logger.info("Preparing GeoJSON with Voronoi")
        # locations are stored "lat,lon"; GeoJSON wants (lon, lat), hence [::-1]
        opoints = [o[0].split(",")[::-1] for o in origins]
        features = []
        # Plot the origin map
        try:
            regions, vertices = get_voronoi_map(opoints)
            for i, region in enumerate(regions):
                try:
                    # driving/transit ratio; -1 flags missing/zero data
                    ratio = origins[i][1] / origins[i][2]
                except Exception as e:
                    print(e)
                    ratio = -1
                properties = {"isOPoly": True,
                              "ratio": ratio,
                              "location": origins[i][0]}
                points = [(lon, lat) for lon, lat in vertices[region]]  # TODO: do some rounding to save bandwidth
                points.append(points[0])  # close off the polygon
                features.append(geojson.Feature(geometry=geojson.Polygon([points]),
                                                properties=properties, ))
        except ValueError as ve:
            logger.error("Voronoi function failed. Only sending destinations. Error: {}".format(ve))
        logger.info("GeoJSON built.")
        return geojson.FeatureCollection(features)
class FetchAllOrigins(Resource):
    """
    Deprecated

    GET /api/pointorigins: all origins as plain GeoJSON points
    (superseded by the Voronoi endpoint above).
    """
    @cache.cached(timeout=300)
    def get(self):
        """Return a GeoJSON FeatureCollection with one Point per origin."""
        logger = logging.getLogger('PTA.flask.get_all_origins')
        logger.info("Start")
        # Get info from DB
        retrycount = 3
        for _ in range(retrycount):
            try:
                logger.info("Fetch from DB")
                with pny.db_session:
                    origins = pny.select(o.location for o in Origin)[:]
                logger.info("DB access went OK.")
                break
            except ValueError as ve:
                # fall back to a single marker telling the user to reload
                properties = {"isOrigin": True,
                              "location": "error! reload page."}
                f = geojson.Feature(geometry=geojson.Point((151.2, -33.9)), properties=properties)
                logger.info("DB fetch failed, returning error point.")
                return geojson.FeatureCollection([f, ])
            except pny.core.RollbackException as re:
                logger.error("Bad DB hit. Retrying:\n{}".format(re))
        else:
            logger.error("DB failed bigtime.")
            # TODO: deal with this error
        # Prepare GeoJSON
        logger.info("Preparing GeoJSON")
        features = []
        for location in origins:
            # stored "lat,lon"; GeoJSON Point takes (lon, lat)
            lat, lon = map(float, location.split(","))
            properties = {"isOrigin": True,
                          "location": ",".join(map(str, (lat,lon)))}
            features.append(geojson.Feature(geometry=geojson.Point((lon, lat)), properties=properties))
        logger.info("GeoJSON built.")
        return geojson.FeatureCollection(features)
class FetchOrigin(Resource):
    """GET /api/origin/<origin>: one origin's destinations as GeoJSON points
    plus a Voronoi map colored by driving/transit duration ratio."""

    def get(self, origin):
        """Return a FeatureCollection for *origin* ("lat,lon" string):
        the origin point, each destination point, and Voronoi polygons."""
        logger = logging.getLogger('PTA.flask.get_origin')
        logger.info("Get origin: {}".format(origin))
        destinations = []
        # TODO: use prefetching: https://docs.ponyorm.com/queries.html#Query.prefetch
        retrycount = 3
        for _ in range(retrycount):
            # Get info from DB
            destinations = []
            try:
                logger.info("Fetch from DB")
                with pny.db_session:
                    try:
                        o = Origin[origin]
                    except pny.ObjectNotFound:
                        # TODO: use response codes
                        logger.error("No such origin {}.".format(origin))
                        raise ValueError("No such origin.")
                    for d in o.destinations:
                        dlat, dlon = map(float, d.location.split(","))
                        driving = -1
                        transittimes = []
                        # single driving duration; transit durations are averaged
                        for t in d.trips:
                            if t.mode == "driving":
                                driving = t.duration
                            else:
                                transittimes.append(float(t.duration))
                        try:
                            transit = sum(transittimes) / len(transittimes)
                        except Exception as e:
                            transit = -1
                            logger.error("Unable to average origin {}. COmputer says:\n{}".format(o.location, e))
                        # -1 ratio flags missing data for the front end
                        ratio = -1.0
                        if driving > 0 and transit > 0:
                            ratio = float(driving) / float(transit)
                        destinations.append((dlon, dlat, ratio))
                logger.info("DB access went OK.")
                break
            except pny.core.RollbackException as re:
                logger.error("Bad DB hit. Retrying:\n{}".format(re))
        else:
            logger.error("DB failed bigtime.")
            # TODO: deal with this error
        # Build GeoJSON features
        # Plot the origin point
        logger.info("Preparing GeoJSON")
        features = []
        olat, olon = map(float, origin.split(","))
        properties = {"isOrigin": True,
                      "location": (olat, olon),
                      }
        features.append(geojson.Feature(geometry=geojson.Point((olon, olat)), properties=properties))
        logger.info("Preparing GeoJSON for destinations")
        # Plot the destination points
        for details in destinations:
            dlon, dlat, ratio = details
            #if ratio == -1:
            #    continue  # Don't send bad data
            properties = {"ratio": ratio,
                          "isDestination": True,
                          "location": (dlon, dlat)}
            features.append(geojson.Feature(geometry=geojson.Point((dlon, dlat)), properties=properties))
        logger.info("Preparing GeoJSON with Voronoi")
        # Plot the destination map
        try:
            regions, vertices = get_voronoi_map(destinations)
            for i, region in enumerate(regions):
                ratio = destinations[i][2]
                #if ratio == -1:
                #    continue
                properties = {"isPolygon": True,
                              "ratio": ratio}
                points = [(lon, lat) for lon, lat in vertices[region]]  # TODO: do some rounding to save bandwidth
                points.append(points[0])  # close off the polygon
                features.append(geojson.Feature(geometry=geojson.Polygon([points]),
                                                properties=properties, ))
        except ValueError as ve:
            logger.error("Voronoi function failed. Only sending destinations. Error: {}".format(ve))
        logger.info("GeoJSON built.")
        return geojson.FeatureCollection(features)
# Route the REST resources and initialise the database at import time.
api.add_resource(FetchAllOrigins, '/api/pointorigins')
api.add_resource(FetchAllOriginsVor, '/api/origins')
api.add_resource(FetchOrigin, '/api/origin/<string:origin>')
init()  # Start up the DB
if __name__ == "__main__":
    # development server only; use a WSGI server in production
    pta.debug = True
    pta.run(host='0.0.0.0')
| OlympusMonds/PTA | public_transport_analyser/rest_backend/main.py | Python | gpl-3.0 | 9,524 |
#!/usr/bin/env python
import telnetlib
import time
import sys
import socket
TEL_PORT = 23
TEL_TO = 3
def write_cmd(cmd, conn):
    """Send *cmd* over the telnet connection and return buffered output.

    Trailing whitespace is stripped from the command before a newline is
    appended.  A one-second pause gives the remote device time to respond
    before the accumulated output is drained with read_very_eager().
    """
    conn.write(cmd.rstrip() + '\n')
    time.sleep(1)
    return conn.read_very_eager()
def telnet_conn(ip, port, timeout):
    """Open and return a Telnet connection; exit the program on timeout."""
    try:
        conn = telnetlib.Telnet(ip, port, timeout)
    except socket.timeout:
        # Fatal for this script: report to stderr and stop instead of
        # letting the exception propagate.
        sys.exit("connection timed out")
    return conn
def login(user, passwd, conn):
    """Answer the username/password prompts and return the banner text.

    The prompt patterns omit the first letter ("sername:"/"assword:") so
    they match regardless of the device's capitalisation.
    """
    output = conn.read_until("sername:", TEL_TO)
    conn.write(user + '\n')
    output += conn.read_until("assword:", TEL_TO)
    conn.write(passwd + '\n')
    return output
def main():
ip = '50.76.53.27'
user = 'pyclass'
passwd = '88newclass'
conn = telnet_conn(ip, TEL_PORT, TEL_TO)
login(user, passwd, conn)
hostname = write_cmd('show run | i hostname', conn)
hostname.lstrip('hostname ')
write_cmd('terminal length 0', conn)
out = write_cmd('show ver ', conn)
print out.rstrip('\n' + hostname + '#')
conn.close()
if __name__ == "__main__":
main()
# ==== dataset separator: end of file telnet.py (repo bluetiki/pylab, Python, BSD-2-Clause, 1063 bytes) ====
# $Id: frontend.py 7339 2012-02-03 12:23:27Z milde $
# Author: David Goodger <goodger@python.org>
# Copyright: This module has been placed in the public domain.
"""
Command-line and common processing for Docutils front-end tools.
Exports the following classes:
* `OptionParser`: Standard Docutils command-line processing.
* `Option`: Customized version of `optparse.Option`; validation support.
* `Values`: Runtime settings; objects are simple structs
(``object.attribute``). Supports cumulative list settings (attributes).
* `ConfigParser`: Standard Docutils config file processing.
Also exports the following functions:
* Option callbacks: `store_multiple`, `read_config_file`.
* Setting validators: `validate_encoding`,
`validate_encoding_error_handler`,
`validate_encoding_and_error_handler`, `validate_boolean`,
`validate_threshold`, `validate_colon_separated_string_list`,
`validate_dependency_file`.
* `make_paths_absolute`.
* SettingSpec manipulation: `filter_settings_spec`.
"""
__docformat__ = 'reStructuredText'
import os
import os.path
import sys
import warnings
import ConfigParser as CP
import codecs
import optparse
from optparse import SUPPRESS_HELP
import docutils
import docutils.utils
import docutils.nodes
from docutils.error_reporting import locale_encoding, ErrorOutput, ErrorString
def store_multiple(option, opt, value, parser, *args, **kwargs):
    """
    Store multiple values in `parser.values`.  (Option callback.)

    Each attribute named in `args` is reset to `None`; each key in
    `kwargs` is stored with its associated value.
    """
    for reset_name in args:
        setattr(parser.values, reset_name, None)
    for name, stored_value in kwargs.items():
        setattr(parser.values, name, stored_value)
def read_config_file(option, opt, value, parser):
    """
    Read a configuration file during option processing.  (Option callback.)

    `value` is the config file path; settings read from it update
    `parser.values` in place.
    """
    try:
        new_settings = parser.get_config_file_settings(value)
    except ValueError, error:  # Python 2 except syntax; file targets 2.x
        # parser.error() reports and exits, so the unbound `new_settings`
        # below is never reached on failure.
        parser.error(error)
    parser.values.update(new_settings, parser)
def validate_encoding(setting, value, option_parser,
                      config_parser=None, config_section=None):
    """Check that `value` names a known codec; return it unchanged."""
    try:
        codecs.lookup(value)
    except LookupError:
        # Python 2 idiom: raising a tuple re-raises its first element;
        # the original traceback is carried alongside.  This form is
        # invalid in Python 3.
        raise (LookupError('setting "%s": unknown encoding: "%s"'
                           % (setting, value)),
               None, sys.exc_info()[2])
    return value
def validate_encoding_error_handler(setting, value, option_parser,
                                    config_parser=None, config_section=None):
    """Check that `value` names a registered codec error handler."""
    try:
        codecs.lookup_error(value)
    except LookupError:
        # Python 2 tuple-raise (see validate_encoding); re-raises the
        # LookupError with the original traceback.
        raise (LookupError(
            'unknown encoding error handler: "%s" (choices: '
            '"strict", "ignore", "replace", "backslashreplace", '
            '"xmlcharrefreplace", and possibly others; see documentation for '
            'the Python ``codecs`` module)' % value),
               None, sys.exc_info()[2])
    return value
def validate_encoding_and_error_handler(
    setting, value, option_parser, config_parser=None, config_section=None):
    """
    Validate an "<encoding>" or "<encoding>:<handler>" value.

    Side-effect: if an error handler is included in the value, it is inserted
    into the appropriate place as if it was a separate setting/option.
    """
    if ':' in value:
        encoding, handler = value.split(':')
        validate_encoding_error_handler(
            setting + '_error_handler', handler, option_parser,
            config_parser, config_section)
        if config_parser:
            # Config-file context: store the handler back into the parser.
            config_parser.set(config_section, setting + '_error_handler',
                              handler)
        else:
            # Command-line context: store directly on the runtime values.
            setattr(option_parser.values, setting + '_error_handler', handler)
    else:
        encoding = value
    validate_encoding(setting, encoding, option_parser,
                      config_parser, config_section)
    return encoding
def validate_boolean(setting, value, option_parser,
                     config_parser=None, config_section=None):
    """Map a string setting to 0/1 via `option_parser.booleans`.

    Non-string values (already-parsed booleans/ints) pass through
    unchanged.  The `unicode` check makes this Python-2-only code.
    """
    if isinstance(value, unicode):
        try:
            return option_parser.booleans[value.strip().lower()]
        except KeyError:
            # Python 2 tuple-raise with the original traceback.
            raise (LookupError('unknown boolean value: "%s"' % value),
                   None, sys.exc_info()[2])
    return value
def validate_nonnegative_int(setting, value, option_parser,
                             config_parser=None, config_section=None):
    """Convert `value` to an int and reject anything below zero."""
    number = int(value)
    if number >= 0:
        return number
    raise ValueError('negative value; must be positive or zero')
def validate_threshold(setting, value, option_parser,
                       config_parser=None, config_section=None):
    """Convert a threshold setting to an int.

    `value` may be a number ("2") or a symbolic name ("warning") that is
    looked up in `option_parser.thresholds`.

    Raises LookupError for names not in the threshold table.
    """
    try:
        return int(value)
    except ValueError:
        try:
            return option_parser.thresholds[value.lower()]
        except (KeyError, AttributeError):
            # Bug fix: the original read `sys.exc_info[2]` (missing call
            # parentheses), which raised TypeError while building the raise
            # expression instead of the intended LookupError.  A plain raise
            # keeps the intended exception type and works on Python 2 and 3.
            raise LookupError('unknown threshold: %r.' % value)
def validate_colon_separated_string_list(
    setting, value, option_parser, config_parser=None, config_section=None):
    """Split a colon-separated string setting into a list.

    If `value` is already a list (multiple-use option), only the last
    entry is split and merged back in.  Uses `unicode` — Python 2 only.
    """
    if isinstance(value, unicode):
        value = value.split(':')
    else:
        last = value.pop()
        value.extend(last.split(':'))
    return value
def validate_url_trailing_slash(
    setting, value, option_parser, config_parser=None, config_section=None):
    """Normalise a URL setting so it ends with '/'.

    A falsy value (empty string, None) becomes './'.
    """
    if not value:
        return './'
    return value if value.endswith('/') else value + '/'
def validate_dependency_file(setting, value, option_parser,
                             config_parser=None, config_section=None):
    """Open `value` as a DependencyList; fall back to a no-op list on IOError."""
    try:
        return docutils.utils.DependencyList(value)
    except IOError:
        # Unwritable/unopenable target: record dependencies nowhere
        # rather than failing option processing.
        return docutils.utils.DependencyList(None)
def validate_strip_class(setting, value, option_parser,
                         config_parser=None, config_section=None):
    """Validate class-name values for the --strip-class family of options.

    The most recently appended entry (``value[-1]``) may be a comma-separated
    list: it is popped, split, validated, and the individual names are
    appended back onto `value`.  Uses `unicode` and a list-returning
    `filter` — Python 2 only.
    """
    # convert to list:
    if isinstance(value, unicode):
        value = [value]
    class_values = filter(None, [v.strip() for v in value.pop().split(',')])
    # validate:
    for class_value in class_values:
        normalized = docutils.nodes.make_id(class_value)
        if class_value != normalized:
            # Reject anything that is not already a valid identifier form.
            raise ValueError('invalid class value %r (perhaps %r?)'
                             % (class_value, normalized))
    value.extend(class_values)
    return value
def make_paths_absolute(pathdict, keys, base_path=None):
    """
    Interpret filesystem path settings relative to the `base_path` given.

    Paths are values in `pathdict` whose keys are in `keys`.  Get `keys` from
    `OptionParser.relative_path_settings`.  `pathdict` is updated in place.
    """
    if base_path is None:
        base_path = os.getcwdu() # type(base_path) == unicode
        # to allow combining non-ASCII cwd with unicode values in `pathdict`
        # (os.getcwdu exists only on Python 2)
    for key in keys:
        if key in pathdict:
            value = pathdict[key]
            if isinstance(value, list):
                # Multiple-use setting: absolutise each entry.
                value = [make_one_path_absolute(base_path, path)
                         for path in value]
            elif value:
                # Falsy values (None, '') are left untouched.
                value = make_one_path_absolute(base_path, value)
            pathdict[key] = value
def make_one_path_absolute(base_path, path):
    """Return `path` resolved to an absolute path relative to `base_path`."""
    joined = os.path.join(base_path, path)
    return os.path.abspath(joined)
def filter_settings_spec(settings_spec, *exclude, **replace):
    """Return a copy of `settings_spec` excluding/replacing some settings.

    `settings_spec` is a tuple of configuration settings with a structure
    described for docutils.SettingsSpec.settings_spec.

    Optional positional arguments are names of to-be-excluded settings.
    Keyword arguments are option specification replacements.
    (See the html4strict writer for an example.)
    """
    filtered = list(settings_spec)
    # Entries come in (title, description, option_specs) triples; only
    # every third item holds the sequence of option tuples.
    for idx in range(2, len(filtered), 3):
        kept = []
        for spec in filtered[idx]:
            # spec is ("<help>", [<option strings>], {<keyword args>});
            # the setting name derives from the first long option string.
            long_flags = [flag for flag in spec[1] if flag.startswith('--')]
            name = long_flags[0][2:].replace('-', '_')
            if name in exclude:
                continue
            kept.append(replace.get(name, spec))
        filtered[idx] = tuple(kept)
    return tuple(filtered)
class Values(optparse.Values):

    """
    Updates list attributes by extension rather than by replacement.
    Works in conjunction with the `OptionParser.lists` instance attribute.
    """

    def __init__(self, *args, **kwargs):
        optparse.Values.__init__(self, *args, **kwargs)
        if (not hasattr(self, 'record_dependencies')
            or self.record_dependencies is None):
            # Set up dependency list, in case it is needed.
            self.record_dependencies = docutils.utils.DependencyList()

    def update(self, other_dict, option_parser):
        """Merge `other_dict` into self.

        Settings registered in `option_parser.lists` are extended;
        everything else is overwritten.
        """
        if isinstance(other_dict, Values):
            other_dict = other_dict.__dict__
        other_dict = other_dict.copy()  # don't mutate the caller's mapping
        for setting in option_parser.lists.keys():
            if (hasattr(self, setting) and setting in other_dict):
                value = getattr(self, setting)
                if value:
                    value += other_dict[setting]  # extend the existing list
                del other_dict[setting]
        self._update_loose(other_dict)

    def copy(self):
        """Return a shallow copy of `self`."""
        return self.__class__(defaults=self.__dict__)
class Option(optparse.Option):

    """`optparse.Option` subclass with validation and override support."""

    # Extra per-option attributes: a `validator` callable and the name of
    # a setting this option `overrides` (reset to None when this is set).
    ATTRS = optparse.Option.ATTRS + ['validator', 'overrides']

    def process(self, opt, value, values, parser):
        """
        Call the validator function on applicable settings and
        evaluate the 'overrides' option.

        Extends `optparse.Option.process`.
        """
        result = optparse.Option.process(self, opt, value, values, parser)
        setting = self.dest
        if setting:
            if self.validator:
                value = getattr(values, setting)
                try:
                    new_value = self.validator(setting, value, parser)
                except Exception, error:  # Python 2 except syntax
                    # Python 2 tuple-raise: re-raises the first element
                    # with the original traceback.
                    raise (optparse.OptionValueError(
                               'Error in option "%s":\n    %s'
                               % (opt, ErrorString(error))),
                           None, sys.exc_info()[2])
                setattr(values, setting, new_value)
            if self.overrides:
                # Mutually-exclusive counterpart setting is cleared.
                setattr(values, self.overrides, None)
        return result
class OptionParser(optparse.OptionParser, docutils.SettingsSpec):

    """
    Parser for command-line and library use.  The `settings_spec`
    specification here and in other Docutils components are merged to build
    the set of command-line options and runtime settings for this process.

    Common settings (defined below) and component-specific settings must not
    conflict.  Short options are reserved for common settings, and components
    are restricted to using long options.
    """

    standard_config_files = [
        '/etc/docutils.conf',           # system-wide
        './docutils.conf',              # project-specific
        '~/.docutils']                  # user-specific
    """Docutils configuration files, using ConfigParser syntax.  Filenames
    will be tilde-expanded later.  Later files override earlier ones."""

    threshold_choices = 'info 1 warning 2 error 3 severe 4 none 5'.split()
    """Possible inputs for for --report and --halt threshold values."""

    thresholds = {'info': 1, 'warning': 2, 'error': 3, 'severe': 4, 'none': 5}
    """Lookup table for --report and --halt threshold values."""

    booleans={'1': 1, 'on': 1, 'yes': 1, 'true': 1,
              '0': 0, 'off': 0, 'no': 0, 'false': 0, '': 0}
    """Lookup table for boolean configuration file settings."""

    default_error_encoding = getattr(sys.stderr, 'encoding',
                                     None) or locale_encoding or 'ascii'

    default_error_encoding_error_handler = 'backslashreplace'

    # Each entry is a (title, description, option-tuples) triple; each
    # option tuple is (help text, option strings, keyword arguments).
    settings_spec = (
        'General Docutils Options',
        None,
        (('Specify the document title as metadata.',
          ['--title'], {}),
         ('Include a "Generated by Docutils" credit and link.',
          ['--generator', '-g'], {'action': 'store_true',
                                  'validator': validate_boolean}),
         ('Do not include a generator credit.',
          ['--no-generator'], {'action': 'store_false', 'dest': 'generator'}),
         ('Include the date at the end of the document (UTC).',
          ['--date', '-d'], {'action': 'store_const', 'const': '%Y-%m-%d',
                             'dest': 'datestamp'}),
         ('Include the time & date (UTC).',
          ['--time', '-t'], {'action': 'store_const',
                             'const': '%Y-%m-%d %H:%M UTC',
                             'dest': 'datestamp'}),
         ('Do not include a datestamp of any kind.',
          ['--no-datestamp'], {'action': 'store_const', 'const': None,
                               'dest': 'datestamp'}),
         ('Include a "View document source" link.',
          ['--source-link', '-s'], {'action': 'store_true',
                                    'validator': validate_boolean}),
         ('Use <URL> for a source link; implies --source-link.',
          ['--source-url'], {'metavar': '<URL>'}),
         ('Do not include a "View document source" link.',
          ['--no-source-link'],
          {'action': 'callback', 'callback': store_multiple,
           'callback_args': ('source_link', 'source_url')}),
         ('Link from section headers to TOC entries. (default)',
          ['--toc-entry-backlinks'],
          {'dest': 'toc_backlinks', 'action': 'store_const', 'const': 'entry',
           'default': 'entry'}),
         ('Link from section headers to the top of the TOC.',
          ['--toc-top-backlinks'],
          {'dest': 'toc_backlinks', 'action': 'store_const', 'const': 'top'}),
         ('Disable backlinks to the table of contents.',
          ['--no-toc-backlinks'],
          {'dest': 'toc_backlinks', 'action': 'store_false'}),
         ('Link from footnotes/citations to references. (default)',
          ['--footnote-backlinks'],
          {'action': 'store_true', 'default': 1,
           'validator': validate_boolean}),
         ('Disable backlinks from footnotes and citations.',
          ['--no-footnote-backlinks'],
          {'dest': 'footnote_backlinks', 'action': 'store_false'}),
         ('Enable section numbering by Docutils. (default)',
          ['--section-numbering'],
          {'action': 'store_true', 'dest': 'sectnum_xform',
           'default': 1, 'validator': validate_boolean}),
         ('Disable section numbering by Docutils.',
          ['--no-section-numbering'],
          {'action': 'store_false', 'dest': 'sectnum_xform'}),
         ('Remove comment elements from the document tree.',
          ['--strip-comments'],
          {'action': 'store_true', 'validator': validate_boolean}),
         ('Leave comment elements in the document tree. (default)',
          ['--leave-comments'],
          {'action': 'store_false', 'dest': 'strip_comments'}),
         ('Remove all elements with classes="<class>" from the document tree. '
          'Warning: potentially dangerous; use with caution. '
          '(Multiple-use option.)',
          ['--strip-elements-with-class'],
          {'action': 'append', 'dest': 'strip_elements_with_classes',
           'metavar': '<class>', 'validator': validate_strip_class}),
         ('Remove all classes="<class>" attributes from elements in the '
          'document tree. Warning: potentially dangerous; use with caution. '
          '(Multiple-use option.)',
          ['--strip-class'],
          {'action': 'append', 'dest': 'strip_classes',
           'metavar': '<class>', 'validator': validate_strip_class}),
         ('Report system messages at or higher than <level>: "info" or "1", '
          '"warning"/"2" (default), "error"/"3", "severe"/"4", "none"/"5"',
          ['--report', '-r'], {'choices': threshold_choices, 'default': 2,
                               'dest': 'report_level', 'metavar': '<level>',
                               'validator': validate_threshold}),
         ('Report all system messages. (Same as "--report=1".)',
          ['--verbose', '-v'], {'action': 'store_const', 'const': 1,
                                'dest': 'report_level'}),
         ('Report no system messages. (Same as "--report=5".)',
          ['--quiet', '-q'], {'action': 'store_const', 'const': 5,
                              'dest': 'report_level'}),
         ('Halt execution at system messages at or above <level>. '
          'Levels as in --report. Default: 4 (severe).',
          ['--halt'], {'choices': threshold_choices, 'dest': 'halt_level',
                       'default': 4, 'metavar': '<level>',
                       'validator': validate_threshold}),
         ('Halt at the slightest problem. Same as "--halt=info".',
          ['--strict'], {'action': 'store_const', 'const': 1,
                         'dest': 'halt_level'}),
         ('Enable a non-zero exit status for non-halting system messages at '
          'or above <level>. Default: 5 (disabled).',
          ['--exit-status'], {'choices': threshold_choices,
                              'dest': 'exit_status_level',
                              'default': 5, 'metavar': '<level>',
                              'validator': validate_threshold}),
         ('Enable debug-level system messages and diagnostics.',
          ['--debug'], {'action': 'store_true', 'validator': validate_boolean}),
         ('Disable debug output. (default)',
          ['--no-debug'], {'action': 'store_false', 'dest': 'debug'}),
         ('Send the output of system messages to <file>.',
          ['--warnings'], {'dest': 'warning_stream', 'metavar': '<file>'}),
         ('Enable Python tracebacks when Docutils is halted.',
          ['--traceback'], {'action': 'store_true', 'default': None,
                            'validator': validate_boolean}),
         ('Disable Python tracebacks. (default)',
          ['--no-traceback'], {'dest': 'traceback', 'action': 'store_false'}),
         ('Specify the encoding and optionally the '
          'error handler of input text. Default: <locale-dependent>:strict.',
          ['--input-encoding', '-i'],
          {'metavar': '<name[:handler]>',
           'validator': validate_encoding_and_error_handler}),
         ('Specify the error handler for undecodable characters. '
          'Choices: "strict" (default), "ignore", and "replace".',
          ['--input-encoding-error-handler'],
          {'default': 'strict', 'validator': validate_encoding_error_handler}),
         ('Specify the text encoding and optionally the error handler for '
          'output. Default: UTF-8:strict.',
          ['--output-encoding', '-o'],
          {'metavar': '<name[:handler]>', 'default': 'utf-8',
           'validator': validate_encoding_and_error_handler}),
         ('Specify error handler for unencodable output characters; '
          '"strict" (default), "ignore", "replace", '
          '"xmlcharrefreplace", "backslashreplace".',
          ['--output-encoding-error-handler'],
          {'default': 'strict', 'validator': validate_encoding_error_handler}),
         ('Specify text encoding and error handler for error output. '
          'Default: %s:%s.'
          % (default_error_encoding, default_error_encoding_error_handler),
          ['--error-encoding', '-e'],
          {'metavar': '<name[:handler]>', 'default': default_error_encoding,
           'validator': validate_encoding_and_error_handler}),
         ('Specify the error handler for unencodable characters in '
          'error output. Default: %s.'
          % default_error_encoding_error_handler,
          ['--error-encoding-error-handler'],
          {'default': default_error_encoding_error_handler,
           'validator': validate_encoding_error_handler}),
         ('Specify the language (as BCP 47 language tag). Default: en.',
          ['--language', '-l'], {'dest': 'language_code', 'default': 'en',
                                 'metavar': '<name>'}),
         ('Write output file dependencies to <file>.',
          ['--record-dependencies'],
          {'metavar': '<file>', 'validator': validate_dependency_file,
           'default': None}),           # default set in Values class
         ('Read configuration settings from <file>, if it exists.',
          ['--config'], {'metavar': '<file>', 'type': 'string',
                         'action': 'callback', 'callback': read_config_file}),
         ("Show this program's version number and exit.",
          ['--version', '-V'], {'action': 'version'}),
         ('Show this help message and exit.',
          ['--help', '-h'], {'action': 'help'}),
         # Typically not useful for non-programmatical use:
         (SUPPRESS_HELP, ['--id-prefix'], {'default': ''}),
         (SUPPRESS_HELP, ['--auto-id-prefix'], {'default': 'id'}),
         # Hidden options, for development use only:
         (SUPPRESS_HELP, ['--dump-settings'], {'action': 'store_true'}),
         (SUPPRESS_HELP, ['--dump-internals'], {'action': 'store_true'}),
         (SUPPRESS_HELP, ['--dump-transforms'], {'action': 'store_true'}),
         (SUPPRESS_HELP, ['--dump-pseudo-xml'], {'action': 'store_true'}),
         (SUPPRESS_HELP, ['--expose-internal-attribute'],
          {'action': 'append', 'dest': 'expose_internals',
           'validator': validate_colon_separated_string_list}),
         (SUPPRESS_HELP, ['--strict-visitor'], {'action': 'store_true'}),
        ))
    """Runtime settings and command-line options common to all Docutils front
    ends.  Setting specs specific to individual Docutils components are also
    used (see `populate_from_components()`)."""

    settings_defaults = {'_disable_config': None,
                         '_source': None,
                         '_destination': None,
                         '_config_files': None}
    """Defaults for settings that don't have command-line option equivalents."""

    relative_path_settings = ('warning_stream',)

    config_section = 'general'

    version_template = ('%%prog (Docutils %s [%s], Python %s, on %s)'
                        % (docutils.__version__, docutils.__version_details__,
                           sys.version.split()[0], sys.platform))
    """Default version message."""

    def __init__(self, components=(), defaults=None, read_config_files=None,
                 *args, **kwargs):
        """
        `components` is a list of Docutils components each containing a
        ``.settings_spec`` attribute.  `defaults` is a mapping of setting
        default overrides.
        """

        self.lists = {}
        """Set of list-type settings."""

        self.config_files = []
        """List of paths of applied configuration files."""

        optparse.OptionParser.__init__(
            self, option_class=Option, add_help_option=None,
            formatter=optparse.TitledHelpFormatter(width=78),
            *args, **kwargs)
        if not self.version:
            self.version = self.version_template
        # Make an instance copy (it will be modified):
        self.relative_path_settings = list(self.relative_path_settings)
        self.components = (self,) + tuple(components)
        self.populate_from_components(self.components)
        self.set_defaults_from_dict(defaults or {})
        if read_config_files and not self.defaults['_disable_config']:
            try:
                config_settings = self.get_standard_config_settings()
            except ValueError, error:  # Python 2 except syntax
                self.error(error)
            self.set_defaults_from_dict(config_settings.__dict__)

    def populate_from_components(self, components):
        """
        For each component, first populate from the `SettingsSpec.settings_spec`
        structure, then from the `SettingsSpec.settings_defaults` dictionary.

        After all components have been processed, check for and populate from
        each component's `SettingsSpec.settings_default_overrides` dictionary.
        """
        for component in components:
            if component is None:
                continue
            settings_spec = component.settings_spec
            self.relative_path_settings.extend(
                component.relative_path_settings)
            # settings_spec is a flat sequence of
            # (title, description, option_spec) triples.
            for i in range(0, len(settings_spec), 3):
                title, description, option_spec = settings_spec[i:i+3]
                if title:
                    group = optparse.OptionGroup(self, title, description)
                    self.add_option_group(group)
                else:
                    group = self        # single options
                for (help_text, option_strings, kwargs) in option_spec:
                    option = group.add_option(help=help_text, *option_strings,
                                              **kwargs)
                    if kwargs.get('action') == 'append':
                        # Remember list-type settings so Values.update()
                        # extends instead of replacing them.
                        self.lists[option.dest] = 1
            if component.settings_defaults:
                self.defaults.update(component.settings_defaults)
        for component in components:
            if component and component.settings_default_overrides:
                self.defaults.update(component.settings_default_overrides)

    def get_standard_config_files(self):
        """Return list of config files, from environment or standard."""
        try:
            config_files = os.environ['DOCUTILSCONFIG'].split(os.pathsep)
        except KeyError:
            config_files = self.standard_config_files

        # If 'HOME' is not set, expandvars() requires the 'pwd' module which is
        # not available under certain environments, for example, within
        # mod_python.  The publisher ends up in here, and we need to publish
        # from within mod_python.  Therefore we need to avoid expanding when we
        # are in those environments.
        expand = os.path.expanduser
        if 'HOME' not in os.environ:
            try:
                import pwd
            except ImportError:
                expand = lambda x: x
        return [expand(f) for f in config_files if f.strip()]

    def get_standard_config_settings(self):
        """Merge settings from all standard config files into one Values."""
        settings = Values()
        for filename in self.get_standard_config_files():
            settings.update(self.get_config_file_settings(filename), self)
        return settings

    def get_config_file_settings(self, config_file):
        """Returns a dictionary containing appropriate config file settings."""
        parser = ConfigParser()
        parser.read(config_file, self)
        self.config_files.extend(parser._files)
        base_path = os.path.dirname(config_file)
        applied = {}
        settings = Values()
        for component in self.components:
            if not component:
                continue
            for section in (tuple(component.config_section_dependencies or ())
                            + (component.config_section,)):
                if section in applied:
                    # Each config section is applied at most once.
                    continue
                applied[section] = 1
                settings.update(parser.get_section(section), self)
        make_paths_absolute(
            settings.__dict__, self.relative_path_settings, base_path)
        return settings.__dict__

    def check_values(self, values, args):
        """Store positional arguments as runtime settings."""
        values._source, values._destination = self.check_args(args)
        make_paths_absolute(values.__dict__, self.relative_path_settings)
        values._config_files = self.config_files
        return values

    def check_args(self, args):
        """Validate and return the (source, destination) positional args."""
        source = destination = None
        if args:
            source = args.pop(0)
            if source == '-':           # means stdin
                source = None
        if args:
            destination = args.pop(0)
            if destination == '-':      # means stdout
                destination = None
        if args:
            self.error('Maximum 2 arguments allowed.')
        if source and source == destination:
            self.error('Do not specify the same file for both source and '
                       'destination.  It will clobber the source file.')
        return source, destination

    def set_defaults_from_dict(self, defaults):
        """Merge `defaults` (a plain dict) into the option defaults."""
        self.defaults.update(defaults)

    def get_default_values(self):
        """Needed to get custom `Values` instances."""
        defaults = Values(self.defaults)
        defaults._config_files = self.config_files
        return defaults

    def get_option_by_dest(self, dest):
        """
        Get an option by its dest.

        If you're supplying a dest which is shared by several options,
        it is undefined which option of those is returned.

        A KeyError is raised if there is no option with the supplied
        dest.
        """
        for group in self.option_groups + [self]:
            for option in group.option_list:
                if option.dest == dest:
                    return option
        raise KeyError('No option with dest == %r.' % dest)
class ConfigParser(CP.RawConfigParser):

    """Docutils config-file reader (UTF-8 files, `RawConfigParser` based)."""

    old_settings = {
        'pep_stylesheet': ('pep_html writer', 'stylesheet'),
        'pep_stylesheet_path': ('pep_html writer', 'stylesheet_path'),
        'pep_template': ('pep_html writer', 'template')}
    """{old setting: (new section, new setting)} mapping, used by
    `handle_old_config`, to convert settings from the old [options] section."""

    old_warning = """
The "[option]" section is deprecated. Support for old-format configuration
files may be removed in a future Docutils release. Please revise your
configuration files. See <http://docutils.sf.net/docs/user/config.html>,
section "Old-Format Configuration Files".
"""

    not_utf8_error = """\
Unable to read configuration file "%s": content not encoded as UTF-8.
Skipping "%s" configuration file.
"""

    def __init__(self, *args, **kwargs):
        CP.RawConfigParser.__init__(self, *args, **kwargs)

        self._files = []
        """List of paths of configuration files read."""

        self._stderr = ErrorOutput()
        """Wrapper around sys.stderr catching en-/decoding errors"""

    def read(self, filenames, option_parser):
        """Read and validate one or more UTF-8 config files.

        Unreadable files are silently skipped; non-UTF-8 files are
        reported and skipped.
        """
        if type(filenames) in (str, unicode):  # `unicode`: Python 2 only
            filenames = [filenames]
        for filename in filenames:
            try:
                # Config files must be UTF-8-encoded:
                fp = codecs.open(filename, 'r', 'utf-8')
            except IOError:
                continue
            try:
                if sys.version_info < (3,2):
                    CP.RawConfigParser.readfp(self, fp, filename)
                else:
                    # readfp was renamed to read_file in Python 3.2.
                    CP.RawConfigParser.read_file(self, fp, filename)
            except UnicodeDecodeError:
                self._stderr.write(self.not_utf8_error % (filename, filename))
                fp.close()
                continue
            fp.close()
            self._files.append(filename)
            if self.has_section('options'):
                # Legacy "[options]" section: migrate it forward.
                self.handle_old_config(filename)
            self.validate_settings(filename, option_parser)

    def handle_old_config(self, filename):
        """Convert the deprecated [options] section to current sections."""
        warnings.warn_explicit(self.old_warning, ConfigDeprecationWarning,
                               filename, 0)
        options = self.get_section('options')
        if not self.has_section('general'):
            self.add_section('general')
        for key, value in options.items():
            if key in self.old_settings:
                section, setting = self.old_settings[key]
                if not self.has_section(section):
                    self.add_section(section)
            else:
                section = 'general'
                setting = key
            if not self.has_option(section, setting):
                # Existing new-style settings win over migrated ones.
                self.set(section, setting, value)
        self.remove_section('options')

    def validate_settings(self, filename, option_parser):
        """
        Call the validator function and implement overrides on all applicable
        settings.
        """
        for section in self.sections():
            for setting in self.options(section):
                try:
                    option = option_parser.get_option_by_dest(setting)
                except KeyError:
                    # Unknown settings are left as-is for other components.
                    continue
                if option.validator:
                    value = self.get(section, setting)
                    try:
                        new_value = option.validator(
                            setting, value, option_parser,
                            config_parser=self, config_section=section)
                    except Exception, error:  # Python 2 except syntax
                        # Python 2 tuple-raise with the original traceback.
                        raise (ValueError(
                            'Error in config file "%s", section "[%s]":\n'
                            '    %s\n'
                            '        %s = %s'
                            % (filename, section, ErrorString(error),
                               setting, value)), None, sys.exc_info()[2])
                    self.set(section, setting, new_value)
                if option.overrides:
                    self.set(section, option.overrides, None)

    def optionxform(self, optionstr):
        """
        Transform '-' to '_' so the cmdline form of option names can be used.
        """
        return optionstr.lower().replace('-', '_')

    def get_section(self, section):
        """
        Return a given section as a dictionary (empty if the section
        doesn't exist).
        """
        section_dict = {}
        if self.has_section(section):
            for option in self.options(section):
                section_dict[option] = self.get(section, option)
        return section_dict
class ConfigDeprecationWarning(DeprecationWarning):
    """Warning for deprecated configuration file features."""
# ==== dataset separator: end of file presto-docs/target/sphinx/docutils/frontend.py (repo ddd332/presto, Python, Apache-2.0, 34078 bytes) ====
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):

    # South schema migration: adds SiteSettings.language_code
    # (CharField, max_length=10, default 'sv').

    def forwards(self, orm):
        """Apply the migration: add the `language_code` column."""
        # Adding field 'SiteSettings.language_code'
        db.add_column(u'begood_sites_sitesettings', 'language_code',
                      self.gf('django.db.models.fields.CharField')(default='sv', max_length=10),
                      keep_default=False)

    def backwards(self, orm):
        """Reverse the migration: drop the `language_code` column."""
        # Deleting field 'SiteSettings.language_code'
        db.delete_column(u'begood_sites_sitesettings', 'language_code')

    # Frozen ORM snapshot generated by South -- do not edit by hand.
    models = {
        u'auth.group': {
            'Meta': {'object_name': 'Group'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
            'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
            'live_update': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': u"orm['auth.Group']"}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
            'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
            'segment': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'sites': ('begood_sites.fields.MultiSiteField', [], {'symmetrical': 'False', 'to': u"orm['sites.Site']", 'null': 'True', 'blank': 'True'}),
            'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'})
        },
        u'auth.permission': {
            'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        u'auth.user': {
            'Meta': {'object_name': 'User'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'sites': ('begood_sites.fields.MultiSiteField', [], {'symmetrical': 'False', 'to': u"orm['sites.Site']", 'null': 'True', 'blank': 'True'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        u'begood.template': {
            'Meta': {'object_name': 'Template'},
            'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'path': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'sites': ('begood_sites.fields.MultiSiteField', [], {'to': u"orm['sites.Site']", 'symmetrical': 'False'})
        },
        u'begood_sites.sitesettings': {
            'Meta': {'object_name': 'SiteSettings'},
            'extra_html_head': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'language_code': ('django.db.models.fields.CharField', [], {'default': "'sv'", 'max_length': '10'}),
            'site': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'settings'", 'unique': 'True', 'primary_key': 'True', 'to': u"orm['sites.Site']"}),
            'template_404': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': u"orm['begood.Template']"}),
            'template_search': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': u"orm['begood.Template']"})
        },
        u'begood_sites.versionsite': {
            'Meta': {'unique_together': "(('revision', 'site'),)", 'object_name': 'VersionSite'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'revision': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'versionsites'", 'to': u"orm['reversion.Revision']"}),
            'site': ('begood_sites.fields.SingleSiteField', [], {'to': u"orm['sites.Site']"})
        },
        u'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        u'reversion.revision': {
            'Meta': {'object_name': 'Revision'},
            'comment': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'manager_slug': ('django.db.models.fields.CharField', [], {'default': "'default'", 'max_length': '200', 'db_index': 'True'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True', 'blank': 'True'})
        },
        u'sites.site': {
            'Meta': {'ordering': "('domain',)", 'object_name': 'Site', 'db_table': "'django_site'"},
            'domain': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        }
    }
    complete_apps = ['begood_sites']
# ==== dataset separator: end of file begood_sites/migrations/0005_auto__add_field_sitesettings_language_code.py (repo AGoodId/begood-sites, Python, MIT, 7865 bytes) ====
# coding: utf-8
"""
MINDBODY Public API
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: v6
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import swagger_client
from swagger_client.api.client_api import ClientApi # noqa: E501
from swagger_client.rest import ApiException
class TestClientApi(unittest.TestCase):
    """ClientApi unit test stubs"""

    # NOTE(review): these stubs were generated by swagger-codegen; every test
    # currently passes vacuously until real request/response assertions are
    # filled in.

    def setUp(self):
        # Fresh ClientApi (with its default ApiClient) for each test case.
        self.api = swagger_client.api.client_api.ClientApi()  # noqa: E501

    def tearDown(self):
        # Nothing to release; the generated client holds no external state.
        pass

    def test_client_add_arrival(self):
        """Test case for client_add_arrival
        Add an arrival for a client.  # noqa: E501
        """
        pass

    def test_client_add_client(self):
        """Test case for client_add_client
        Add a client to a site.  # noqa: E501
        """
        pass

    def test_client_add_contact_log(self):
        """Test case for client_add_contact_log
        Add a contact log to a client's account.  # noqa: E501
        """
        pass

    def test_client_get_active_client_memberships(self):
        """Test case for client_get_active_client_memberships
        Get a client's active memberships.  # noqa: E501
        """
        pass

    def test_client_get_client_account_balances(self):
        """Test case for client_get_client_account_balances
        Get account balance information for one or more client(s).  # noqa: E501
        """
        pass

    def test_client_get_client_contracts(self):
        """Test case for client_get_client_contracts
        Get contracts that a client has purchased.  # noqa: E501
        """
        pass

    def test_client_get_client_formula_notes(self):
        """Test case for client_get_client_formula_notes
        Get a client's formula notes.  # noqa: E501
        """
        pass

    def test_client_get_client_indexes(self):
        """Test case for client_get_client_indexes
        Get a site's configured client indexes and client index values.  # noqa: E501
        """
        pass

    def test_client_get_client_purchases(self):
        """Test case for client_get_client_purchases
        Get a client's purchase history.  # noqa: E501
        """
        pass

    def test_client_get_client_referral_types(self):
        """Test case for client_get_client_referral_types
        Get a site's configured client referral types.  # noqa: E501
        """
        pass

    def test_client_get_client_services(self):
        """Test case for client_get_client_services
        Get pricing options that a client has purchased.  # noqa: E501
        """
        pass

    def test_client_get_client_visits(self):
        """Test case for client_get_client_visits
        Get a client's visit history.  # noqa: E501
        """
        pass

    def test_client_get_clients(self):
        """Test case for client_get_clients
        Get clients.  # noqa: E501
        """
        pass

    def test_client_get_contact_logs(self):
        """Test case for client_get_contact_logs
        Get contact logs on a client's account.  # noqa: E501
        """
        pass

    def test_client_get_cross_regional_client_associations(self):
        """Test case for client_get_cross_regional_client_associations
        Get a client's cross regional site associations.  # noqa: E501
        """
        pass

    def test_client_get_custom_client_fields(self):
        """Test case for client_get_custom_client_fields
        Get a site's configured custom client fields.  # noqa: E501
        """
        pass

    def test_client_get_required_client_fields(self):
        """Test case for client_get_required_client_fields
        Get client required fields for a site.  # noqa: E501
        """
        pass

    def test_client_send_password_reset_email(self):
        """Test case for client_send_password_reset_email
        Send a password reset email to a client.  # noqa: E501
        """
        pass

    def test_client_update_client(self):
        """Test case for client_update_client
        Update a client at a site.  # noqa: E501
        """
        pass

    def test_client_update_client_service(self):
        """Test case for client_update_client_service
        Update a client's purchase pricing option.  # noqa: E501
        """
        pass

    def test_client_update_client_visit(self):
        """Test case for client_update_client_visit
        Update a client's visit.  # noqa: E501
        """
        pass

    def test_client_update_contact_log(self):
        """Test case for client_update_contact_log
        Update a contact log on a client's account.  # noqa: E501
        """
        pass

    def test_client_upload_client_document(self):
        """Test case for client_upload_client_document
        Upload a document to a client's profile.  # noqa: E501
        """
        pass

    def test_client_upload_client_photo(self):
        """Test case for client_upload_client_photo
        Upload a profile photo to a client's profile.  # noqa: E501
        """
        pass
# Allow running this test module directly (python test_client_api.py).
if __name__ == '__main__':
    unittest.main()
| mindbody/API-Examples | SDKs/Python/test/test_client_api.py | Python | bsd-2-clause | 5,280 |
from django.template.defaultfilters import escapejs_filter
from django.test import SimpleTestCase
from django.utils.functional import lazy
from ..utils import setup
class EscapejsTests(SimpleTestCase):
    # Template-level tests for the escapejs filter.  The @setup decorator
    # (from ..utils) registers the template string and provides self.engine.

    @setup({'escapejs01': '{{ a|escapejs }}'})
    def test_escapejs01(self):
        # Control chars, quotes and angle brackets are all emitted as \uXXXX
        # escapes, making the value safe inside a JS string literal.
        output = self.engine.render_to_string('escapejs01', {'a': 'testing\r\njavascript \'string" <b>escaping</b>'})
        self.assertEqual(output, 'testing\\u000D\\u000Ajavascript '
                                 '\\u0027string\\u0022 \\u003Cb\\u003E'
                                 'escaping\\u003C/b\\u003E')

    @setup({'escapejs02': '{% autoescape off %}{{ a|escapejs }}{% endautoescape %}'})
    def test_escapejs02(self):
        # escapejs is applied even with autoescaping disabled -- output is
        # identical to the autoescaped case above.
        output = self.engine.render_to_string('escapejs02', {'a': 'testing\r\njavascript \'string" <b>escaping</b>'})
        self.assertEqual(output, 'testing\\u000D\\u000Ajavascript '
                                 '\\u0027string\\u0022 \\u003Cb\\u003E'
                                 'escaping\\u003C/b\\u003E')
class FunctionTests(SimpleTestCase):
    """Direct tests of the escapejs filter callable (no template engine)."""

    def test_quotes(self):
        result = escapejs_filter('"double quotes" and \'single quotes\'')
        self.assertEqual(result, '\\u0022double quotes\\u0022 and \\u0027single quotes\\u0027')

    def test_backslashes(self):
        result = escapejs_filter(r'\ : backslashes, too')
        self.assertEqual(result, '\\u005C : backslashes, too')

    def test_whitespace(self):
        result = escapejs_filter('and lots of whitespace: \r\n\t\v\f\b')
        self.assertEqual(result, 'and lots of whitespace: \\u000D\\u000A\\u0009\\u000B\\u000C\\u0008')

    def test_script(self):
        result = escapejs_filter(r'<script>and this</script>')
        self.assertEqual(result, '\\u003Cscript\\u003Eand this\\u003C/script\\u003E')

    def test_paragraph_separator(self):
        # U+2028/U+2029 are legal in JSON but terminate JS string literals,
        # so they must be escaped too.
        result = escapejs_filter('paragraph separator:\u2029and line separator:\u2028')
        self.assertEqual(result, 'paragraph separator:\\u2029and line separator:\\u2028')

    def test_lazy_string(self):
        # Lazily-evaluated strings must be forced and escaped like plain str.
        append_script = lazy(lambda string: r'<script>this</script>' + string, str)
        result = escapejs_filter(append_script('whitespace: \r\n\t\v\f\b'))
        self.assertEqual(
            result,
            '\\u003Cscript\\u003Ethis\\u003C/script\\u003E'
            'whitespace: \\u000D\\u000A\\u0009\\u000B\\u000C\\u0008'
        )
| mjtamlyn/django | tests/template_tests/filter_tests/test_escapejs.py | Python | bsd-3-clause | 2,409 |
import click
from docker import Client
from docker.utils import kwargs_from_env
import json
import git
from tardis.utils import ok, error, warn
import yaml
import os
import collections
# Mount point inside the official postgres image where the data files live.
POSTGRES_DATA_MOUNT = '/var/lib/postgresql/data'
# Default credentials offered by the `configure` prompts.
POSTGRES_USER = 'postgres'
POSTGRES_PASSWORD= 'postgres'
# Travel-plan configuration file (YAML) and per-plan session file pattern.
CONFIG_FILE = './tardis2.yml'
SESSION_FILE = '.{}.tardis.session'
@click.group()
def cli():
    # Root click group; subcommands attach themselves via @cli.command().
    pass
def load_configuration():
    """Read the travel-plan configuration from CONFIG_FILE.

    Returns an empty dict when no configuration file exists yet.
    """
    if not os.path.exists(CONFIG_FILE):
        return {}
    with open(CONFIG_FILE, 'rb') as fd:
        return yaml.safe_load(fd)
def dump_to_session_data(travel_plan, data):
    """Persist *data* as JSON in the session file for *travel_plan*."""
    session_path = SESSION_FILE.format(travel_plan)
    with open(session_path, 'w') as fd:
        json.dump(data, fd)
def load_session_data(travel_plan):
    """Read back the JSON session data previously saved for *travel_plan*."""
    session_path = SESSION_FILE.format(travel_plan)
    with open(session_path, 'r') as fd:
        return json.load(fd)
def create_docker_client():
    # Talk to the local Docker daemon over its default unix socket.
    # NOTE(review): kwargs_from_env is imported at module level but unused
    # here, so DOCKER_HOST-style remote daemons are not honoured -- confirm
    # whether that is intentional.
    return Client(base_url='unix://var/run/docker.sock')
@cli.command()
def configure():
    """
    Configure your local Postgres Docker image
    """
    config = load_configuration()
    travel_plan_name = click.prompt('travel plan name')
    if not config:
        # load_configuration() returns None for an empty YAML file.
        config = dict()
    # Any existing plan with the same name is replaced wholesale.
    # (The original code first checked membership and seeded the entry, then
    # unconditionally overwrote it on the next line -- dead code, removed.)
    config[travel_plan_name] = dict()
    image = click.prompt('docker image', default='postgres')
    tag = click.prompt('docker image tag', default='latest')
    config[travel_plan_name]['image'] = image
    config[travel_plan_name]['tag'] = tag
    config[travel_plan_name]['db_user'] = click.prompt('DB user', default=POSTGRES_USER)
    config[travel_plan_name]['db_password'] = click.prompt('DB password', default=POSTGRES_PASSWORD)
    config[travel_plan_name]['db_port'] = click.prompt('DB port', default=5432)
    config[travel_plan_name]['data_share'] = click.prompt('data share between host and Docker container')
    # Pre-fetch the image so the first `run` does not block on the pull.
    click.echo('pulling "{}:{}"...'.format(image, tag))
    docker_client = create_docker_client()
    docker_client.pull(repository=image, tag=tag)
    ok('pulled "{}:{}"'.format(image, tag))
    click.echo('saving travel plan "{}" to "{}"...'.format(travel_plan_name, CONFIG_FILE))
    with open(CONFIG_FILE, 'w') as fd:
        yaml.dump(config, fd, default_flow_style=False)
    ok('saved travel plan "{}" to "{}"'.format(travel_plan_name, CONFIG_FILE))
# TODO error handling
@cli.command()
@click.option('--travel-plan', help='name of the travel plan configuration')
@click.option('--config_path', help='path to tardis config file', default='.')
def run(travel_plan, config_path, recent_checkpoint = None):
    """
    Runs your local DB image according to tardis configuration
    """
    # NOTE(review): config_path is accepted but never used below; the FIXME
    # in travel_to() refers to this -- confirm before relying on the option.
    config = load_configuration()
    travel_plan_config = config[travel_plan]
    docker_image = travel_plan_config['image'] + ':' + travel_plan_config['tag']
    client = create_docker_client();
    # Inject the configured credentials into the postgres container.
    container = client.create_container(docker_image, environment = { 'POSTGRES_USER': travel_plan_config['db_user'],
                                                                      'POSTGRES_PASSWORD': travel_plan_config['db_password'] })
    container_id = container.get('Id')
    # Record container id + originating checkpoint so save/stop/travel-back
    # can find this container later.
    dump_to_session_data(travel_plan,
                         { 'container_id' : container_id,
                           'recent_checkpoint': recent_checkpoint })
    # Bind-mount the host data share onto the postgres data directory and
    # expose the configured host port on container port 5432.
    response = client.start(container = container_id,
                            binds = {
                                travel_plan_config['data_share']:
                                {
                                    'bind': POSTGRES_DATA_MOUNT,
                                    'ro': False
                                }
                            },
                            port_bindings = { 5432: travel_plan_config['db_port'] } )
    ok('started container "{}"'.format(container_id))
def is_git_directory(dir):
    # Thin wrapper over GitPython's low-level check.
    # NOTE(review): the parameter name shadows the builtin dir(); kept to
    # preserve the signature for keyword callers.
    return git.repo.fun.is_git_dir(dir)
def is_dirty(data_share):
    """Report whether the repo at *data_share* has changed or untracked files."""
    return git.repo.base.Repo(path=data_share).is_dirty(untracked_files=True)
def init_git_repo_if_not_exists(path):
    """Ensure *path* is a git repository, initialising a fresh one if needed."""
    if not is_git_directory(path):
        git.repo.base.Repo.init(path=path)
        ok('initialized GIT repo in "{}"'.format(path))
    else:
        click.echo('"{}" is already a GIT repo --> utilizing this repo'.format(path))
@cli.command()
@click.option('--travel-plan', help='name of the travel plan configuration')
@click.option('--checkpoint', help='name of the checkpoint representing the current DB state')
def save(travel_plan, checkpoint):
    """
    Sets a checkpoint for the current DB state
    """
    config = load_configuration()
    data_share = config[travel_plan]['data_share']
    init_git_repo_if_not_exists(data_share)
    if is_dirty(data_share):
        docker_client = create_docker_client()
        session_data = load_session_data(travel_plan)
        container_id = session_data['container_id']
        try:
            # Freeze the DB container so the data directory is consistent
            # while we commit it.
            docker_client.pause(container_id)
            ok('paused container "{}"'.format(container_id))
            click.echo('repo has changed...')
            # Commit everything in the data share and mark it with an
            # annotated tag named after the checkpoint.
            git_cmd = git.Git(data_share)
            git_cmd.add('--all', data_share)
            git_cmd.commit(message=checkpoint)
            git_cmd.tag('--annotate', checkpoint, message=checkpoint)
            dump_to_session_data(travel_plan,
                                 { 'container_id': container_id,
                                   'recent_checkpoint': checkpoint })
        except Exception as e:
            error(e)
        finally:
            # Always resume the container, even when the commit failed.
            docker_client.unpause(container_id)
            ok('unpaused container "{}"'.format(container_id))
    else:
        warn('repo has not changed... -> no checkpoint was created')
@cli.command('travel-to')
@click.option('--travel-plan', help='name of the travel plan configuration')
@click.option('--checkpoint', help='name of the checkpoint representing the DB state you want to switch to')
@click.pass_context
def travel_to(ctx, travel_plan, checkpoint):
    """
    Sets DB state back to state saved in the target checkpoint
    """
    config = load_configuration()
    data_share = config[travel_plan]['data_share']
    # Stop the running container before swapping the data directory out
    # from under it.
    ctx.invoke(stop, travel_plan=travel_plan)
    git_cmd = git.Git(data_share)
    # --force discards any uncommitted changes in the data share.
    git_cmd.checkout('--force','tags/{}'.format(checkpoint))
    ok('travelled back to "{}"'.format(checkpoint))
    # FIXME we need to reuse the same config path as we did in 'travis run'
    ctx.invoke(run, travel_plan=travel_plan, recent_checkpoint=checkpoint)
@cli.command('travel-back')
@click.option('--travel-plan', help='name of the travel plan configuration')
@click.pass_context
def travel_back(ctx, travel_plan):
    """
    Sets DB state back to the recent checkpoint
    """
    # Delegate to travel-to using the checkpoint recorded at the most recent
    # 'run' or 'save'.
    session_data = load_session_data(travel_plan)
    ctx.invoke(travel_to, travel_plan=travel_plan, checkpoint=session_data['recent_checkpoint'])
@cli.command()
@click.option('--travel-plan', help='name of the travel plan configuration')
def list(travel_plan):
    """
    Lists all checkpoints
    """
    # NOTE: the function name shadows the builtin list(), but it also defines
    # the CLI command name, so it is kept for backward compatibility.
    config = load_configuration()
    data_share = config[travel_plan]['data_share']
    # TODO mark current checkpoint
    repo = git.repo.base.Repo(path=data_share)
    # Plain loop instead of a side-effect list comprehension, which built and
    # threw away a list of None values.
    for tag in repo.tags:
        print(tag)
@cli.command()
@click.option('--travel-plan', help='name of the travel plan configuration')
def stop(travel_plan):
    """
    Stops the container recorded for the given travel plan
    """
    session_data = load_session_data(travel_plan)
    docker_client = create_docker_client()
    container_id = session_data['container_id']
    docker_client.stop(container_id)
    ok('stopped container "{}"'.format(container_id))
def main():
    # Console-script entry point; dispatches to the click group.
    cli()


if __name__ == '__main__':
    main()
| codechimp/tardis | tardis/cli.py | Python | mit | 7,795 |
"""Different kinds of SAX Exceptions"""
import sys
if sys.platform[:4] == "java":
from java.lang import Exception
del sys
# ===== SAXEXCEPTION =====
class SAXException(Exception):
    """Base class wrapping an XML error or warning.

    Carries a human-readable message and, optionally, the lower-level
    exception that triggered it.  Handlers in the ErrorHandler interface
    receive instances of this class but are never required to raise them;
    subclass it to add functionality or localisation.
    """

    def __init__(self, msg, exception=None):
        """Store the mandatory message and the optional wrapped exception."""
        self._msg = msg
        self._exception = exception
        Exception.__init__(self, msg)

    def getMessage(self):
        """Return the message describing this exception."""
        return self._msg

    def getException(self):
        """Return the wrapped exception, or None when there is none."""
        return self._exception

    def __str__(self):
        """The string form is simply the message."""
        return self.getMessage()

    def __getitem__(self, ix):
        """Refuse subscripting explicitly -- the base Exception historically
        supported __getitem__, which produced confusing errors on typos."""
        raise AttributeError("__getitem__")
# ===== SAXPARSEEXCEPTION =====
class SAXParseException(SAXException):
    """An XML parse error or warning annotated with location information.

    The locator's system id, line and column are captured eagerly at
    construction time because the parser objects behind the locator may be
    torn down before the exception is caught.  Wrapped-exception support is
    inherited from SAXException.
    """

    def __init__(self, msg, exception, locator):
        """Create the exception.  *exception* is allowed to be None."""
        SAXException.__init__(self, msg, exception)
        self._locator = locator
        # Snapshot location data now -- see the class docstring for why.
        self._systemId = self._locator.getSystemId()
        self._colnum = self._locator.getColumnNumber()
        self._linenum = self._locator.getLineNumber()

    def getColumnNumber(self):
        """Column number of the end of the text where the error occurred."""
        return self._colnum

    def getLineNumber(self):
        """Line number of the end of the text where the error occurred."""
        return self._linenum

    def getPublicId(self):
        """Public identifier of the entity where the error occurred."""
        return self._locator.getPublicId()

    def getSystemId(self):
        """System identifier of the entity where the error occurred."""
        return self._systemId

    def __str__(self):
        """Render as 'systemid:line:col: message'."""
        sysid = self._systemId if self._systemId is not None else "<unknown>"
        return "%s:%d:%d: %s" % (sysid, self.getLineNumber(),
                                 self.getColumnNumber(), self._msg)
# ===== SAXNOTRECOGNIZEDEXCEPTION =====
class SAXNotRecognizedException(SAXException):
    """Exception class for an unrecognized identifier.

    An XMLReader will raise this exception when it is confronted with an
    unrecognized feature or property. SAX applications and extensions may
    use this class for similar purposes.  No behaviour is added beyond
    SAXException."""
# ===== SAXNOTSUPPORTEDEXCEPTION =====
class SAXNotSupportedException(SAXException):
    """Exception class for an unsupported operation.

    An XMLReader will raise this exception when a service it cannot
    perform is requested (specifically setting a state or value). SAX
    applications and extensions may use this class for similar
    purposes.  No behaviour is added beyond SAXException."""
# ===== SAXNOTSUPPORTEDEXCEPTION =====
class SAXReaderNotAvailable(SAXNotSupportedException):
    """Exception class for a missing driver.

    An XMLReader module (driver) should raise this exception when it
    is first imported, e.g. when a support module cannot be imported.
    It also may be raised during parsing, e.g. if executing an external
    program is not permitted.  No behaviour is added beyond its base."""
| trivoldus28/pulsarch-verilog | tools/local/bas-release/bas,3.9/lib/python/lib/python2.3/xml/sax/_exceptions.py | Python | gpl-2.0 | 4,662 |
def main(request, response):
    """WPT handler that stashes or retrieves a request header keyed by test id.

    cmd=put stores the named header's value under the test id; cmd=get takes
    it back out of the stash and echoes it as an x-request-* response header.
    """
    headers = [(b"Content-Type", b"text/plain")]
    command = request.GET.first(b"cmd").lower()
    test_id = request.GET.first(b"id")
    header = request.GET.first(b"header")
    if command == b"put":
        observed = request.headers.get(header, b"")
        request.server.stash.put(test_id, observed)
    elif command == b"get":
        stored = request.server.stash.take(test_id)
        if stored is not None:
            headers.append((b"x-request-" + header, stored))
    else:
        response.set_error(400, u"Bad Command")
        return u"ERROR: Bad Command!"
    return headers, u""
| scheib/chromium | third_party/blink/web_tests/external/wpt/beacon/resources/inspect-header.py | Python | bsd-3-clause | 628 |
# -*- coding: utf-8 -*-
import ast
import re
from collections import defaultdict
from .utils import GenericStack
from .utils import LeveledStack
from .utils import ScopeStack
from .utils import to_camel_case
from .utils import normalize
from . import ast as ecma_ast
class TranslateVisitor(ast.NodeVisitor):
def __init__(self, module_as_closure=False, auto_camelcase=False, debug=True):
super().__init__()
self.level_stack = LeveledStack()
self.bin_op_stack = GenericStack()
self.scope = ScopeStack()
self.references = defaultdict(lambda: 0)
self.indentation = 0
self.meta_debug = debug
self.meta_auto_camelcase = auto_camelcase
self.meta_module_as_closure = module_as_closure
self.meta_global_object = None
self.meta_global_new = None
def print(self, *args, **kwargs):
if self.meta_debug:
prefix = " " * self.indentation
print(prefix, *args, **kwargs)
def translate(self, tree):
return self.visit(tree, root=True)
def process_idf(self, identifier):
if self.meta_auto_camelcase:
identifier.value = to_camel_case(identifier.value)
return identifier
def visit(self, node, root=False):
self.level_stack.inc_level()
self.print("enter:", node)
if isinstance(node, (ast.Module, ast.FunctionDef)):
self.scope.new_scope()
self.indentation += 1
super().visit(node)
self.indentation -= 1
js_node = self._translate_node(node, self.level_stack.get_value())
self.print("childs:", self.level_stack.get_value())
self.print("exit:", node)
self.level_stack.dec_level()
if js_node is not None:
self.level_stack.append(js_node)
return js_node
# Special visit fields
def visit_BinOp(self, node):
self.bin_op_stack.push(node)
self.generic_visit(node)
self.bin_op_stack.pop()
def visit_BoolOp(self, node):
self.bin_op_stack.push(node)
self.generic_visit(node)
self.bin_op_stack.pop()
# Compile methods
def _translate_node(self, node, childs):
name = node.__class__.__name__
fn = getattr(self, "_translate_{}".format(name), None)
if fn:
return fn(node, childs)
# Specific compile methods
def _translate_UnaryOp(self, node, childs):
operator = ""
if type(node.op) == ast.USub:
operator = "-"
elif type(node.op) == ast.UAdd:
operator = "+"
elif type(node.op) == ast.Not:
operator = "!"
elif type(node.op) == ast.Invert:
operator = "~"
return ecma_ast.UnaryOp(operator, childs[0], postfix=False)
def _translate_BinOp(self, node, childs):
if type(node.op) == ast.Pow:
da = ecma_ast.DotAccessor(ecma_ast.Identifier("Math"),
ecma_ast.Identifier("pow"))
n = ecma_ast.FunctionCall(da, [childs[0], childs[1]])
elif type(node.op) == ast.FloorDiv:
da = ecma_ast.DotAccessor(ecma_ast.Identifier("Math"),
ecma_ast.Identifier("floor"))
op = ecma_ast.BinOp("/", childs[0], childs[1])
n = ecma_ast.FunctionCall(da, [op])
elif type(node.op) == ast.BitOr:
n = ecma_ast.BinOp("|", childs[0], childs[1])
elif type(node.op) == ast.BitAnd:
n = ecma_ast.BinOp("&", childs[0], childs[1])
elif type(node.op) == ast.BitXor:
n = ecma_ast.BinOp("^", childs[0], childs[1])
elif type(node.op) == ast.LShift:
n = ecma_ast.BinOp("<<", childs[0], childs[1])
elif type(node.op) == ast.RShift:
n = ecma_ast.BinOp(">>", childs[0], childs[1])
else:
n = ecma_ast.BinOp(childs[1], childs[0], childs[2])
if not self.bin_op_stack.is_empty():
n._parens = True
return n
def _translate_BoolOp(self, node, childs):
binop = ecma_ast.BinOp(childs[0], childs[1], childs[2])
if not self.bin_op_stack.is_empty():
binop._parens = True
return binop
def _translate_Num(self, node, childs):
return ecma_ast.Number(str(node.n))
def _translate_Add(self, node, childs):
return "+"
def _translate_Mult(self, node, childs):
return "*"
def _translate_Sub(self, node, childs):
return "-"
def _translate_Div(self, node, childs):
return "/"
def _translate_Mod(self, node, childs):
return "%"
def _translate_Is(self, node, childs):
return "==="
def _translate_Eq(self, node, childs):
return "==="
def _translate_NotEq(self, node, childs):
return "!=="
def _translate_Lt(self, node, childs):
return "<"
def _translate_LtE(self, node, childs):
return "<="
def _translate_Gt(self, node, childs):
return ">"
def _translate_GtE(self, node, childs):
return ">="
def _translate_And(self, node, childs):
return "&&"
def _translate_Or(self, node, childs):
return "||"
def _translate_Delete(self, node, childs):
deletes = []
for child in childs:
if child.value not in self.scope:
self.scope.set(child.value, child)
deletes.append(ecma_ast.ExprStatement(ecma_ast.UnaryOp('delete', child)))
return ecma_ast.SetOfNodes(deletes)
def _translate_Return(self, node, childs):
if childs:
return ecma_ast.Return(childs[0])
return ecma_ast.Return()
def _create_scope_var_statement(self, root=False):
scope_identifiers = self.scope.get_scope_identifiers(root=root)
if len(scope_identifiers) == 0:
return None
scope_var_decls = list(map(lambda x: ecma_ast.VarDecl(x), scope_identifiers))
return ecma_ast.VarStatement(scope_var_decls)
def _translate_FunctionDef(self, node, childs):
scope_var_statement = self._create_scope_var_statement()
if node.decorator_list:
body_stmts = childs[1:-len(node.decorator_list)]
decorators = childs[-len(node.decorator_list):]
else:
body_stmts = childs[1:]
decorators = []
# Add scope var statement only if any var is defined
if scope_var_statement:
body_stmts = [scope_var_statement] + body_stmts
identifier = self.process_idf(ecma_ast.Identifier(node.name))
func_expr = ecma_ast.FuncExpr(None, childs[0], body_stmts)
var_decl = ecma_ast.VarDecl(identifier, func_expr)
# Drop inner scope (temporary is unused)
self.scope.drop_scope()
if node.name not in self.scope:
self.scope.set(node.name, identifier)
expr_stmt = ecma_ast.ExprStatement(var_decl)
# Add fast link to func expression
# Usefull for class translations
expr_stmt._func_expr = func_expr
expr_stmt._func_expr._identifier = identifier
decoration_statements = []
for decorator in decorators:
fcall_expr = ecma_ast.FunctionCall(decorator, [identifier])
assign_expr = ecma_ast.Assign("=", identifier, fcall_expr)
decoration_statements.append(ecma_ast.ExprStatement(assign_expr))
if decoration_statements:
reversed_decoration_statements = list(reversed(decoration_statements))
return ecma_ast.SetOfNodes([expr_stmt] + reversed_decoration_statements)
return expr_stmt
def _translate_Lambda(self, node, childs):
exprs = map(ecma_ast.ExprStatement, childs[1:])
func_expr = ecma_ast.FuncExpr(None, childs[0], list(exprs))
return func_expr
def _translate_Module(self, node, childs):
body_stmts = childs
if self.meta_global_object:
global_idf = self.process_idf(ecma_ast.Identifier(self.meta_global_object))
self.scope.set(self.meta_global_object, global_idf, special_form=True)
global_assign = ecma_ast.Assign("=", global_idf, ecma_ast.Identifier("this"))
global_stmt = ecma_ast.ExprStatement(global_assign)
body_stmts = [global_stmt] + body_stmts
if self.meta_global_new:
new_idf = self.process_idf(ecma_ast.Identifier(self.meta_global_new))
self.scope.set(self.meta_global_new, new_idf, special_form=True)
raw_new_js = """
function() {
var ___args_array = Array.apply(null, arguments);
var ___clazz = ___args_array.slice(0, 1)[0];
return new (___clazz.bind.apply(___clazz, ___args_array))();
}"""
normalized_new_js = "".join(normalize(raw_new_js).split("\n"))
normalized_new_js = re.sub(r"\s+", " ", normalized_new_js)
new_assign = ecma_ast.Assign("=", new_idf, ecma_ast.String(normalized_new_js))
new_stmt = ecma_ast.ExprStatement(new_assign)
body_stmts = [new_stmt] + body_stmts
scope_var_statement = self._create_scope_var_statement(root=True)
self.scope.drop_scope()
if scope_var_statement:
body_stmts = [scope_var_statement] + body_stmts
if self.meta_module_as_closure:
container_func_expr = ecma_ast.FuncExpr(None, None, body_stmts)
container_func_expr._parens = True
dotaccessor_func_expr = ecma_ast.DotAccessor(container_func_expr,
ecma_ast.Identifier("call"))
main_function_call = ecma_ast.FunctionCall(dotaccessor_func_expr, [ecma_ast.This()])
main_expr = ecma_ast.ExprStatement(main_function_call)
return ecma_ast.Program([main_expr])
return ecma_ast.Program(body_stmts)
def _translate_Import(self, node, childs):
for child in childs:
if child["name"] == "_global":
self.meta_global_object = child["asname"] or child["name"]
elif child["name"] == "_new":
self.meta_global_new = child["asname"] or child["name"]
return None
def _translate_alias(self, node, childs):
return node.__dict__
def _translate_Expr(self, node, childs):
return ecma_ast.ExprStatement(childs[0])
def _translate_arguments(self, node, childs):
return childs
def _translate_Break(self, node, childs):
return ecma_ast.Break()
def _translate_Continue(self, node, childs):
return ecma_ast.Continue()
def _translate_Name(self, node, childs):
if node.id == "None":
return ecma_ast.Null(node.id)
elif node.id == "True":
return ecma_ast.Boolean("true")
elif node.id == "False":
return ecma_ast.Boolean("false")
name = node.id
return self.process_idf(ecma_ast.Identifier(name))
def _translate_arg(self, node, childs):
return self.process_idf(ecma_ast.Identifier(node.arg))
def _translate_Str(self, node, childs):
return ecma_ast.String('"{}"'.format(node.s))
def _translate_Call(self, node, childs):
if isinstance(node.func, ast.Name):
fcall = ecma_ast.FunctionCall(childs[0], childs[1:])
return fcall
else:
dotaccessor = childs[0]
arguments = list(filter(bool, childs[1:]))
function_call = ecma_ast.FunctionCall(dotaccessor, arguments)
return function_call
def _translate_Attribute(self, node, childs):
variable_identifier = childs[0]
attribute_access_identifier = self.process_idf(ecma_ast.Identifier(node.attr))
dotaccessor = ecma_ast.DotAccessor(variable_identifier, attribute_access_identifier)
return dotaccessor
def _translate_AugAssign(self, node, childs):
target = childs[0]
assign_decl = None
# FIXME: should be used issubclass instead of type
if type(node.op) == ast.Pow or type(node.op) == ast.FloorDiv:
if type(node.op) == ast.Pow:
da = ecma_ast.DotAccessor(ecma_ast.Identifier("Math"),
ecma_ast.Identifier("pow"))
n = ecma_ast.FunctionCall(da, [childs[0], childs[1]])
elif type(node.op) == ast.FloorDiv:
op = ecma_ast.BinOp("/", childs[0], childs[1])
da = ecma_ast.DotAccessor(ecma_ast.Identifier("Math"),
ecma_ast.Identifier("floor"))
n = ecma_ast.FunctionCall(da, [op])
assign_decl = ecma_ast.Assign("=", target, n)
else:
op, value = childs[1], childs[2]
if isinstance(target, ecma_ast.Identifier):
if target.value not in self.scope:
self.scope.set(target.value, target)
if assign_decl is None:
assign_decl = ecma_ast.Assign(op + "=", target, value)
else:
assign_decl = ecma_ast.Assign(op + "=", target, assign_decl)
return ecma_ast.ExprStatement(assign_decl)
def _translate_Assign(self, node, childs):
identifiers = childs[:-1]
value = childs[-1]
main_assign_decl = None
extra_exprs = []
for target in reversed(identifiers):
if isinstance(target, ecma_ast.Identifier):
if target.value not in self.scope:
self.scope.set(target.value, target)
# Multiple assignation
if isinstance(target, ecma_ast.Array) and isinstance(value, ecma_ast.Array):
# Mock array target with identifier
new_target = self.get_unique_identifier("_ref")
# Create additional assing expresion for each item of array/tuple.
for i, item in enumerate(target):
_ba = ecma_ast.BracketAccessor(new_target, ecma_ast.Number(str(i)))
_as = ecma_ast.Assign("=", item, _ba)
extra_exprs.append(ecma_ast.ExprStatement(_as))
# Substitute a real target with autogenerated new identifier.
target = new_target
if main_assign_decl is None:
main_assign_decl = ecma_ast.Assign("=", target, value)
else:
main_assign_decl = ecma_ast.Assign("=", target, main_assign_decl)
main_expr = ecma_ast.ExprStatement(main_assign_decl)
if len(extra_exprs) == 0:
return main_expr
return ecma_ast.SetOfNodes([main_expr] + extra_exprs)
def _translate_Index(self, node, childs):
return childs[0]
def _translate_Subscript(self, node, childs):
node_identifier = childs[0]
if hasattr(node.slice, 'lower') or hasattr(node.slice, 'upper') or hasattr(node.slice, 'step'):
if hasattr(node.slice, 'step') and node.slice.step:
raise NotImplementedError(":D")
slice_values = []
if hasattr(node.slice, 'lower') and node.slice.lower:
slice_values.append(self.translate(node.slice.lower))
else:
slice_values.append(ecma_ast.Number("0"))
if hasattr(node.slice, 'upper') and node.slice.upper:
slice_values.append(self.translate(node.slice.upper))
da = ecma_ast.DotAccessor(node_identifier, ecma_ast.Identifier("slice"))
return ecma_ast.FunctionCall(da, slice_values)
else:
expr_identifier = childs[1]
# FIXME: convert to warning.
# if node_identifier.value not in self.scope:
# raise RuntimeError("undefined variable {} at line {}".format(node_identifier.value,
# node.lineno))
return ecma_ast.BracketAccessor(node_identifier, expr_identifier)
def _translate_List(self, node, childs):
return ecma_ast.Array(childs)
def _translate_Tuple(self, node, childs):
return ecma_ast.Array(childs)
def _translate_Dict(self, node, childs):
properties = []
msize = int(len(childs)/2)
keys = childs[:msize]
values = childs[msize:]
for key, value in zip(keys, values):
identifier = self.process_idf(ecma_ast.Identifier(key.value))
assign_instance = ecma_ast.Assign(":", identifier, value)
properties.append(assign_instance)
return ecma_ast.Object(properties)
def _translate_If(self, node, childs):
predicate = childs[0]
# consecuent
consequent_blocks_size = len(node.body)
consequent_blocks = childs[1:consequent_blocks_size+1]
consequent = ecma_ast.Block(consequent_blocks)
# alternative
alternative_blocks_size = len(node.orelse)
alternative_blocks = childs[consequent_blocks_size+1:]
if alternative_blocks_size > 0:
if alternative_blocks_size == 1 and isinstance(alternative_blocks[0], ecma_ast.If):
alternative = alternative_blocks[0]
else:
alternative = ecma_ast.Block(alternative_blocks)
else:
alternative = None
ifnode = ecma_ast.If(predicate, consequent, alternative)
return ifnode
def _translate_Compare(self, node, childs):
binop = ecma_ast.BinOp(childs[1], childs[0], childs[2])
# if not self.bin_op_stack.is_empty():
# n._parens = True
return binop
def get_unique_identifier(self, prefix="ref"):
for i in range(100000000):
candidate = "{}_{}".format(prefix, i)
if candidate not in self.scope:
identifier = self.process_idf(ecma_ast.Identifier(candidate))
self.scope.set(candidate, identifier)
return identifier
    def _translate_While(self, node, childs):
        """Translate ``while`` (including ``while/else``) statements.

        ``childs`` is ``[predicate, *body, *orelse]``.  A loop without an
        ``else`` maps directly onto a JS ``while``; otherwise a boolean flag
        plus a trailing ``if`` emulate the ``else`` clause.

        NOTE(review): the flag is cleared on every iteration, so the emitted
        ``else`` runs only when the body never executed, whereas Python runs
        ``else`` whenever the loop ends without ``break``.  The FIXME below
        hints this is known -- confirm the intended semantics.
        """
        predicate = childs[0]
        # loop body statements follow the predicate in ``childs``
        body_blocks_size = len(node.body)
        body = childs[1:body_blocks_size+1]
        else_body = None
        if node.orelse:
            else_condition_idf = self.get_unique_identifier()
            else_body = childs[body_blocks_size+1:]
        if else_body is None:
            return ecma_ast.While(predicate, ecma_ast.Block(body))
        # FIXME: this seems inconsistent :S
        initialize_assign = ecma_ast.Assign("=", else_condition_idf,
                                            ecma_ast.Boolean("true"))
        initialize_condition = ecma_ast.ExprStatement(initialize_assign)
        while_body = ecma_ast.Block([
            ecma_ast.ExprStatement(ecma_ast.Assign("=", else_condition_idf, ecma_ast.Boolean("false")))
        ] + body)
        while_sentence = ecma_ast.While(predicate, while_body)
        else_sentence = ecma_ast.If(else_condition_idf, ecma_ast.Block(else_body))
        return ecma_ast.SetOfNodes([initialize_condition, while_sentence, else_sentence])
    def _translate_ListComp(self, node, childs):
        """Translate a list comprehension into an immediately-invoked
        function expression that builds and returns a result array.

        Only a single ``for`` generator is supported; its ``if`` clauses are
        folded into one ``&&`` condition guarding the push.

        NOTE(review): ``expresion = childs`` (the translated element
        expression) is never used -- the loop pushes ``values[i]`` directly,
        so ``[f(x) for x in xs]`` appears to behave like a filtered copy of
        ``xs``.  Confirm against the emitter tests.
        """
        if len(node.generators) != 1:
            raise RuntimeError("Only implemented 1 generator per comprehension")
        generator = node.generators[0]
        values = generator.iter
        target = generator.target
        expresion = childs
        ifs = generator.ifs
        # Temporaries for the generated IIFE: index, length, source, output.
        counter_idf = self.get_unique_identifier("_i")
        len_idf = self.get_unique_identifier("_len")
        values_idf = self.get_unique_identifier("_values")
        results_idf = self.get_unique_identifier("_results")
        counter_var_decl = ecma_ast.VarDecl(counter_idf)
        len_var_decl = ecma_ast.VarDecl(len_idf)
        values_var_decl = ecma_ast.VarDecl(values_idf)
        results_var_decl = ecma_ast.VarDecl(results_idf)
        var_stmt = ecma_ast.VarStatement([counter_var_decl, len_var_decl, values_var_decl, results_var_decl])
        initialize_values = ecma_ast.ExprStatement(ecma_ast.Assign("=", values_idf, self.translate(values)))
        initialize_results = ecma_ast.ExprStatement(ecma_ast.Assign("=", results_idf, ecma_ast.Array([])))
        # For init
        init = ecma_ast.Comma(
            ecma_ast.Assign("=", counter_idf, ecma_ast.Number("0")),
            ecma_ast.Assign("=", len_idf, ecma_ast.DotAccessor(values_idf, ecma_ast.Identifier("length")))
        )
        # For condition
        cond = ecma_ast.BinOp("<", counter_idf, len_idf)
        # For count
        count = ecma_ast.UnaryOp("++", counter_idf, postfix=True)
        push_on_results = ecma_ast.FunctionCall(
            ecma_ast.DotAccessor(results_idf, ecma_ast.Identifier("push")),
            ecma_ast.ExprStatement(ecma_ast.BracketAccessor(values_idf, counter_idf))
        )
        if ifs:
            # Fold every comprehension condition into one && chain.
            composed_condition = None
            for comprehension_cond in ifs:
                if composed_condition is None:
                    composed_condition = self.translate(comprehension_cond)
                else:
                    composed_condition = ecma_ast.BinOp("&&", composed_condition, self.translate(comprehension_cond))
            for_loop_block = ecma_ast.Block([ecma_ast.If(composed_condition, ecma_ast.Block([push_on_results]))])
        else:
            for_loop_block = ecma_ast.Block([push_on_results])
        for_stmt = ecma_ast.For(init, cond, count, for_loop_block)
        return_results = ecma_ast.Return(results_idf)
        func_block = ecma_ast.Block([var_stmt, initialize_values, initialize_results, for_stmt, return_results])
        func_expr = ecma_ast.FuncExpr(None, None, func_block)
        func_expr._parens = True
        listcomp_stmt = ecma_ast.FunctionCall(func_expr)
        return listcomp_stmt
    def _translate_For(self, node, childs):
        """Translate ``for item in iterable`` into an index-based JS ``for``.

        ``childs`` is ``[target, iterable, body]``; the generated loop
        assigns ``item = iterable[i]`` at the top of each iteration and
        registers the target name in the current scope.

        NOTE(review): ``iterable_var_decl``/``iterable_var_stmt`` are built
        but never emitted, and ``for``/``else`` is not handled here --
        confirm both are intentional.
        """
        counter_idf = self.get_unique_identifier()
        iterable_idf = self.get_unique_identifier()
        item_idf = childs[0]
        iterable = childs[1]
        main_body_expr = childs[2]
        iterable_var_decl = ecma_ast.VarDecl(iterable_idf, iterable)
        iterable_var_stmt = ecma_ast.VarStatement(iterable_var_decl)
        # For condition
        cond_right_stmt = ecma_ast.DotAccessor(iterable_idf, ecma_ast.Identifier("length"))
        cond = ecma_ast.BinOp("<", counter_idf, cond_right_stmt)
        # For count
        count = ecma_ast.UnaryOp("++", counter_idf, postfix=True)
        # For init
        init_first = ecma_ast.Assign("=", counter_idf, ecma_ast.Number("0"))
        init_second = ecma_ast.Assign("=", iterable_idf, iterable)
        init = ecma_ast.Comma(init_first, init_second)
        # For body
        accesor = ecma_ast.BracketAccessor(iterable_idf, counter_idf)
        item_body_stmt = ecma_ast.ExprStatement(
            ecma_ast.Assign("=", item_idf, accesor))
        if item_idf.value not in self.scope:
            self.scope.set(item_idf.value, item_idf)
        body_block = ecma_ast.Block([item_body_stmt, main_body_expr])
        # For
        for_stmt = ecma_ast.For(init, cond, count, body_block)
        return for_stmt
    def _translate_Raise(self, node, childs):
        """Translate ``raise X`` into an ECMAScript ``throw`` statement.

        NOTE(review): assumes an exception expression is present; a bare
        ``raise`` (re-raise) would leave ``childs`` empty and fail here --
        confirm bare raise is rejected earlier in the pipeline.
        """
        return ecma_ast.Throw(childs[0])
def _translate_Try(self, node, childs):
fin_stmts = []
finally_node = None
if len(node.finalbody) > 0:
fin_stmts = childs[-len(node.finalbody):]
childs = childs[:-len(node.finalbody)]
catches_stmts = list(filter(lambda x: isinstance(x, ecma_ast.Catch), childs))
try_stmts = list(filter(lambda x: isinstance(x, ecma_ast.ExprStatement), childs))
catch_stmt = len(catches_stmts) > 0 and catches_stmts[0] or None
if len(fin_stmts) > 0:
fin_block = ecma_ast.Block(fin_stmts)
finally_node = ecma_ast.Finally(fin_block)
try_block = ecma_ast.Block(try_stmts)
try_node = ecma_ast.Try(try_block, catch=catch_stmt, fin=finally_node)
return try_node
    def _translate_ExceptHandler(self, node, childs):
        """Translate one ``except <type> as <name>:`` handler into a JS
        ``catch (<name>) { ... }`` clause.

        NOTE(review): the exception type is ignored and ``node.name`` is
        assumed to be a plain string -- confirm handlers without ``as`` are
        rejected earlier.
        """
        expr_stmts = list(filter(lambda x: isinstance(x, ecma_ast.ExprStatement), childs))
        identifier = self.process_idf(ecma_ast.Identifier(node.name))
        # identifiers = list(filter(lambda x: isinstance(x, ecma_ast.Identifier), childs))
        block_stmt = ecma_ast.Block(expr_stmts)
        return ecma_ast.Catch(identifier, block_stmt)
    def _translate_ClassDef(self, node, childs):
        """Translate a class definition into the classic JS closure pattern.

        ``__init__`` becomes the constructor function, every other method is
        attached to ``<ctor>.prototype``, and the whole definition is wrapped
        in an immediately-invoked function expression that returns the
        constructor, which is finally assigned to the class name.
        """
        # Methods were tagged with ``_func_expr`` by the FunctionDef
        # translator; split them out from the remaining class-body statements.
        functions = list(map(lambda x: x._func_expr,
                        filter(lambda x: hasattr(x, "_func_expr"), childs)))
        childs = list(filter(lambda x: not hasattr(x, "_func_expr"), childs))
        # Constructor
        constructor_func_expr = None
        for fn in functions:
            if fn._identifier.value == "__init__":
                constructor_func_expr = fn
                self.scope.unset("__init__")
                break
        if constructor_func_expr is None:
            # No __init__: emit an empty constructor function.
            constructor_func_expr = ecma_ast.FuncExpr(None, None, None)
        else:
            functions = list(filter(lambda x: x is not constructor_func_expr, functions))
        self.scope.new_scope()
        inner_class_idf = self.get_unique_identifier("classref")
        assign_expr = ecma_ast.Assign("=", inner_class_idf, constructor_func_expr)
        constructor_expr = ecma_ast.ExprStatement(assign_expr)
        body_stmts = [constructor_expr]
        # Functions definition
        for fn in functions:
            fn_dt_prototype = ecma_ast.DotAccessor(inner_class_idf,
                                        ecma_ast.Identifier("prototype"))
            fn_dt_attr = ecma_ast.DotAccessor(fn_dt_prototype, fn._identifier)
            fn_assign_expr = ecma_ast.Assign("=", fn_dt_attr, fn)
            fn_expr = ecma_ast.ExprStatement(fn_assign_expr)
            body_stmts.append(fn_expr)
        body_stmts.append(ecma_ast.Return(inner_class_idf))
        # Class closure
        # Contains all class definition
        scope_var_statement = self._create_scope_var_statement()
        main_container_func = ecma_ast.FuncExpr(None, None, [scope_var_statement] + body_stmts)
        main_container_func._parens = True
        main_function_call = ecma_ast.FunctionCall(main_container_func)
        main_identifier = self.process_idf(ecma_ast.Identifier(node.name))
        main_assign = ecma_ast.Assign("=", main_identifier, main_function_call)
        main_expr = ecma_ast.ExprStatement(main_assign)
        self.scope.drop_scope()
        if node.name not in self.scope:
            self.scope.set(node.name, main_identifier)
        return main_expr
| niwinz/cobrascript | cobra/translator.py | Python | bsd-3-clause | 26,780 |
# -*- coding: utf-8 -*-
from opus_api.util import jsonify, parse_num_tokens, minint, maxint
from opus_api.cache import jcache
import opus_api.crawler as crawler
import opus_api.settings as settings
import requests
import json
import bs4
from opus_api.exceptions import InvalidSrcException, InvalidTrgException, InvalidFormException
"""
Main module.
Provides OPUS queries
"""
def checkLangs(src, target):
    """
    Validate the source and target language codes, raising
    InvalidSrcException / InvalidTrgException for whichever is unknown
    (source is checked first).
    """
    known = json.loads(lang_map())
    for code, error in ((src, InvalidSrcException), (target, InvalidTrgException)):
        if code not in known:
            raise error(code)
def checkForm(form):
    """
    Validate the requested corpus format; only 'moses' and 'tmx' are served.
    """
    valid_forms = ('moses', 'tmx')
    if form not in valid_forms:
        raise InvalidFormException(form)
@jcache
def get(src, target, minimum=minint(), maximum=maxint(), form='moses'):
    """
    Get corpora for src-target (default format: MOSES)

    Scrapes the OPUS results page for the language pair and returns a JSON
    string with one entry per downloadable corpus in the requested format,
    filtered by the combined (src + trg) token count.
    """
    checkLangs(src, target)
    checkForm(form)
    html = crawler.get(src, target)
    crawl_soup = bs4.BeautifulSoup(html, 'html.parser')
    # The download links live inside the 'counts' div; each format link sits
    # in a table row whose cells hold the corpus name and token counts.
    counts = crawl_soup.find('div', {'class': 'counts'})
    corpora = []
    link_id = 1
    moses_links = counts.find_all('a', text=form)
    for link in moses_links:
        row = link.parent.parent.contents
        name = row[0].text
        url = settings.site_url + link['href']
        src_tokens = row[3].text
        trg_tokens = row[4].text
        # Counts are parsed to numbers for filtering, but the original
        # display strings are kept in the payload.
        src_tokens_f = parse_num_tokens(src_tokens)
        trg_tokens_f = parse_num_tokens(trg_tokens)
        total_s = src_tokens_f + trg_tokens_f
        # maximum == maxint() means "no upper bound" (a corpus exactly at
        # maxint would otherwise be excluded by the strict <).
        if (total_s > minimum
           and (total_s < maximum or maximum == maxint())):
            corpora.append(
                {
                    'name': name,
                    'id': link_id,
                    'url': url,
                    'src_tokens': src_tokens,
                    'trg_tokens': trg_tokens
                })
            link_id += 1
    corpora = jsonify({'corpora': corpora})
    return corpora
@jcache
def lang_map():
    """
    Get name-id mapping of available languages
    """
    return jsonify({entry['name']: entry['id']
                    for entry in json.loads(langs())})
@jcache
def langs():
    """
    Get list of both src and target languages

    Scrapes the language <select> dropdown on the OPUS front page and
    returns a JSON list of {name, id, description} entries.
    """
    html = requests.get(settings.site_url).content
    crawl_soup = bs4.BeautifulSoup(html, 'html.parser')
    langs = []
    lang_id = 0
    tags = crawl_soup.find('select').find_all('option')
    for tag in tags:
        name = tag['value']
        langs.append({
            'name': name,
            'id': lang_id,
            'description': tag.text
        })
        lang_id += 1
    # Drops the first <option> -- presumably a placeholder entry in the
    # dropdown; confirm against the live page markup.
    langs.pop(0)
    langs = jsonify(langs)
    return langs
| yonkornilov/opus-api | opus_api/opus_api.py | Python | mit | 2,872 |
class Solution(object):
    def firstMissingPositive(self, nums):
        """
        Return the smallest positive integer absent from nums.

        Uses in-place cyclic placement: every value v in [1, len(nums)] is
        swapped into index v - 1; afterwards the first index whose value
        does not equal index + 1 reveals the answer.  O(n) time, O(1) extra
        space; nums is permuted in place.

        :type nums: List[int]
        :rtype: int
        """
        size = len(nums)
        for idx in range(size):
            value = nums[idx]
            # Chase the current value to its home slot until we hit a value
            # that is out of range or already correctly placed.
            while 1 <= value <= size and nums[value - 1] != value:
                nums[value - 1], value = value, nums[value - 1]
        for idx, value in enumerate(nums):
            if value != idx + 1:
                return idx + 1
        return size + 1
| hawkphantomnet/leetcode | FirstMissingPositive/Solution.py | Python | mit | 495 |
#!/usr/bin/python3
# -*- coding: utf-8 -*-
import collections
import pytz
from datetime import timedelta
from pytz import timezone
import os
class CityInfo():
    """Static metadata for one city: Environment Canada station ids per era,
    site labels, timezone and the per-city quirks of the data feeds."""
    def __init__(
            self,
            dayStations,
            hourStations,
            label,
            name,
            hourlyDataTimeOffset,
            timezone,
            #*,
            skipOb=None,
            skipDailyFields=None,
            weatherStatsSite=None,
            stationName=None,
            airportCode=None,
            skipMetar=False,
    ):
        self.dayStations = dayStations
        self.hourStations = hourStations
        self.label = label
        self.name = name
        self.airportCode = airportCode
        self.skipMetar = skipMetar
        # Fresh lists per instance -- None sentinels avoid the shared
        # mutable-default pitfall.
        self.skipOb = [] if skipOb is None else skipOb
        self.skipDailyFields = [] if skipDailyFields is None else skipDailyFields
        self.weatherStatsSite = weatherStatsSite
        self.stationName = stationName
        self.hourlyDataTimeOffset = hourlyDataTimeOffset
        self.timezone = timezone
def weatherStatsSite(cityName):
    """Return the weatherstats.ca site key for cityName, falling back to the
    city key itself when no override is configured."""
    override = city[cityName].weatherStatsSite
    return cityName if override is None else override
city = {
'barrie': CityInfo(
dayStations = collections.OrderedDict( [
( 42183, (2003,2017) ), # Barrie-Oro
( 4408, (1968,2003) ), # Barrie WPCC
] ),
hourStations = collections.OrderedDict( [
( 42183, (2003,2017) ),
] ),
label = 'on-151',
name = 'Barrie',
hourlyDataTimeOffset=timedelta(hours=5),
timezone=timezone('America/Toronto'),
skipDailyFields=['TOTAL_RAIN_MM', 'TOTAL_SNOW_CM'],
skipOb = ['visibility'],
stationName="Lake Simcoe Regional Airport",
),
'calgary': CityInfo(
dayStations = collections.OrderedDict( [
( 50430, (2012,2017) ),
( 2205, (1881,2012) ),
] ),
hourStations = collections.OrderedDict( [
( 50430, (2012,2017) ),
( 2205, (1953,2012) ),
] ),
label = 'ab-52',
name = 'Calgary',
hourlyDataTimeOffset=timedelta(hours=7),
timezone=timezone('America/Edmonton'),
stationName="Calgary Int'l Airport",
airportCode='YYC',
),
'charlottetown': CityInfo(
dayStations = collections.OrderedDict( [
( 50621, (2012,2017) ),
( 6526, (1943,2012) ),
] ),
hourStations = collections.OrderedDict( [
( 50621, (2012,2017) ),
( 6526, (1953,2012) ),
] ),
label = 'pe-5',
name = 'Charlottetown',
hourlyDataTimeOffset=timedelta(hours=4),
timezone=timezone('America/Moncton'),
stationName="Charlottetown Airport",
airportCode='YYG',
),
'edmonton-airport': CityInfo(
dayStations = collections.OrderedDict( [
( 50149, (2012,2017) ),
( 1865, (1959,2012) ),
] ),
hourStations = collections.OrderedDict( [
( 50149, (2012,2017) ),
( 1865, (1961,2012) ),
] ),
label = 'ab-71',
name = 'Edmonton-Airport',
weatherStatsSite='edmontonairport',
hourlyDataTimeOffset=timedelta(hours=7),
timezone=timezone('America/Edmonton'),
stationName="Edmonton Int'l Airport",
airportCode='YEG',
),
'edmonton': CityInfo(
dayStations = collections.OrderedDict( [
( 27214, (1996,2017) ),
( 1867, (1937,1996) ),
# ( 1863, (1880,1943) ),
] ),
hourStations = collections.OrderedDict( [
( 27214, (1999,2017) ),
( 1867, (1953,1999) ),
] ),
label = 'ab-50',
name = 'Edmonton-Blatchford',
weatherStatsSite='edmonton',
skipOb = ['visibility'],
skipDailyFields=['TOTAL_RAIN_MM', 'TOTAL_SNOW_CM'],
hourlyDataTimeOffset=timedelta(hours=7),
timezone=timezone('America/Edmonton'),
stationName="Edmonton Blatchford",
airportCode='XEC',
skipMetar=True,
),
'fredericton': CityInfo(
dayStations = collections.OrderedDict( [
( 48568, (2010,2017) ),
( 6157, (1951,2010) ),
( 6159, (1971,1951) ),
] ),
hourStations = collections.OrderedDict( [
( 48568, (2010,2017) ),
( 6157, (1953,2010) ),
] ),
label = 'nb-29',
name = 'Fredericton',
skipDailyFields=['TOTAL_RAIN_MM', 'TOTAL_SNOW_CM'],
hourlyDataTimeOffset=timedelta(hours=4),
timezone=timezone('America/Moncton'),
stationName="Fredericton Int'l Airport",
airportCode='YFC',
),
# 'gagetown': CityInfo(
# dayStations=None,
# hourStations=None,
# label=None,
# hourlyDataTimeOffset=None,
# name = 'CFB Gagetown',
# timezone=timezone('America/Moncton'),
# stationName="CFB Gagetown",
# airportCode='YCX',
# ),
'halifax': CityInfo(
dayStations = collections.OrderedDict( [
( 50620, (2012,2017) ),
( 6358, (1953,2012) ),
( 6357, (1933,1953) ),
( 6355, (1871,1933) ),
] ),
hourStations = collections.OrderedDict( [
( 50620, (2012,2017) ),
( 6358, (1961,2012) ),
] ),
label = 'ns-19',
name = 'Halifax',
hourlyDataTimeOffset=timedelta(hours=4),
timezone=timezone('America/Halifax'),
weatherStatsSite='halifaxairport',
stationName="Halifax Stanfield Int'l Airport",
airportCode='YHZ',
),
'hamilton': CityInfo(
dayStations = collections.OrderedDict( [
( 49908, (2011,2017) ),
( 4932, (1959,2011) ),
( 4612, (1931,1953) ),
] ),
hourStations = collections.OrderedDict( [
( 49908, (2011,2017) ),
( 4932, (1970,2011) ),
] ),
label = 'on-77',
name = 'Hamilton',
hourlyDataTimeOffset=timedelta(hours=5),
timezone=timezone('America/Toronto'),
stationName="Hamilton Munro Int'l Airport",
airportCode='YHM',
),
'kingston': CityInfo(
dayStations = collections.OrderedDict( [
( 47267, (2008,2017) ),
( 4295, (1930,1996) ),
( 4301, (1872,1930) ),
] ),
hourStations = collections.OrderedDict( [
( 47267, (2008,2017) ),
( 4295, (1967,2008) ),
] ),
label = 'on-69',
name = 'Kingston',
hourlyDataTimeOffset=timedelta(hours=5),
timezone=timezone('America/Toronto'),
skipDailyFields=['TOTAL_RAIN_MM', 'TOTAL_SNOW_CM'],
stationName="Kingston Airport",
airportCode='YGK',
),
'london': CityInfo(
dayStations = collections.OrderedDict( [
( 50093, (2012,2017) ),
( 10999, (2002,2012) ),
( 4789, (1940,2002) ),
] ),
hourStations = collections.OrderedDict( [
( 50093, (2012,2017) ),
( 10999, (2002,2012) ),
( 4789, (1953,2002) ),
] ),
label = 'on-137',
name = 'London',
hourlyDataTimeOffset=timedelta(hours=5),
timezone=timezone('America/Toronto'),
skipDailyFields=['MIN_WINDCHILL',
'TOTAL_RAIN_MM',
'TOTAL_SNOW_CM',
'AVG_WIND',
'SPD_OF_MAX_GUST_KPH'],
stationName="London Int'l Airport",
airportCode='YXU',
),
'montreal': CityInfo(
dayStations = collections.OrderedDict( [
( 51157, (2013,2017) ),
( 5415, (1941,2013) ),
( 5420, (1871,1940) ),
] ),
hourStations = collections.OrderedDict( [
( 51157, (2013,2017) ),
( 5415, (1953,2013) ),
] ),
label = 'qc-147',
name = 'Montréal',
hourlyDataTimeOffset=timedelta(hours=5),
timezone=timezone('America/Toronto'),
stationName="Montréal-Trudeau Int'l Airport",
airportCode='YUL',
),
'ottawa': CityInfo(
dayStations = collections.OrderedDict( [
( 49568, (2011,2017) ),
( 4337, (1938,2011) ),
#( 4333, (1889,2014) ),
( 4333, (1889,1938) ),
( 4327, (1872,1935) ),
] ),
hourStations = collections.OrderedDict( [
( 49568, (2011,2017) ),
( 4337, (1953,2011) ),
] ),
label = 'on-118',
name = 'Ottawa',
hourlyDataTimeOffset=timedelta(hours=5),
timezone=timezone('America/Toronto'),
stationName="Ottawa Macdonald-Cartier Int'l Airport",
airportCode='YOW',
),
'quebec': CityInfo(
dayStations = collections.OrderedDict( [
( 26892, (1992,2017) ),
( 5251, (1943,2017) ),
( 5249, (1872,1943) ),
] ),
hourStations = collections.OrderedDict( [
( 26892, (2005,2017) ),
( 5251, (1953,2013) ),
] ),
label = 'qc-133',
name = 'Québec City',
skipDailyFields=['TOTAL_RAIN_MM',
'TOTAL_SNOW_CM',
'SPD_OF_MAX_GUST_KPH',
'SNOW_ON_GRND_CM'],
hourlyDataTimeOffset=timedelta(hours=5),
timezone=timezone('America/Toronto'),
stationName="Quebec Lesage Int'l Airport",
airportCode='YQB',
),
'regina': CityInfo(
dayStations = collections.OrderedDict( [
( 28011, (1999,2017) ),
( 3002, (1883,1999) ),
] ),
hourStations = collections.OrderedDict( [
( 28011, (1999,2017) ),
( 3002, (1953,1999) ),
] ),
label = 'sk-32',
name = "Regina",
skipDailyFields=['MEAN_HUMIDITY', 'MIN_HUMIDITY'],
hourlyDataTimeOffset=timedelta(hours=6),
timezone=timezone('America/Regina'),
stationName="Regina Int'l Airport",
airportCode='YQR',
),
'stjohn': CityInfo(
dayStations = collections.OrderedDict( [
( 50310, (2012,2017) ),
( 6250, (1946,2012) ),
] ),
hourStations = collections.OrderedDict( [
( 50310, (2012,2017) ),
( 6250, (1953,2012) ),
] ),
label = 'nb-23',
name = "StJohn",
hourlyDataTimeOffset=timedelta(hours=4),
timezone=timezone('America/Moncton'),
stationName="Saint John Airport",
airportCode='YSJ',
),
'stjohns': CityInfo(
dayStations = collections.OrderedDict( [
( 50089, (2012,2017) ),
( 6720, (1942,2017) ),
( 6718, (1874,1956) ),
] ),
hourStations = collections.OrderedDict( [
( 50089, (2012,2017) ),
( 6720, (1959,2013) ),
] ),
label = 'nl-24',
name = "StJohns",
hourlyDataTimeOffset=timedelta(hours=3, minutes=30),
timezone=timezone('America/St_Johns'),
stationName="St. John's Int'l Airport",
airportCode='YYT',
),
'toronto': CityInfo(
dayStations = collections.OrderedDict( [
( 51459, (2013,2017) ),
( 5097, (1937,2013) ),
] ),
hourStations = collections.OrderedDict( [
( 51459, (2013,2017) ),
( 5097, (1953,2013) ),
] ),
label = 'on-143',
name = 'Toronto',
hourlyDataTimeOffset=timedelta(hours=5),
timezone=timezone('America/Toronto'),
stationName="Toronto Pearson Int'l Airport",
airportCode='YYZ',
),
'thunderBay': CityInfo(
dayStations = collections.OrderedDict( [
( 30682, (2003,2017) ),
( 4055, (1941,2004) ),
] ),
hourStations = collections.OrderedDict( [
( 30682, (2000,2017) ),
( 4055, (1953,2013) ),
] ),
label = 'on-100',
name = 'Thunder Bay',
hourlyDataTimeOffset=timedelta(hours=5),
timezone=timezone('America/Toronto'),
stationName="Thunder Bay Airport",
airportCode='YQT',
),
'vancouver': CityInfo(
dayStations = collections.OrderedDict( [
( 51442, (2013,2017) ),
( 889, (1937,2013) ),
] ),
hourStations = collections.OrderedDict( [
( 51442, (2013,2017) ),
( 889, (1953,2013) ),
] ),
label = 'bc-74',
name = 'Vancouver',
hourlyDataTimeOffset=timedelta(hours=8),
timezone=timezone('America/Vancouver'),
stationName="Vancouver Int'l Airport",
airportCode='YVR',
),
'vernon': CityInfo(
dayStations = collections.OrderedDict( [
( 46987, (2005,2017) ),
( 6837, (1991,2005) ),
( 1068, (1900,1997) ),
] ),
hourStations = collections.OrderedDict( [
( 46987, (2007,2017) ),
( 6837, (1994,2008) ),
( 1065, (1989,1995) ),
#( 1065, (1971,1979) ),
] ),
label = 'bc-27',
name = 'Vernon',
hourlyDataTimeOffset=timedelta(hours=8),
timezone=timezone('America/Vancouver'),
skipOb = ['visibility'],
stationName="Vernon",
airportCode='WJV',
skipMetar=True,
),
'victoria': CityInfo(
dayStations = collections.OrderedDict( [
( 51337, (2013,2017) ),
( 118, (1940,2013) ),
] ),
hourStations = collections.OrderedDict( [
( 51337, (2013,2017) ),
( 118, (1953,2013) ),
] ),
label = 'bc-85',
name = 'Victoria',
hourlyDataTimeOffset=timedelta(hours=8),
timezone=timezone('America/Vancouver'),
stationName="Victoria Int'l Airport",
airportCode='YYJ',
),
'waterloo': CityInfo(
dayStations = collections.OrderedDict( [
( 48569, (2010,2017) ),
( 32008, (2002,2011) ),
( 4832, (1970,2003) ),
] ),
hourStations = collections.OrderedDict( [
( 48569, (2010,2017) ),
( 32008, (2002,2010) ),
( 4832, (1966,2002) ),
] ),
label = 'on-82',
name = 'Waterloo',
hourlyDataTimeOffset=timedelta(hours=5),
timezone=timezone('America/Toronto'),
skipDailyFields=[],
weatherStatsSite='kitchenerwaterloo',
stationName="Region of Waterloo Int'l Airport",
airportCode='YKF',
),
'whitehorse': CityInfo(
dayStations = collections.OrderedDict( [
( 48168, (2009,2017) ),
( 1617, (1942,2009) ),
( 1616, (1900,1942) ),
] ),
hourStations = collections.OrderedDict( [
( 48168, (2009,2017) ),
( 1617, (1953,2015) ),
] ),
label = 'yt-16',
name = 'Whitehorse',
skipDailyFields=['MIN_WINDCHILL',
'TOTAL_RAIN_MM',
'TOTAL_SNOW_CM',
'AVG_WIND',
'SPD_OF_MAX_GUST_KPH'],
hourlyDataTimeOffset=timedelta(hours=7),
timezone=timezone('America/Edmonton'),
stationName="Whitehorse Airport",
airportCode='YXY',
),
'winnipeg': CityInfo(
dayStations = collections.OrderedDict( [
( 27174, (2008,2017) ),
( 3698, (1938,2008) ),
( 3703, (1872,1938) ),
] ),
hourStations = collections.OrderedDict( [
( 27174, (2013,2017) ),
( 51097, (2013,2013) ),
( 3698, (1953,2013) ),
] ),
label = 'mb-38',
name = 'Winnipeg',
skipDailyFields=['TOTAL_RAIN_MM', 'TOTAL_SNOW_CM'],
hourlyDataTimeOffset=timedelta(hours=6),
timezone=timezone('America/Winnipeg'),
stationName="Winnipeg Richardson Int'l Airport",
airportCode='YWG',
),
}
# METAR-only cities: these have no Environment Canada climate-station
# configuration, only an airport code for current-conditions lookups, so they
# are registered only when the METAR environment variable is set (opt-in).
if 'METAR' in os.environ:
    city.update({
        'gagetown': CityInfo(
            dayStations=None,
            hourStations=None,
            label=None,
            hourlyDataTimeOffset=None,
            name = 'CFB Gagetown',
            timezone=timezone('America/Moncton'),
            stationName="CFB Gagetown",
            airportCode='YCX',
        ),
        'gatineau': CityInfo(
            dayStations=None,
            hourStations=None,
            label=None,
            hourlyDataTimeOffset=None,
            name = 'Gatineau',
            timezone=timezone('America/Toronto'),
            airportCode='YND',
        ),
        'moncton': CityInfo(
            dayStations=None,
            hourStations=None,
            label=None,
            hourlyDataTimeOffset=None,
            name = 'Moncton',
            timezone=timezone('America/Moncton'),
            airportCode='YQM',
        )
    })
    # Ontario METAR-only sites: one skeleton entry per airport code, all in
    # the Toronto timezone.
    for cityName, apc in {
        'Sault Ste. Marie': 'CYAM',
        'Geraldton': 'CYGQ',
        'Buttonville': 'CYKZ',
        'Chapleau': 'CYLD',
        'Pickle Lake': 'CYPL',
        'Windsor': 'CYQG',
        'Kenora': 'CYQK',
        'Red Lake': 'CYRL',
        'Sudbury': 'CYSB',
        'St. Catharines/Niagara': 'CYSN',
        'Marathon': 'CYSP',
        'Trenton': 'CYTR',
        'Timmins': 'CYTS',
        'Wiarton': 'CYVV',
        'Sioux Lookout': 'CYXL',
        'Wawa': 'CYXZ',
        'Kapuskasing': 'CYYU',
    }.items():
        city[cityName] = CityInfo(
            dayStations=None,
            hourStations=None,
            label=None,
            hourlyDataTimeOffset=None,
            name=cityName,
            timezone=timezone('America/Toronto'),
            airportCode=apc)
    # NOTE(review): removes the full 'london' entry defined above whenever
    # METAR mode is on -- confirm this is intentional and not a leftover.
    del city['london']
#city = {'ottawa': city['ottawa']}
| endlisnis/weather-records | stations.py | Python | gpl-3.0 | 18,155 |
import logging
from pyvisdk.exceptions import InvalidArgumentError
########################################
# Automatically generated, do not edit.
########################################
log = logging.getLogger(__name__)
def HostCnxFailedAccountFailedEvent(vim, *args, **kwargs):
    '''This event records a failure to connect to a host due to a failure to set up a
    management account.

    Builds the SOAP data object via the vim client factory, filling the four
    required properties (positionally or by keyword) plus any optional ones.
    Raises IndexError when fewer than four arguments are supplied and
    InvalidArgumentError for unknown keyword names.
    '''

    obj = vim.client.factory.create('ns0:HostCnxFailedAccountFailedEvent')

    # do some validation checking...
    # Fixed upstream inconsistency: the message claimed "at least 5" while the
    # check was < 4, and the reported count ignored kwargs even though the
    # check included them.  There are exactly 4 required properties.
    if (len(args) + len(kwargs)) < 4:
        raise IndexError('Expected at least 4 arguments got: %d' % (len(args) + len(kwargs)))

    required = [ 'chainId', 'createdTime', 'key', 'userName' ]
    optional = [ 'changeTag', 'computeResource', 'datacenter', 'ds', 'dvs',
        'fullFormattedMessage', 'host', 'net', 'vm', 'dynamicProperty', 'dynamicType' ]

    # Positional args fill required then optional properties, in order.
    for name, arg in zip(required+optional, args):
        setattr(obj, name, arg)

    for name, value in kwargs.items():
        if name in required + optional:
            setattr(obj, name, value)
        else:
            raise InvalidArgumentError("Invalid argument: %s. Expected one of %s" % (name, ", ".join(required + optional)))

    return obj
| xuru/pyvisdk | pyvisdk/do/host_cnx_failed_account_failed_event.py | Python | mit | 1,222 |
#-------------------------------------------------------------------------------
# Copyright (c) 2012 Gael Honorez.
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the GNU Public License v3.0
# which accompanies this distribution, and is available at
# http://www.gnu.org/licenses/gpl.html
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#-------------------------------------------------------------------------------
from PyQt4 import QtCore, QtGui, QtWebKit
import util
class TourneyItemDelegate(QtGui.QStyledItemDelegate):
    """Delegate that renders a tournament item's rich-text (HTML) payload
    inside the list view."""
    #colors = json.loads(util.readfile("client/colors.json"))

    def __init__(self, *args, **kwargs):
        QtGui.QStyledItemDelegate.__init__(self, *args, **kwargs)
        # Minimum row height; grown in paint() when a document is taller.
        self.height = 125

    def paint(self, painter, option, index, *args, **kwargs):
        self.initStyleOption(option, index)

        painter.save()

        # Render the item text as an HTML document.
        html = QtGui.QTextDocument()
        html.setHtml(option.text)
        if self.height < html.size().height() :
            self.height = html.size().height()
        # Blank the text so the base style draws only background/selection,
        # then paint the HTML on top.
        option.text = ""
        option.widget.style().drawControl(QtGui.QStyle.CE_ItemViewItem, option, painter, option.widget)

        #Description
        painter.translate(option.rect.left(), option.rect.top())
        #painter.fillRect(QtCore.QRect(0, 0, option.rect.width(), option.rect.height()), QtGui.QColor(36, 61, 75, 150))
        clip = QtCore.QRectF(0, 0, option.rect.width(), option.rect.height())
        html.drawContents(painter, clip)

        painter.restore()

    def sizeHint(self, option, index, *args, **kwargs):
        # Size each row to the rendered HTML document's natural dimensions.
        self.initStyleOption(option, index)

        html = QtGui.QTextDocument()
        html.setHtml(option.text)
        return QtCore.QSize(int(html.size().width()), int(html.size().height()))
class QWebPageChrome(QtWebKit.QWebPage):
    """QWebPage subclass that masquerades as desktop Chrome so tournament
    sites serve their full (non-mobile, non-legacy) markup."""
    def __init__(self, *args, **kwargs):
        QtWebKit.QWebPage.__init__(self, *args, **kwargs)

    def userAgentForUrl(self, url):
        # Fixed spoofed Chrome user-agent string, returned for every URL.
        return "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/535.2 (KHTML, like Gecko) Chrome/15.0.874.121 Safari/535.2"
class TourneyItem(QtGui.QListWidgetItem):
    """List item representing one tournament: keeps the server-provided
    state and renders itself through an HTML template."""
    FORMATTER_SWISS_OPEN = unicode(util.readfile("tournaments/formatters/open.qthtml"))

    def __init__(self, parent, uid, *args, **kwargs):
        QtGui.QListWidgetItem.__init__(self, *args, **kwargs)

        self.uid = int(uid)
        self.parent = parent
        self.type = None
        self.client = None
        self.title = None
        self.description = None
        self.state = None
        self.players = []
        self.playersname = []

        self.viewtext = ""
        self.height = 40
        # Hidden until the first server update fills in the details.
        self.setHidden(True)

    def update(self, message, client):
        '''
        Updates this item from the message dictionary supplied
        '''

        self.client = client
        old_state = self.state
        self.state = message.get('state', "close")

        ''' handling the listing of the tournament '''
        self.title = message['name']
        self.type = message['type']
        self.url = message['url']
        self.description = message.get('description', "")
        self.players = message.get('participants', [])

        # On the transition into "started", embed the tournament page in a
        # new top-level tab.
        if old_state != self.state and self.state == "started" :
            widget = QtWebKit.QWebView()
            webPage = QWebPageChrome()
            widget.setPage(webPage)
            widget.setUrl(QtCore.QUrl(self.url))
            self.parent.topTabs.addTab(widget, self.title)

        self.playersname= []
        for player in self.players :
            self.playersname.append(player["name"])
            # If the local player participates, auto-join the tournament's
            # chat channel and notify them.
            if old_state != self.state and self.state == "started" and player["name"] == self.client.login :
                channel = "#" + self.title.replace(" ", "_")
                self.client.autoJoin.emit([channel])
                QtGui.QMessageBox.information(self.client, "Tournament started !", "Your tournament has started !\nYou have automatically joined the tournament channel.")

        playerstring = "<br/>".join(self.playersname)

        self.viewtext = self.FORMATTER_SWISS_OPEN.format(title=self.title, description=self.description, numreg=str(len(self.players)), playerstring=playerstring)
        self.setText(self.viewtext)

    def display(self):
        # HTML payload consumed by TourneyItemDelegate.
        return self.viewtext

    def data(self, role):
        if role == QtCore.Qt.DisplayRole:
            return self.display()
        elif role == QtCore.Qt.UserRole :
            return self
        return super(TourneyItem, self).data(role)

    def permutations(self, items):
        """Yields all permutations of the items."""
        # NOTE(review): appears unused in this class -- candidate for removal
        # or replacement with itertools.permutations.
        if items == []:
            yield []
        else:
            for i in range(len(items)):
                for j in self.permutations(items[:i] + items[i+1:]):
                    yield [items[i]] + j

    def __ge__(self, other):
        ''' Comparison operator used for item list sorting '''
        return not self.__lt__(other)

    def __lt__(self, other):
        ''' Comparison operator used for item list sorting '''
        # Uninitialized items sort first.
        if not self.client: return True # If not initialized...
        if not other.client: return False;

        # Default: Alphabetical
        return self.title.lower() < other.title.lower()
| HaraldWeber/client | src/tourneys/tourneyitem.py | Python | gpl-3.0 | 5,935 |
'''
Event Manager
=============
The :class:`EventManagerBase` is the abstract class intended for specific
implementation of dispatching motion events
(instances of :class:`~kivy.input.motionevent.MotionEvent`) to widgets through
:meth:`~kivy.uix.widget.Widget.on_motion` method of the
:class:`~kivy.uix.widget.Widget` class.
.. warning::
This feature is experimental and it remains so while this warning is
present.
Manager is a layer between the window and its widgets.
:class:`~kivy.core.window.WindowBase` will forward all the events it receives
in :meth:`~kivy.core.window.WindowBase.on_motion` method to all the managers
that declared to receive those event types. An event will continue to go
through the managers list even if one of them accepts it (by returning `True`).
When to use an event manager
----------------------------
Use a manager when you want to:
- Dispatch touch, hover, keyboard, joystick or any other events to the widgets
through :meth:`~kivy.uix.widget.Widget.on_motion` method.
- Dispatch filtered motion events by any criteria, like by a
:attr:`~kivy.input.motionevent.MotionEvent.device` or a
:attr:`~kivy.input.motionevent.MotionEvent.profile`.
- Combine several motion events (touch, hover etc.) into one new event and
dispatch it to the widgets.
- Dispatch one-time generic events, like app pause/resume.
- Write an event simulator, like a touch simulator which draws a circle on
window's canvas for every simulated touch.
Defining and registering an event manager
-----------------------------------------
1. Inherit :class:`EventManagerBase` and set which events this manager
should receive by declaring event types in
:attr:`EventManagerBase.type_ids` attribute.
2. Implement :meth:`EventManagerBase.dispatch` which will be called by window
to pass event type (one of "begin", "update", "end") and an event.
3. Implement :meth:`EventManagerBase.start` and :meth:`EventManagerBase.stop`
to allocate and release additional resources if needed.
4. Register a manager instance to window using method
:meth:`~kivy.core.window.WindowBase.register_event_manager`. This can be
done by overriding methods :meth:`~kivy.app.App.build` or
:meth:`~kivy.app.App.on_start`.
All registered managers are kept in the
:attr:`~kivy.core.window.WindowBase.event_managers` list. To unregister a
manager call :meth:`~kivy.core.window.WindowBase.unregister_event_manager`
which itself can be called in :meth:`~kivy.app.App.on_stop` method.
Dispatching events to the widgets
---------------------------------
Once registered, window will start the manager and forward all events of types
declared in :attr:`EventManagerBase.type_ids` to the manager's
:meth:`EventManagerBase.dispatch` method. It's up to manager to decide how to
dispatch them, either by going through :attr:`EventManagerBase.window.children`
list and dispatching `on_motion` event or by using some different logic. It's
also up to manager to dispatch grabbed events if grab feature is supported by
the event (see :meth:`~kivy.input.motionevent.MotionEvent.grab` and
:meth:`~kivy.input.motionevent.MotionEvent.ungrab` methods).
Manager can assign a different dispatch mode to decide how event
should be dispatched throughout the widget tree by changing the value of the
:attr:`~kivy.input.motionevent.MotionEvent.dispatch_mode` attribute. Before
changing the mode manager should store/restore the current one, either by using
a local variable or by using event's
:meth:`~kivy.input.motionevent.MotionEvent.push` /
:meth:`~kivy.input.motionevent.MotionEvent.pop` methods.
Currently there are three dispatch modes (behaviors) recognized by the
`on_motion` method in :class:`~kivy.uix.widget.Widget` class:
1. Default dispatch (requires :const:`MODE_DEFAULT_DISPATCH`) - event will go
through widget's `children` list, starting with the first widget in the
list until event gets accepted or last widget registered for that event is
reached. Mode :const:`MODE_DEFAULT_DISPATCH` is assigned by default in
:class:`~kivy.input.motionevent.MotionEvent` class.
2. Filtered dispatch (requires :const:`MODE_FILTERED_DISPATCH`) - event will go
only through registered child widgets.
3. No dispatch to children (requires :const:`MODE_DONT_DISPATCH`) - event will
not be dispatched to child widgets.
Note that window does not have a `motion_filter` property and therefore does
not have a list of filtered widgets from its `children` list.
'''
MODE_DEFAULT_DISPATCH = 'default'
'''Assign this mode to make event dispatch through widget's `children` list,
starting with the first widget in the list until event gets accepted or last
widget registered for that event is reached. Widgets after the last registered
widget are ignored.
.. versionadded:: 2.1.0
'''
MODE_FILTERED_DISPATCH = 'filtered'
'''Assign this mode to make event dispatch only to child widgets which were
previously registered to receive events of the same
:attr:`~kivy.input.motionevent.MotionEvent.type_id` and not to all
child widgets.
.. versionadded:: 2.1.0
'''
MODE_DONT_DISPATCH = 'none'
'''Assign this mode to prevent event from dispatching to child widgets.
.. versionadded:: 2.1.0
'''
class EventManagerBase(object):
    '''Abstract class with methods :meth:`start`, :meth:`stop` and
    :meth:`dispatch` for specific class to implement.

    A manager declares which motion-event type ids it handles via
    :attr:`type_ids`; the window then routes matching events to
    :meth:`dispatch`.

    Example of the manager receiving touch and hover events::

        class TouchHoverManager(EventManagerBase):

            type_ids = ('touch', 'hover')

            def start(self):
                # Create additional resources, bind callbacks to self.window

            def dispatch(self, etype, me):
                if me.type_id == 'touch':
                    # Handle touch event
                elif me.type_id == 'hover'
                    # Handle hover event

            def stop(self):
                # Release resources
    '''

    type_ids = None
    '''Override this attribute to declare the type ids of the events which
    manager wants to receive. This attribute will be used by
    :class:`~kivy.core.window.WindowBase` to know which events to pass to the
    :meth:`dispatch` method.

    .. versionadded:: 2.1.0
    '''

    window = None
    '''Holds the instance of the :class:`~kivy.core.window.WindowBase`.

    .. versionadded:: 2.1.0
    '''

    def start(self):
        '''Start the manager, bind callbacks to the objects and create
        additional resources. Attribute :attr:`window` is assigned when this
        method is called.

        .. versionadded:: 2.1.0
        '''

    def dispatch(self, etype, me):
        '''Dispatch event `me` to the widgets in the :attr:`window`.

        :Parameters:
            `etype`: `str`
                One of "begin", "update" or "end"
            `me`: :class:`~kivy.input.motionevent.MotionEvent`
                The Motion Event currently dispatched.
        :Returns: `bool`
            `True` to stop event dispatching

        .. versionadded:: 2.1.0
        '''

    def stop(self):
        '''Stop the manager, unbind from any objects and release any allocated
        resources.

        .. versionadded:: 2.1.0
        '''
| kivy/kivy | kivy/eventmanager/__init__.py | Python | mit | 7,176 |
'''
Created on April 1, 2014

@package: support testing
@copyright: 2014 Sourcefabric o.p.s.
@license: http://www.gnu.org/licenses/gpl-3.0.txt
@author: Ioan v. Pocol

Contains the service setups.
'''

from ally.container import support, ioc, bind
from __plugin__.superdesk.db_superdesk import bindSuperdeskSession,\
    bindSuperdeskValidations
from itertools import chain
from __plugin__.plugin.registry import addService

# --------------------------------------------------------------------

# IoC entity: binders applied to every bound service (session binding only).
@ioc.entity
def binders(): return [bindSuperdeskSession]

# IoC entity: binders for service listeners — validations first, then the
# base binders. NOTE(review): order presumably matters to the ally
# container; confirm before reordering.
@ioc.entity
def bindersService(): return list(chain((bindSuperdeskValidations,), binders()))

# Glob pattern matching the plugin's service API interfaces.
SERVICES = 'support_testing.api.**.I*Service'

# Wire implementations to the container: bind session handling to all
# *Service implementations, register every impl as an entity, attach the
# service listeners and finally force-load all matched services.
bind.bindToEntities('support_testing.impl.**.*Service', binders=binders)
support.createEntitySetup('support_testing.impl.**.*')
support.listenToEntities(SERVICES, listeners=addService(bindersService))
support.loadAllEntities(SERVICES)
| superdesk/Live-Blog | plugins/support-testing/__plugin__/support_testing/service.py | Python | agpl-3.0 | 938 |
# Copyright 2021 The TensorFlow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Vectorized embedding pairwise distances computation functions"""
from abc import ABC, abstractmethod
from typing import Union, List
import tensorflow as tf
from .types import FloatTensor
class Distance(ABC):
    """Abstract base class for pairwise distance/similarity functions.

    Sub-classes implement :meth:`call`, which maps a batch of embeddings to
    a pairwise distance matrix.

    Note: don't forget to add your distance to the DISTANCES list
    and add alias names in it.
    """
    def __init__(self, name: str, aliases: List[str] = None):
        """Init the distance.

        Args:
            name: Canonical name of the distance, e.g. 'cosine'.
            aliases: Optional list of alternative names, e.g. ['l2'].
        """
        self.name = name
        # Bug fix: the previous signature used a mutable default
        # (`aliases: List[str] = []`), which is shared across every instance
        # constructed without an explicit `aliases` — one instance mutating
        # its alias list would silently mutate all the others. Build a fresh
        # list per instance instead.
        self.aliases = list(aliases) if aliases is not None else []

    @abstractmethod
    def call(self, embeddings: FloatTensor) -> FloatTensor:
        """Compute pairwise distances for a given batch.

        Args:
            embeddings: Embeddings to compute the pairwise one.

        Returns:
            FloatTensor: Pairwise distance tensor.
        """

    def __call__(self, embeddings: FloatTensor):
        # Convenience: allow the distance object to be used as a callable.
        return self.call(embeddings)

    def __str__(self) -> str:
        return self.name

    def get_config(self):
        # No serializable state beyond the class identity itself.
        return {}
@tf.keras.utils.register_keras_serializable(package="Similarity")
class InnerProductSimilarity(Distance):
    """Pairwise inner product between embeddings.

    The [Inner product](https://en.wikipedia.org/wiki/Inner_product_space)
    is a *similarity*: larger values mean more similar vectors.

    NOTE! This is not a distance and is likely not what you want to use with
    the built in losses. At the very least this will flip the sign on the
    margin in many of the losses. This is likely meant to be used with custom
    loss functions that expect a similarity instead of a distance.
    """

    def __init__(self):
        "Init Inner product similarity"
        super().__init__('inner_product', ['ip'])

    @tf.function
    def call(self, embeddings: FloatTensor) -> FloatTensor:
        """Compute pairwise similarities for a given batch of embeddings.

        Args:
            embeddings: Embeddings to compute the pairwise one.

        Returns:
            FloatTensor: Pairwise distance tensor.
        """
        # One matmul of the batch against itself yields the full
        # similarity matrix: sims[i, j] = <e_i, e_j>.
        similarities: FloatTensor = tf.linalg.matmul(
            embeddings, embeddings, transpose_b=True)
        return similarities
@tf.keras.utils.register_keras_serializable(package="Similarity")
class CosineDistance(Distance):
    """Pairwise cosine distances between embeddings.

    The [Cosine Distance](https://en.wikipedia.org/wiki/Cosine_similarity)
    is an angular distance that varies from 0 (similar) to 1 (dissimilar).
    """

    def __init__(self):
        "Init Cosine distance"
        super().__init__('cosine')

    @tf.function
    def call(self, embeddings: FloatTensor) -> FloatTensor:
        """Compute pairwise distances for a given batch of embeddings.

        Args:
            embeddings: Embeddings to compute the pairwise one. The embeddings
            are expected to be normalized.

        Returns:
            FloatTensor: Pairwise distance tensor.
        """
        # With unit-norm embeddings, cosine similarity is the plain dot
        # product; distance = 1 - similarity. Clamp at zero to absorb tiny
        # negative values introduced by floating point error.
        cosine_sim = tf.linalg.matmul(embeddings, embeddings, transpose_b=True)
        distances: FloatTensor = tf.math.maximum(1 - cosine_sim, 0.0)
        return distances
@tf.keras.utils.register_keras_serializable(package="Similarity")
class EuclideanDistance(Distance):
    """Compute pairwise euclidean distances between embeddings.

    The [Euclidean Distance](https://en.wikipedia.org/wiki/Euclidean_distance)
    is the standard distance to measure the line segment between two embeddings
    in the Cartesian point. The larger the distance the more dissimilar
    the embeddings are.

    **Alias**: L2 Norm, Pythagorean
    """

    def __init__(self):
        "Init Euclidean distance"
        super().__init__('euclidean', ['l2', 'pythagorean'])

    @tf.function
    def call(self, embeddings: FloatTensor) -> FloatTensor:
        """Compute pairwise distances for a given batch of embeddings.

        Args:
            embeddings: Embeddings to compute the pairwise one.

        Returns:
            FloatTensor: Pairwise distance tensor.
        """
        # Expansion ||a - b||^2 = ||a||^2 - 2<a,b> + ||b||^2: one matmul
        # produces the whole pairwise matrix without materializing deltas.
        squared_norm = tf.math.square(embeddings)
        squared_norm = tf.math.reduce_sum(squared_norm, axis=1, keepdims=True)

        distances: FloatTensor = 2.0 * tf.linalg.matmul(
            embeddings, embeddings, transpose_b=True)
        distances = squared_norm - distances + tf.transpose(squared_norm)

        # Avoid NaN and inf gradients when back propagating through the sqrt.
        # values smaller than 1e-18 produce inf for the gradient, and 0.0
        # produces NaN. All values smaller than 1e-13 should produce a gradient
        # of 1.0.
        # The mask zeroes out the (near-)diagonal entries after the sqrt so
        # clamped cells report distance 0 rather than sqrt(1e-18).
        dist_mask = tf.math.greater_equal(distances, 1e-18)
        distances = tf.math.maximum(distances, 1e-18)
        distances = tf.math.sqrt(distances) * tf.cast(dist_mask, tf.float32)

        return distances
@tf.keras.utils.register_keras_serializable(package="Similarity")
class SquaredEuclideanDistance(Distance):
    """Pairwise squared Euclidean distance.

    The [Squared Euclidean Distance](https://en.wikipedia.org/wiki/Euclidean_distance#Squared_Euclidean_distance) is
    a distance that varies from 0 (similar) to infinity (dissimilar).
    """

    def __init__(self):
        super().__init__('squared_euclidean', ['sql2', 'sqeuclidean'])

    @tf.function
    def call(self, embeddings: FloatTensor) -> FloatTensor:
        """Compute pairwise distances for a given batch of embeddings.

        Args:
            embeddings: Embeddings to compute the pairwise one.

        Returns:
            FloatTensor: Pairwise distance tensor.
        """
        # ||a - b||^2 = ||a||^2 - 2<a,b> + ||b||^2, computed for every pair
        # with a single matmul of the batch against itself.
        sq_norms = tf.math.reduce_sum(
            tf.math.square(embeddings), axis=1, keepdims=True)
        distances: FloatTensor = 2.0 * tf.linalg.matmul(
            embeddings, embeddings, transpose_b=True)
        distances = sq_norms - distances + tf.transpose(sq_norms)
        # Numerical noise can push diagonal entries slightly below zero.
        distances = tf.math.maximum(distances, 0.0)
        return distances
@tf.keras.utils.register_keras_serializable(package="Similarity")
class ManhattanDistance(Distance):
    """Pairwise Manhattan (L1 / taxicab) distances between embeddings.

    The distance is the sum of the absolute coordinate differences between
    two embeddings. The larger the distance the more dissimilar the
    embeddings are.
    """

    def __init__(self):
        "Init Manhattan distance"
        super().__init__('manhattan', ['l1', 'taxicab'])

    @tf.function
    def call(self, embeddings: FloatTensor) -> FloatTensor:
        """Compute pairwise distances for a given batch of embeddings.

        Args:
            embeddings: Embeddings to compute the pairwise one.

        Returns:
            FloatTensor: Pairwise distance tensor.
        """
        # Flatten each embedding, then broadcast a (batch, 1, dim) against a
        # (1, batch, dim) view to get every pairwise delta, and take the L1
        # norm along the feature axis.
        flat = tf.reshape(embeddings, shape=[tf.shape(embeddings)[0], -1])
        pairwise_deltas = (tf.expand_dims(flat, axis=1)
                           - tf.expand_dims(flat, axis=0))
        distances: FloatTensor = tf.norm(pairwise_deltas, 1, axis=2)
        return distances
@tf.keras.utils.register_keras_serializable(package="Similarity")
class SNRDistance(Distance):
    """
    Computes pairwise SNR distances between embeddings.

    The [Signal-to-Noise Ratio distance](https://arxiv.org/abs/1904.02616)
    is the ratio of noise variance to the feature variance.
    """

    def __init__(self):
        "Init SNR distance"
        super().__init__('snr')

    @tf.function
    def call(self, embeddings: FloatTensor) -> FloatTensor:
        """Compute pairwise snr distances for a given batch of embeddings.

        SNR(i, j): anchor i and compared feature j
        SNR(i,j) may not be equal to SNR(j, i)

        Args:
            embeddings: Embeddings to compute the pairwise one.

        Returns:
            FloatTensor: Pairwise distance tensor.
        """
        # Calculating feature variance for each example
        # var(x) = E[x^2] - (E[x])^2, taken over the feature axis.
        embed_mean = tf.math.reduce_mean(embeddings, axis=1)
        embed_square = tf.math.square(embeddings)
        embed_sq_mean = tf.math.reduce_mean(embed_square, axis=1)
        anchor_var = embed_sq_mean - tf.square(embed_mean)

        # Calculating pairwise noise variances
        # delta[i, j] = e_i - e_j via broadcasting; its per-pair variance is
        # the "noise" in the SNR ratio.
        x_rs = tf.reshape(embeddings, shape=[tf.shape(embeddings)[0], -1])
        delta = tf.expand_dims(x_rs, axis=1) - tf.expand_dims(x_rs, axis=0)
        delta_mean = tf.math.reduce_mean(delta, axis=2)
        delta_sq = tf.math.square(delta)
        delta_sq_mean = tf.math.reduce_mean(delta_sq, axis=2)
        noise_var = delta_sq_mean - tf.square(delta_mean)

        # dist[i, j] = var(e_i - e_j) / var(e_i): row i is normalized by
        # anchor i's variance, which is why the matrix is not symmetric.
        distances: FloatTensor = tf.divide(noise_var,
                                           tf.expand_dims(anchor_var, axis=1))

        return distances
# Registry of implemented distances. `distance_canonicalizer` resolves
# user-supplied names and aliases against this list, so any new Distance
# subclass must be instantiated here to be reachable by name.
DISTANCES = [
    InnerProductSimilarity(),
    EuclideanDistance(),
    SquaredEuclideanDistance(),
    ManhattanDistance(),
    CosineDistance(),
    SNRDistance()
]
def distance_canonicalizer(user_distance: Union[Distance, str]) -> Distance:
    """Normalize user requested distance to its matching Distance object.

    Args:
        user_distance: Requested distance either by name or by object

    Returns:
        Distance: Requested object name.

    Raises:
        ValueError: if the request is neither a Distance object nor a
            recognized distance name/alias.
    """
    if isinstance(user_distance, Distance):
        # User supplied a ready-made distance object — use it as-is.
        return user_distance

    if isinstance(user_distance, str):
        # Resolve aliases to canonical names, then names to instances.
        instances_by_name = {d.name: d for d in DISTANCES}
        canonical_names = {d.name: d.name for d in DISTANCES}
        for dist in DISTANCES:
            for alias in dist.aliases:
                canonical_names[alias] = dist.name

        requested = user_distance.lower().strip()
        if requested not in canonical_names:
            raise ValueError('Metric not supported by the framework')
        return instances_by_name[canonical_names[requested]]

    raise ValueError('Unknown distance: must either be a MetricDistance\
            or a known distance function')
| tensorflow/similarity | tensorflow_similarity/distances.py | Python | apache-2.0 | 10,647 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
from __future__ import division
import numpy as np
import yaml
import message_filters
import rospy
from jsk_recognition_msgs.msg import ClassificationResult
from jsk_topic_tools import ConnectionBasedTransport
class BoostObjectRecognition(ConnectionBasedTransport):
    """ROS node that fuses two ClassificationResult streams (bag-of-features
    and color-histogram) into one result via a per-class weighted sum of the
    class probability vectors.

    Parameters (rosparam):
        ~weight: path to a YAML file mapping each class name to its
            'bof' and 'color' weights (required).
        ~queue_size: synchronizer queue size (default: 100).
        ~approximate_sync: use approximate time sync when True.
    """

    def __init__(self):
        super(BoostObjectRecognition, self).__init__()
        weight_yaml = rospy.get_param('~weight', None)
        if weight_yaml is None:
            rospy.logerr('must set weight yaml file path to ~weight')
            return
        with open(weight_yaml) as f:
            # Fix: yaml.load without an explicit Loader is deprecated
            # (PyYAML >= 5.1) and can construct arbitrary Python objects.
            # The weight file is plain scalars/mappings, so safe_load is
            # a drop-in replacement.
            self.weight = yaml.safe_load(f)
        self.pub = self.advertise(
            '~output', ClassificationResult, queue_size=1)

    def subscribe(self):
        """Subscribe to both input topics and time-synchronize them."""
        self.sub_bof = message_filters.Subscriber(
            '~input/bof', ClassificationResult)
        self.sub_ch = message_filters.Subscriber(
            '~input/ch', ClassificationResult)
        queue_size = rospy.get_param('~queue_size', 100)
        if rospy.get_param('~approximate_sync', False):
            sync = message_filters.ApproximateTimeSynchronizer(
                [self.sub_bof, self.sub_ch], queue_size=queue_size,
                slop=1)
        else:
            sync = message_filters.TimeSynchronizer(
                [self.sub_bof, self.sub_ch], queue_size=queue_size)
        sync.registerCallback(self._apply)

    def unsubscribe(self):
        """Release both input subscriptions."""
        self.sub_bof.unregister()
        self.sub_ch.unregister()

    def _apply(self, bof_msg, ch_msg):
        """Combine a synchronized pair of classification messages.

        Both messages must share the same target_names; probabilities are
        row-major (sample, class) matrices flattened into 1-D.
        """
        target_names = bof_msg.target_names
        assert target_names == ch_msg.target_names
        N_label = len(target_names)
        bof_proba = np.array(bof_msg.probabilities).reshape((-1, N_label))
        ch_proba = np.array(ch_msg.probabilities).reshape((-1, N_label))
        # Per-class weights loaded from the ~weight YAML file.
        bof_weight = np.array([self.weight[n]['bof'] for n in target_names])
        ch_weight = np.array([self.weight[n]['color'] for n in target_names])
        y_proba = (bof_weight * bof_proba) + (ch_weight * ch_proba)
        # verification result for debug
        y_pred = np.argmax(y_proba, axis=-1)
        target_names = np.array(target_names)
        label_proba = [p[i] for p, i in zip(y_proba, y_pred)]
        # compose msg
        msg = ClassificationResult()
        msg.header = bof_msg.header
        msg.labels = y_pred
        msg.label_names = target_names[y_pred]
        msg.label_proba = label_proba
        msg.probabilities = y_proba.reshape(-1)
        msg.classifier = '<jsk_2015_05_baxter_apc.BoostObjectRecognition>'
        msg.target_names = target_names
        self.pub.publish(msg)
if __name__ == '__main__':
    # Node entry point: register with the ROS master, construct the
    # recognizer (ConnectionBasedTransport subscribes lazily on demand)
    # and block until shutdown.
    rospy.init_node('boost_object_recognition')
    boost_or = BoostObjectRecognition()
    rospy.spin()
| start-jsk/jsk_apc | jsk_2015_05_baxter_apc/node_scripts/boost_object_recognition.py | Python | bsd-3-clause | 2,781 |
# Copyright (c) 2013 Greg Lange
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied. See the License for the specific language governing
# permissions and limitations under the License
import json
import random
import uuid
from qork.queue import aws as queue
import util
class SQSMessage(object):
    """Minimal stand-in for a boto SQS message used by the tests below.

    Only carries a unique identity so messages can be told apart and used
    in sets/dicts.
    """

    def __init__(self):
        # Every fake message gets its own random UUID.
        self.id = uuid.uuid4()
class SQSQueue(object):
    """Minimal stand-in for a boto SQS queue.

    Remembers its name and silently accepts attribute updates.
    """

    def __init__(self, name):
        self.name = name

    def set_attribute(self, *args, **kwargs):
        # Deliberately a no-op: the tests only require the call to succeed.
        return None
class SQSConnection(object):
    """Fake boto SQS connection factory for the tests below.

    Records the expected credentials; :meth:`method` is patched in place of
    the real connection constructor and asserts the caller forwarded those
    exact credentials before handing back this fake.
    """

    def __init__(self, test, access_key, secret_access_key):
        self.test = test
        self.access_key = access_key
        self.secret_access_key = secret_access_key

    def method(self, access_key, secret_access_key):
        # Verify credential pass-through, then act as the "connection".
        self.test.assertEquals(self.access_key, access_key)
        self.test.assertEquals(
            self.secret_access_key, secret_access_key)
        return self

    def create_queue(self, name):
        # Hand back a fake queue carrying the requested name.
        return SQSQueue(name)
class TestQueueReader(util.MockerTestCase):
    """Unit tests for qork's QueueReader using hand-rolled fakes.

    Each test patches names inside the `queue` module via
    self.mock (provided by util.MockerTestCase) and drives QueueReader
    through fake queues/connections instead of real SQS.
    """

    def test_init(self):
        # QueueReader must prefix every queue prefix with the global prefix
        # and store the credentials verbatim.
        access_key = 'some_key'
        secret_access_key = 'some_other_key'
        global_prefix = 'test'
        queue_prefixes = 'one two three'.split()
        sqs_connection = SQSConnection(self, access_key, secret_access_key)
        self.mock(queue, 'SQSConnection', sqs_connection.method)
        qr = queue.QueueReader(access_key, secret_access_key, global_prefix,
                               queue_prefixes)
        self.assertEquals(['%s_%s' % (global_prefix, x) for x in
                           queue_prefixes], qr._queue_prefixes)
        self.assertEquals(access_key, qr._access_key)
        self.assertEquals(secret_access_key, qr._secret_access_key)

    def test_get_message(self):
        # get_message should drain queues in order, skipping empty ones,
        # and return None once every queue is exhausted.
        class MessageQueue(object):
            def __init__(self, test, vtime, messages):
                self.test = test
                self.vtime = vtime
                self.messages = messages.split()

            def get_message(self, vtime):
                if self.messages:
                    return self.messages.pop(0)
                return None

        class QueueReader(queue.QueueReader):
            # Bypass the real __init__ to inject canned queues.
            def __init__(self, test, vtime):
                self.queues = [
                    MessageQueue(test, vtime, '1 2 3'),
                    MessageQueue(test, vtime, ''),
                    MessageQueue(test, vtime, '4'),
                    MessageQueue(test, vtime, '5 6')
                ]

            def get_queues(self):
                return self.queues

        vtime = 'some_vtime'
        msgs = []
        qr = QueueReader(self, vtime)
        m = qr.get_message(vtime)
        while m:
            msgs.append(m)
            m = qr.get_message(vtime)
        self.assertEquals('1 2 3 4 5 6'.split(), msgs)

    def test_get_queues(self):
        # get_queues should return queues ordered by prefix (despite the
        # connection shuffling them) and include '*_failure' queues only
        # when explicitly requested.
        class MessageQueue(object):
            def __init__(self, access_key, secret_access_key, name,
                         sqs_queue=None, max_failure_count=10):
                self.access_key = access_key
                self.secret_access_key = secret_access_key
                self.name = name
                self.sqs_queue = sqs_queue
                self._max_failure_count = max_failure_count

        class Connection(object):
            def __init__(self, queues):
                self.queues = queues

            def get_all_queues(self, prefix):
                if prefix in self.queues:
                    qs = [SQSQueue('%s_%s' % (prefix, x)) for x in
                          self.queues[prefix]]
                    # Shuffle to prove QueueReader re-orders the result.
                    random.shuffle(qs)
                    return qs
                else:
                    return []

        class QueueReader(queue.QueueReader):
            def __init__(self, access_key, secret_access_key, queue_prefixes,
                         queues):
                self._access_key = access_key
                self._secret_access_key = secret_access_key
                self._queue_prefixes = queue_prefixes
                self._conn = Connection(queues)
                self._max_failure_count = None

        access_key = 'some_key'
        secret_access_key = 'some_secret_key'
        queue_prefixes = 'one two three four'.split()
        queues = {
            'one': '1 2 3 4 failure'.split(),
            'two': ''.split(),
            'three': '01 02 03 04 05 06 07 08'.split(),
            'four': '20110101 20110510 failure'.split(),
        }
        self.mock(queue, 'MessageQueue', MessageQueue)
        # without failure queues
        qr = QueueReader(access_key, secret_access_key, queue_prefixes, queues)
        queue_list = []
        for p in queue_prefixes:
            for q in queues[p]:
                if q == 'failure':
                    continue
                queue_list.append('%s_%s' % (p, q))
        for q in qr.get_queues():
            expected = queue_list.pop(0)
            self.assertEquals(expected, q.name)
        self.assertEquals(0, len(queue_list))
        # with failure queues
        qr = QueueReader(access_key, secret_access_key, queue_prefixes, queues)
        queue_list = []
        for p in queue_prefixes:
            for q in queues[p]:
                queue_list.append('%s_%s' % (p, q))
        for q in qr.get_queues(True):
            expected = queue_list.pop(0)
            self.assertEquals(expected, q.name)
        self.assertEquals(0, len(queue_list))
class TestMessageQueue(util.MockerTestCase):
    """Unit tests for qork's MessageQueue.

    The real SQS classes are replaced with local fakes (often shadowing the
    module-level SQSQueue/SQSMessage helpers) and patched into the `queue`
    module via self.mock from util.MockerTestCase.
    """

    def test_init(self):
        # The failure queue name is derived from the queue name unless the
        # queue itself is already a '*_failure' queue; a supplied sqs_queue
        # whose name mismatches must raise ValueError.
        access_key = 'some_key'
        secret_access_key = 'some_secret_key'
        sqs_connection = SQSConnection(self, access_key, secret_access_key)
        self.mock(queue, 'SQSConnection', sqs_connection.method)
        # Each row: [queue name, expected failure queue name, sqs_queue arg]
        data = [
            ['some_name', 'some_name_failure', None],
            ['some_name_20111008', 'some_name_failure', None],
            ['some_name_failure', None, None],
            ['some_name', 'some_name_failure', SQSQueue('some_name')],
            ['some_name_20111008', 'some_name_failure',
             SQSQueue('some_name_20111008')],
            ['some_name_failure', None, SQSQueue('some_name_failure')],
        ]
        for d in data:
            q = queue.MessageQueue(access_key, secret_access_key, d[0], d[2])
            self.assertEquals(access_key, q._access_key)
            self.assertEquals(secret_access_key, q._secret_access_key)
            self.assertEquals(SQSConnection, type(q._conn))
            self.assertEquals(SQSQueue, type(q._sqs_queue))
            self.assertEquals(d[0], q._sqs_queue.name)
            self.assertEquals(d[1], q._failure_queue_name)
        self.assertRaises(ValueError, queue.MessageQueue, access_key,
                          secret_access_key, 'some_name',
                          SQSQueue('some_other_name'))

    def test_delete(self):
        # delete() must delegate to the underlying SQS queue exactly once.
        class SQSQueue(object):
            def __init__(self):
                self.delete_called = 0

            def delete(self):
                self.delete_called += 1

        class MessageQueue(queue.MessageQueue):
            def __init__(self):
                self._sqs_queue = SQSQueue()

        q = MessageQueue()
        q.delete()
        self.assertEquals(q._sqs_queue.delete_called, 1)

    def test_delete_message(self):
        # delete_message() must forward the exact SQS message object.
        class SQSQueue(object):
            def __init__(self, test, sqs_message):
                self.test = test
                self.sqs_message = sqs_message
                self.delete_message_called = 0

            def delete_message(self, sqs_message):
                self.test.assertEquals(self.sqs_message, sqs_message)
                self.delete_message_called += 1

        class MessageQueue(queue.MessageQueue):
            def __init__(self, test, sqs_message):
                self._sqs_queue = SQSQueue(test, sqs_message)

        sqs_message = SQSMessage()
        q = MessageQueue(self, sqs_message)
        q.delete_message(sqs_message)
        self.assertEquals(1, q._sqs_queue.delete_message_called)

    def test_get_message(self):
        # get_message() wraps a read SQS message into queue.Message (patched
        # here) and returns None when the queue has nothing to read.
        class Message(object):
            def __init__(self, test, queue, sqs_message, max_failure_count):
                self.test = test
                self.queue = queue
                self.sqs_message = sqs_message
                self.max_failure_count = max_failure_count

            def method(self, queue, sqs_message, max_failure_count):
                self.test.assertEquals(self.queue, queue)
                self.test.assertEquals(self.sqs_message, sqs_message)
                self.test.assertEquals(self.max_failure_count,
                                       max_failure_count)
                return self

        class SQSQueue(object):
            def __init__(self, test, vtime, message):
                self.test = test
                self.vtime = vtime
                self.message = message

            def read(self, vtime):
                self.test.assertEquals(self.vtime, vtime)
                return self.message

        class MessageQueue(queue.MessageQueue):
            def __init__(self, sqs_queue, max_failure_count):
                self._sqs_queue = sqs_queue
                self._max_failure_count = max_failure_count

        vtime = 'some_vtime'
        # message returned
        sqs_message = SQSMessage()
        sqs_queue = SQSQueue(self, vtime, sqs_message)
        max_failure_count = 'some_count'
        q = MessageQueue(sqs_queue, max_failure_count)
        message = Message(self, q, sqs_message, max_failure_count)
        self.mock(queue, 'Message', message.method)
        message_returned = q.get_message(vtime)
        self.assertEquals(message, message_returned)
        # message not returned
        sqs_queue = SQSQueue(self, vtime, None)
        q = MessageQueue(sqs_queue, max_failure_count)
        message_returned = q.get_message(vtime)
        self.assertEquals(None, message_returned)

    def test_message_count(self):
        # message_count() must report the SQS queue's count untouched.
        class SQSQueue(object):
            def __init__(self, message_count):
                self.message_count = message_count
                self.count_called = 0

            def count(self):
                self.count_called += 1
                return self.message_count

        class MessageQueue(queue.MessageQueue):
            def __init__(self, message_count):
                self._sqs_queue = SQSQueue(message_count)

        for c in [0, 1, 2, 4, 8, 16]:
            q = MessageQueue(c)
            self.assertEquals(c, q.message_count())
            self.assertEquals(1, q._sqs_queue.count_called)

    def test_read_message(self):
        # read_messages() must yield each distinct message exactly once,
        # even when SQS re-delivers duplicates; the fake queue bumps the
        # expected visibility timeout whenever it re-delivers.
        class Message(object):
            def __init__(self, queue, sqs_message):
                self.queue = queue
                self.sqs_message = sqs_message

        class SQSQueue(object):
            def __init__(self, test, messages):
                self.test = test
                self.messages = messages
                self.vtime = 1
                self.seen = set()

            def read(self, vtime):
                if not self.messages:
                    return None
                message = self.messages.pop(0)
                self.test.assertEquals(self.vtime, vtime)
                if message in self.seen:
                    self.vtime += 1
                self.seen.add(message)
                return message

        class MessageQueue(queue.MessageQueue):
            def __init__(self, sqs_queue):
                self._sqs_queue = sqs_queue

        self.mock(queue, 'Message', Message)
        msg1 = SQSMessage()
        msg2 = SQSMessage()
        msg3 = SQSMessage()
        messages = [SQSMessage(), msg1, msg1, SQSMessage(), msg2, msg3, msg2,
                    SQSMessage(), SQSMessage(), SQSMessage(), msg3, msg3,
                    msg3, msg3, ]
        unique_messages = set(messages)
        sqs_queue = SQSQueue(self, messages)
        q = MessageQueue(sqs_queue)
        for message in q.read_messages():
            self.assertEquals(q, message.queue)
            self.assert_(message.sqs_message in unique_messages)
            unique_messages.remove(message.sqs_message)

    def test_search_message(self):
        # search_messages() must yield only the messages whose matches()
        # returns True for the given meta/body filters.
        class Message(object):
            def __init__(self, test, meta, body, match):
                self.test = test
                self.meta = meta
                self.body = body
                self.match = match

            def matches(self, meta, body):
                self.test.assertEquals(self.meta, meta)
                self.test.assertEquals(self.body, body)
                return self.match

        class MessageQueue(queue.MessageQueue):
            def __init__(self, messages):
                self.messages = messages

            def read_messages(self):
                for message in self.messages:
                    yield message

        meta = 'some_meta'
        body = 'some_body'
        messages = [
            Message(self, meta, body, True),
            Message(self, meta, body, True),
            Message(self, meta, body, False),
            Message(self, meta, body, False),
            Message(self, meta, body, True),
            Message(self, meta, body, False),
            Message(self, meta, body, True),
        ]
        q = MessageQueue(messages)
        for message in q.search_messages(meta, body):
            self.assert_(message in messages)
            self.assertEquals(True, message.match)

    def test_send_failure(self):
        # send_failure() must construct a MessageQueue for the failure queue
        # (patched to return this fake) and forward the message to it.
        class MessageQueue(queue.MessageQueue):
            def __init__(self, test, msg, access_key, secret_access_key,
                         failure_queue_name, sqs_queue=None):
                self.test = test
                self.msg = msg
                self._access_key = access_key
                self._secret_access_key = secret_access_key
                self._failure_queue_name = failure_queue_name
                self.sqs_queue = sqs_queue
                self.send_message_called = 0

            def method(self, access_key, secret_access_key, name,
                       sqs_queue=None):
                self.test.assertEquals(self._access_key, access_key)
                self.test.assertEquals(self._secret_access_key,
                                       secret_access_key)
                self.test.assertEquals(self._failure_queue_name, name)
                self.test.assertEquals(self.sqs_queue, sqs_queue)
                return self

            def send_message(self, msg):
                self.test.assertEquals(self.msg, msg)
                self.send_message_called += 1

        msg = {'some_key': 'some_value'}
        q = MessageQueue(self, msg, 'some_key', 'some_secret_key',
                         'failed_messages')
        self.mock(queue, 'MessageQueue', q.method)
        q.send_failure(msg)
        self.assertEquals(1, q.send_message_called)

    def test_send_restore(self):
        # send_restore() must route the message back to the queue named in
        # its meta['queue_name'].
        class MessageQueue(queue.MessageQueue):
            def __init__(self, test, msg, access_key, secret_access_key,
                         restore_queue_name, sqs_queue=None):
                self.test = test
                self.msg = msg
                self._access_key = access_key
                self._secret_access_key = secret_access_key
                self.restore_queue_name = restore_queue_name
                self.sqs_queue = sqs_queue
                self.send_message_called = 0

            def method(self, access_key, secret_access_key, name,
                       sqs_queue=None):
                self.test.assertEquals(self._access_key, access_key)
                self.test.assertEquals(self._secret_access_key,
                                       secret_access_key)
                self.test.assertEquals(self.restore_queue_name, name)
                self.test.assertEquals(self.sqs_queue, sqs_queue)
                return self

            def send_message(self, msg):
                self.test.assertEquals(self.msg, msg)
                self.send_message_called += 1

        msg = {'meta': {'queue_name': 'some_queue'}}
        q = MessageQueue(self, msg, 'some_key', 'some_secret_key',
                         msg['meta']['queue_name'])
        self.mock(queue, 'MessageQueue', q.method)
        q.send_restore(msg)
        self.assertEquals(1, q.send_message_called)

    def test_send_message_value_error(self):
        # Non-dict payloads are rejected.
        class MessageQueue(queue.MessageQueue):
            def __init__(self):
                pass

        q = MessageQueue()
        self.assertRaises(ValueError, q.send_message, list())

    def test_send_message(self):
        # send_message() must wrap the payload as {'meta': ..., 'body': ...},
        # generating meta (message_id/timestamp/queue_name — uuid4 and
        # timestamp are patched to fixed values) when the payload has no
        # meta/body split of its own.
        def uuid4():
            return 'some_uuid4_key'

        def timestamp():
            return 'some_timestamp_value'

        class SQSMessage(object):
            def __init__(self, test, meta, body):
                self.test = test
                self.meta = meta
                self.body = body

            def method(self):
                return self

            def set_body(self, content):
                # Round-trip the JSON body and verify both halves survive
                # stringification unchanged.
                msg = json.loads(content)
                meta = dict([[str(x), str(y)] for x, y in
                             self.meta.iteritems()])
                self.test.assertEquals(self.meta, meta)
                body = dict([[str(x), str(y)] for x, y in
                             self.body.iteritems()])
                self.test.assertEquals(self.body, body)
                self.test.assertEquals(2, len(msg.keys()))

        class SQSQueue(object):
            def __init__(self, test, sqs_message):
                self.test = test
                self.sqs_message = sqs_message

            def write(self, sqs_message):
                self.test.assertEquals(self.sqs_message, sqs_message)
                return True

        class MessageQueue(queue.MessageQueue):
            def __init__(self, test, name, sqs_message):
                self.name = name
                self._sqs_queue = SQSQueue(test, sqs_message)

        queue_name = 'some_queue_name'
        # no body, meta
        msg = {'some_key': 'some_value'}
        meta = {'message_id': uuid4(), 'timestamp': timestamp(),
                'queue_name': queue_name}
        sqs_message = SQSMessage(self, meta, msg)
        self.mock(queue, 'SQSMessage', sqs_message.method)
        self.mock(queue, 'uuid4', uuid4)
        self.mock(queue, 'timestamp', timestamp)
        q = MessageQueue(self, 'some_queue', sqs_message)
        q.send_message(msg)
        self.mock.restore()
        # body and meta
        msg = {'meta': {'some_meta_key': 'some_value'},
               'body': {'some_body_key': 'some_value'}}
        sqs_message = SQSMessage(self, msg['meta'], msg['body'])
        self.mock(queue, 'SQSMessage', sqs_message.method)
        self.mock(queue, 'uuid4', uuid4)
        self.mock(queue, 'timestamp', timestamp)
        q = MessageQueue(self, 'some_queue', sqs_message)
        q.send_message(msg)

    def test_send_message_write_failure(self):
        # A failed SQS write (write() returning False) must surface as a
        # RuntimeError rather than being silently dropped.
        class SQSMessage(object):
            def set_body(self, content):
                pass

        class SQSQueue(object):
            def write(self, message):
                return False

        class MessageQueue(queue.MessageQueue):
            def __init__(self):
                self.name = 'some_name'
                self._sqs_queue = SQSQueue()

        self.mock(queue, 'SQSMessage', SQSMessage)
        q = MessageQueue()
        self.assertRaises(RuntimeError, q.send_message, {})
class TestMessage(util.MockerTestCase):
def test_init(self):
q, sqs_message = 'queue', 'sqs_message'
m = queue.Message(q, sqs_message)
self.assertEquals(q, m._queue)
self.assertEquals(sqs_message, m._sqs_message)
self.assertEquals(None, m._msg)
self.assertEquals(10, m._max_failure_count)
def test_delete(self):
class MessageQueue(object):
def __init__(self, test, sqs_message):
self.test = test
self.sqs_message = sqs_message
self.delete_called = 0
def delete_message(self, sqs_message):
self.delete_called += 1
self.test.assertEquals(self.sqs_message, sqs_message)
class Message(queue.Message):
def __init__(self, test):
self._sqs_message = SQSMessage()
self._queue = MessageQueue(test, self._sqs_message)
m = Message(self)
m.delete()
self.assertEquals(1, m._queue.delete_called)
def test_body(self):
class Message(queue.Message):
def __init__(self, msg):
self._msg = msg
@property
def msg(self):
return self._msg
msg = {'body': {'some_key': 'some_value'}}
m = Message(msg)
self.assertEquals(msg['body'], m.body)
def test_get_exception(self):
class Message(queue.Message):
def __init__(self):
pass
m = Message()
try:
raise Exception()
except Exception:
ret = m._get_exception()
self.assertEquals('Exception', ret[0])
self.assertEquals((), ret[1])
self.assertEquals(list, type(ret[2]))
self.assert_(len(ret[2]) > 0)
for line in ret[2]:
self.assertEquals(str, type(line))
e_arg = 'some text'
try:
raise Exception(e_arg)
except:
ret = m._get_exception()
self.assertEquals('Exception', ret[0])
self.assertEquals((e_arg,), ret[1])
self.assertEquals(list, type(ret[2]))
self.assert_(len(ret[2]) > 0)
for line in ret[2]:
self.assertEquals(str, type(line))
def test_handle_exception(self):
class MessageQueue(object):
def __init__(self, test, msg):
self.test = test
self.msg = msg
self.send_message_called = 0
self.send_failure_called = 0
def send_message(self, msg):
self.send_message_called += 1
self.test.assertEquals(self.msg, msg)
def send_failure(self, msg):
self.send_failure_called += 1
self.test.assertEquals(self.msg, msg)
class Message(queue.Message):
def __init__(self, queue, msg, exc):
self._msg = msg
self._queue = queue
self.exc = exc
self._max_failure_count = 10
self.delete_called = 0
self.get_exception_called = 0
def delete(self):
self.delete_called += 1
def _get_exception(self):
self.get_exception_called += 1
return self.exc
# message retry, send_message called
msg = {'meta': {}, 'body': {'some_key': 'some_value'}}
q = MessageQueue(self, msg)
exc = "some_exception"
m = Message(q, msg, exc)
m.handle_exception()
self.assertEquals(1, q.send_message_called)
self.assertEquals(0, q.send_failure_called)
self.assertEquals(1, msg['meta']['failure_count'])
self.assertEquals(1, len(msg['meta']['exceptions']))
# message over max failures, send_failure called
msg = {
'meta': {'exceptions': ['some_exception'] * 10,
'failure_count': 10},
'body': {'some_key': 'some_value'}}
q = MessageQueue(self, msg)
exc = "some_exception"
m = Message(q, msg, exc)
m.handle_exception()
self.assertEquals(0, q.send_message_called)
self.assertEquals(1, q.send_failure_called)
self.assertEquals(11, msg['meta']['failure_count'])
self.assertEquals(11, len(msg['meta']['exceptions']))
def test_meta(self):
class Message(queue.Message):
def __init__(self, msg):
self._msg = msg
@property
def msg(self):
return self._msg
msg = {'meta': {'some_key': 'some_value'}}
m = Message(msg)
self.assertEquals(msg['meta'], m.meta)
def test_matches(self):
class Message(queue.Message):
def __init__(self, meta, body):
self._msg = {'meta': meta, 'body': body}
# no match keys
m = Message({}, {})
self.assertEquals(True, m.matches({}, {}))
# a meta match key
m = Message({'some_key': 'some_value'}, {})
self.assertEquals(True, m.matches({'some_key': 'some_value'}, {}))
self.assertEquals(False, m.matches({'some_key': 'some_other_value'},
{}))
# a body match key
m = Message({}, {'some_key': 'some_value'})
self.assertEquals(True, m.matches({}, {'some_key': 'some_value'}))
self.assertEquals(False, m.matches({},
{'some_key': 'some_other_value'}))
# meta key that doesn't exist in message
m = Message({}, {})
self.assertEquals(False, m.matches({'some_key': 'some_value'}, {}))
# body key that doesn't exist in message
m = Message({}, {})
self.assertEquals(False, m.matches({}, {'some_key': 'some_value'}))
# timestamp range [ . . . .
m = Message({'timestamp': '2010-06-12 12:25:10'}, {})
meta = {'begin_timestamp': '2010-06-12 12:25:10'}
self.assertEquals(True, m.matches(meta, {}))
meta = {'begin_timestamp': '2009-06-12 12:25:10'}
self.assertEquals(True, m.matches(meta, {}))
meta = {'begin_timestamp': '2011-06-12 12:25:10'}
self.assertEquals(False, m.matches(meta, {}))
# timestamp range . . . . ]
m = Message({'timestamp': '2010-06-12 12:25:10'}, {})
meta = {'end_timestamp': '2010-06-12 12:25:10'}
self.assertEquals(False, m.matches(meta, {}))
meta = {'end_timestamp': '2009-06-12 12:25:10'}
self.assertEquals(False, m.matches(meta, {}))
meta = {'end_timestamp': '2011-06-12 12:25:10'}
self.assertEquals(True, m.matches(meta, {}))
# timestamp range [ . . . . ]
m = Message({'timestamp': '2010-06-12 12:25:10'}, {})
meta = {'begin_timestamp': '2010-06-12 12:25:10',
'end_timestamp': '2011-06-12 12:25:10'}
self.assertEquals(True, m.matches(meta, {}))
meta = {'begin_timestamp': '2009-06-12 12:25:10',
'end_timestamp': '2011-06-12 12:25:10'}
self.assertEquals(True, m.matches(meta, {}))
meta = {'begin_timestamp': '2009-06-12 12:25:10',
'end_timestamp': '2010-06-12 12:25:10'}
self.assertEquals(False, m.matches(meta, {}))
meta = {'begin_timestamp': '2008-06-12 12:25:10',
'end_timestamp': '2009-06-12 12:25:10'}
self.assertEquals(False, m.matches(meta, {}))
meta = {'begin_timestamp': '2011-06-12 12:25:10',
'end_timestamp': '2012-06-12 12:25:10'}
self.assertEquals(False, m.matches(meta, {}))
def test_msg(self):
class SQSMessage(object):
def __init__(self, return_value):
self.return_value = return_value
self.get_body_called = 0
def get_body(self):
self.get_body_called += 1
return self.return_value
class Message(queue.Message):
def __init__(self, msg, sqs_message):
self._msg = msg
self._sqs_message = sqs_message
class LoadS(object):
def __init__(self, test, arg, return_value):
self.test = test
self.arg = arg
self.return_value = return_value
self.called = 0
def method(self, arg):
self.called += 1
self.test.assertEquals(self.arg, arg)
return self.return_value
arg = 'some_arg'
return_value = 'some_return_value'
loads = LoadS(self, arg, return_value)
sqs_message = SQSMessage(arg)
m = Message(None, sqs_message)
self.mock(queue, 'loads', loads.method)
# verify msg comes from the right place
self.assertEquals(return_value, m.msg)
self.assertEquals(1, loads.called)
self.assertEquals(1, sqs_message.get_body_called)
# verify msg is cached
self.assertEquals(return_value, m.msg)
self.assertEquals(1, loads.called)
self.assertEquals(1, sqs_message.get_body_called)
def test_restore(self):
class MessageQueue(object):
def __init__(self, test, msg):
self.test = test
self.msg = msg
self.send_restore_called = 0
def send_restore(self, msg):
self.send_restore_called += 1
self.test.assertEquals(self.msg, msg)
class Message(queue.Message):
def __init__(self, queue, msg):
self._queue = queue
self._msg = msg
self.delete_called = 0
def delete(self):
self.delete_called += 1
msg = {
'body': {'some_key': 'some_value'},
'meta': {
'message_id': 'some_id',
'queue_name': 'some_queue',
'timestamp': 'some_timestamp',
}
}
exp_msg = {'meta': {}, 'body': msg['body']}
for key in "message_id queue_name timestamp".split():
exp_msg['meta'][key] = msg['meta'][key]
q = MessageQueue(self, exp_msg)
m = Message(q, msg)
m.restore()
self.assertEquals(1, q.send_restore_called)
self.assertEquals(1, m.delete_called)
| greglange/qork | test_qork/unit/test_queue.py | Python | apache-2.0 | 30,141 |
from tests.unit.dataactcore.factories.staging import DetachedAwardFinancialAssistanceFactory
from tests.unit.dataactvalidator.utils import number_of_errors, query_columns
_FILE = 'fabs33_detached_award_financial_assistance_1'
def test_column_headers(database):
    """The rule's query must expose exactly the expected reporting columns."""
    expected_subset = {'row_number', 'period_of_performance_curr', 'uniqueid_AssistanceTransactionUniqueKey'}
    assert expected_subset == set(query_columns(_FILE, database))
def test_success(database):
    """ PeriodOfPerformanceCurrentEndDate is an optional field, but when provided, must follow YYYYMMDD format """
    records = [
        # valid date
        DetachedAwardFinancialAssistanceFactory(period_of_performance_curr='19990131',
                                                correction_delete_indicatr=''),
        # missing / empty values are allowed (the field is optional)
        DetachedAwardFinancialAssistanceFactory(period_of_performance_curr=None,
                                                correction_delete_indicatr='c'),
        DetachedAwardFinancialAssistanceFactory(period_of_performance_curr='',
                                                correction_delete_indicatr=None),
        # records with correction_delete_indicatr 'd' are ignored by the rule
        DetachedAwardFinancialAssistanceFactory(period_of_performance_curr='1234',
                                                correction_delete_indicatr='d'),
    ]
    assert number_of_errors(_FILE, database, models=records) == 0
def test_failure(database):
    """ PeriodOfPerformanceCurrentEndDate is an optional field, but when provided, must follow YYYYMMDD format """
    records = [
        # impossible day
        DetachedAwardFinancialAssistanceFactory(period_of_performance_curr='19990132',
                                                correction_delete_indicatr=''),
        # impossible month
        DetachedAwardFinancialAssistanceFactory(period_of_performance_curr='19991331',
                                                correction_delete_indicatr=None),
        # wrong lengths
        DetachedAwardFinancialAssistanceFactory(period_of_performance_curr='1234',
                                                correction_delete_indicatr='c'),
        DetachedAwardFinancialAssistanceFactory(period_of_performance_curr='200912',
                                                correction_delete_indicatr='C'),
    ]
    assert number_of_errors(_FILE, database, models=records) == 4
| fedspendingtransparency/data-act-broker-backend | tests/unit/dataactvalidator/test_fabs33_detached_award_financial_assistance_1.py | Python | cc0-1.0 | 2,532 |
#!/usr/bin/python
from __future__ import (absolute_import, division, print_function)
# Copyright 2019 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
__metaclass__ = type
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.1'}
DOCUMENTATION = '''
---
module: fortios_system_alias
short_description: Configure alias command in Fortinet's FortiOS and FortiGate.
description:
- This module is able to configure a FortiGate or FortiOS (FOS) device by allowing the
user to set and modify system feature and alias category.
Examples include all parameters and values need to be adjusted to datasources before usage.
Tested with FOS v6.0.5
version_added: "2.9"
author:
- Miguel Angel Munoz (@mamunozgonzalez)
- Nicolas Thomas (@thomnico)
notes:
- Requires fortiosapi library developed by Fortinet
- Run as a local_action in your playbook
requirements:
- fortiosapi>=0.9.8
options:
host:
description:
- FortiOS or FortiGate IP address.
type: str
required: false
username:
description:
- FortiOS or FortiGate username.
type: str
required: false
password:
description:
- FortiOS or FortiGate password.
type: str
default: ""
vdom:
description:
- Virtual domain, among those defined previously. A vdom is a
virtual instance of the FortiGate that can be configured and
used as a different unit.
type: str
default: root
https:
description:
- Indicates if the requests towards FortiGate must use HTTPS protocol.
type: bool
default: true
ssl_verify:
description:
- Ensures FortiGate certificate must be verified by a proper CA.
type: bool
default: true
state:
description:
- Indicates whether to create or remove the object.
type: str
required: true
choices:
- present
- absent
system_alias:
description:
- Configure alias command.
default: null
type: dict
suboptions:
command:
description:
- Command list to execute.
type: str
name:
description:
- Alias command name.
required: true
type: str
'''
EXAMPLES = '''
- hosts: localhost
vars:
host: "192.168.122.40"
username: "admin"
password: ""
vdom: "root"
ssl_verify: "False"
tasks:
- name: Configure alias command.
fortios_system_alias:
host: "{{ host }}"
username: "{{ username }}"
password: "{{ password }}"
vdom: "{{ vdom }}"
https: "False"
state: "present"
system_alias:
command: "<your_own_value>"
name: "default_name_4"
'''
RETURN = '''
build:
description: Build number of the fortigate image
returned: always
type: str
sample: '1547'
http_method:
description: Last method used to provision the content into FortiGate
returned: always
type: str
sample: 'PUT'
http_status:
description: Last result given by FortiGate on last operation applied
returned: always
type: str
sample: "200"
mkey:
description: Master key (id) used in the last call to FortiGate
returned: success
type: str
sample: "id"
name:
description: Name of the table used to fulfill the request
returned: always
type: str
sample: "urlfilter"
path:
description: Path of the table used to fulfill the request
returned: always
type: str
sample: "webfilter"
revision:
description: Internal revision number
returned: always
type: str
sample: "17.0.2.10658"
serial:
description: Serial number of the unit
returned: always
type: str
sample: "FGVMEVYYQT3AB5352"
status:
description: Indication of the operation's result
returned: always
type: str
sample: "success"
vdom:
description: Virtual domain used
returned: always
type: str
sample: "root"
version:
description: Version of the FortiGate
returned: always
type: str
sample: "v5.6.3"
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.connection import Connection
from ansible.module_utils.network.fortios.fortios import FortiOSHandler
from ansible.module_utils.network.fortimanager.common import FAIL_SOCKET_MSG
def login(data, fos):
    """Open a session on the FortiGate described by ``data``.

    Enables API debug output, selects the transport (HTTPS unless the
    'https' key is present and falsy), then authenticates with the
    supplied credentials.
    """
    host = data['host']
    username = data['username']
    password = data['password']
    ssl_verify = data['ssl_verify']
    fos.debug('on')
    wants_plain_http = 'https' in data and not data['https']
    fos.https('off' if wants_plain_http else 'on')
    fos.login(host, username, password, verify=ssl_verify)
def filter_system_alias_data(json):
    """Project ``json`` down to the keys the 'system alias' API accepts.

    Keys that are absent or explicitly None are dropped so they are not
    sent to the device.
    """
    option_list = ['command', 'name']
    return dict((attribute, json[attribute])
                for attribute in option_list
                if attribute in json and json[attribute] is not None)
def underscore_to_hyphen(data):
    """Recursively rewrite dict keys from snake_case to hyphen-case.

    Lists are converted in place (same list object is returned); dicts are
    rebuilt with translated keys; any other value passes through untouched.
    """
    if isinstance(data, list):
        for index, item in enumerate(data):
            data[index] = underscore_to_hyphen(item)
    elif isinstance(data, dict):
        data = dict((key.replace('_', '-'), underscore_to_hyphen(value))
                    for key, value in data.items())
    return data
def system_alias(data, fos):
    """Create/update or remove one 'system alias' object on the device.

    state 'present' maps to a set call, 'absent' to a delete keyed by the
    alias name. Returns the raw FortiOS API response dict (None for any
    other state value).
    """
    vdom = data['vdom']
    state = data['state']
    payload = underscore_to_hyphen(filter_system_alias_data(data['system_alias']))
    if state == "present":
        return fos.set('system', 'alias', data=payload, vdom=vdom)
    if state == "absent":
        return fos.delete('system', 'alias', mkey=payload['name'], vdom=vdom)
def is_successful_status(status):
    """Decide whether a FortiOS API response indicates success.

    A DELETE that comes back with HTTP 404 is treated as success too,
    since the object being removed is already gone.
    """
    if status['status'] == "success":
        return True
    return status['http_method'] == "DELETE" and status['http_status'] == 404
def fortios_system(data, fos):
    # Dispatch for the 'system' feature; only the system_alias resource is
    # handled by this module. Returns (is_error, has_changed, response).
    if data['system_alias']:
        resp = system_alias(data, fos)
    # NOTE(review): if data['system_alias'] is falsy, resp is never bound and
    # the return below raises NameError. Callers always supply the key via
    # the argument spec, but this is fragile.
    return not is_successful_status(resp), \
        resp['status'] == "success", \
        resp
def main():
    """Module entry point: build the argument spec, connect, apply changes."""
    # Declarative argument specification mirroring the DOCUMENTATION block.
    fields = {
        "host": {"required": False, "type": "str"},
        "username": {"required": False, "type": "str"},
        "password": {"required": False, "type": "str", "default": "", "no_log": True},
        "vdom": {"required": False, "type": "str", "default": "root"},
        "https": {"required": False, "type": "bool", "default": True},
        "ssl_verify": {"required": False, "type": "bool", "default": True},
        "state": {"required": True, "type": "str",
                  "choices": ["present", "absent"]},
        "system_alias": {
            "required": False, "type": "dict", "default": None,
            "options": {
                "command": {"required": False, "type": "str"},
                "name": {"required": True, "type": "str"}
            }
        }
    }
    module = AnsibleModule(argument_spec=fields,
                           supports_check_mode=False)
    # legacy_mode refers to using fortiosapi instead of HTTPAPI
    legacy_mode = 'host' in module.params and module.params['host'] is not None and \
                  'username' in module.params and module.params['username'] is not None and \
                  'password' in module.params and module.params['password'] is not None
    if not legacy_mode:
        # HTTPAPI transport: reuse Ansible's persistent connection socket
        if module._socket_path:
            connection = Connection(module._socket_path)
            fos = FortiOSHandler(connection)
            is_error, has_changed, result = fortios_system(module.params, fos)
        else:
            module.fail_json(**FAIL_SOCKET_MSG)
    else:
        # legacy transport: direct fortiosapi session using host credentials
        try:
            from fortiosapi import FortiOSAPI
        except ImportError:
            module.fail_json(msg="fortiosapi module is required")
        fos = FortiOSAPI()
        login(module.params, fos)
        is_error, has_changed, result = fortios_system(module.params, fos)
        fos.logout()
    if not is_error:
        module.exit_json(changed=has_changed, meta=result)
    else:
        module.fail_json(msg="Error in repo", meta=result)
if __name__ == '__main__':
    main()
| kustodian/ansible | lib/ansible/modules/network/fortios/fortios_system_alias.py | Python | gpl-3.0 | 9,082 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2015-2015: Alignak team, see AUTHORS.txt file for contributors
#
# This file is part of Alignak.
#
# Alignak is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Alignak is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Alignak. If not, see <http://www.gnu.org/licenses/>.
#
#
# This file incorporates work covered by the following copyright and
# permission notice:
#
# Copyright (C) 2009-2014:
# aviau, alexandre.viau@savoirfairelinux.com
# Grégory Starck, g.starck@gmail.com
# Sebastien Coavoux, s.coavoux@free.fr
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
import socket
from alignak.objects.satellitelink import SatelliteLink, SatelliteLinks
from alignak.property import IntegerProp, StringProp
from alignak.http_client import HTTPExceptions
from alignak.log import logger
""" TODO: Add some comment about this class for the doc"""
class ArbiterLink(SatelliteLink):
    """Satellite link describing an arbiter daemon (this one or a spare).

    All remote calls go through self.con (an HTTP client created lazily by
    create_connection); on any HTTPExceptions the connection is dropped so
    it gets rebuilt on the next call, and a neutral default is returned.
    """
    id = 0
    my_type = 'arbiter'
    properties = SatelliteLink.properties.copy()
    properties.update({
        'arbiter_name': StringProp(),
        'host_name': StringProp(default=socket.gethostname()),
        'port': IntegerProp(default=7770),
    })
    def get_name(self):
        # the configured arbiter_name identifies this link
        return self.arbiter_name
    def get_config(self):
        # fetch the whole configuration from the remote arbiter over HTTP
        return self.con.get('get_config')
    # Look for ourself as an arbiter. If a specific arbiter name is given,
    # compare against our configured name; otherwise compare our host_name
    # against the local fqdn or, failing that, the local hostname.
    def is_me(self, lookup_name):
        logger.info("And arbiter is launched with the hostname:%s "
                    "from an arbiter point of view of addr:%s", self.host_name, socket.getfqdn())
        if lookup_name:
            return lookup_name == self.get_name()
        else:
            return self.host_name == socket.getfqdn() or self.host_name == socket.gethostname()
    def give_satellite_cfg(self):
        # minimal connection dict other daemons need to reach this arbiter
        return {'port': self.port, 'address': self.address, 'name': self.arbiter_name,
                'use_ssl': self.use_ssl, 'hard_ssl_name_check': self.hard_ssl_name_check}
    def do_not_run(self):
        # ask the remote arbiter to stay idle (spare mode); True on success
        if self.con is None:
            self.create_connection()
        try:
            self.con.get('do_not_run')
            return True
        except HTTPExceptions, exp:
            self.con = None
            return False
    def get_satellite_list(self, daemon_type):
        # list satellites of the given type known by the remote arbiter;
        # empty list when the remote side is unreachable
        if self.con is None:
            self.create_connection()
        try:
            r = self.con.get_satellite_list(daemon_type)
            return r
        except HTTPExceptions, exp:
            self.con = None
            return []
    def get_satellite_status(self, daemon_type, name):
        # status dict of one satellite, or {} when unreachable
        if self.con is None:
            self.create_connection()
        try:
            r = self.con.get_satellite_status(daemon_type, name)
            return r
        except HTTPExceptions, exp:
            self.con = None
            return {}
    def get_all_states(self):
        # the remote arbiter's view of all daemon states, or None on failure
        if self.con is None:
            self.create_connection()
        try:
            r = self.con.get('get_all_states')
            return r
        except HTTPExceptions, exp:
            self.con = None
            return None
    def get_objects_properties(self, table, properties=[]):
        # NOTE(review): mutable default argument; harmless here because the
        # list is only read, but callers should not rely on sharing it.
        if self.con is None:
            self.create_connection()
        try:
            # NOTE(review): leftover debug output on stdout
            print properties
            r = self.con.get('get_objects_properties', {'table': table, 'properties': properties})
            return r
        except HTTPExceptions, exp:
            self.con = None
            return None
class ArbiterLinks(SatelliteLinks):
    """Container of ArbiterLink objects, keyed by arbiter_name."""
    name_property = "arbiter_name"
    inner_class = ArbiterLink
    # We must have a realm property, so we find our realm
    def linkify(self, modules):
        """Resolve the arbiters' module references against *modules*."""
        self.linkify_s_by_plug(modules)
| ddurieux/alignak | alignak/objects/arbiterlink.py | Python | agpl-3.0 | 4,948 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Schema-only migration: make Payment.stripe_charge_id unique."""
    dependencies = [
        ('fundraising', '0002_donation_donor_not_null'),
    ]
    operations = [
        # Enforce one Payment row per Stripe charge at the database level.
        migrations.AlterField(
            model_name='payment',
            name='stripe_charge_id',
            field=models.CharField(max_length=100, unique=True),
        ),
    ]
| xavierdutreilh/djangoproject.com | fundraising/migrations/0003_payment_stripe_charge_id_unique.py | Python | bsd-3-clause | 436 |
class model5(object):
    """Attribute container with lazy values and prototype-style chaining.

    - Assigning a callable stores it as a lazy value: reading the attribute
      calls it and returns the result.
    - chain(other) links this object to *other*: reads fall back to the
      chained object when the name is not set locally, and writes are
      mirrored onto the chained object.
    Names starting with '__' (and the chain() method itself) bypass all of
    this machinery.
    """

    def __init__(self):
        # parent object used for attribute fallback; None means unchained
        self.__chain__ = None

    def __getattribute__(self, x):
        # internal/dunder names and chain() use normal attribute lookup
        if x.startswith('__') or x in ('chain',):
            return object.__getattribute__(self, x)
        if self.__chain__:
            # local attributes shadow the chained object's
            v = self.__dict__[x] if x in self.__dict__ else getattr(self.__chain__, x)
        else:
            v = self.__dict__[x]
        # callables are lazy values: evaluate on every access
        return v() if callable(v) else v

    def __setattr__(self, x, y):
        object.__setattr__(self, x, y)
        # Mirror writes onto the chained object, but never for internal
        # '__*' names. Previously chain() itself leaked '__chain__' onto
        # the target, making the target chain to itself and risking
        # infinite recursion on any missed lookup.
        if not x.startswith('__') and self.__chain__:
            object.__setattr__(self.__chain__, x, y)

    def chain(self, other):
        """Link this object to *other* for fallback/mirroring; returns self."""
        self.__chain__ = other
        return self
if __name__=="__main__":
    # demo: attributes assigned callables are evaluated on access
    x = model5()
    x.a = lambda: 20
    x.b = lambda: x.a+1
    # prints 20 then 21
    print(x.a)
    print(x.b)
    y = model5()
    # y.c reads y.b (found on x via the chain) and y.d (set locally)
    y.c = lambda: y.b*y.d
    y.chain(x)
    y.d = 2
    # writes on y are mirrored onto the chained x, so x.a becomes 0 too
    y.a = 0
    # prints 2 (y.b == x.a + 1 == 1, times y.d == 2)
    print(y.c)
| mobarski/sandbox | space/old/model5.py | Python | mit | 714 |
import re
import os
import sys
import glob
import stat
import time
import errno
#start:
import subprocess
import getpass
import pwd
import tempfile
from . import manager
#config:
from .shared import find_config, open_resource
#attach:
from . import user_client
#send/stop/kill
import json
import socket
#jar-list/jar-get
from . import servers
from twisted.internet import reactor
usage_text = "usage: mark2 command [options] [...]"
help_text = """
mark2 is a minecraft server wrapper
{usage}
commands:
{commands}
examples:
mark2 start /home/you/mcservers/pvp
mark2 attach
mark2 send say hello!
mark2 stop
"""
help_sub_text = """
mark2 {subcommand}: {doc}
usage: mark2 {subcommand} {value_spec}
"""
class Mark2Error(Exception):
    """Base class for user-facing mark2 failures.

    main() catches these, prints them, and exits with status 1.
    """
    def __init__(self, error):
        self.error = error

    def __str__(self):
        return "error: {0}".format(self.error)
class Mark2ParseError(Mark2Error):
    """Command-line parse failure; prefixes the message with usage help."""
    def __str__(self):
        return "{0}\nparse error: {1}".format(usage_text, self.error)
class Command(object):
    """Base class for all mark2 sub-commands.

    Subclasses set `name`, `value_spec` and `options_spec`, and implement
    run(). start() walks the inheritance chain calling do_start() hooks
    base-first, then run(), then do_end() hooks derived-first.
    """
    name = ""
    value_spec = ""
    options_spec = tuple()
    def __init__(self):
        pass
    def do_start(self):
        pass
    def do_end(self):
        pass
    @classmethod
    def get_bases(cls):
        # walk the single-inheritance chain up to (but excluding) object
        o = []
        while True:
            cls = cls.__bases__[0]
            if cls is object:
                break
            o.append(cls)
        return o
    @classmethod
    def get_options_spec(cls):
        # concatenate option specs from the root base down to this class
        return sum([list(b.options_spec) for b in [cls] + cls.get_bases()[::-1]], [])
    def parse_options(self, c_args):
        """Split c_args into self.options (flags) and self.value (the rest)."""
        options = {}
        options_tys = {}
        #transform: map every flag alias ('-x', '--xx') to its option spec
        for opt in self.__class__.get_options_spec():
            for flag in opt[1]:
                options_tys[flag] = opt
        while len(c_args) > 0:
            head = c_args[0]
            if head[0] != '-':
                break
            elif head == '--':
                c_args.pop(0)
                break
            elif head in options_tys:
                opt = options_tys[c_args.pop(0)]
                try:
                    # opt[2] is the value placeholder; empty means boolean flag
                    options[opt[0]] = c_args.pop(0) if opt[2] != '' else True
                except IndexError:
                    raise Mark2ParseError("option `%s` missing argument" % opt[0])
            else:
                raise Mark2Error("%s: unknown option %s" % (self.name, head))
        self.options = options
        self.value = ' '.join(c_args) if len(c_args) else None
    def start(self):
        # run base-class hooks first, then the command, then unwind
        bases = self.__class__.get_bases()
        for b in bases[::-1]:
            b.do_start(self)
        self.run()
        for b in bases:
            b.do_end(self)
    def run(self):
        raise NotImplementedError
class CommandTyStateful(Command):
    """Mixin layer: discovers running servers via pid files in a shared dir."""
    options_spec = (('base', ('-b', '--base'), 'PATH', 'the directory to put mark2-related temp files (default: /tmp/mark2)'),)
    def do_start(self):
        self.shared_path = self.options.get('base', '/tmp/mark2')
        self.make_writable(self.shared_path)
        #get servers: every live pid file names a running server
        o = []
        for path in glob.glob(self.shared('pid', '*')):
            with open(path) as fp:
                pid = int(fp.read())
            try:
                # signal 0 probes process existence without touching it
                os.kill(pid, 0)
            except OSError as err:
                if err.errno == errno.ESRCH:
                    # stale pid file: the process is gone, clean it up
                    os.remove(path)
                    continue
            f = os.path.basename(path)
            f = os.path.splitext(f)[0]
            o.append(f)
        self.servers = sorted(o)
    def shared(self, ty, name=None):
        """Path of a shared file, e.g. /tmp/mark2/<name>.<ty>."""
        if name is None:
            name = self.server_name
        return os.path.join(self.shared_path, "%s.%s" % (name, ty))
    def make_writable(self, directory):
        """Ensure *directory* exists with world-accessible modes."""
        need_modes = stat.S_IRWXU | stat.S_IRGRP | stat.S_IXGRP | stat.S_IROTH | stat.S_IXOTH | stat.S_IRWXG | stat.S_IRWXO
        good_modes = stat.S_IRWXU | stat.S_IRWXG | stat.S_IRWXO
        if not os.path.exists(directory):
            os.makedirs(directory, good_modes)
        st = os.stat(directory)
        if (st.st_mode & need_modes) == need_modes:
            return True
        try:
            os.chmod(directory, good_modes)
            return True
        except Exception:
            raise Mark2Error('%s does not have the necessary modes to run mark2 and I do not have permission to change them!' % directory)
class CommandTySelective(CommandTyStateful):
    """Mixin layer: resolves which server the command targets (-n/--name)."""
    options_spec = (('name', ('-n', '--name'), 'NAME', 'create or select a server with this name'),)
    # subclasses that create servers (CommandStart) flip this to False
    name_should_exist = True
    server_name = None
    def do_start(self):
        name = self.options.get('name', None)
        if self.name_should_exist:
            if name is None:
                # default to the first running server, if any
                if len(self.servers) > 0:
                    name = self.servers[0]
                else:
                    raise Mark2Error("no servers running!")
            elif name not in self.servers:
                raise Mark2Error("server not running: %s" % name)
        else:
            if name is None:
                pass #CommandStart will fill it.
            elif name in self.servers:
                raise Mark2Error("server already running: %s" % name)
        self.server_name = name
    def do_send(self, data):
        """Send one console input line to the server's unix control socket."""
        d = {
            'type': 'input',
            'user': '@external',
            'line': data
        }
        d = json.dumps(d) + "\n"
        s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
        s.connect(self.shared('sock'))
        s.send(d)
        s.close()
class CommandTyTerminal(CommandTySelective):
    """Mixin layer: after run(), optionally tail the server log until a
    regex (self.wait) matches a [mark2] line."""
    options_spec = (
        ('wait', ('-w', '--wait'), 'REGEX', 'wait for this line of output to appear on console before returning.'),
        ('only', ('-o', '--only'), '', 'print the matched line and no others'),
        ('immediate', ('-i', '--immediate'), '', 'don\'t wait for any output'))
    # regex to wait for; None means return immediately
    wait = None
    # False: start tailing from the end of the log; True: from the beginning
    wait_from_start = False
    only = False
    def do_end(self):
        # command-line flags override whatever run() set up
        if 'wait' in self.options:
            self.wait = self.options['wait']
        if 'only' in self.options:
            self.only = True
        if 'immediate' in self.options:
            self.wait = None
        try:
            self.do_wait()
        except KeyboardInterrupt:
            pass
    def do_wait(self):
        if self.wait is None:
            return
        # the log file may not exist yet if the server is still starting
        while not os.path.exists(self.shared('log')):
            time.sleep(0.1)
        with open(self.shared('log'), 'r') as f:
            if not self.wait_from_start:
                f.seek(0,2)
            while True:
                line = f.readline().rstrip()
                if not line:
                    time.sleep(0.1)
                    continue
                # indented lines are continuations; echo them verbatim
                if line[0] in (" ", "\t"):
                    print line
                    continue
                # log format: "<date> <time> <source> <message>"
                line = line.split(" ", 3)
                if line[2] == '[mark2]':
                    line2 = line[3].split(" ", 2)
                    if re.search(self.wait, line2[2]):
                        print line[3]
                        return
                    elif not self.only:
                        print line[3]
class CommandHelp(Command):
    """display help and available options"""
    name = 'help'
    value_spec = "[COMMAND]"
    def run(self):
        # no argument: overall help; with a command name: per-command help
        if self.value is None:
            print help_text.format(
                usage=usage_text,
                commands=self.columns([(c.name, c.value_spec, c.__doc__) for c in commands]))
        elif self.value in commands_d:
            cls = commands_d[self.value]
            print help_sub_text.format(
                subcommand = self.value,
                doc = cls.__doc__,
                value_spec = cls.value_spec
            )
            opts = cls.get_options_spec()
            if len(opts) > 0:
                print "options:"
                print self.columns([(' '.join(o[1]), o[2], o[3]) for o in opts]) + "\n"
        else:
            raise Mark2Error("Unknown command: %s" % self.value)
    def columns(self, data):
        # left-align each token into 12-character columns, indented one space
        o = []
        for tokens in data:
            line = ""
            for i, token in enumerate(tokens):
                line += token
                line += " "*(((i+1)*12)-len(line))
            o.append(line)
        return "\n".join((" "+l for l in o))
class CommandStart(CommandTyTerminal):
    """start a server"""
    name = 'start'
    value_spec='[PATH]'
    # we create the server entry, so the name must NOT already exist
    name_should_exist = False
    def get_server_path(self):
        """Resolve self.value into server_path (dir) and optional jar_file."""
        self.jar_file = None
        self.server_path = os.path.realpath("" if self.value is None else self.value)
        if os.path.isdir(self.server_path):
            pass
        elif os.path.isfile(self.server_path):
            if self.server_path.endswith('.jar'):
                self.server_path, self.jar_file = os.path.split(self.server_path)
            else:
                raise Mark2Error("unknown file type: " + self.server_path)
        else:
            raise Mark2Error("path does not exist: " + self.server_path)
    def check_config(self):
        """Fail with guidance when mark2.properties cannot be found."""
        new_cfg = find_config('mark2.properties', ignore_errors=True)
        if os.path.exists(new_cfg):
            return
        # an old-style in-tree 'config' dir means the user must migrate
        if os.path.exists(os.path.realpath(os.path.join(os.path.dirname(__file__), '..', 'config'))):
            new_dir = os.path.dirname(new_cfg)
            raise Mark2Error("mark2's configuration location has changed! move your config files to {0}".format(new_dir))
        else:
            raise Mark2Error("mark2 is unconfigured! run `mark2 config` or `mkdir /etc/mark2 && touch /etc/mark2/mark2.properties` as root")
    def check_ownership(self):
        """Refuse to start unless we run as the server directory's owner."""
        d_user = pwd.getpwuid(os.stat(self.server_path).st_uid).pw_name
        m_user = getpass.getuser()
        if d_user != m_user:
            e = "server directory is owned by '{d_user}', but mark2 is running as '{m_user}'. " + \
                "please start mark2 as `sudo -u {d_user} mark2 start ...`"
            raise Mark2Error(e.format(d_user=d_user,m_user=m_user))
    def daemonize(self):
        """Classic double-fork daemonization.

        Returns 1 in the original (parent) process and 0 in the detached
        daemon child, with stdio redirected to /dev/null.
        """
        if os.fork() > 0:
            return 1
        os.chdir(".")
        os.setsid()
        os.umask(0)
        if os.fork() > 0:
            sys.exit(0)
        null = os.open('/dev/null', os.O_RDWR)
        for fileno in (1, 2, 3):
            try:
                os.dup2(null, fileno)
            except:
                pass
        return 0
    def run(self):
        # parse the server path
        self.get_server_path()
        # get server name
        if self.server_name is None:
            self.server_name = os.path.basename(self.server_path)
            if self.server_name in self.servers:
                raise Mark2Error("server already running: %s" % self.server_name)
        # check for mark2.properties
        self.check_config()
        # check we own the server dir
        self.check_ownership()
        # clear old stuff
        for x in ('log', 'sock', 'pid'):
            if os.path.exists(self.shared(x)):
                os.remove(self.shared(x))
        i = 1
        while True:
            p = self.shared("log.%d" % i)
            if not os.path.exists(p):
                break
            os.remove(p)
            i += 1
        # the daemon child runs the twisted manager; the parent falls
        # through and waits for the started/stopped marker in the log
        if self.daemonize() == 0:
            with open(self.shared('pid'), 'w') as f:
                f.write("{0}\n".format(os.getpid()))
            mgr = manager.Manager(self.shared_path, self.server_name, self.server_path, self.jar_file)
            reactor.callWhenRunning(mgr.startup)
            reactor.run()
            sys.exit(0)
        self.wait = '# mark2 started|stopped\.'
        self.wait_from_start = True
class CommandConfig(Command):
    """configure mark2"""
    options_spec = (('ask', ('-a', '--ask'), '', 'Ask before starting an editor'),)
    name = 'config'
    def check_executable(self, cmd):
        # 'command -v' (via the shell) tells us whether cmd is on PATH
        return subprocess.call(
            ["command", "-v", cmd],
            shell=True,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE
        ) == 0
    def copy_config(self, src, dest, header=''):
        """Copy src to dest, skipping the leading blank/'### ###' banner
        lines and prepending *header*. Closes both files."""
        f0 = src
        f1 = dest
        l0 = ''
        while l0.strip() == '' or l0.startswith('### ###'):
            l0 = f0.readline()
        f1.write(header)
        while l0 != '':
            f1.write(l0)
            l0 = f0.readline()
        f0.close()
        f1.close()
    def diff_config(self, src, dest):
        """Return only the non-blank lines the user added/changed in dest
        relative to src, so the saved config holds just the overrides."""
        diff = ""
        d0 = src.readlines()
        d1 = dest.readlines()
        import difflib
        ignore = " \t\f\r\n"
        s = difflib.SequenceMatcher(lambda x: x in ignore, d0, d1)
        for tag, i0, i1, j0, j1 in s.get_opcodes():
            if tag in ('replace', 'insert'):
                for l1 in d1[j0:j1]:
                    if l1.strip(ignore) != '':
                        diff += l1
        return diff
    def run(self):
        path_old = 'resources/mark2.default.properties'
        path_new = find_config('mark2.properties')
        def write_config(data=''):
            data = "# see resources/mark2.default.properties for details\n" + data
            with open(path_new, 'w') as file_new:
                file_new.write(data)
        # with --ask, give the user a chance to decline (skipped under test)
        if "MARK2_TEST" not in os.environ and self.options.get('ask', False):
            response = raw_input('would you like to configure mark2 now? [yes] ') or 'yes'
            if response != 'yes':
                return write_config() if not os.path.exists(path_new) else None
        # pick the first available editor, preferring $EDITOR
        editors = ["editor", "nano", "vim", "vi", "emacs"]
        if "EDITOR" in os.environ:
            editors.insert(0, os.environ["EDITOR"])
        for editor in editors:
            if self.check_executable(editor):
                break
        else:
            if not os.path.exists(path_new):
                write_config()
            raise Mark2Error("no editor found. please set the $EDITOR environment variable.")
        if os.path.exists(path_new):
            subprocess.call([editor, path_new])
        else:
            #launch our editor on a temp copy of the default config
            fd_tmp, path_tmp = tempfile.mkstemp(prefix='mark2.properties.', text=True)
            with open_resource(path_old) as src:
                with open(path_tmp, 'w') as dst:
                    self.copy_config(src, dst)
            subprocess.call([editor, path_tmp])
            #diff the files and persist only the user's overrides
            with open_resource(path_old) as src:
                with open(path_tmp, 'r') as dst:
                    write_config(self.diff_config(src, dst))
            os.remove(path_tmp)
class CommandList(CommandTyStateful):
    """list running servers"""
    name = 'list'
    def run(self):
        # self.servers was populated by CommandTyStateful.do_start from the
        # live pid files under the shared directory
        for s in self.servers:
            print s
class CommandAttach(CommandTySelective):
    """attach to a server"""
    name = 'attach'
    def run(self):
        # hand control to the interactive console client for the selected
        # server; blocks until the user detaches
        factory = user_client.UserClientFactory(self.server_name, self.shared_path)
        factory.main()
class CommandStop(CommandTyTerminal):
    """stop mark2"""
    name = 'stop'
    def run(self):
        # ask the wrapper to shut the server down cleanly, then block until
        # the shutdown marker appears in the log (see CommandTyTerminal)
        self.do_send('~stop')
        self.wait='# mark2 stopped\.'
class CommandKill(CommandTyTerminal):
    """kill mark2"""
    name = 'kill'
    def run(self):
        # force-terminate the wrapped server, then wait for the shutdown
        # marker in the log (see CommandTyTerminal)
        self.do_send('~kill')
        self.wait = '# mark2 stopped\.'
class CommandSend(CommandTyTerminal):
    """send a console command"""
    name = 'send'
    value_spec='INPUT...'
    def run(self):
        # everything after the sub-command name is forwarded verbatim to
        # the server console
        if self.value is None:
            raise Mark2ParseError("nothing to send!")
        self.do_send(self.value)
class CommandJarList(Command):
    """list server jars"""
    name = 'jar-list'
    def run(self):
        # twisted deferred callbacks: either one stops the reactor
        def err(what):
            if reactor.running: reactor.stop()
            print "error: %s" % what.value
        def handle(listing):
            if reactor.running: reactor.stop()
            if len(listing) == 0:
                print "error: no server jars found!"
            else:
                print "The following server jars/zips are available:"
                print listing
        def start():
            d = servers.jar_list()
            d.addCallbacks(handle, err)
        reactor.callWhenRunning(start)
        reactor.run()
class CommandJarGet(Command):
    """download a server jar"""
    name = 'jar-get'
    value_spec = 'NAME'
    def run(self):
        if self.value is None:
            raise Mark2ParseError("missing jar type!")
        def err(what):
            #reactor.stop()
            print "error: %s" % what.value
        # note: tuple-parameter unpacking below is Python 2-only syntax
        def handle((filename, data)):
            reactor.stop()
            # never clobber an existing file of the same name
            if os.path.exists(filename):
                print "error: %s already exists!" % filename
            else:
                f = open(filename, 'wb')
                f.write(data)
                f.close()
                print "success! saved as %s" % filename
        def start():
            d = servers.jar_get(self.value)
            d.addCallbacks(handle, err)
        reactor.callWhenRunning(start)
        reactor.run()
# Registry of all CLI sub-commands; commands_d maps each command's published
# name to its class for dispatch in main().
commands = (CommandHelp, CommandStart, CommandList, CommandAttach, CommandStop, CommandKill, CommandSend, CommandJarList, CommandJarGet, CommandConfig)
commands_d = dict([(c.name, c) for c in commands])
def main():
    # CLI entry point: dispatch sys.argv to the matching Command class.
    # Returns a shell-style exit status: 0 on success, 1 on any Mark2Error.
    try:
        c_args = sys.argv[1:]
        if len(c_args) == 0:
            # No arguments: show the help command.
            command_name = 'help'
        else:
            command_name = c_args.pop(0)
        command_cls = commands_d.get(command_name, None)
        if command_cls is None:
            raise Mark2ParseError("unknown command: %s" % command_name)
        command = command_cls()
        command.parse_options(c_args)
        command.start()
        return 0
    except Mark2Error as e:
        print e
        return 1
| SupaHam/mark2 | mk2/launcher.py | Python | mit | 17,631 |
# The MIT License (MIT)
# Copyright (c) 2015 kupiakos
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import logging
import re
import html
from urllib.parse import urlsplit
import traceback
import requests
import mimeparse
import bs4
import praw
class DrawcrowdPlugin:
    """A tiny import plugin for drawcrowd.com
    """

    def __init__(self, useragent: str, **options):
        """Initialize the drawcrowd import API.

        :param useragent: The useragent to use for querying drawcrowd.
        :param options: Other options in the configuration. Ignored.
        """
        self.log = logging.getLogger('lapis.drawcrowd')
        self.headers = {'User-Agent': useragent}
        # Matches drawcrowd.com itself and any subdomain.
        self.regex = re.compile(r'^(.*?\.)?drawcrowd\.com$')

    def import_submission(self, submission: praw.objects.Submission) -> dict:
        """Import a submission from drawcrowd. Uses raw HTML scraping.

        As it turns out, drawcrowd likes to provide different data
        (all in <meta> tags) to non-web-browser requests.
        Since it provides enough information anyways, we don't bother getting
        around it and just parse that.

        This function will define the following values in its return data:
        - author: The author of the post
        - source: The url of the submission
        - importer_display/header
        - import_urls

        :param submission: A reddit submission to parse.
        :return: Importer data dict, or None if the URL is not ours / fails.
        """
        try:
            url = html.unescape(submission.url)
            if not self.regex.match(urlsplit(url).netloc):
                return None
            data = {'source': url}
            # HEAD first: a direct image link can be mirrored without scraping.
            r = requests.head(url, headers=self.headers)
            if r.status_code == 301:  # Moved Permanently
                return None
            mime_text = r.headers.get('Content-Type')
            mime = mimeparse.parse_mime_type(mime_text)
            if mime[0] == 'image':
                data['author'] = 'An unknown drawcrowd user'
                image_url = url
            else:
                # Note: Drawcrowd provides different content to non-web-browsers.
                r = requests.get(url, headers=self.headers)
                # Pass an explicit parser: with no argument bs4 picks whichever
                # parser happens to be installed, making results
                # environment-dependent (and emitting a warning).
                bs = bs4.BeautifulSoup(r.content.decode('utf-8'), 'html.parser')
                matched = bs.find(property='og:image')
                if not matched:
                    # (message fixed: previously read "Could not find locate ...")
                    self.log.warning('Could not locate drawcrowd image to scrape.')
                    return None
                image_url = matched['content']
                matched = bs.find(property='og:title')
                if matched:
                    data['author'] = matched['content']
                else:
                    data['author'] = 'an unknown drawcrowd author'
            data['importer_display'] = {'header': 'Mirrored image from {}:\n\n'.format(data['author'])}
            assert image_url
            data['import_urls'] = [image_url]
            return data
        except Exception:
            self.log.error('Could not import drawcrowd URL %s (%s)',
                           submission.url, traceback.format_exc())
            return None
__plugin__ = DrawcrowdPlugin
# END OF LINE.
| kupiakos/LapisMirror | plugins/drawcrowd.py | Python | mit | 4,144 |
import traceback
import sys
from compliance_checker.suite import CheckSuite
from compliance_checker.roms import DefinedROMSBaseCheck
from compliance_checker.generic import DefinedGenericBaseCheck
from compliance_checker.shoc import DefinedSHOCBaseCheck
class ComplianceCheckerCheckSuiteDefined(CheckSuite):
    """
    CheckSuite that defines all the possible Checker classes for the application, must be an extension of DefinedBaseCheck.
    """
    # Class-level default; set_options() rebinds a per-instance value.
    options = []
    # Registry: checker name -> checker class.
    checkers = {
        'roms'    : DefinedROMSBaseCheck,
        'shoc'    : DefinedSHOCBaseCheck,
        'generic' : DefinedGenericBaseCheck
    }
    def set_options(self, t_options):
        """Store the options that run() forwards to the selected checker."""
        self.options = t_options
    # Backwards-compatible alias: this method was originally published under a
    # misspelled name, while callers (run_checker below) already used
    # set_options().
    set_optpions = set_options
    def run(self, ds, *checker_names):
        """
        Runs this CheckSuite on the dataset with all the passed Checker instances.
        Returns a dictionary mapping checker names to a 4-element list:
        [grouped scores, limits dict, errors/exceptions, request metadata].
        """
        if checker_names is None or len(checker_names) == 0:
            print("No valid checker type provided, falling back to generic")
            # Keep this a tuple so it matches the *checker_names calling
            # convention (the old code assigned a bare string here, which would
            # have been iterated character-by-character downstream).
            checker_names = ("generic",)
        ret_val = {}
        checkers = self._get_valid_checkers(ds, checker_names)
        if len(checkers) == 0:  # this is brute force to ensure some tests for limits
            print("No valid checkers found for tests '%s'" % ", ".join(checker_names))
            checkers = (('generic', DefinedGenericBaseCheck),)
        # normally this loop sees only one entry!
        for checker_name, checker_class in checkers:
            checker = checker_class()  # @TODO: combine with load_datapair/setup
            dsp = checker.load_datapair(ds)
            checker.setup(dsp)
            errs = {}
            # Here is where we differ from the super implementation: instead of
            # reflecting over 'check_' functions we leave it to the implementing
            # class (a DefinedBaseCheck) to run checks based on the options.
            vals = None
            try:
                checker.set_options(self.options)
                vals = checker.check(dsp)
            except Exception as e:
                errs['check'] = [e, sys.exc_info()[2]]
                print(str(e))
            ret_val[checker_name] = []
            # We always need to return a list of 4 elements; if there was an
            # error, vals is not valid -> empty score list.
            if vals is not None:
                groups = self.scores(vals)
                ret_val[checker_name].append(groups)
            else:
                ret_val[checker_name].append(list())
            try:
                ret_val[checker_name].append(checker.limits(dsp))
            except Exception as e:
                errs['limits'] = [e, sys.exc_info()[2]]
                ret_val[checker_name].append(dict())
            ret_val[checker_name].append(errs)
            ret_val[checker_name].append({"type": checker_name,
                                          "requested": checker_names})
        return ret_val
class ComplianceCheckerDefined(object):
    @classmethod
    def run_checker(cls, ds_loc, checker_names, verbose, criteria, options=None):
        """
        Static check runner.

        @param ds_loc Dataset location (url or file)
        @param checker_names List of string names to run, should match keys of checkers dict (empty list means run all)
        @param verbose Verbosity of the output (0, 1, 2)
        @param criteria Determines failure (lenient, normal, strict)
        @param options Options forwarded to the checker via set_options()
        @returns If the tests failed (based on the criteria)
        """
        cs = ComplianceCheckerCheckSuiteDefined()
        cs.set_options(options)
        ds = cs.load_dataset(ds_loc)
        score_groups = cs.run(ds, *checker_names)
        # Map the failure criteria to a score limit; an unrecognised value
        # previously left 'limit' unbound (UnboundLocalError) -- default to
        # 'normal' instead.
        limit = {'strict': 1, 'normal': 2, 'lenient': 3}.get(criteria, 2)
        # Display results in the terminal, including scoring; goes to the
        # verbose routine if requested by the user.  @TODO cleanup
        for checker, rpair in score_groups.iteritems():
            # NOTE(review): run() above appends four elements per checker
            # (groups, limits, errors, metadata) but this unpack expects two --
            # confirm against the base CheckSuite.run() return shape.
            groups, errors = rpair
            if len(errors):
                print("The following exceptions occured during the %s checker (possibly indicate compliance checker issues):" % checker)
                for check_name, epair in errors.iteritems():
                    print("%s.%s: %s" % (checker, check_name, epair[0].message))
                    if verbose > 0:
                        # Skip the first two frames: noise from the runner
                        # itself.  @TODO search for check_name
                        traceback.print_tb(epair[1].tb_next.tb_next)
                        print(' ')
            score_list, points, out_of = cs.standard_output(limit, checker, groups)
            if not verbose:
                cs.non_verbose_output_generation(score_list, groups, limit, points, out_of)
            else:
                cs.verbose_output_generation(groups, limit, points, out_of)
        # Uses the last iteration's groups, as before.
        return cs.passtree(groups, limit)
| webtrike/compliance-checker | compliance_checker/defined.py | Python | apache-2.0 | 5,415 |
from flask import Blueprint, Response
from json import dumps
import urllib
# Import module models
from app.mod_endpoints.models import State
from app.mod_endpoints.models import LGA
# Define the blueprint: 'endpoints', set its url prefix: app.url/api/v1
mod_endpoints = Blueprint('api/v1', __name__, url_prefix='/api/v1')
# Set the route and accepted methods
@mod_endpoints.route('/states', methods=['GET'])
def get_states():
    """Return every state as a JSON array."""
    payload = dumps(State.get_all_states())
    return Response(payload, mimetype='application/json')
@mod_endpoints.route('/state/<state_name_or_code>', methods=['GET'])
def get_state(state_name_or_code):
    """Return a single state looked up by (URL-decoded) name or code."""
    decoded = urllib.unquote(state_name_or_code)
    payload = dumps(State.get_one_state(decoded))
    return Response(payload, mimetype='application/json')
@mod_endpoints.route('/state/<state_name_or_code>/lgas', methods=['GET'])
def get_lgas(state_name_or_code):
    """Return the LGAs of the given state as JSON."""
    decoded = urllib.unquote(state_name_or_code)
    payload = dumps(LGA.get_all_lgas(decoded))
    return Response(payload, mimetype='application/json')
@mod_endpoints.route('/state/<state_name_or_code>/cities', methods=['GET'])
def get_cities(state_name_or_code):
    """Return the cities of the given state as JSON."""
    decoded = urllib.unquote(state_name_or_code)
    payload = dumps(LGA.get_all_cities(decoded))
    return Response(payload, mimetype='application/json')
| devcenter-square/states-cities | app/mod_endpoints/controllers.py | Python | mit | 1,245 |
import pytest
import os
import sys
from polyglotdb.io.types.parsing import (SegmentTier, OrthographyTier,
GroupingTier, TextOrthographyTier,
TranscriptionTier,
TextTranscriptionTier, TextMorphemeTier,
MorphemeTier)
from polyglotdb.io.parsers.base import BaseParser
from polyglotdb.io import (inspect_textgrid, inspect_fave, inspect_mfa, inspect_partitur)
from polyglotdb.corpus import CorpusContext
from polyglotdb.structure import Hierarchy
from polyglotdb.config import CorpusConfig
def pytest_addoption(parser):
    # Register the --skipacoustics flag so the slow acoustic tests can be
    # excluded from a run.
    parser.addoption("--skipacoustics", action="store_true",
                     help="skip acoustic tests")
def pytest_collection_modifyitems(config, items):
    if not config.getoption("--skipacoustics"):
        # --skipacoustics not given: run the acoustic tests as normal.
        # (The old comment here referred to a non-existent --runslow flag.)
        return
    # --skipacoustics given: mark every test tagged 'acoustic' as skipped.
    skip_slow = pytest.mark.skip(reason="remove --skipacoustics option to run")
    for item in items:
        if "acoustic" in item.keywords:
            item.add_marker(skip_slow)
@pytest.fixture(scope='session')
def test_dir():
    """Root of the test-data directory; ensures data/generated exists."""
    base = os.path.dirname(os.path.abspath(__file__))
    os.makedirs(os.path.join(base, 'data', 'generated'), exist_ok=True)
    return os.path.join(base, 'data')
@pytest.fixture(scope='session')
def buckeye_test_dir(test_dir):
    # Path to the Buckeye-format test corpus.
    return os.path.join(test_dir, 'buckeye')
@pytest.fixture(scope='session')
def results_test_dir(test_dir):
    # Directory for generated result files; created on demand.
    results = os.path.join(test_dir, 'generated', 'results')
    os.makedirs(results, exist_ok=True)
    return results
@pytest.fixture(scope='session')
def timit_test_dir(test_dir):
    # Path to the TIMIT-format test corpus.
    return os.path.join(test_dir, 'timit')
@pytest.fixture(scope='session')
def textgrid_test_dir(test_dir):
    # Path to the Praat TextGrid test data.
    return os.path.join(test_dir, 'textgrids')
@pytest.fixture(scope='session')
def praatscript_test_dir(test_dir):
    # Path to the Praat scripts used by acoustic tests.
    return os.path.join(test_dir, 'praat_scripts')
# NOTE(review): exact duplicate of the praatscript_test_dir fixture defined
# immediately above -- one of the two should be removed.
@pytest.fixture(scope='session')
def praatscript_test_dir(test_dir):
    return os.path.join(test_dir, 'praat_scripts')
@pytest.fixture(scope='session')
def fave_test_dir(textgrid_test_dir):
    # FAVE-aligned TextGrids, nested under the textgrid directory.
    return os.path.join(textgrid_test_dir, 'fave')
@pytest.fixture(scope='session')
def mfa_test_dir(textgrid_test_dir):
    # Montreal Forced Aligner output TextGrids.
    return os.path.join(textgrid_test_dir, 'mfa')
@pytest.fixture(scope='session')
def maus_test_dir(textgrid_test_dir):
    # MAUS aligner output TextGrids.
    return os.path.join(textgrid_test_dir, 'maus')
@pytest.fixture(scope='session')
def labbcat_test_dir(textgrid_test_dir):
    # LaBB-CAT export TextGrids.
    return os.path.join(textgrid_test_dir, 'labbcat')
@pytest.fixture(scope='session')
def partitur_test_dir(test_dir):
    # BAS Partitur format test data.
    return os.path.join(test_dir, 'partitur')
@pytest.fixture(scope='session')
def text_transcription_test_dir(test_dir):
    # Plain-text transcription corpora.
    return os.path.join(test_dir, 'text_transcription')
@pytest.fixture(scope='session')
def text_spelling_test_dir(test_dir):
    # Plain-text orthography corpora.
    return os.path.join(test_dir, 'text_spelling')
@pytest.fixture(scope='session')
def ilg_test_dir(test_dir):
    # Interlinear gloss test data.
    return os.path.join(test_dir, 'ilg')
@pytest.fixture(scope='session')
def csv_test_dir(test_dir):
    # CSV enrichment test data.
    return os.path.join(test_dir, 'csv')
@pytest.fixture(scope='session')
def features_test_dir(test_dir):
    # Phonological feature files.
    return os.path.join(test_dir, 'features')
@pytest.fixture(scope='session')
def export_test_dir(test_dir):
    # Output directory for export tests; created on first use.
    path = os.path.join(test_dir, 'export')
    if not os.path.exists(path):
        os.makedirs(path)
    return path
@pytest.fixture(scope='session')
def corpus_data_timed():
levels = [SegmentTier('label', 'phone'),
OrthographyTier('label', 'word'),
GroupingTier('line', 'line')]
phones = [('k', 0.0, 0.1), ('ae', 0.1, 0.2), ('t', 0.2, 0.3), ('s', 0.3, 0.4),
('aa', 0.5, 0.6), ('r', 0.6, 0.7),
('k', 0.8, 0.9), ('uw', 0.9, 1.0), ('t', 1.0, 1.1),
('d', 2.0, 2.1), ('aa', 2.1, 2.2), ('g', 2.2, 2.3), ('z', 2.3, 2.4),
('aa', 2.4, 2.5), ('r', 2.5, 2.6),
('t', 2.6, 2.7), ('uw', 2.7, 2.8),
('ay', 3.0, 3.1),
('g', 3.3, 3.4), ('eh', 3.4, 3.5), ('s', 3.5, 3.6)]
words = [('cats', 0.0, 0.4), ('are', 0.5, 0.7), ('cute', 0.8, 1.1),
('dogs', 2.0, 2.4), ('are', 2.4, 2.6), ('too', 2.6, 2.8),
('i', 3.0, 3.1), ('guess', 3.3, 3.6)]
lines = [(0.0, 1.1), (2.0, 2.8), (3.0, 3.6)]
levels[0].add(phones)
levels[1].add(words)
levels[2].add(lines)
hierarchy = Hierarchy({'phone': 'word', 'word': 'line', 'line': None})
parser = BaseParser(levels, hierarchy)
data = parser.parse_discourse('test_timed')
return data
@pytest.fixture(scope='session')
def subannotation_data():
levels = [SegmentTier('label', 'phone'),
OrthographyTier('label', 'word'),
OrthographyTier('stop_information', 'phone')]
levels[2].subannotation = True
phones = [('k', 0.0, 0.1), ('ae', 0.1, 0.2), ('t', 0.2, 0.3), ('s', 0.3, 0.4),
('aa', 0.5, 0.6), ('r', 0.6, 0.7),
('k', 0.8, 0.9), ('u', 0.9, 1.0), ('t', 1.0, 1.1),
('d', 2.0, 2.1), ('aa', 2.1, 2.2), ('g', 2.2, 2.3), ('z', 2.3, 2.4),
('aa', 2.4, 2.5), ('r', 2.5, 2.6),
('t', 2.6, 2.7), ('uw', 2.7, 2.8),
('ay', 3.0, 3.1),
('g', 3.3, 3.4), ('eh', 3.4, 3.5), ('s', 3.5, 3.6)]
words = [('cats', 0.0, 0.4), ('are', 0.5, 0.7), ('cute', 0.8, 1.1),
('dogs', 2.0, 2.4), ('are', 2.4, 2.6), ('too', 2.6, 2.8),
('i', 3.0, 3.1), ('guess', 3.3, 3.6)]
info = [('burst', 0, 0.05), ('vot', 0.05, 0.1), ('closure', 0.2, 0.25),
('burst', 0.25, 0.26), ('vot', 0.26, 0.3), ('closure', 2.2, 2.25),
('burst', 2.25, 2.26), ('vot', 2.26, 2.3),
('voicing_during_closure', 2.2, 2.23), ('voicing_during_closure', 2.24, 2.25)]
levels[0].add(phones)
levels[1].add(words)
levels[2].add(info)
hierarchy = Hierarchy({'phone': 'word', 'word': None})
parser = BaseParser(levels, hierarchy)
data = parser.parse_discourse('test_sub')
return data
@pytest.fixture(scope='session')
def corpus_data_onespeaker(corpus_data_timed):
    """Timed corpus data with every discourse assigned the same speaker."""
    # Iterate the values directly -- the old loop fetched keys only to
    # immediately index back into the same dict.
    for discourse in corpus_data_timed.data.values():
        discourse.speaker = 'some_speaker'
    return corpus_data_timed
@pytest.fixture(scope='session')
def corpus_data_untimed():
levels = [TextTranscriptionTier('transcription', 'word'),
TextOrthographyTier('spelling', 'word'),
TextMorphemeTier('morpheme', 'word'),
GroupingTier('line', 'line')]
transcriptions = [('k.ae.t-s', 0), ('aa.r', 1), ('k.y.uw.t', 2),
('d.aa.g-z', 3), ('aa.r', 4), ('t.uw', 5),
('ay', 6), ('g.eh.s', 7)]
morphemes = [('cat-PL', 0), ('are', 1), ('cute', 2),
('dog-PL', 3), ('are', 4), ('too', 5),
('i', 6), ('guess', 7)]
words = [('cats', 0), ('are', 1), ('cute', 2),
('dogs', 3), ('are', 4), ('too', 5),
('i', 6), ('guess', 7)]
lines = [(0, 2), (3, 5), (6, 7)]
levels[0].add(transcriptions)
levels[1].add(words)
levels[2].add(morphemes)
levels[3].add(lines)
hierarchy = Hierarchy({'word': 'line', 'line': None})
parser = BaseParser(levels, hierarchy)
data = parser.parse_discourse('test_untimed')
return data
@pytest.fixture(scope='session')
def corpus_data_ur_sr():
    """Discourse with underlying (ur) and surface (sr) word representations."""
    levels = [SegmentTier('sr', 'phone'),
              OrthographyTier('word', 'word'),
              TranscriptionTier('ur', 'word')]
    srs = [('k', 0.0, 0.1), ('ae', 0.1, 0.2), ('s', 0.2, 0.4),
           ('aa', 0.5, 0.6), ('r', 0.6, 0.7),
           ('k', 0.8, 0.9), ('u', 0.9, 1.1),
           ('d', 2.0, 2.1), ('aa', 2.1, 2.2), ('g', 2.2, 2.25),
           ('ah', 2.25, 2.3), ('z', 2.3, 2.4),
           ('aa', 2.4, 2.5), ('r', 2.5, 2.6),
           ('t', 2.6, 2.7), ('uw', 2.7, 2.8),
           ('ay', 3.0, 3.1),
           ('g', 3.3, 3.4), ('eh', 3.4, 3.5), ('s', 3.5, 3.6)]
    words = [('cats', 0.0, 0.4), ('are', 0.5, 0.7), ('cute', 0.8, 1.1),
             ('dogs', 2.0, 2.4), ('are', 2.4, 2.6), ('too', 2.6, 2.8),
             ('i', 3.0, 3.1), ('guess', 3.3, 3.6)]
    # Fixed: the 't.uw' interval started at .6 (a typo); it must match the
    # word tier's 'too' span of 2.6-2.8.
    urs = [('k.ae.t.s', 0.0, 0.4), ('aa.r', 0.5, 0.7), ('k.y.uw.t', 0.8, 1.1),
           ('d.aa.g.z', 2.0, 2.4), ('aa.r', 2.4, 2.6), ('t.uw', 2.6, 2.8),
           ('ay', 3.0, 3.1), ('g.eh.s', 3.3, 3.6)]
    levels[0].add(srs)
    levels[1].add(words)
    levels[2].add(urs)
    hierarchy = Hierarchy({'phone': 'word', 'word': None})
    parser = BaseParser(levels, hierarchy)
    data = parser.parse_discourse('test_ursr')
    return data
@pytest.fixture(scope='session')
def lexicon_data():
corpus_data = [{'spelling': 'atema', 'transcription': ['ɑ', 't', 'e', 'm', 'ɑ'], 'frequency': 11.0},
{'spelling': 'enuta', 'transcription': ['e', 'n', 'u', 't', 'ɑ'], 'frequency': 11.0},
{'spelling': 'mashomisi', 'transcription': ['m', 'ɑ', 'ʃ', 'o', 'm', 'i', 's', 'i'],
'frequency': 5.0},
{'spelling': 'mata', 'transcription': ['m', 'ɑ', 't', 'ɑ'], 'frequency': 2.0},
{'spelling': 'nata', 'transcription': ['n', 'ɑ', 't', 'ɑ'], 'frequency': 2.0},
{'spelling': 'sasi', 'transcription': ['s', 'ɑ', 's', 'i'], 'frequency': 139.0},
{'spelling': 'shashi', 'transcription': ['ʃ', 'ɑ', 'ʃ', 'i'], 'frequency': 43.0},
{'spelling': 'shisata', 'transcription': ['ʃ', 'i', 's', 'ɑ', 't', 'ɑ'], 'frequency': 3.0},
{'spelling': 'shushoma', 'transcription': ['ʃ', 'u', 'ʃ', 'o', 'm', 'ɑ'], 'frequency': 126.0},
{'spelling': 'ta', 'transcription': ['t', 'ɑ'], 'frequency': 67.0},
{'spelling': 'tatomi', 'transcription': ['t', 'ɑ', 't', 'o', 'm', 'i'], 'frequency': 7.0},
{'spelling': 'tishenishu', 'transcription': ['t', 'i', 'ʃ', 'e', 'n', 'i', 'ʃ', 'u'],
'frequency': 96.0},
{'spelling': 'toni', 'transcription': ['t', 'o', 'n', 'i'], 'frequency': 33.0},
{'spelling': 'tusa', 'transcription': ['t', 'u', 's', 'ɑ'], 'frequency': 32.0},
{'spelling': 'ʃi', 'transcription': ['ʃ', 'i'], 'frequency': 2.0}]
return corpus_data
@pytest.fixture(scope='session')
def corpus_data_syllable_morpheme_srur():
levels = [SegmentTier('sr', 'phone', label=True),
TranscriptionTier('ur', 'word'),
GroupingTier('syllable', 'syllable'),
MorphemeTier('morphemes', 'word'),
OrthographyTier('word', 'word'),
GroupingTier('line', 'line')]
srs = [('b', 0, 0.1), ('aa', 0.1, 0.2), ('k', 0.2, 0.3), ('s', 0.3, 0.4),
('ah', 0.4, 0.5), ('s', 0.5, 0.6),
('er', 0.7, 0.8),
('f', 0.9, 1.0), ('er', 1.0, 1.1),
('p', 1.2, 1.3), ('ae', 1.3, 1.4), ('k', 1.4, 1.5), ('eng', 1.5, 1.6)]
urs = [('b.aa.k.s-ah.z', 0, 0.6), ('aa.r', 0.7, 0.8),
('f.ao.r', 0.9, 1.1), ('p.ae.k-ih.ng', 1.2, 1.6)]
syllables = [(0, 0.3), (0.3, 0.6), (0.7, 0.8), (0.9, 1.1),
(1.2, 1.5), (1.5, 1.6)]
morphemes = [('box-PL', 0, 0.6), ('are', 0.7, 0.8),
('for', 0.9, 1.1), ('pack-PROG', 1.2, 1.6)]
words = [('boxes', 0, 0.6), ('are', 0.7, 0.8),
('for', 0.9, 1.1), ('packing', 1.2, 1.6)]
lines = [(0, 1.6)]
levels[0].add(srs)
levels[1].add(urs)
levels[2].add(syllables)
levels[3].add(morphemes)
levels[4].add(words)
levels[5].add(lines)
hierarchy = Hierarchy({'phone': 'syllable', 'syllable': 'word',
'word': 'line', 'line': None})
parser = BaseParser(levels, hierarchy)
data = parser.parse_discourse('test_syllable_morpheme')
return data
@pytest.fixture(scope='session')
def graph_db():
    """Connection parameters for the local graph/acoustic test services."""
    return {'host': 'localhost',
            'graph_http_port': 7474,
            'graph_bolt_port': 7687,
            'acoustic_http_port': 8086}
@pytest.fixture(scope='session')
def untimed_config(graph_db, corpus_data_untimed):
config = CorpusConfig('untimed', **graph_db)
with CorpusContext(config) as c:
c.reset()
c.add_types(*corpus_data_untimed.types('untimed'))
c.initialize_import(corpus_data_untimed.speakers,
corpus_data_untimed.token_headers,
corpus_data_untimed.hierarchy.subannotations)
c.add_discourse(corpus_data_untimed)
c.finalize_import(corpus_data_untimed.speakers,
corpus_data_untimed.token_headers,
corpus_data_untimed.hierarchy)
return config
@pytest.fixture(scope='session')
def timed_config(graph_db, corpus_data_timed):
config = CorpusConfig('timed', **graph_db)
with CorpusContext(config) as c:
c.reset()
c.add_types(*corpus_data_timed.types('timed'))
c.initialize_import(corpus_data_timed.speakers,
corpus_data_timed.token_headers,
corpus_data_timed.hierarchy.subannotations)
c.add_discourse(corpus_data_timed)
c.finalize_import(corpus_data_timed.speakers,
corpus_data_timed.token_headers,
corpus_data_timed.hierarchy)
return config
@pytest.fixture(scope='session')
def syllable_morpheme_config(graph_db, corpus_data_syllable_morpheme_srur):
config = CorpusConfig('syllable_morpheme', **graph_db)
with CorpusContext(config) as c:
c.reset()
c.add_types(*corpus_data_syllable_morpheme_srur.types('syllable_morpheme'))
c.initialize_import(corpus_data_syllable_morpheme_srur.speakers,
corpus_data_syllable_morpheme_srur.token_headers,
corpus_data_syllable_morpheme_srur.hierarchy.subannotations)
c.add_discourse(corpus_data_syllable_morpheme_srur)
c.finalize_import(corpus_data_syllable_morpheme_srur.speakers,
corpus_data_syllable_morpheme_srur.token_headers,
corpus_data_syllable_morpheme_srur.hierarchy)
return config
@pytest.fixture(scope='session')
def ursr_config(graph_db, corpus_data_ur_sr):
config = CorpusConfig('ur_sr', **graph_db)
with CorpusContext(config) as c:
c.reset()
c.add_types(*corpus_data_ur_sr.types('ur_sr'))
c.initialize_import(corpus_data_ur_sr.speakers,
corpus_data_ur_sr.token_headers,
corpus_data_ur_sr.hierarchy.subannotations)
c.add_discourse(corpus_data_ur_sr)
c.finalize_import(corpus_data_ur_sr.speakers, corpus_data_ur_sr.token_headers,
corpus_data_ur_sr.hierarchy)
return config
@pytest.fixture(scope='session')
def subannotation_config(graph_db, subannotation_data):
config = CorpusConfig('subannotations', **graph_db)
with CorpusContext(config) as c:
c.reset()
c.add_types(*subannotation_data.types('subannotations'))
c.initialize_import(subannotation_data.speakers,
subannotation_data.token_headers,
subannotation_data.hierarchy.subannotations)
c.add_discourse(subannotation_data)
c.finalize_import(subannotation_data.speakers, subannotation_data.token_headers,
subannotation_data.hierarchy)
return config
@pytest.fixture(scope='session')
def lexicon_test_data():
    # Word -> property dict used by the lexicon enrichment tests.
    data = {'cats': {'POS': 'NNS'}, 'are': {'POS': 'VB'}, 'cute': {'POS': 'JJ'},
            'dogs': {'POS': 'NNS'}, 'too': {'POS': 'IN'}, 'i': {'POS': 'PRP'},
            'guess': {'POS': 'VB'}}
    return data
@pytest.fixture(scope='session')
def acoustic_config(graph_db, textgrid_test_dir):
config = CorpusConfig('acoustic', **graph_db)
acoustic_path = os.path.join(textgrid_test_dir, 'acoustic_corpus.TextGrid')
with CorpusContext(config) as c:
c.reset()
parser = inspect_textgrid(acoustic_path)
c.load(parser, acoustic_path)
config.pitch_algorithm = 'acousticsim'
config.formant_source = 'acousticsim'
return config
@pytest.fixture(scope='session')
def acoustic_syllabics():
    # Vowel (syllabic) phone labels used to build syllables in the test corpora.
    return ['ae', 'aa', 'uw', 'ay', 'eh', 'ih', 'aw', 'ey', 'iy',
            'uh', 'ah', 'ao', 'er', 'ow']
@pytest.fixture(scope='session')
def acoustic_utt_config(graph_db, textgrid_test_dir, acoustic_syllabics):
config = CorpusConfig('acoustic_utt', **graph_db)
acoustic_path = os.path.join(textgrid_test_dir, 'acoustic_corpus.TextGrid')
with CorpusContext(config) as c:
c.reset()
parser = inspect_textgrid(acoustic_path)
c.load(parser, acoustic_path)
c.encode_pauses(['sil'])
c.encode_utterances(min_pause_length=0)
c.encode_syllabic_segments(acoustic_syllabics)
c.encode_syllables()
config.pitch_algorithm = 'acousticsim'
config.formant_source = 'acousticsim'
return config
@pytest.fixture(scope='session')
def overlapped_config(graph_db, textgrid_test_dir, acoustic_syllabics):
config = CorpusConfig('overlapped', **graph_db)
acoustic_path = os.path.join(textgrid_test_dir, 'overlapped_speech')
with CorpusContext(config) as c:
c.reset()
parser = inspect_mfa(acoustic_path)
c.load(parser, acoustic_path)
c.encode_pauses(['sil'])
c.encode_utterances(min_pause_length=0)
c.encode_syllabic_segments(acoustic_syllabics)
c.encode_syllables()
config.pitch_algorithm = 'acousticsim'
config.formant_source = 'acousticsim'
return config
@pytest.fixture(scope='session')
def french_config(graph_db, textgrid_test_dir):
config = CorpusConfig('french', **graph_db)
french_path = os.path.join(textgrid_test_dir, 'FR001_5.TextGrid')
with CorpusContext(config) as c:
c.reset()
parser = inspect_textgrid(french_path)
c.load(parser, french_path)
c.encode_pauses(['sil', '<SIL>'])
c.encode_utterances(min_pause_length=.15)
return config
@pytest.fixture(scope='session')
def fave_corpus_config(graph_db, fave_test_dir):
config = CorpusConfig('fave_test_corpus', **graph_db)
with CorpusContext(config) as c:
c.reset()
parser = inspect_fave(fave_test_dir)
c.load(parser, fave_test_dir)
return config
@pytest.fixture(scope='session')
def summarized_config(graph_db, textgrid_test_dir):
config = CorpusConfig('summarized', **graph_db)
acoustic_path = os.path.join(textgrid_test_dir, 'acoustic_corpus.TextGrid')
with CorpusContext(config) as c:
c.reset()
parser = inspect_textgrid(acoustic_path)
c.load(parser, acoustic_path)
return config
@pytest.fixture(scope='session')
def stressed_config(graph_db, textgrid_test_dir):
config = CorpusConfig('stressed', **graph_db)
stressed_path = os.path.join(textgrid_test_dir, 'stressed_corpus.TextGrid')
with CorpusContext(config) as c:
c.reset()
parser = inspect_mfa(stressed_path)
c.load(parser, stressed_path)
return config
@pytest.fixture(scope='session')
def partitur_corpus_config(graph_db, partitur_test_dir):
config = CorpusConfig('partitur', **graph_db)
partitur_path = os.path.join(partitur_test_dir, 'partitur_test.par,2')
with CorpusContext(config) as c:
c.reset()
parser = inspect_partitur(partitur_path)
c.load(parser, partitur_path)
return config
@pytest.fixture(scope='session')
def praat_path():
    """Locate the Praat binary for the current platform / CI environment."""
    if sys.platform == 'win32':
        return 'praat.exe'
    if os.environ.get('TRAVIS', False):
        # On Travis the binary is installed under $HOME/tools.
        return os.path.join(os.environ.get('HOME'), 'tools', 'praat')
    return 'praat'
@pytest.fixture(scope='session')
def reaper_path():
    # On Travis the REAPER binary is installed under $HOME/tools; elsewhere it
    # is expected on the PATH.
    if os.environ.get('TRAVIS', False):
        return os.path.join(os.environ.get('HOME'), 'tools', 'reaper')
    else:
        return 'reaper'
@pytest.fixture(scope='session')
def vot_classifier_path(test_dir):
    # Pre-trained AutoVOT classifier used by the VOT tests.
    return os.path.join(test_dir, 'classifier', 'sotc_classifiers', 'sotc_voiceless.classifier')
@pytest.fixture(scope='session')
def localhost():
    # Base URL of the locally running service used in HTTP tests.
    return 'http://localhost:8080'
@pytest.fixture(scope='session')
def stress_pattern_file(test_dir):
    # Lexicon file mapping words to stress patterns.
    return os.path.join(test_dir, 'lexicons', 'stress_pattern_lex.txt')
@pytest.fixture(scope='session')
def timed_lexicon_enrich_file(test_dir):
    # CSV enrichment data for the 'timed' test corpus.
    return os.path.join(test_dir, 'csv', 'timed_enrichment.txt')
@pytest.fixture(scope='session')
def acoustic_speaker_enrich_file(test_dir):
    # Speaker-level enrichment CSV for the acoustic corpus.
    return os.path.join(test_dir, 'csv', 'acoustic_speaker_enrichment.txt')
@pytest.fixture(scope='session')
def acoustic_discourse_enrich_file(test_dir):
    # Discourse-level enrichment CSV for the acoustic corpus.
    return os.path.join(test_dir, 'csv', 'acoustic_discourse_enrichment.txt')
@pytest.fixture(scope='session')
def acoustic_inventory_enrich_file(test_dir):
    # Feature file for inventory enrichment.
    # Fixed: a dataset-extraction artifact ("| MontrealCorpusTools/... |") was
    # fused onto the end of this return statement, making it a syntax error.
    return os.path.join(test_dir, 'features', 'basic.txt')
from django import http
from django.db import models
from django.contrib.databrowse.datastructures import EasyModel
from django.shortcuts import render_to_response
from django.utils.safestring import mark_safe
class AlreadyRegistered(Exception):
    # Raised by DatabrowseSite.register() when a model is registered twice.
    pass
class NotRegistered(Exception):
    # Raised by DatabrowseSite.unregister() for a model that was never registered.
    pass
class DatabrowsePlugin(object):
    # Abstract base class for databrowse plugins; subclasses override the
    # hooks below to contribute URLs, index HTML and per-model views.
    def urls(self, plugin_name, easy_instance_field):
        """
        Given an EasyInstanceField object, returns a list of URLs for this
        plugin's views of this object. These URLs should be absolute.
        Returns None if the EasyInstanceField object doesn't get a
        list of plugin-specific URLs.
        """
        return None
    def model_index_html(self, request, model, site):
        """
        Returns a snippet of HTML to include on the model index page.
        """
        return ''
    def model_view(self, request, model_databrowse, url):
        """
        Handles main URL routing for a plugin's model-specific pages.
        """
        # Must be provided by the concrete plugin.
        raise NotImplementedError
class ModelDatabrowse(object):
    """Databrowse views for a single registered model."""
    plugins = {}

    def __init__(self, model, site):
        self.model = model
        self.site = site

    def root(self, request, url):
        """
        Handles main URL routing for the databrowse app.
        `url` is the remainder of the URL -- e.g. 'objects/3'.
        """
        if url is None:
            return self.main_view(request)
        # Split off the plugin name; anything after the first slash is
        # forwarded to the plugin (None when there is no slash at all).
        try:
            plugin_name, rest_of_url = url.split('/', 1)
        except ValueError:  # no '/' in url
            plugin_name, rest_of_url = url, None
        if plugin_name not in self.plugins:
            raise http.Http404('A plugin with the requested name does not exist.')
        return self.plugins[plugin_name].model_view(request, self, rest_of_url)

    def main_view(self, request):
        """Render the model index page, including each plugin's HTML snippet."""
        easy_model = EasyModel(self.site, self.model)
        snippets = [p.model_index_html(request, self.model, self.site)
                    for p in self.plugins.values()]
        return render_to_response('databrowse/model_detail.html', {
            'model': easy_model,
            'root_url': self.site.root_url,
            'plugin_html': mark_safe(u'\n'.join(snippets)),
        })
class DatabrowseSite(object):
    """Registry of models exposed through the databrowse app, plus URL routing."""
    def __init__(self):
        self.registry = {}  # model_class -> databrowse_class
        self.root_url = None
    def register(self, model_or_iterable, databrowse_class=None, **options):
        """
        Registers the given model(s) with the given databrowse site.
        The model(s) should be Model classes, not instances.
        If a databrowse class isn't given, it will use DefaultModelDatabrowse
        (the default databrowse options).
        If a model is already registered, this will raise AlreadyRegistered.
        """
        databrowse_class = databrowse_class or DefaultModelDatabrowse
        # issubclass() raises TypeError when handed a list/tuple, so only call
        # it on actual classes; anything else is assumed to be an iterable of
        # model classes (the old code crashed for the iterable case).
        if isinstance(model_or_iterable, type) and issubclass(model_or_iterable, models.Model):
            model_or_iterable = [model_or_iterable]
        for model in model_or_iterable:
            if model in self.registry:
                # model is itself a class, so __name__ is the model's name
                # (model.__class__.__name__ would name the metaclass).
                raise AlreadyRegistered('The model %s is already registered' % model.__name__)
            self.registry[model] = databrowse_class
    def unregister(self, model_or_iterable):
        """
        Unregisters the given model(s).
        If a model isn't already registered, this will raise NotRegistered.
        """
        # Same class-vs-iterable guard as register().
        if isinstance(model_or_iterable, type) and issubclass(model_or_iterable, models.Model):
            model_or_iterable = [model_or_iterable]
        for model in model_or_iterable:
            if model not in self.registry:
                raise NotRegistered('The model %s is not registered' % model.__name__)
            del self.registry[model]
    def root(self, request, url):
        """
        Handles main URL routing for the databrowse app.
        `url` is the remainder of the URL -- e.g. 'comments/comment/'.
        """
        self.root_url = request.path[:len(request.path) - len(url)]
        url = url.rstrip('/')  # Trim trailing slash, if it exists.
        if url == '':
            return self.index(request)
        elif '/' in url:
            return self.model_page(request, *url.split('/', 2))
        raise http.Http404('The requested databrowse page does not exist.')
    def index(self, request):
        """Render the homepage listing every registered model."""
        m_list = [EasyModel(self, m) for m in self.registry.keys()]
        return render_to_response('databrowse/homepage.html', {'model_list': m_list, 'root_url': self.root_url})
    def model_page(self, request, app_label, model_name, rest_of_url=None):
        """
        Handles the model-specific functionality of the databrowse site, delegating
        to the appropriate ModelDatabrowse class.
        """
        model = models.get_model(app_label, model_name)
        if model is None:
            raise http.Http404("App %r, model %r, not found." % (app_label, model_name))
        try:
            databrowse_class = self.registry[model]
        except KeyError:
            raise http.Http404("This model exists but has not been registered with databrowse.")
        return databrowse_class(model, self).root(request, rest_of_url)
site = DatabrowseSite()
from django.contrib.databrowse.plugins.calendars import CalendarPlugin
from django.contrib.databrowse.plugins.objects import ObjectDetailPlugin
from django.contrib.databrowse.plugins.fieldchoices import FieldChoicePlugin
class DefaultModelDatabrowse(ModelDatabrowse):
    # Fallback ModelDatabrowse used when register() is called without an
    # explicit databrowse_class; wires up the three bundled plugins.
    plugins = {'objects': ObjectDetailPlugin(), 'calendars': CalendarPlugin(), 'fields': FieldChoicePlugin()}
| Shrews/PyGerrit | webapp/django/contrib/databrowse/sites.py | Python | apache-2.0 | 5,648 |
from phievo import __silent__,__verbose__
# Announce the import when phievo runs in verbose mode.
if __verbose__:
    print("Execute palette.py")

from matplotlib import pylab, colors

# Name of the matplotlib colormap used when callers do not supply one;
# mutable via update_default_colormap().
default_colormap = "gist_rainbow"
def update_default_colormap(colormap):
    """Replace the module-wide default colormap.

    Arg:
        colormap (str): name of the matplotlib colormap
            http://matplotlib.org/examples/color/colormaps_reference.html
    """
    # Rebind the module-level name so subsequent color_generate() calls
    # pick up the new default.
    global default_colormap
    default_colormap = colormap
def HSL_to_RGB(h, s, l):
    """Convert HSL (Hue/Saturation/Lightness) to RGB.

    Formula from http://www.easyrgb.com/math.php?MATH=M19#text19

    Args:
        h (float): hue, nominally 0..1 (values outside wrap around the
            chromatic circle)
        s (float): saturation in [0, 1] (0 = grey, 1 = pure color)
        l (float): lightness in [0, 1] (0 = black, 0.5 = pure color, 1 = white)

    Returns:
        (r, g, b) tuple of ints in 0..255; e.g. HSL_to_RGB(0.7, 0.7, 0.6)
        gives (110, 82, 224).

    Raises:
        ValueError: if s or l falls outside [0, 1].
    """
    def channel(v1, v2, hue):
        # Wrap the hue into [0, 1], then pick the piecewise-linear segment.
        while hue < 0.0:
            hue += 1.0
        while hue > 1.0:
            hue -= 1.0
        if 6 * hue < 1.0:
            return v1 + (v2 - v1) * 6.0 * hue
        if 2 * hue < 1.0:
            return v2
        if 3 * hue < 2.0:
            return v1 + (v2 - v1) * ((2.0 / 3.0) - hue) * 6.0
        return v1

    if not (0 <= s <= 1):
        raise ValueError("s (saturation) parameter must be between 0 and 1.")
    if not (0 <= l <= 1):
        raise ValueError("l (lightness) parameter must be between 0 and 1.")

    # Achromatic default: all three channels equal the scaled lightness.
    r = g = b = l * 255
    if s != 0.0:
        var_2 = l * (1.0 + s) if l < 0.5 else (l + s) - (s * l)
        var_1 = 2.0 * l - var_2
        r = 255 * channel(var_1, var_2, h + (1.0 / 3.0))
        g = 255 * channel(var_1, var_2, h)
        b = 255 * channel(var_1, var_2, h - (1.0 / 3.0))
    return (int(round(r)), int(round(g)), int(round(b)))
def floatrange(start, stop, steps):
    """Return `steps` evenly spaced floats from `start` to `stop` inclusive.

    Args:
        start (float): first value.
        stop (float): last value.
        steps (int): number of values generated (must be >= 2).

    Returns:
        list of floats; e.g. floatrange(0.25, 1.3, 5) gives
        [0.25, 0.5125, 0.775, 1.0375, 1.3].
    """
    denom = float(steps) - 1
    # Keep the original evaluation order (i * span / denom) so float
    # rounding matches exactly.
    return [start + float(i) * (stop - start) / denom for i in range(steps)]
def color_generate(n, colormap=None):
    """Returns a palette of colors suited for charting.

    Args:
        n (int): The number of colors to return
        colormap: matplotlib colormap name (str) or a
            LinearSegmentedColormap instance
            http://matplotlib.org/examples/color/colormaps_reference.html

    Return:
        list: n colors in HTML notation (e.g. ['#cce0ff', '#ffcccc', ...]),
        sampled evenly along the colormap.
    """
    if isinstance(colormap, colors.LinearSegmentedColormap):
        cm = colormap
    else:
        if not colormap:
            colormap = default_colormap
        # Bug fix: the selected `colormap` name was ignored and the module
        # default was always looked up instead.
        cm = pylab.get_cmap(colormap)
    return [colors.rgb2hex(cm(1. * i / n)) for i in range(n)]
def make_colormap(seq):
    """Return a LinearSegmentedColormap built from `seq`.

    seq: a sequence of floats and RGB tuples (or color names); the floats
    must be increasing and lie in (0, 1).
    """
    to_rgb = colors.ColorConverter().to_rgb
    # Convert every non-float entry to an RGB triple, then pad with sentinel
    # anchors at 0.0 and 1.0 so each float has a neighbour on both sides.
    points = [col if type(col) == float else to_rgb(col) for col in seq]
    points = [(None,) * 3, 0.0] + points + [1.0, (None,) * 3]

    cdict = {'red': [], 'green': [], 'blue': []}
    for idx, entry in enumerate(points):
        if not isinstance(entry, float):
            continue
        # A float anchor takes its left/right colors from its neighbours.
        red_lo, green_lo, blue_lo = points[idx - 1]
        red_hi, green_hi, blue_hi = points[idx + 1]
        cdict['red'].append([entry, red_lo, red_hi])
        cdict['green'].append([entry, green_lo, green_hi])
        cdict['blue'].append([entry, blue_lo, blue_hi])
    return colors.LinearSegmentedColormap('CustomMap', cdict)
def generate_gradient(values, seq):
    """
    Generates a desired list of colors along a gradient from a custom list
    of colors.

    args:
        values: list of values that need to be allocated to a color
        seq: sequence of colors defining the gradient (see make_colormap)

    return:
        list of HTML hex colors, one per entry of `values`, positioned by
        each value's normalized location between min(values) and max(values).
    """
    cm = make_colormap(seq)
    # Hoist min/max out of the comprehension: the original recomputed both
    # for every element, turning an O(n) mapping into O(n^2).
    lo = min(values)
    hi = max(values)
    span = hi - lo
    return [colors.rgb2hex(cm((val - lo) / span)) for val in values]
| phievo/phievo | phievo/AnalysisTools/palette.py | Python | lgpl-3.0 | 4,432 |
from __future__ import division, print_function, absolute_import
import inspect
import warnings
import numpy as np
import numpy.testing as npt
from scipy.lib._version import NumpyVersion
from scipy import stats
# True when running under NumPy older than 1.7 -- lets tests skip features
# introduced in that release.
NUMPY_BELOW_1_7 = NumpyVersion(np.__version__) < '1.7.0'
def check_normalization(distfn, args, distname):
    """Check that the distribution is normalized three ways: zeroth moment,
    expectation of the constant 1, and cdf evaluated at the upper support
    endpoint must all equal 1."""
    zeroth_moment = distfn.moment(0, *args)
    npt.assert_allclose(zeroth_moment, 1.0)

    # this is a temporary plug: either ncf or expect is problematic;
    # best be marked as a knownfail, but I've no clue how to do it.
    atol, rtol = (1e-5, 0) if distname == "ncf" else (1e-7, 1e-7)

    expect_of_one = distfn.expect(lambda x: 1, args=args)
    npt.assert_allclose(expect_of_one, 1.0, atol=atol, rtol=rtol,
                        err_msg=distname, verbose=True)

    cdf_at_upper = distfn.cdf(distfn.b, *args)
    npt.assert_allclose(cdf_at_upper, 1.0)
def check_moment(distfn, arg, m, v, msg):
    """Check the first two moments against the expected mean `m` and
    variance `v`; infinite expected values must be reported as infinite."""
    m1 = distfn.moment(1, *arg)
    m2 = distfn.moment(2, *arg)
    if not np.isinf(m):
        npt.assert_almost_equal(m1, m, decimal=10, err_msg=msg +
                                ' - 1st moment')
    else:                                # or np.isnan(m1),
        npt.assert_(np.isinf(m1),
                    msg + ' - 1st moment -infinite, m1=%s' % str(m1))

    if not np.isinf(v):
        # Message fix: previously read ' - 2ndt moment'.
        npt.assert_almost_equal(m2 - m1 * m1, v, decimal=10, err_msg=msg +
                                ' - 2nd moment')
    else:                                # or np.isnan(m2),
        npt.assert_(np.isinf(m2),
                    msg + ' - 2nd moment -infinite, m2=%s' % str(m2))
def check_mean_expect(distfn, arg, m, msg):
    """E[X] computed via expect() must match the expected mean `m`
    (skipped when the mean is not finite)."""
    if not np.isfinite(m):
        return
    first_moment = distfn.expect(lambda x: x, arg)
    npt.assert_almost_equal(first_moment, m, decimal=5, err_msg=msg +
                            ' - 1st moment (expect)')
def check_var_expect(distfn, arg, m, v, msg):
    """E[X^2] computed via expect() must equal v + m**2 (skipped when the
    variance is not finite)."""
    if np.isfinite(v):
        m2 = distfn.expect(lambda x: x * x, arg)
        # Message fix: previously read ' - 2st moment (expect)'.
        npt.assert_almost_equal(m2, v + m * m, decimal=5, err_msg=msg +
                                ' - 2nd moment (expect)')
def check_skew_expect(distfn, arg, m, v, s, msg):
    """Third central moment must equal s * v**1.5; a non-finite skewness
    must be NaN."""
    if not np.isfinite(s):
        npt.assert_(np.isnan(s))
        return
    third_central = distfn.expect(lambda x: np.power(x - m, 3), arg)
    npt.assert_almost_equal(third_central, s * np.power(v, 1.5),
                            decimal=5, err_msg=msg + ' - skew')
def check_kurt_expect(distfn, arg, m, v, k, msg):
    """Fourth central moment must equal (k + 3) * v**2; a non-finite excess
    kurtosis must be NaN."""
    if not np.isfinite(k):
        npt.assert_(np.isnan(k))
        return
    fourth_central = distfn.expect(lambda x: np.power(x - m, 4), arg)
    npt.assert_allclose(fourth_central, (k + 3.) * np.power(v, 2),
                        atol=1e-5, rtol=1e-5,
                        err_msg=msg + ' - kurtosis')
def check_entropy(distfn, arg, msg):
    """The differential entropy of the distribution must not be NaN."""
    entropy_value = distfn.entropy(*arg)
    npt.assert_(not np.isnan(entropy_value), msg + 'test Entropy is nan')
def check_private_entropy(distfn, args, superclass):
    """Compare the distribution-specific _entropy implementation against the
    generic one inherited from `superclass`."""
    specific = distfn._entropy(*args)
    generic = superclass._entropy(distfn, *args)
    npt.assert_allclose(specific, generic)
def check_edge_support(distfn, args):
    """The support endpoints self.a / self.b must map to the extreme values
    of cdf/sf and back through ppf/isf; out-of-range quantiles give NaN."""
    endpoints = [distfn.a, distfn.b]
    if isinstance(distfn, stats.rv_continuous):
        npt.assert_equal(distfn.cdf(endpoints, *args), [0.0, 1.0])
        npt.assert_equal(distfn.logcdf(endpoints, *args), [-np.inf, 0.0])
        npt.assert_equal(distfn.sf(endpoints, *args), [1.0, 0.0])
        npt.assert_equal(distfn.logsf(endpoints, *args), [0.0, -np.inf])
    if isinstance(distfn, stats.rv_discrete):
        # Discrete ppf(0) lands one step below the lower support endpoint.
        endpoints = [distfn.a - 1, distfn.b]
    npt.assert_equal(distfn.ppf([0.0, 1.0], *args), endpoints)
    npt.assert_equal(distfn.isf([0.0, 1.0], *args), endpoints[::-1])

    # out-of-bounds for isf & ppf
    npt.assert_(np.isnan(distfn.isf([-1, 2], *args)).all())
    npt.assert_(np.isnan(distfn.ppf([-1, 2], *args)).all())
def check_named_args(distfn, x, shape_args, defaults, meths):
    """Check calling distribution methods with named (keyword) arguments.

    Parameters
    ----------
    distfn : distribution instance
    x : evaluation point passed to each method in `meths`
    shape_args : tuple of shape parameter values
    defaults : expected defaults of ``_parse_args`` (normally ``(0, 1)``)
    meths : sequence of bound methods (pdf, cdf, ...) to exercise
    """
    # check consistency of shapes, numargs and _parse signature.
    # Fix: inspect.getargspec was deprecated since Python 3.0 and removed in
    # 3.11; getfullargspec is the drop-in replacement (its **kwargs slot is
    # named `varkw` instead of `keywords`).
    signature = inspect.getfullargspec(distfn._parse_args)
    npt.assert_(signature.varargs is None)
    npt.assert_(signature.varkw is None)
    npt.assert_(signature.defaults == defaults)

    shape_argnames = signature.args[1:-len(defaults)]  # self, a, b, loc=0, scale=1
    if distfn.shapes:
        shapes_ = distfn.shapes.replace(',', ' ').split()
    else:
        shapes_ = ''
    npt.assert_(len(shapes_) == distfn.numargs)
    npt.assert_(len(shapes_) == len(shape_argnames))

    # check calling w/ named arguments
    shape_args = list(shape_args)

    vals = [meth(x, *shape_args) for meth in meths]
    npt.assert_(np.all(np.isfinite(vals)))

    names, a, k = shape_argnames[:], shape_args[:], {}
    while names:
        # Move one trailing positional argument to a keyword argument and
        # verify the results do not change.
        k.update({names.pop(): a.pop()})
        v = [meth(x, *a, **k) for meth in meths]
        npt.assert_array_equal(vals, v)
    if 'n' not in k:
        # `n` is first parameter of moment(), so can't be used as named arg
        with warnings.catch_warnings():
            warnings.simplefilter("ignore", UserWarning)
            npt.assert_equal(distfn.moment(1, *a, **k),
                             distfn.moment(1, *shape_args))

    # unknown arguments should not go through:
    k.update({'kaboom': 42})
    npt.assert_raises(TypeError, distfn.cdf, x, **k)
def check_random_state(distfn, args):
    """Exercise the random_state attribute of a distribution *instance*.

    This test fiddles with distfn.random_state, which breaks other tests,
    so the original value is saved up front and restored at the end.
    """
    saved_state = distfn.random_state

    # baseline: this relies on the global numpy state
    np.random.seed(1234)
    distfn.random_state = None
    baseline = distfn.rvs(*args, size=8)

    # an explicit instance-level seed must reproduce the baseline
    distfn.random_state = 1234
    npt.assert_equal(baseline, distfn.rvs(*args, size=8))

    # ... and so must an explicit RandomState instance
    distfn.random_state = np.random.RandomState(1234)
    npt.assert_equal(baseline, distfn.rvs(*args, size=8))

    # the instance-level random_state can be overridden per .rvs call
    distfn.random_state = 2
    orig_state = distfn.random_state.get_state()
    npt.assert_equal(baseline,
                     distfn.rvs(*args, size=8,
                                random_state=np.random.RandomState(1234)))
    # ... and that does not alter the instance-level random_state!
    npt.assert_equal(distfn.random_state.get_state(), orig_state)

    # finally, restore the random_state
    distfn.random_state = saved_state
| jsilter/scipy | scipy/stats/tests/common_tests.py | Python | bsd-3-clause | 6,482 |
"""
Copyright (c) 2016, Youngmin Park, Bard Ermentrout
All rights reserved.
Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
Youngmin Park ympark1988@gmail.com
This script file will be to explore the coupled Lambda-omega system
as discussed in our Monday meeting 5/19/2014
As of 9/18/14, I will use this script to explore and confirm some of our new results
TODO: (9/26/2014) fixed phi_experiment. Fix phi_theory.
"""
import scipy as sp
import numpy as np
import matplotlib.pyplot as mp
import scipy
from scipy import integrate
from langevin_1D import LSolve
#from euler_1D import ESolve
from euler import *
import time
def lambda_omega_uncoupled(u, t, a, q):
    """Right-hand side of a single lambda-omega oscillator.

    u: (x, y) state
    t: time (unused; kept for ODE-solver signature compatibility)
    a: input current (accepted for signature compatibility but unused here)
    q: isochron "twist" parameter

    dx/dt = x*lambda(r) - omega(r)*y
    dy/dt = y*lambda(r) + omega(r)*x
    with lambda(r) = r*(1 - r^2) and omega(r) = 1 + q*(r^2 - 1).
    """
    x, y = u[0], u[1]
    r = np.sqrt(x ** 2 + y ** 2)
    growth = r * (1 - r ** 2)        # radial growth rate lambda(r)
    twist = 1 + q * (r ** 2 - 1)     # angular frequency omega(r)
    return [x * growth - twist * y,
            y * growth + twist * x]
def lamom_coupled(u, t, a, alpha, beta, eps, q0, q1, f, dt, partype, noisefile=None):
    """Right-hand side of two diffusively coupled lambda-omega oscillators
    with a time-varying twist parameter q = Q(t).

    u: state (x, y, xhat, yhat)
    t: time
    a: input current (forwarded to lambda_omega_uncoupled)
    alpha, beta: entries of the rotational coupling matrix
    eps: coupling strength
    q0, q1, f, dt, partype, noisefile: forwarded to Q(t)

    Each oscillator receives eps * [[alpha, -beta], [beta, alpha]] times the
    state difference (the second oscillator sees the difference negated).
    """
    x, y = u[0], u[1]
    xhat, yhat = u[2], u[3]

    # Current value of the twist parameter on the slow time scale.
    q = Q(t, eps, q0, q1, f, dt, partype, noisefile)

    # Intrinsic dynamics of one oscillator; named fx/fy so they do not
    # shadow the frequency parameter `f`.
    def fx(px, py):
        return lambda_omega_uncoupled([px, py], t, a, q)[0]

    def fy(px, py):
        return lambda_omega_uncoupled([px, py], t, a, q)[1]

    # coupling matrix and state-difference vector
    cmat = np.array([[alpha, -beta], [beta, alpha]])
    cvec = np.array([xhat - x, yhat - y])

    (xdot, ydot) = np.array([fx(x, y), fy(x, y)]) + \
        eps * np.dot(cmat, cvec)
    (xhatdot, yhatdot) = np.array([fx(xhat, yhat), fy(xhat, yhat)]) + \
        eps * np.dot(cmat, -cvec)

    return np.array([xdot, ydot, xhatdot, yhatdot])
def Q(t, eps, q0, q1, f, dt, partype, noisefile):
    """
    The q parameter as a function of time.

    t: time
    eps: coupling strength (sets the slow time scale eps*t)
    q0, q1: baseline value and modulation amplitude
    f: modulation frequency
    dt: time step (unused here; kept for a uniform call signature)
    partype: 'p' periodic, 's' stochastic, 'qp' quasi-periodic, 'c' constant
    noisefile: for 's', array laid out as [N, t0, tend, sample...]

    Returns None (implicitly) for an unrecognized partype.
    """
    if partype == 'p':
        # periodic: Q(t) = q0 + q1*cos(f*eps*t)
        return q0 + q1*np.cos(f*eps*t)
    elif partype == 's':
        # stochastic: index into a pre-generated noise table.
        # Bug fix: `assert(noisefile != None)` compares a numpy array
        # elementwise under modern numpy, so the assert raised
        # "truth value is ambiguous"; identity is the intended check.
        assert noisefile is not None
        N = noisefile[0]
        t0 = noisefile[1]
        tend = noisefile[2]
        # +3 skips the [N, t0, tend] header entries
        idx = int(N*t/(tend-t0))+3
        q = q0 + q1*noisefile[idx]
        return q
    elif partype == 'qp':
        # quasi-periodic: two incommensurate slow frequencies (1 and sqrt(2))
        q = q0+(q1/2.)*(np.cos(eps*f*t)+np.cos(np.sqrt(2)*eps*f*t))
        return q
    elif partype == 'c':
        # constant
        return q0 + q1
def Hoddapprox(phi, t, a, alpha, beta, eps, q0, q1, f, dt, partype, noisefile=None):
    """Two-mode Fourier approximation of the odd interaction function
    (radius-1 limit cycle), with coefficients fitted linearly in q.

    phi: phase difference
    t: time (used to evaluate the time-dependent parameter q = Q(t))
    remaining parameters are forwarded to Q(); a/alpha are unused here.
    """
    q = Q(t, eps, q0, q1, f, dt, partype, noisefile)
    # Linear fits of the first two Fourier coefficients as functions of q.
    b1 = 0.50178626*q - 0.49707682
    b2 = -0.00106664*q + 0.00105652
    fourier_sum = b1*np.sin(phi) + b2*np.sin(2*phi)
    return 4*eps*fourier_sum
def Hodd(phi, t, a, alpha, beta, eps, q0, q1, f, dt, partype, noisefile=None):
    """Odd part of the interaction function for the phase-difference
    equation (radius-1 limit cycle, WLOG).

    phi: phase difference
    t: time (used to evaluate the time-dependent parameter q = Q(t))
    remaining parameters are forwarded to Q(); a/alpha are unused here.
    """
    q = Q(t, eps, q0, q1, f, dt, partype, noisefile)
    amplitude = 2.*(beta*q-1.)
    return eps*(amplitude*np.sin(phi))
def OU(z, t, mu):
    """Deterministic drift of an Ornstein-Uhlenbeck process: dz/dt = -mu*z.

    t is unused; kept for ODE-solver signature compatibility.
    """
    return -z * mu
def main():
    """Demo driver: integrate two coupled lambda-omega oscillators and
    compare the measured phase difference against the Hodd phase-reduction
    theory, then plot both.

    NOTE(review): this module uses Python 2 print statements; run it under
    a Python 2 interpreter.
    """
    # model / coupling parameters
    alpha=1.;beta=1.;eps=.0025
    noisefile=None;filenum=4
    a=1.;dt=.05#;q=.1
    # use one period for sample plots
    TruePeriod = 2*np.pi#/(1+q*a)
    q0=1.1;q1=1 # fixed
    partype = 'p' # choose s,p,qp,c
    # always use XPP's .tab files!!
    # Each branch sets the modulation frequency f and the time grid t.
    if partype == 'p':
        # periodic
        f=1
        print 'q0='+str(q0)+' q1='+str(q1)+' f='+str(f)
        total = TruePeriod*2000
        t = np.linspace(0,total,total/dt)
    elif partype == 's':
        # stochastic: total run length comes from the noise table header
        f=1
        print 'q0='+str(q0)+' q1='+str(q1)+' f='+str(f)
        noisefile = np.loadtxt("ounormed"+str(filenum)+"_mu1k.tab")
        total = noisefile[2]
        t = np.linspace(0,total,total/dt)
        print "Assuming noise data generated with mu=1000"
        print "Data mean for seed="+str(filenum)+": "+str(np.mean(noisefile[3:]))
    elif partype == 'qp':
        # quasi-periodic
        f=1
        print 'q0='+str(q0)+' q1='+str(q1)+' f='+str(f)
        total = TruePeriod*2000
        t = np.linspace(0,total,total/dt)
    elif partype == 'c':
        # constant q
        f=1
        print 'q0='+str(q0)+' q1='+str(q1)+' f='+str(f)
        total = TruePeriod*2000
        t = np.linspace(0,total,total/dt)
    # analytic initial condition
    #initc = [np.sqrt(a),0,-2/np.sqrt(2),2/np.sqrt(2)]
    initc = [2/np.sqrt(2),2/np.sqrt(2),-2/np.sqrt(2),2/np.sqrt(2)]
    # plot sample trajectory of coupled system
    # get coupled lamom
    #lcsolcoupled = integrate.odeint(lamom_coupled,
    #    initc,t,args=(a,alpha,beta,eps,q0,q1,c,dt,partype,noisefile))
    if partype == 's':
        # stochastic runs use the fixed-step Euler solver so the noise-table
        # indexing inside Q stays aligned with the solver's time grid
        lcsolcoupled = ESolve(lamom_coupled,initc,t,args=(a,alpha,beta,eps,q0,q1,f,dt,partype,noisefile))
        phi1init = np.arctan2(initc[1],initc[0])
        phi2init = np.arctan2(initc[3],initc[2])
        # compute Hodd
        # get theory phase
        phi_theory = ESolve(Hodd,
            phi2init-phi1init,t,args=(a,alpha,beta,eps,q0,q1,f,dt,partype,noisefile))
    elif partype == 'p' or partype == 'qp' or partype == 'c':
        # deterministic runs can use scipy's adaptive odeint
        lcsolcoupled = integrate.odeint(lamom_coupled,initc,t,args=(a,alpha,beta,eps,q0,q1,f,dt,partype,noisefile))
        phi1init = np.arctan2(initc[1],initc[0])
        phi2init = np.arctan2(initc[3],initc[2])
        # compute Hodd
        # get theory phase
        phi_theory = integrate.odeint(Hodd,
            phi2init-phi1init,t,args=(a,alpha,beta,eps,q0,q1,f,dt,partype,noisefile))
    # The two triple-quoted blocks below are intentionally dead plotting
    # code kept for reference.
    """
    mp.figure()
    p1, = mp.plot(lcsolcoupled[:,0],lcsolcoupled[:,1])
    p2, = mp.plot(lcsolcoupled[:,2],lcsolcoupled[:,3])
    mp.title('Coupled lambda-omega with a='+str(a)+',q='+str(q)+',alpha='+str(alpha)+',beta='+str(beta)+',eps='+str(eps))
    mp.xlabel('x or xhat')
    mp.ylabel('y or yhat')
    mp.legend([p1,p2],['(x,y)','(xhat,yhat)'])
    """
    """
    mp.figure()
    p1, = mp.plot(t,lcsolcoupled[:,0])
    p2, = mp.plot(t,lcsolcoupled[:,2])
    mp.title('Coupled lambda-omega with a='+str(a)+',q='+str(q)+',alpha='+str(alpha)+',beta='+str(beta)+',eps='+str(eps))
    mp.xlabel('t')
    mp.ylabel('x and xhat')
    mp.legend([p1,p2],['x','xhat'])
    """
    mp.figure()
    # measured oscillator phases and phase difference, wrapped to (-pi, pi]
    theta1 = np.arctan2(lcsolcoupled[:,1],lcsolcoupled[:,0])
    theta2 = np.arctan2(lcsolcoupled[:,3],lcsolcoupled[:,2])
    phi_exp = np.mod(theta2-theta1+np.pi,2*np.pi)-np.pi
    phi_theory = np.mod(phi_theory+np.pi,2*np.pi)-np.pi
    #corl = scipy.stats.pearsonr(phi_exp,phi_theory)[0]
    #mp.plot(t,theta1)
    #mp.plot(t,theta2)
    #mp.plot(np.linspace(0,t[-1],len(noisefile)),noisefile,color='r')
    p1,=mp.plot(t,phi_theory,lw=2)
    p2,=mp.plot(t,phi_exp,lw=2)
    mp.legend([p1,p2],["phi_theory","phi_experiment"],bbox_to_anchor=(0., 1.02, 1., .102), loc=3,
        ncol=2, mode="expand", borderaxespad=0. )
    mp.xlabel("t")
    mp.ylabel("phi")
    # place the parameter annotation just below the plotted data
    max_y=np.amax([phi_theory,phi_exp])
    min_y=np.amin([phi_theory,phi_exp])
    max_x=np.amax(t);min_x=np.amin(t)
    marginy = (max_y-min_y)/20.
    marginx = (max_x-min_x)/20.
    x_coord = np.amin(t) + marginx # x-coord of text
    y_coord = min_y - marginy # y-coord of text
    mp.text(x_coord,y_coord,"phi theory vs experiment. al="+str(alpha)+",be="+str(beta))
    mp.text(x_coord,y_coord-.8*marginy,"eps="+str(eps)+",q0="+str(q0)+",q1="+str(q1)+",f="+str(f)+",mu="+str(100))
    mp.ylim(y_coord-marginy,max_y+marginy)
    mp.xlim(min_x,max_x)
    #mp.figure()
    #mp.title("phi_theory histogram")
    #mp.hist(phi_theory,bins=100,normed=True)
    mp.show()
# Run the demo only when this file is executed as a script.
if __name__ == '__main__':
    main()
| youngmp/park_and_ermentrout_2016 | lambda_omega.py | Python | bsd-2-clause | 11,342 |
import numpy
class wogan_array(object):
    """2-D grid of square numpy arrays ("partitions"), one per (y, x) cell
    of the given partition dimensions."""

    def __init__(self, partition_dimensions, partition_size, dtype):
        """
        partition_dimensions: object with iterable attributes `x` and `y`;
            one partition is created per (y, x) pair.
        partition_size: side length of each square partition.
        dtype: numpy dtype of the partition arrays.
        """
        self.partition_size = partition_size
        # Bug fix: numpy.zeros(partition_size, partition_size) passed the
        # size where the dtype belongs (a TypeError at runtime) and ignored
        # the caller's `dtype`; the shape must be a (rows, cols) tuple.
        self.partitions = [
            [numpy.zeros((partition_size, partition_size), dtype)
             for x in partition_dimensions.x]
            for y in partition_dimensions.y]

    def get_value(self, x, y):
        # Not implemented yet.
        pass
| keepitwiel/wogan | wogan/wogan_array.py | Python | mit | 339 |
from django.contrib import admin
from .models import Category, Post, Tag
class CategoryAdmin(admin.ModelAdmin):
    # Auto-fill the slug from the title while typing in the admin form.
    prepopulated_fields = {'slug': ('title',)}
class PostAdmin(admin.ModelAdmin):
    # Columns shown on the post changelist page.
    list_display = ('title', 'pub_date',)
    # Auto-fill the slug from the title while typing in the admin form.
    prepopulated_fields = {'slug': ('title',)}
    # Two-pane selector widget for the many-to-many tags field.
    filter_horizontal = ('tags',)
class TagAdmin(admin.ModelAdmin):
    # Auto-fill the slug from the title while typing in the admin form.
    prepopulated_fields = {'slug': ('title',)}
# Register each model with the default admin site using its options class.
admin.site.register(Category, CategoryAdmin)
admin.site.register(Post, PostAdmin)
admin.site.register(Tag, TagAdmin)
| richardcornish/richardcornish | richardcornish/blog/admin.py | Python | bsd-3-clause | 524 |
from .pyalarmdotcom import Alarmdotcom
| Xorso/pyalarmdotcom | pyalarmdotcom/__init__.py | Python | bsd-3-clause | 39 |
import torch
import torch.nn as nn
import torchvision.datasets as dsets
import torchvision.transforms as transforms
from torch.autograd import Variable
# Hyper Parameters
input_size = 784     # 28x28 MNIST images, flattened
hidden_size = 256    # width of the activations passed between the stages
dni_size = 1024      # hidden width of the synthetic-gradient (DNI) module
num_classes = 10     # MNIST digits
num_epochs = 50
batch_size = 500
learning_rate = 1e-3
use_cuda = torch.cuda.is_available()
# MNIST Dataset (download=True fetches the data once into ../data)
train_dataset = dsets.MNIST(root='../data',
                            train=True,
                            transform=transforms.ToTensor(),
                            download=True)

test_dataset = dsets.MNIST(root='../data',
                           train=False,
                           transform=transforms.ToTensor())

# Data Loader (Input Pipeline); only the training stream is shuffled
train_loader = torch.utils.data.DataLoader(dataset=train_dataset,
                                           batch_size=batch_size,
                                           shuffle=True)

test_loader = torch.utils.data.DataLoader(dataset=test_dataset,
                                          batch_size=batch_size,
                                          shuffle=False)
class DNI(nn.Module):
    """Synthetic-gradient module (Decoupled Neural Interfaces): maps an
    activation vector to a predicted gradient of the same size via a
    Linear -> BatchNorm -> ReLU -> Linear MLP."""

    def __init__(self, input_size, hidden_size):
        super(DNI, self).__init__()
        self.fc1 = nn.Linear(input_size, hidden_size)
        self.bn1 = nn.BatchNorm1d(hidden_size)
        self.act1 = nn.ReLU()
        # Output layer maps back to the activation size: one predicted
        # gradient component per activation unit.
        self.fc2 = nn.Linear(hidden_size, input_size)

    def forward(self, x):
        out = self.fc1(x)
        out = self.bn1(out)
        out = self.act1(out)
        out = self.fc2(out)
        return out

    def reset_parameters(self):
        # Bug fix: nn.Module defines no reset_parameters(), so the original
        # super(DNI, self).reset_parameters() raised AttributeError before
        # the zeroing below ever ran.  Re-initialize the trainable
        # submodules, then zero the output layer so the synthetic gradient
        # starts at zero.
        self.fc1.reset_parameters()
        self.bn1.reset_parameters()
        for param in self.fc2.parameters():
            param.data.zero_()
# Synthetic-gradient module operating on the stage-1 activations.
dni = DNI(hidden_size, dni_size)
class Net1(nn.Module):
    """First stage of the decoupled classifier: a single
    Linear -> BatchNorm -> ReLU block."""

    def __init__(self, input_size, hidden_size):
        super(Net1, self).__init__()
        stage = [nn.Linear(input_size, hidden_size),
                 nn.BatchNorm1d(hidden_size),
                 nn.ReLU()]
        self.mlp = nn.Sequential(*stage)

    def forward(self, x):
        return self.mlp(x)
# Stage 1: maps flattened images to hidden activations.
net1 = Net1(input_size, hidden_size)
class Net2(nn.Module):
    """Second stage of the decoupled classifier: hidden block plus the
    final classification layer.  Submodule names (fc1/bn1/act1/fc) are kept
    so state_dict keys stay unchanged."""

    def __init__(self, input_size, hidden_size, num_classes):
        super(Net2, self).__init__()
        self.mlp = nn.Sequential()
        for name, module in (('fc1', nn.Linear(input_size, hidden_size)),
                             ('bn1', nn.BatchNorm1d(hidden_size)),
                             ('act1', nn.ReLU()),
                             ('fc', nn.Linear(hidden_size, num_classes))):
            self.mlp.add_module(name, module)

    def forward(self, x):
        return self.mlp(x)
# Stage 2: maps hidden activations to class logits.
net2 = Net2(hidden_size, hidden_size, num_classes)
# Loss
xent = nn.CrossEntropyLoss()  # classification loss for stage 2
mse = nn.MSELoss()            # regression loss for the synthetic gradient

# Optimizers -- one per module so the three parts update independently.
opt_net1 = torch.optim.Adam(net1.parameters(), lr=learning_rate)
opt_net2 = torch.optim.Adam(net2.parameters(), lr=learning_rate)
opt_dni = torch.optim.Adam(dni.parameters(), lr=learning_rate)

if use_cuda:
    net1.cuda()
    net2.cuda()
    dni.cuda()
# Train the Model
for epoch in range(num_epochs):
    for i, (images, labels) in enumerate(train_loader):
        # Convert torch tensor to Variable
        if use_cuda:
            images = images.cuda()
            labels = labels.cuda()
        images = Variable(images.view(-1, 28 * 28))
        labels = Variable(labels)

        # Forward + Backward + Optimize
        opt_net1.zero_grad()  # zero the gradient buffer
        opt_net2.zero_grad()  # zero the gradient buffer
        opt_dni.zero_grad()  # zero the gradient buffer

        # Forward, Stage1
        h = net1(images)
        # h1 will collect the true gradient from stage 2's backward pass;
        # h2 is a detached copy fed to the DNI module.
        h1 = Variable(h.data, requires_grad=True)
        h2 = Variable(h.data, requires_grad=False)

        # Forward, Stage2
        outputs = net2(h1)

        # Backward: task loss through stage 2 only (stops at h1)
        loss = xent(outputs, labels)
        loss.backward()

        # Synthetic gradient drives stage 1's backward pass
        grad = dni(h2)
        h.backward(grad)

        # regress: train the DNI to match the true gradient captured in h1
        regress_loss = mse(grad, Variable(h1.grad.data))
        regress_loss.backward()

        # optimize
        opt_net1.step()
        opt_net2.step()
        opt_dni.step()

        if (i + 1) % 100 == 0:
            # NOTE(review): Variable and loss.data[0] are pre-0.4 PyTorch
            # idioms; on torch >= 0.4 this needs loss.item() -- confirm the
            # pinned torch version before modernizing.
            print ('Epoch [%d/%d], Step [%d/%d], Loss: %.4f'
                   % (epoch + 1, num_epochs, i + 1, len(train_dataset) // batch_size, loss.data[0]))
# Test the Model
# NOTE(review): the networks are left in training mode here, so the
# BatchNorm layers keep using batch statistics; consider net1.eval() /
# net2.eval() before evaluating.
correct = 0
total = 0
for images, labels in test_loader:
    if use_cuda:
        images = images.cuda()
        labels = labels.cuda()
    images = Variable(images.view(-1, 28 * 28))
    outputs = net2(net1(images))
    # predicted class = argmax over the class-logit dimension
    _, predicted = torch.max(outputs.data, 1)
    total += labels.size(0)
    correct += (predicted == labels).sum()

print('Accuracy of the network on the 10000 test images: %d %%' %
      (100 * correct / total))
| DingKe/pytorch_workplace | dni/mlp.py | Python | mit | 4,728 |
from struct import unpack, pack, calcsize
from mobi_languages import LANGUAGES
from lz77 import uncompress
def LOG(*args):
    # No-op logging hook; call sites pass (verbosity_level, message).
    # Replace this stub to get parser diagnostics.
    pass
# MOBI header layout for record 0: (field name, byte offset, struct format).
# Each format is unpacked big-endian (">" is prepended at parse time).
MOBI_HDR_FIELDS = (
    ("id", 16, "4s"),
    ("header_len", 20, "I"),
    ("mobi_type", 24, "I"),
    ("encoding", 28, "I"),
    ("UID", 32, "I"),
    ("generator_version", 36, "I"),
    ("reserved", 40, "40s"),
    ("first_nonbook_idx", 80, "I"),
    ("full_name_offs", 84, "I"),
    ("full_name_len", 88, "I"),
    ("locale_highbytes", 92, "H"),
    ("locale_country", 94, "B"),
    ("locale_language", 95, "B"),
    ("input_lang", 96, "I"),
    ("output_lang", 100, "I"),
    ("format_version", 104, "I"),
    ("first_image_idx", 108, "I"),
    ("huff/cdic_record", 112, "I"),
    ("huff/cdic_count", 116, "I"),
    ("datp_record", 120, "I"),
    ("datp_count", 124, "I"),
    ("exth_flags", 128, "I"),
    ("unknowni@132", 132, "32s"),
    ("unknown@164", 164, "I"),
    ("drm_offs", 168, "I"),
    ("drm_count", 172, "I"),
    ("drm_size", 176, "I"),
    ("drm_flags", 180, "I"),
    ("unknown@184", 184, "I"),
    ("unknown@188", 188, "I"),
    ("unknown@192", 192, "H"),
    ("last_image_record", 194, "H"),
    ("unknown@196", 196, "I"),
    ("fcis_record", 200, "I"),
    ("unknown@204", 204, "I"),
    ("flis_record", 208, "I"),
    ("unknown@212", 212, "I"),
    ("extra_data_flags", 242, "H")
)

# EXTH block header format: skip the "EXTH" magic, then length and count.
EXTH_FMT = ">4x2I"
'''4x = "EXTH", I = hlen, I = record count'''

# Numeric EXTH record type -> human-readable key used in Book.exth.
# Entries flagged "# list" may occur multiple times in one file.
EXTH_RECORD_TYPES = {
    1: 'drm server id',
    2: 'drm commerce id',
    3: 'drm ebookbase book id',
    100: 'author',  # list
    101: 'publisher',  # list
    102: 'imprint',
    103: 'description',
    104: 'isbn',  # list
    105: 'subject',  # list
    106: 'publication date',
    107: 'review',
    108: 'contributor',  # list
    109: 'rights',
    110: 'subjectcode',  # list
    111: 'type',
    112: 'source',
    113: 'asin',
    114: 'version number',  # int
    115: 'sample',  # int (or bool)?
    116: 'start reading',
    117: 'adult',
    118: 'retail price',
    119: 'retail price currency',
    201: 'cover offset',  # int
    202: 'thumbnail offset',  # int
    203: 'has fake cover',  # bool?
    208: 'watermark',
    209: 'tamper proof keys',
    401: 'clipping limit',  # int
    402: 'publisher limit',
    404: 'ttsflag',
    501: 'cde type',
    502: 'last update time',
    503: 'updated title'
}

# PalmDoc/PRC record-0 header format.
PRC_HDRFMT = '>H2xIHHI'  # Compression,unused,Len,Count,Size,Pos
def parse_palmdb(filename):
    """Open *filename* as a PalmOS database via the third-party `palm`
    module and return the Database object."""
    import palm
    return palm.Database(filename)
class Book:
    """Lightweight MOBI/PalmDoc ebook metadata reader.

    Parses the PalmDB container and, for 'BOOKMOBI' files, the MOBI and
    optional EXTH headers; for older 'TEXtREAd' files it extracts Dublin
    Core metadata from the first record's markup.

    NOTE(review): this module is Python 2 era code (str == bytes,
    BeautifulSoup 3 import); byte/str handling would need changes for
    Python 3.
    """

    def __init__(self, fn):
        """Read metadata from the file at path *fn*.

        On unsupported or unparsable files the constructor returns early
        with is_a_book left False.
        """
        self.filename = fn
        # Set some fields to defaults
        self.title = fn
        self.author = "??"
        self.language = "??"
        # Rob Addition: Description
        self.description = ""
        self.is_a_book = False
        # Only the first 68 bytes are needed to identify the file type.
        f = open(fn)
        d = f.read(68)
        f.close()
        # MOBI text-encoding codes -> Python codec names.
        encodings = {
            1252: 'cp1252',
            65001: 'utf-8'
        }
        supported_types = ('BOOKMOBI', 'TEXtREAd')
        # The PalmDB type/creator signature lives at bytes 60..68.
        self.type = d[60:68]
        if self.type not in supported_types:
            LOG(1, "Unsupported file type %s" % (self.type))
            return None
        try:
            db = parse_palmdb(fn)
        except:
            # Any container-level parse failure leaves is_a_book False.
            return None
        self.is_a_book = True
        # now we have a better guess at the title, use it for now
        self.title = db.name
        self.records = db.records
        rec0 = self.records[0].data
        #LOG(5,repr(rec0))
        if self.type == 'BOOKMOBI':
            LOG(3, "This is a MOBI book")
            self.mobi = {}
            # Unpack every known header field that fits inside the record
            # (and inside the declared header length once that is known).
            for field, pos, fmt in MOBI_HDR_FIELDS:
                end = pos + calcsize(fmt)
                if (end > len(rec0) or
                    ("header_len" in self.mobi
                     and end > self.mobi["header_len"])):
                    continue
                LOG(4, "field: %s, fmt: %s, @ [%d:%d], data: %s" % (
                    field, fmt, pos, end, repr(rec0[pos:end])))
                (self.mobi[field], ) = unpack(">%s" % fmt, rec0[pos:end])
            LOG(3, "self.mobi: %s" % repr(self.mobi))
            # Get and decode the book name
            if self.mobi['locale_language'] in LANGUAGES:
                lang = LANGUAGES[self.mobi['locale_language']]
                if self.mobi['locale_country'] == 0:
                    LOG(2, "Book language: %s" % lang[0][1])
                    self.language = "%s (%s)" % (lang[0][1], lang[0][0])
                elif self.mobi['locale_country'] in lang:
                    country = lang[self.mobi['locale_country']]
                    LOG(2, "Book language is %s (%s)" % (
                        lang[0][1], country[1]))
                    self.language = "%s (%s-%s)" % (
                        lang[0][1],
                        lang[0][0],
                        country[0]
                    )
            # Full (display) name is stored elsewhere in record 0.
            pos = self.mobi['full_name_offs']
            end = pos + self.mobi['full_name_len']
            self.title = rec0[pos:end].decode(encodings[self.mobi['encoding']])
            LOG(2, "Book name: %s" % self.title)
            if self.mobi['id'] != 'MOBI':
                LOG(0, "Mobi header missing!")
                return None
            if (0x40 & self.mobi['exth_flags']):  # check for EXTH
                self.exth = parse_exth(rec0, self.mobi['header_len'] + 16)
                LOG(3, "EXTH header: %s" % repr(self.exth))
                if 'author' in self.exth:
                    self.author = ' & '.join(self.exth['author'])
                else:
                    self.author = "n/a"
                self.rawdata = d
                if (('updated title' in self.exth) and
                        (type(self.exth['updated title']) is str)):
                    self.title = ' '.join(self.exth['updated title'])
                if 'description' in self.exth:
                    self.description = ' <P> '.join(self.exth['description'])
        elif self.type == 'TEXtREAd':
            LOG(2, "This is an older MOBI book")
            self.rawdata = d
            # PalmDoc header of record 0: compression flag and layout info.
            compression, data_len, rec_count, rec_size, pos = unpack(
                PRC_HDRFMT, rec0[:calcsize(PRC_HDRFMT)])
            LOG(3, "compression %d, data_len %d, rec_count %d, rec_size %d" %
                (compression, data_len, rec_count, rec_size))
            if compression == 2:
                data = uncompress(self.records[1].data)
            else:
                data = self.records[1].data
            from BeautifulSoup import BeautifulSoup
            soup = BeautifulSoup(data)
            self.metadata = soup.fetch("dc-metadata")
            try:
                self.title = soup.fetch("dc:title")[0].getText()
                self.author = soup.fetch("dc:creator")[0].getText()
                self.language = soup.fetch("dc:language")[0].getText()
            except:
                self.title, self.author, self.language = ("Unknown", "Unknown",
                                                          "en-us")
            try:
                self.description = soup.fetch("dc:description")[0].getText()
            except:
                pass

    def to_html(self):
        """Decompress and join the text records into one HTML string.

        For MOBI books, text records stop at first_image_idx; otherwise
        all records after record 0 are used.
        """
        last_idx = (
            self.mobi['first_image_idx'] if 'mobi' in self.__dict__ else -1)
        return ''.join([uncompress(x.data) for x in self.records[1:last_idx]])
def parse_exth(data, pos):
    """Parse the EXTH metadata block expected at offset *pos* of *data*.

    Returns a dict mapping human-readable record names (see
    EXTH_RECORD_TYPES) to lists of raw values, or None when no EXTH header
    starts at *pos*.  4-byte payloads are unpacked to ints; unknown record
    types are logged and skipped.
    """
    parsed = {}
    if pos != data.find('EXTH'):
        LOG(0, "EXTH header not found where it should be @%d" % pos)
        return None

    header_end = pos + calcsize(EXTH_FMT)
    hlen, count = unpack(EXTH_FMT, data[pos:header_end])
    LOG(4, "pos: %d, EXTH header len: %d, record count: %d" % (
        pos, hlen, count))
    pos = header_end

    for _ in range(count):
        field_end = pos + calcsize(">2I")
        rec_type, rec_len = unpack(">2I", data[pos:field_end])
        value = data[field_end:pos + rec_len]
        # Record length includes the 8-byte (type, length) prefix; a 4-byte
        # payload is an unsigned int.
        if rec_len - 8 == 4:
            value = unpack(">I", value)[0]
        if rec_type in EXTH_RECORD_TYPES:
            name = EXTH_RECORD_TYPES[rec_type]
            LOG(4, "EXTH record '%s' @%d+%d: '%s'" % (
                name, pos, rec_len - 8, value))
            parsed.setdefault(name, []).append(value)
        else:
            LOG(4, "Found an unknown EXTH record type %d @%d+%d: '%s'" %
                (rec_type, pos, rec_len - 8, repr(value)))
        pos += rec_len
    return parsed
| robwebset/script.ebooks | resources/lib/kiehinen/ebook.py | Python | gpl-2.0 | 8,604 |
import os
import sys
import urllib
import subprocess
def download(file_url, local_filename):
    """Fetch *file_url* and write the response body to *local_filename*.

    Fixes: the destination is opened in binary mode ('wb') so the payload
    is written byte-for-byte (text mode corrupts binary downloads and
    rewrites newlines on Windows), and both handles are closed even when
    the read or write fails.
    """
    web_file = urllib.urlopen(file_url)
    try:
        local_file = open(local_filename, 'wb')
        try:
            local_file.write(web_file.read())
        finally:
            local_file.close()
    finally:
        web_file.close()
def get_windows_pip_path():
    """Derive the pip launcher path from the current interpreter path,
    assuming the Windows layout <python_dir>\\Scripts\\pip.

    The directory components are re-joined with forward slashes, matching
    the original's output exactly.
    """
    exe_parts = sys.executable.split("\\")
    return "%s/Scripts/pip" % "/".join(exe_parts[:-1])
def pip_install_module(module_name):
    """Make sure pip is available -- bootstrapping it with get-pip.py when
    missing -- and then use it to install *module_name*."""
    pip_path = "pip"
    DEVNULL = open(os.devnull,'wb')  # silence the probe subprocess calls
    new_installation = True
    try:
        subprocess.call(["pip"], stdout=DEVNULL) # verify if pip is already installed
    except OSError as e:
        # 'pip' is not on PATH; on Windows, try the interpreter's Scripts dir.
        if(sys.platform[:3] == "win"):
            pip_path = get_windows_pip_path()
            try:
                subprocess.call([pip_path],stdout=DEVNULL)
                new_installation = False
                print "[+] Found Windows pip executable at '%s'" %pip_path
            except:
                pass
        if(new_installation):
            print "[!] pip is not currently installed."
            # Bootstrap pip with the official installer script.
            if(os.path.isfile("get-pip.py") is False):
                print "[*] Downloading get-pip.py.."
                download("https://bootstrap.pypa.io/get-pip.py","get-pip.py")
            else:
                print "[+] get-pip-py found in the current directory."
            os.system("python get-pip.py")
            try:
                # Re-probe: did the bootstrap put pip on PATH?
                subprocess.call(["pip"],stdout=DEVNULL)
            except:
                if(sys.platform[:3] == "win"):
                    # Fall back to the interpreter's Scripts directory again.
                    python_dir = sys.executable # "C:\\Python27\\python.exe"
                    split = python_dir.split("\\")
                    pip_path = ""
                    for i in range(0,len(split)-1): # let's avoid python.exe
                        pip_path = "%s/%s" %(pip_path,split[i])
                    pip_path = "%s/Scripts/pip" %pip_path[1:]
            if(new_installation):
                try:
                    os.remove("get-pip.py")  # clean up the downloaded installer
                except:
                    pass
            os.system("%s install --upgrade pip" %pip_path)
    print "\n[*] Installing module '%s'" %module_name
    os.system("%s install %s" %(pip_path,module_name))
| D35m0nd142/LFISuite | pipper.py | Python | gpl-3.0 | 1,908 |
#!/usr/bin/python2
# -*- coding: utf-8 -*-
#
# Copyright 2015 - 2016 Matteo Alessio Carrara <sw.matteoac@gmail.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
import virtualbrowser
import logging
import version
logging.basicConfig(level=logging.WARNING)

# Open the browser
browser = virtualbrowser.Browser()
# Opening the browser also opens a window
window = browser.windows_manager.windows.values()[0]
# Every window contains at least one tab
tab = window.tabs_manager.tabs.values()[0]
# Type the URL into the address bar
tab.url = "https://www.python.org/"
# Press enter, loading the page
tab.download_content()
print "Il titolo della pagina nella scheda 1 è", tab.bs_content.title.text

# Now open a new tab
tab2 = window.tabs_manager.add_tab("https://google.com")
# Load the page
tab2.download_content()
print "Il titolo della pagina nella scheda 2 è", tab2.bs_content.title.text

# Back to tab 1 -- it is still open
print "Il titolo della pagina nella scheda 1 è", tab.bs_content.title.text

# Now open a new window
window2 = browser.windows_manager.add_window()
# This one, too, already has an open tab
tab3 = window2.tabs_manager.tabs.values()[0]
tab3.url = "https://m.facebook.com"
tab3.download_content()

# Show that all three tabs are still open
print "Il titolo della pagina nella scheda 3 è", tab3.bs_content.title.text
print "Il titolo della pagina nella scheda 2 è", tab2.bs_content.title.text
print "Il titolo della pagina nella scheda 1 è", tab.bs_content.title.text

# Finally, quit
window2.close()
window.close()
| matteoalessiocarrara/bot-virtualbrowser | src/lib/human/src/test.py | Python | gpl-3.0 | 2,336 |
from dataclasses import dataclass
import pytest
import lcs.strategies.reinforcement_learning as rl
@dataclass
class Classifier:
    # Minimal stand-in for a real classifier: only the two attributes the
    # reinforcement-learning strategies under test read and write.
    r: float   # reward estimate
    ir: float  # intermediate (immediate) reward estimate
class TestReinforcementLearning:

    @pytest.mark.skip("Need to investigate this...")
    @pytest.mark.parametrize("_r0, reward, _r1", [
        (0.5, 0, 0.5),
        (0.5, 1, 0.55),
        (0.5, 10, 1.45),
    ])
    def test_should_perform_bucket_brigade_update(self, _r0, reward, _r1):
        """Reward should be backed up into the *previous* classifier only;
        the current classifier and both `ir` fields must stay untouched."""
        # given
        prev_cl = Classifier(_r0, None)
        cl = Classifier(0.5, None)
        # when
        rl.bucket_brigade_update(cl, prev_cl, reward)
        # then
        assert cl.r == 0.5
        assert prev_cl.r == _r1
        assert cl.ir is None
        assert prev_cl.ir is None

    def test_should_perform_bucket_brigade_update_when_first_step(self):
        """With no previous classifier the update must be a no-op."""
        # given
        prev_cl = None
        cl = Classifier(0.5, None)
        # when
        rl.bucket_brigade_update(cl, prev_cl, 100)
        # then
        assert cl.r == 0.5
        assert prev_cl is None

    @pytest.mark.parametrize("_r0, _r1, _ir0, _ir1", [
        (0.5, 97.975, 0.0, 50.0)
    ])
    def test_should_update_classifier(self, _r0, _r1, _ir0, _ir1):
        """Widrow-Hoff update: the expected values match
        r += beta * (env_reward + gamma * max_fitness - r) = 97.975 and
        ir += beta * (env_reward - ir) = 50.0."""
        # given
        cl = Classifier(r=_r0, ir=_ir0)
        beta = 0.05
        gamma = 0.95
        env_reward = 1000
        max_match_set_fitness = 1000
        # when
        rl.update_classifier(cl,
                             env_reward,
                             max_match_set_fitness,
                             beta, gamma)
        # then (float comparison with absolute tolerance)
        assert abs(cl.r - _r1) < 0.001
        assert abs(cl.ir - _ir1) < 0.001
| khozzy/pyalcs | tests/lcs/strategies/test_reinforcement_learning.py | Python | gpl-3.0 | 1,644 |
# -*- coding: utf-8 -*-
from .symlinknodemixin import SymlinkNodeMixin
from .util import _repr
class SymlinkNode(SymlinkNodeMixin):
    """Tree node that forwards attribute access to another node (*target*)."""

    def __init__(self, target, parent=None, children=None, **kwargs):
        u"""
        Tree node which references to another tree node.

        Args:
            target: Symbolic Link Target. Another tree node, which is refered to.

        Keyword Args:
            parent: Reference to parent node.
            children: Iterable with child nodes.
            *: Any other given attribute is just stored as attribute **in** `target`.

        The :any:`SymlinkNode` has its own parent and its own child nodes.
        All other attribute accesses are just forwarded to the target node.

        >>> from anytree import SymlinkNode, Node, RenderTree
        >>> root = Node("root")
        >>> s1 = Node("sub1", parent=root, bar=17)
        >>> l = SymlinkNode(s1, parent=root, baz=18)
        >>> l0 = Node("l0", parent=l)
        >>> print(RenderTree(root))
        Node('/root')
        ├── Node('/root/sub1', bar=17, baz=18)
        └── SymlinkNode(Node('/root/sub1', bar=17, baz=18))
            └── Node('/root/sub1/l0')

        Any modifications on the target node are also available on the linked node and vice-versa:

        >>> s1.foo = 4
        >>> s1.foo
        4
        >>> l.foo
        4
        >>> l.foo = 9
        >>> s1.foo
        9
        >>> l.foo
        9
        """
        self.target = target
        # Extra keyword attributes land on the *target*, per the contract above.
        self.target.__dict__.update(kwargs)
        self.parent = parent
        if children:
            self.children = children

    def __repr__(self):
        # Render as SymlinkNode(<target repr>); 'target' is blacklisted so it
        # is not repeated as a keyword argument by the generic helper.
        return _repr(self, [repr(self.target)], nameblacklist=("target", ))
| c0fec0de/anytree | anytree/node/symlinknode.py | Python | apache-2.0 | 1,719 |
from glob import glob
from random import randint
from . import behavior
from ..util import Position
class Entity(object):
    ''' Base class for any game object.

    Subclasses configure themselves through the class attributes below;
    per-instance state (position, id, queued intentions) is set up in
    __init__.
    '''
    # CSS-like class names; main_class also selects the sprite files.
    main_class = None
    other_classes = []
    starting_sprite = None

    @staticmethod
    def get_sprites(main_class):
        ''' Map sprite type -> URL for every sprite file starting with
        main_class.

        NOTE(review): the relative glob assumes the process working directory
        is the project root, and that file names have exactly four
        dot-separated parts -- confirm against the asset layout.
        '''
        sprites = {}
        for sprite in glob('static/img/sprites/%s*' % main_class):
            _, sprite_type, _, _ = sprite.split('.')
            sprites[sprite_type] = '/' + sprite
        return sprites

    @classmethod
    def default(cls, x, y, **properties):
        ''' Alternate constructor: build an entity of this class at (x, y)
        using the class-level defaults.
        '''
        position = Position(int(x), int(y))
        return cls(
            position,
            cls.main_class,
            cls.other_classes,
            cls.starting_sprite,
            **properties
        )

    def __init__(self, position, main_class, other_classes, starting_sprite, **properties):
        self.position = position
        self.main_class = main_class
        self.other_classes = other_classes
        self.starting_sprite = starting_sprite
        self.sprites = self.get_sprites(main_class)
        self.id = None  # assigned later via set_id()
        self.intentions = []  # queue of callables, executed one per update()
        self._changes = []  # pending change events, drained by changes()
        self.properties = properties

    def __repr__(self):
        return '%s(%s)' % (self.__class__.__name__, repr(self.position))

    def set_id(self, id_number):
        # Ids are namespaced by sprite class, e.g. 'monkey-3'.
        self.id = '%s-%d' % (self.main_class, id_number)

    def to_dict(self):
        ''' JSON-serialisable description of the entity for the client. '''
        return {
            'selector':"#%s" % self.id,
            'id': self.id,
            'position': self.position,
            'main_class': self.main_class,
            'other_classes': self.other_classes,
            'starting_sprite': self.starting_sprite,
            'sprites': self.sprites,
        }

    @property
    def changed(self):
        # True while there are undelivered change events.
        return len(self._changes) > 0

    def changes(self):
        ''' Return the pending change events and clear the queue. '''
        output = self._changes
        self._changes = []
        return output

    def has_class(self, selected_class):
        ''' True if selected_class is the main class or one of the extras. '''
        all_classes = set([self.main_class]).union(self.other_classes)
        return selected_class in all_classes

    def update(self, world):
        ''' Run (and consume) the oldest queued intention, if any. '''
        if len(self.intentions) > 0:
            intent = self.intentions[0]
            intent(world)
            self.intentions = self.intentions[1:]

    def receive(self, message):
        ''' Dispatch an incoming message dict to receive_<type>(), if the
        subclass defines such a handler. '''
        print '%s received %s' % (self.id, repr(message))
        method = getattr(self, 'receive_%s' % message[u'type'], None)
        if method:
            method(message)
class Pc(Entity, behavior.CanMove, behavior.CanBite):
    ''' Player character: a monkey that can move and bite. '''
    main_class = 'monkey'
    other_classes = ['facing-left', 'player']
    starting_sprite = 'walk'
class Npc(Entity, behavior.CanMove):
    ''' Non-player character that occasionally wanders to a random spot. '''
    main_class = 'large-monkey'
    other_classes = ['facing-left']
    starting_sprite = 'walk'

    def update(self, world):
        ''' When idle, start a random wander with ~1% probability per tick;
        otherwise process the queued intentions inherited from Entity. '''
        if not self.intentions:
            if randint(0, 100) == 1:
                self.intent_move(world.random_position())
        else:
            # Name the class explicitly: super(self.__class__, self) recurses
            # infinitely as soon as Npc is subclassed.
            super(Npc, self).update(world)
| clofresh/spriteful | python/spriteful/entity/core.py | Python | bsd-3-clause | 3,057 |
# The MIT License (MIT) # Copyright (c) 2014-2017 University of Bristol
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE
# OR OTHER DEALINGS IN THE SOFTWARE.
from hyperstream.stream import StreamInstance
from hyperstream.tool import Tool, check_input_stream_count
import json
class Jsonify(Tool):
    """
    Converts the value part of the stream instances to json format
    """
    def __init__(self):
        super(Jsonify, self).__init__()

    @check_input_stream_count(1)
    def _execute(self, sources, alignment_stream, interval):
        # Serialise every (timestamp, value) pair of the single input stream
        # into a deterministic (sort_keys=True) JSON document.
        source_window = sources[0].window(interval, force_calculation=True)
        for timestamp, value in source_window:
            payload = json.dumps([str(timestamp), value], sort_keys=True)
            yield StreamInstance(timestamp, payload)
| IRC-SPHERE/HyperStream | hyperstream/tools/jsonify/2016-10-26_v0.0.1.py | Python | mit | 1,694 |
# ***************************************************************************
# * (c) 2019 Eliud Cabrera Castillo <e.cabrera-castillo@tum.de> *
# * *
# * This file is part of the FreeCAD CAx development system. *
# * *
# * This program is free software; you can redistribute it and/or modify *
# * it under the terms of the GNU Lesser General Public License (LGPL) *
# * as published by the Free Software Foundation; either version 2 of *
# * the License, or (at your option) any later version. *
# * for detail see the LICENCE text file. *
# * *
# * FreeCAD is distributed in the hope that it will be useful, *
# * but WITHOUT ANY WARRANTY; without even the implied warranty of *
# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
# * GNU Library General Public License for more details. *
# * *
# * You should have received a copy of the GNU Library General Public *
# * License along with FreeCAD; if not, write to the Free Software *
# * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 *
# * USA *
# * *
# ***************************************************************************
"""Provides GUI tools to create polar Array objects."""
## @package gui_polararray
# \ingroup draftguitools
# \brief Provides GUI tools to create polar Array objects.
## \addtogroup draftguitools
# @{
from pivy import coin
from PySide.QtCore import QT_TRANSLATE_NOOP
import FreeCAD as App
import FreeCADGui as Gui
import Draft
import Draft_rc # include resources, icons, ui files
import draftutils.todo as todo
from draftutils.messages import _msg, _log
from draftutils.translate import translate
from draftguitools import gui_base
from drafttaskpanels import task_polararray
# The module is used to prevent complaints from code checkers (flake8)
bool(Draft_rc.__name__)
class PolarArray(gui_base.GuiCommandBase):
    """Gui command for the PolarArray tool."""

    def __init__(self):
        super(PolarArray, self).__init__()
        self.command_name = "Polar array"
        # 3D-view interaction state; populated in Activated().
        self.location = None
        self.mouse_event = None
        self.view = None
        self.callback_move = None
        self.callback_click = None
        self.ui = None
        self.point = App.Vector()

    def GetResources(self):
        """Set icon, menu and tooltip."""
        d = {'Pixmap': 'Draft_PolarArray',
             'MenuText': QT_TRANSLATE_NOOP("Draft", "Polar array"),
             'ToolTip': QT_TRANSLATE_NOOP("Draft", "Creates copies of the selected object, and places the copies in a polar pattern\ndefined by a center of rotation and its angle.\n\nThe array can be turned into an orthogonal or a circular array by changing its type.")}
        return d

    def Activated(self):
        """Execute when the command is called.

        We add callbacks that connect the 3D view with
        the widgets of the task panel.
        """
        _log("GuiCommand: {}".format(self.command_name))
        #_msg("{}".format(16*"-"))
        #_msg("GuiCommand: {}".format(self.command_name))
        self.location = coin.SoLocation2Event.getClassTypeId()
        self.mouse_event = coin.SoMouseButtonEvent.getClassTypeId()
        self.view = Draft.get3DView()
        self.callback_move = \
            self.view.addEventCallbackPivy(self.location, self.move)
        self.callback_click = \
            self.view.addEventCallbackPivy(self.mouse_event, self.click)
        self.ui = task_polararray.TaskPanelPolarArray()
        # The calling class (this one) is saved in the object
        # of the interface, to be able to call a function from within it.
        self.ui.source_command = self
        # Showing the dialog is deferred via the ToDo queue.
        # Gui.Control.showDialog(self.ui)
        todo.ToDo.delay(Gui.Control.showDialog, self.ui)

    def move(self, event_cb):
        """Execute as a callback when the pointer moves in the 3D view.

        It should automatically update the coordinates in the widgets
        of the task panel.
        """
        event = event_cb.getEvent()
        mousepos = event.getPosition().getValue()
        ctrl = event.wasCtrlDown()
        # Snap the raw screen position; Ctrl toggles active snapping.
        self.point = Gui.Snapper.snap(mousepos, active=ctrl)
        if self.ui:
            self.ui.display_point(self.point)

    def click(self, event_cb=None):
        """Execute as a callback when the pointer clicks on the 3D view.

        It should act as if the Enter key was pressed, or the OK button
        was pressed in the task panel.
        """
        if event_cb:
            event = event_cb.getEvent()
            # Only react to a left-button press.
            if (event.getState() != coin.SoMouseButtonEvent.DOWN
                    or event.getButton() != coin.SoMouseButtonEvent.BUTTON1):
                return
        if self.ui and self.point:
            # The accept function of the interface
            # should call the completed function
            # of the calling class (this one).
            self.ui.accept()

    def completed(self):
        """Execute when the command is terminated.

        We should remove the callbacks that were added to the 3D view
        and then close the task panel.
        """
        self.view.removeEventCallbackPivy(self.location,
                                          self.callback_move)
        self.view.removeEventCallbackPivy(self.mouse_event,
                                          self.callback_click)
        if Gui.Control.activeDialog():
            Gui.Control.closeDialog()
        super(PolarArray, self).finish()
# Register the command with FreeCAD so it appears in menus and toolbars.
Gui.addCommand('Draft_PolarArray', PolarArray())

## @}
| sanguinariojoe/FreeCAD | src/Mod/Draft/draftguitools/gui_polararray.py | Python | lgpl-2.1 | 6,066 |
# -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP module
# Copyright (C) 2010 Micronaet srl (<http://www.micronaet.it>)
#
# Italian OpenERP Community (<http://www.openerp-italia.com>)
#
#############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>). All Rights Reserved
# $Id$
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import osv, fields
class AccountInvoiceMultipartner(osv.osv):
    ''' Add more than one reference partner in account invoice
        (only in report document, not in journal entry)
    '''
    _inherit = 'account.invoice'

    # on change function:
    def onchange_extra_address(self, cr, uid, ids, extra_address, partner_id,
            context=None):
        ''' Restrict the selectable extra partners according to the chosen
            extra_address mode: with 'contact', only children of the current
            partner are offered; otherwise the domain is cleared and any
            stale selection is reset.
        '''
        res = {}
        if extra_address == 'contact' and partner_id:
            res['domain'] = {'partner_ids': [('parent_id', '=', partner_id)]}
        else:
            res['domain'] = {'partner_ids': []}
            res['value'] = {'partner_ids': False}  # clear previous selection
        return res

    _columns = {
        # Which kind of extra addresses are printed on the invoice report.
        'extra_address': fields.selection([
            ('none', 'None'),
            ('contact', 'Contact'),
            ('partner', 'Partner'), ],
            'Extra address', select=True, readonly=False, required=True),
        # Extra partners shown on the printed document only.
        'partner_ids': fields.many2many(
            'res.partner', 'invoice_partner_rel', 'invoice_id', 'partner_id',
            'Extra partner'),
    }

    _defaults = {
        'extra_address': lambda *a: 'none',
    }
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| Micronaet/micronaet-accounting | account_invoice_multipartner/multipartner.py | Python | agpl-3.0 | 2,506 |
# Copyright 2022 The jax3d Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for jax3d.utils.shape_validations."""
import einops
import jax.numpy as jnp
import jax3d.projects.nesf as jax3d
from jax3d.projects.nesf.utils import shape_validation
from jax3d.projects.nesf.utils.typing import Array, f32, i32 # pylint: disable=g-multiple-import
import pytest
@pytest.fixture
def shapeguard_scope():
  """Run the test inside a fresh dimension-tracking scope."""
  with shape_validation._ShapeTracker.track():
    yield
@pytest.mark.usefixtures('shapeguard_scope')
def test_assert_type():
  """Dtype/rank validation and dimension-value tracking across checks."""
  # Scalar int and float are accepted as-is
  i32[''].check(12)
  f32[''].check(12.)
  with pytest.raises(ValueError, match='Dtype do not match'):
    i32[''].check(12.)
  with pytest.raises(ValueError, match='Dtype do not match'):
    f32[''].check(12)
  # Number of dimensions should match and new dimensions are registered
  f32[''].check(jnp.zeros(()))
  f32['b'].check(jnp.zeros((2,)))
  f32['b h w'].check(jnp.zeros((2, 3, 4)))
  with pytest.raises(ValueError, match='Rank should be the same.'):
    f32['b'].check(jnp.zeros((2, 3)))
  # Recorded dimension values should be constant
  f32['w b h'].check(jnp.zeros((4, 2, 3)))
  with pytest.raises(ValueError, match='Expected b=2.'):
    f32['b'].check(jnp.zeros((3,)))
  with pytest.raises(ValueError, match='Expected w=4.'):
    f32['b h w'].check(jnp.zeros((2, 3, 10,)))
  with pytest.raises(TypeError, match='Expected .* array'):
    f32['b h w'].check(dict())
  # TODO(epot): There should be a cleaner API to check shapes values
  assert shape_validation._ShapeTracker.current().resolve_spec(
      'b h w') == '2 3 4'
@pytest.mark.usefixtures('shapeguard_scope')
def test_assert_type_incomplete():
  """Specs with missing dtype and/or missing shape validate only what they declare."""
  # No dtype and no shape
  Array.check(12)
  Array.check(jnp.zeros((1, 2, 3)))
  with pytest.raises(TypeError, match='Expected .* array'):
    Array.check({})
  # Shape but no dtype
  Array['h w c'].check(jnp.zeros((1, 2, 3)))
  with pytest.raises(ValueError, match='Rank should be the same.'):
    Array['h w c'].check(jnp.zeros((1, 2)))
  # Dtype but no shape
  f32.check(jnp.zeros((1, 2, 3)))
  with pytest.raises(ValueError, match='Dtype do not match'):
    f32.check(jnp.zeros((1, 2, 3), dtype=jnp.int32))
def test_assert_type_outside_scope():
  """check() must refuse to run without an active tracking scope."""
  with pytest.raises(AssertionError, match='Calling .* from outside .* scope'):
    f32[''].check(jnp.zeros((3,)))
def test_shape_valid():
  """The decorator validates argument/return specs per call, independently."""
  @jax3d.assert_typing
  def fn(x: f32['b h w c'], y: f32['']) -> f32['b h w']:
    return (x + y).mean(axis=-1)

  # 2 independent function calls can have different dimensions
  fn(jnp.zeros((1, 2, 3, 4)), 4.)
  fn(jnp.zeros((5, 6, 1, 2)), 7.)
  with pytest.raises(ValueError, match='Rank should be the same.'):
    fn(jnp.zeros((5, 6, 1)), 7.)
  with pytest.raises(ValueError, match='Dtype do not match'):
    fn(jnp.zeros((5, 6, 1, 2)), 7)
def test_shape_valid_args():
  """Shared dimension names must agree across arguments of a single call."""
  @jax3d.assert_typing
  def fn(x: f32['b h'], y: f32['b w']) -> None:
    del x, y
    return

  fn(jnp.zeros((1, 3)), jnp.zeros((1, 2)))
  with pytest.raises(ValueError, match='Expected b=1.'):
    fn(jnp.zeros((1, 3)), jnp.zeros((2, 2)))  # Inconsistent batch size
def test_shape_valid_inner():
  """Manual check() calls inside a decorated function share the call's scope."""
  @jax3d.assert_typing
  def fn(x: f32['b l']) -> f32['l b']:
    x = einops.rearrange(x, 'b l -> l b')
    f32['l b'].check(x)
    with pytest.raises(ValueError, match='Expected b=1.'):
      f32['b l'].check(x)
    return x

  assert fn(jnp.zeros((1, 2))).shape == (2, 1)
def test_shape_valid_nested():
  """Each (possibly recursive) call gets its own scope, so dims need not match."""
  @jax3d.assert_typing
  def fn(x: f32['h w c'], nest: bool = True) -> i32['']:
    if nest:
      # Nested call should also trigger error
      with pytest.raises(ValueError, match='Rank should be the same.'):
        fn(jnp.zeros((5,)), nest=False)
      # Nested calls use another scope, so dimensions do not need to match
      return 1 + fn(einops.rearrange(x, 'h w c -> c h w'), nest=False)
    else:
      return 1

  assert fn(jnp.zeros((1, 2, 3))) == 2
  assert fn(jnp.zeros((1, 2, 3)), nest=False) == 1
def test_shape_valid_bad_return_type():
  """Return values are validated against the annotated spec too."""
  @jax3d.assert_typing
  def fn(x: f32['batch length'], rearrange: str) -> f32['length batch']:
    return einops.rearrange(x, rearrange)

  assert fn(jnp.zeros((1, 2)), 'b l -> l b').shape == (2, 1)
  with pytest.raises(ValueError, match='Rank should be the same.'):
    fn(jnp.zeros((1, 2)), 'b l -> (l b)')
def test_shape_valid_args_kwargs():
  """Specs on *args and **kwargs apply to every positional/keyword value."""
  @jax3d.assert_typing
  def fn(*args: f32['b'], **kwargs: f32['']) -> int:
    return len(args) + len(kwargs)

  assert fn() == 0
  assert fn(jnp.zeros((1,)), jnp.zeros((1,)), a=4., b=jnp.array(3.)) == 4
  with pytest.raises(ValueError, match='Expected b=1.'):
    fn(jnp.zeros((1,)), jnp.zeros((2,)))
  with pytest.raises(ValueError, match='Rank should be the same.'):
    fn(a=1., b=jnp.zeros((2,)))
| google-research/jax3d | jax3d/projects/nesf/utils/shape_validation_test.py | Python | apache-2.0 | 5,313 |
import numpy as np
from bokeh.models import ColumnDataSource, DataRange1d, Plot, LinearAxis, Grid
from bokeh.models.markers import DiamondCross
from bokeh.io import curdoc, show
# Sample data: N points on a parabola with linearly increasing marker sizes.
N = 9
x = np.linspace(-2, 2, N)
y = x**2
sizes = np.linspace(10, 20, N)
source = ColumnDataSource(dict(x=x, y=y, sizes=sizes))

# Auto-ranging axes.
xdr = DataRange1d()
ydr = DataRange1d()

plot = Plot(
    title=None, x_range=xdr, y_range=ydr, plot_width=300, plot_height=300,
    h_symmetry=False, v_symmetry=False, min_border=0, toolbar_location=None)

# One DiamondCross marker per point; size is driven by the 'sizes' column.
glyph = DiamondCross(x="x", y="y", size="sizes", line_color="#386cb0", fill_color=None, line_width=2)
plot.add_glyph(source, glyph)

xaxis = LinearAxis()
plot.add_layout(xaxis, 'below')

yaxis = LinearAxis()
plot.add_layout(yaxis, 'left')

plot.add_layout(Grid(dimension=0, ticker=xaxis.ticker))
plot.add_layout(Grid(dimension=1, ticker=yaxis.ticker))

curdoc().add_root(plot)
show(plot)
| percyfal/bokeh | tests/glyphs/DiamondCross.py | Python | bsd-3-clause | 905 |
#!/usr/bin/python
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: aws_api_gateway
short_description: Manage AWS API Gateway APIs
description:
- Allows for the management of API Gateway APIs
- Normally you should give the api_id since there is no other
stable guaranteed unique identifier for the API. If you do
not give api_id then a new API will be create each time
this is run.
- Beware that there are very hard limits on the rate that
you can call API Gateway's REST API. You may need to patch
your boto. See https://github.com/boto/boto3/issues/876
and discuss with your AWS rep.
- swagger_file and swagger_text are passed directly on to AWS
transparently whilst swagger_dict is an ansible dict which is
converted to JSON before the API definitions are uploaded.
version_added: '2.4'
requirements: [ boto3 ]
options:
api_id:
description:
- The ID of the API you want to manage.
state:
description:
- NOT IMPLEMENTED Create or delete API - currently we always create.
default: present
choices: [ 'present', 'absent' ]
swagger_file:
description:
- JSON or YAML file containing swagger definitions for API.
Exactly one of swagger_file, swagger_text or swagger_dict must
be present.
swagger_text:
description:
- Swagger definitions for API in JSON or YAML as a string direct
from playbook.
swagger_dict:
description:
- Swagger definitions API ansible dictionary which will be
converted to JSON and uploaded.
stage:
description:
- The name of the stage the API should be deployed to.
deploy_desc:
description:
- Description of the deployment - recorded and visible in the
AWS console.
default: Automatic deployment by Ansible.
author:
- 'Michael De La Rue (@mikedlr)'
extends_documentation_fragment:
- aws
notes:
- A future version of this module will probably use tags or another
ID so that an API can be create only once.
- As an early work around an intermediate version will probably do
the same using a tag embedded in the API name.
'''
EXAMPLES = '''
# Update API resources for development
tasks:
- name: update API
aws_api_gateway:
api_id: 'abc123321cba'
state: present
swagger_file: my_api.yml
# update definitions and deploy API to production
tasks:
- name: deploy API
aws_api_gateway:
api_id: 'abc123321cba'
state: present
swagger_file: my_api.yml
stage: production
deploy_desc: Make auth fix available.
'''
RETURN = '''
output:
description: the data returned by put_restapi in boto3
returned: success
type: dict
sample:
'data':
{
"id": "abc123321cba",
"name": "MY REST API",
"createdDate": 1484233401
}
'''
import json
try:
import botocore
HAS_BOTOCORE = True
except ImportError:
HAS_BOTOCORE = False
from ansible.module_utils.basic import AnsibleModule, traceback
from ansible.module_utils.ec2 import (AWSRetry, HAS_BOTO3, ec2_argument_spec, get_aws_connection_info,
boto3_conn, camel_dict_to_snake_dict)
def main():
    """Entry point: parse module arguments, create/update or delete the
    API Gateway REST API and, when a stage is given, deploy it."""
    argument_spec = ec2_argument_spec()
    argument_spec.update(
        dict(
            api_id=dict(type='str', required=False),
            state=dict(type='str', default='present', choices=['present', 'absent']),
            swagger_file=dict(type='path', default=None, aliases=['src', 'api_file']),
            swagger_dict=dict(type='json', default=None),
            swagger_text=dict(type='str', default=None),
            stage=dict(type='str', default=None),
            deploy_desc=dict(type='str', default="Automatic deployment by Ansible."),
        )
    )

    # Only one way of supplying the swagger definition may be used at a time.
    mutually_exclusive = [['swagger_file', 'swagger_dict', 'swagger_text']]  # noqa: F841

    module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=False,
                           mutually_exclusive=mutually_exclusive)

    api_id = module.params.get('api_id')
    state = module.params.get('state')  # noqa: F841
    swagger_file = module.params.get('swagger_file')
    swagger_dict = module.params.get('swagger_dict')
    swagger_text = module.params.get('swagger_text')
    stage = module.params.get('stage')
    deploy_desc = module.params.get('deploy_desc')

    # check_mode = module.check_mode
    changed = False

    if not HAS_BOTO3:
        module.fail_json(msg='Python module "boto3" is missing, please install boto3')

    if not HAS_BOTOCORE:
        module.fail_json(msg='Python module "botocore" is missing, please install it')

    region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)

    try:
        client = boto3_conn(module, conn_type='client', resource='apigateway',
                            region=region, endpoint=ec2_url, **aws_connect_kwargs)
    except botocore.exceptions.NoRegionError:
        module.fail_json(msg="Region must be specified as a parameter, in "
                             "AWS_DEFAULT_REGION environment variable or in boto configuration file")
    except (botocore.exceptions.ValidationError, botocore.exceptions.ClientError) as e:
        fail_json_aws(module, e, msg="connecting to AWS")

    changed = True  # for now it will stay that way until we can sometimes avoid change
    conf_res = None
    dep_res = None
    del_res = None

    if state == "present":
        if api_id is None:
            # No stable identifier given: a brand new API is created on
            # every run (see the module notes).
            api_id = create_empty_api(module, client)
        api_data = get_api_definitions(module, swagger_file=swagger_file,
                                       swagger_dict=swagger_dict, swagger_text=swagger_text)
        conf_res, dep_res = ensure_api_in_correct_state(module, client, api_id=api_id,
                                                        api_data=api_data, stage=stage,
                                                        deploy_desc=deploy_desc)
    if state == "absent":
        del_res = delete_rest_api(module, client, api_id)

    exit_args = {"changed": changed, "api_id": api_id}

    if conf_res is not None:
        exit_args['configure_response'] = camel_dict_to_snake_dict(conf_res)
    if dep_res is not None:
        exit_args['deploy_response'] = camel_dict_to_snake_dict(dep_res)
    if del_res is not None:
        exit_args['delete_response'] = camel_dict_to_snake_dict(del_res)

    module.exit_json(**exit_args)
def get_api_definitions(module, swagger_file=None, swagger_dict=None, swagger_text=None):
apidata = None
if swagger_file is not None:
try:
with open(swagger_file) as f:
apidata = f.read()
except OSError as e:
msg = "Failed trying to read swagger file {}: {}".format(str(swagger_file), str(e))
module.fail_json(msg=msg, exception=traceback.format_exc())
if swagger_dict is not None:
apidata = json.dumps(swagger_dict)
if swagger_text is not None:
apidata = swagger_text
if apidata is None:
module.fail_json(msg='module error - failed to get API data')
return apidata
def create_empty_api(module, client):
    """
    creates a new empty API ready to be configured. The description is
    temporarily set to show the API as incomplete but should be
    updated when the API is configured.

    Returns the id of the newly created API.
    """
    desc = "Incomplete API creation by ansible aws_api_gateway module"
    try:
        awsret = create_api(client, name="ansible-temp-api", description=desc)
    except (botocore.exceptions.ClientError, botocore.exceptions.EndpointConnectionError) as e:
        fail_json_aws(module, e, msg="creating API")
    return awsret["id"]
def delete_rest_api(module, client, api_id):
    """
    Deletes the REST API identified by api_id, failing the Ansible module
    with a useful message if AWS rejects the request. Returns the raw
    delete response from boto3.
    """
    try:
        delete_response = delete_api(client, api_id=api_id)
    except (botocore.exceptions.ClientError, botocore.exceptions.EndpointConnectionError) as e:
        fail_json_aws(module, e, msg="deleting API {}".format(api_id))
    return delete_response
def ensure_api_in_correct_state(module, client, api_id=None, api_data=None, stage=None,
                                deploy_desc=None):
    """Make sure that we have the API configured and deployed as instructed.

    This function first configures the API correctly uploading the
    swagger definitions and then deploys those.  Configuration and
    deployment should be closely tied because there is only one set of
    definitions so if we stop, they may be updated by someone else and
    then we deploy the wrong configuration.

    Returns a (configure_response, deploy_response) tuple; the second item
    is None when no stage was requested.
    """
    configure_response = None
    try:
        configure_response = configure_api(client, api_data=api_data, api_id=api_id)
    except (botocore.exceptions.ClientError, botocore.exceptions.EndpointConnectionError) as e:
        fail_json_aws(module, e, msg="configuring API {}".format(api_id))

    deploy_response = None

    # Deployment only happens when a target stage was given.
    if stage:
        try:
            deploy_response = create_deployment(client, api_id=api_id, stage=stage,
                                                description=deploy_desc)
        except (botocore.exceptions.ClientError, botocore.exceptions.EndpointConnectionError) as e:
            msg = "deploying api {} to stage {}".format(api_id, stage)
            fail_json_aws(module, e, msg)

    return configure_response, deploy_response
# There is a PR open to merge fail_json_aws this into the standard module code;
# see https://github.com/ansible/ansible/pull/23882
def fail_json_aws(module, exception, msg=None):
    """call fail_json with processed exception

    function for converting exceptions thrown by AWS SDK modules,
    botocore, boto3 and boto, into nice error messages.
    """
    last_traceback = traceback.format_exc()

    # boto2-style exceptions carry a .message attribute; everything else
    # falls back to str(). A sentinel keeps a literal None message intact.
    _missing = object()
    except_msg = getattr(exception, 'message', _missing)
    if except_msg is _missing:
        except_msg = str(exception)

    message = except_msg if msg is None else '{}: {}'.format(msg, except_msg)

    # botocore ClientErrors expose the raw AWS response; include it when present.
    response = getattr(exception, 'response', None)

    if response is None:
        module.fail_json(msg=message, traceback=last_traceback)
    else:
        module.fail_json(msg=message, traceback=last_traceback,
                         **camel_dict_to_snake_dict(response))
retry_params = {"tries": 10, "delay": 5, "backoff": 1.2}
@AWSRetry.backoff(**retry_params)
def create_api(client, name=None, description=None):
    """Create a REST API, retrying on throttling errors.

    Honours the caller-supplied name (the old code ignored the parameter and
    always used the placeholder); falls back to the historic default when no
    name is given, so existing callers behave identically.
    """
    return client.create_rest_api(name=name or "ansible-temp-api", description=description)
@AWSRetry.backoff(**retry_params)
def delete_api(client, api_id=None):
    """Delete the REST API, retrying on throttling errors."""
    return client.delete_rest_api(restApiId=api_id)
@AWSRetry.backoff(**retry_params)
def configure_api(client, api_data=None, api_id=None, mode="overwrite"):
    """Upload the swagger definition to the API, retrying on throttling errors."""
    return client.put_rest_api(body=api_data, restApiId=api_id, mode=mode)
@AWSRetry.backoff(**retry_params)
def create_deployment(client, api_id=None, stage=None, description=None):
    """Deploy the API to *stage*, retrying on throttling errors."""
    # we can also get None as an argument so we don't do this as a default
    return client.create_deployment(restApiId=api_id, stageName=stage, description=description)
if __name__ == '__main__':
main()
| noroutine/ansible | lib/ansible/modules/cloud/amazon/aws_api_gateway.py | Python | gpl-3.0 | 11,663 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.