| code (stringlengths 3-1.05M) | repo_name (stringlengths 5-104) | path (stringlengths 4-251) | language (stringclasses, 1 value) | license (stringclasses, 15 values) | size (int64, 3-1.05M) |
|---|---|---|---|---|---|
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
from sos.plugins import Plugin, RedHatPlugin, DebianPlugin, UbuntuPlugin
class Nis(Plugin, RedHatPlugin, DebianPlugin, UbuntuPlugin):
"""Network information service
"""
plugin_name = 'nis'
profiles = ('identity', 'services')
files = ('/var/yp', '/etc/ypserv.conf')
packages = ('ypserv',)  # trailing comma keeps this a one-element tuple, not a string
def setup(self):
self.add_copy_spec([
"/etc/yp*.conf",
"/var/yp/*"
])
self.add_cmd_output("domainname")
# vim: set et ts=4 sw=4 :
| pierg75/pier-sosreport | sos/plugins/nis.py | Python | gpl-2.0 | 1,203 |
# -*- coding: utf-8 -*-
"""
Authors: Tim Hessels and Gonzalo Espinoza
UNESCO-IHE 2018
Contact: t.hessels@unesco-ihe.org
g.espinoza@unesco-ihe.org
Repository: https://github.com/wateraccounting/wa
Module: Collect/HiHydroSoil
Restrictions:
The data and this python file may not be distributed to others without
permission of the WA+ team, due to data restrictions of the HiHydroSoil developers.
Description:
This module downloads HiHydroSoil data from
ftp.wateraccounting.unesco-ihe.org. Use the HiHydroSoil function to
download and create monthly HiHydroSoil images in Gtiff format.
Examples:
from wa.Collect import HiHydroSoil
HiHydroSoil.ThetaSat_TopSoil(Dir='C:/Temp/', latlim=[-10, 30], lonlim=[-20, -10])
"""
from .ThetaSat_TopSoil import main as ThetaSat_TopSoil
__all__ = ['ThetaSat_TopSoil']
__version__ = '0.1'
| wateraccounting/wa | Collect/HiHydroSoil/__init__.py | Python | apache-2.0 | 830 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='UserAccount',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('user', models.OneToOneField(related_name='account', to=settings.AUTH_USER_MODEL)),
],
options={
'verbose_name': 'user account',
'verbose_name_plural': 'user accounts',
},
),
]
| klinger/volunteer_planner | accounts/migrations/0001_initial.py | Python | agpl-3.0 | 769 |
'''
bisect(sequence, item) => index -- returns the index at which the item would
be inserted, without modifying the sequence, as the example below shows.
'''
import bisect
list = [10, 20, 30]
print list
print bisect.bisect(list, 25)
print bisect.bisect(list, 15)
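# Added for contrast (a small sketch, not part of the original example):
# bisect.insort inserts the item and does modify the sequence in place,
# whereas bisect.bisect above only reports the insertion index.
bisect.insort(list, 25)
print list  # [10, 20, 25, 30]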
| iamweilee/pylearn | bisect-example-2.py | Python | mit | 205 |
import json
from random import randint
class Generator:
"""Generate aleatory data readable by Tartopum."""
def __init__(self, rows_len=5, min_row_len=3, max_row_len=15,
row_name="row-{}"):
self.rows_len = rows_len
self.min_row_len = min_row_len
self.max_row_len = max_row_len
self.row_name = row_name
def create_area(self, row, index, treatable=True):
area = {}
area["metadata"] = {
"row_index": row,
"row_name": self.row_name.format(row),
"area_index": index
}
area["covered"] = False
area["treatable"] = treatable
area["yardsticks"] = [
[self.generate_yardstick(), self.generate_yardstick()],
[self.generate_yardstick(), self.generate_yardstick()]
]
return area
def create_row(self, i):
areas = []
length = randint(self.min_row_len, self.max_row_len)
border = self.create_area(i, 0, False)
areas.append(border)
for j in range(1, length):
area = self.create_area(i, j)
areas.append(area)
border = self.create_area(i, length, False)
areas.append(border)
return areas
def generate_data(self):
data = {}
data["position"] = 0
data["buzzards"] = [[1, 2], [3, 4]]
data["areas"] = []
for i in range(self.rows_len):
row = self.create_row(i)
data["areas"] += row
return data
def generate_yardstick(self):
return randint(0, 100)
if __name__ == "__main__":
generator = Generator()
name = randint(0, 9999)
path = "./{}.json".format(name)
data = generator.generate_data()
# Save
with open(path, 'w') as outfile:
json.dump(data, outfile, indent = 4)
print(path + " saved.")
| tartopum/DataExamples | rows/json/generate.py | Python | mit | 2,012 |
# Copyright (c) 2016 Orange.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutron.db.migration.models import head
# pylint: disable=unused-import
import networking_bagpipe.db.sfc_db # noqa
def get_metadata():
return head.model_base.BASEV2.metadata
| openstack/networking-bagpipe-l2 | networking_bagpipe/db/models/head.py | Python | apache-2.0 | 814 |
#!/usr/bin/env python3
# ver 0.1 - coding python by Hyuntae Jung on 6/1/2017
import argparse
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
description='Initial coordinates of particles for MC simulation')
## args
#parser.add_argument('-i', '--input', default='init.ic', nargs='?',
# help='input file')
parser.add_argument('-na', '--nmola', nargs='?', type=int,
help='# particles of A')
parser.add_argument('-nb', '--nmolb', nargs='?', type=int,
help='# particles of B')
parser.add_argument('-r', '--ratio', default=5.0, nargs='?', type=float,
help='ratio of box-z/box-x (box-x = box-y)')
parser.add_argument('-sep', '--sep', default='NO', nargs='?', type=str,
help='pre-separation YES/NO')
parser.add_argument('-fr', '--frac', default=1.0, nargs='?', type=float,
help='number fraction of A in one phase if -sep YES')
parser.add_argument('-d', '--dens', nargs='?', type=float,
help='number density')
parser.add_argument('-sa', '--sizea', default=1.0, nargs='?', type=float,
help='diameter of A')
parser.add_argument('-sb', '--sizeb', default=1.0, nargs='?', type=float,
help='diameter of B')
parser.add_argument('-mt', '--maxtry', default=0, nargs='?', type=int,
help='attempts for random insertion (if zero, do lattice insertion)')
parser.add_argument('-fm', '--format', default='MC', nargs='?', type=str,
help='Save in fortran MC format (MC), .npz format (NPZ), or .gro format (GRO)')
parser.add_argument('-o', '--output', default='init', nargs='?', type=str,
help='output file name, without extension')
parser.add_argument('args', nargs=argparse.REMAINDER)
parser.add_argument('-v', '--version', action='version', version='%(prog)s 0.1')
# read args
args = parser.parse_args()
print(" input arguments: {0}".format(args))
# default for args
#args.input = args.input if args.input is not None else 'init.ic'
if args.sep != 'YES' and args.sep != 'NO':
raise ValueError("Wrong argument for pre-separation option")
if args.sep == 'NO' and args.frac != 1.0:
raise ValueError("-sep and -fr not matched or not set")
if args.sep == 'YES':
if args.nmola != args.nmolb:
raise ValueError("Not support the different #particles of A and B")
if args.format != 'MC' and args.format != 'NPZ' and args.format != 'GRO':
raise ValueError("Wrong argument for format!")
if args.format == 'MC':
args.output = args.output+'.ic'
elif args.format == 'NPZ':
args.output = args.output+'.npz'
elif args.format == 'GRO':
args.output = args.output+'.gro'
# numpy double precision
import numpy as np
import sys
sys.path.append('/home/htjung/Utility/python/')
import hjung
from hjung import *
## timer
start_proc, start_prof = hjung.time.init()
# determine box size
print("="*30)
ntot = args.nmola + args.nmolb
print("Total number of molecules = %s" %ntot)
boxl = (float(ntot)/args.dens/args.ratio)**(1.0/3.0)
box = np.array((boxl, boxl, boxl*args.ratio))
print("Box = %s" %(box))
# attempt insertion only for a Widom-Rowlinson mixture
coordinates = np.zeros((ntot, 3))
def overlap(new_index,coordinates,nmola,distance,box):
import numpy as np
for i in range(nmola):
dxyz = coordinates[i] - coordinates[new_index]
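# minimum image convention: wrap the pair separation into the periodic box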
dxyz = dxyz - box * np.around(dxyz/box)
if np.linalg.norm(dxyz) < distance:
return 1 # overlap!
#print("avoid overlap {}".format(new_index))
return 0 # success for insertion
print("="*30)
print("Start Insertion")
maxa = int(args.nmola*args.frac)
maxb = int(args.nmolb*args.frac)
if args.maxtry > 0:
# try random insertion
for i in range(args.nmola):
if args.sep == 'YES' and i < maxa:
# if you set specific fraction and pre-separation
coordinates[i] = [np.random.random_sample()*box[0],
np.random.random_sample()*box[1],
np.random.random_sample()*0.50*box[2]]
elif args.sep == 'YES' and i >= maxa:
coordinates[i] = [np.random.random_sample()*box[0],
np.random.random_sample()*box[1],
(np.random.random_sample()*0.50+0.50)*box[2]]
else:
# if you set random
coordinates[i] = [np.random.random_sample()*box[0],
np.random.random_sample()*box[1],
np.random.random_sample()*box[2]]
curr_index = args.nmola
ntry = 1
nmod = 10
while (curr_index < ntot):
if ntry%nmod == 0:
print("%d th random trials (%s/%s)th particle" %(ntry,curr_index,ntot))
if (ntry/nmod)%10 == 0:
nmod = nmod*10
if ntry > args.maxtry:
print("Hard to insert because ntry > maxtry.")
print("I made initial coordinates with %d out of %d molecules" %(curr_index-1,ntot))
break
if args.sep == 'YES' and curr_index < args.nmola+maxb:
coordinates[curr_index] = [np.random.random_sample()*box[0],
np.random.random_sample()*box[1],
(np.random.random_sample()*0.50+0.50)*box[2]]
elif args.sep == 'YES' and curr_index >= args.nmola+maxb:
coordinates[curr_index] = [np.random.random_sample()*box[0],
np.random.random_sample()*box[1],
np.random.random_sample()*0.50*box[2]]
else:
coordinates[curr_index] = [np.random.random_sample()*box[0],
np.random.random_sample()*box[1],
np.random.random_sample()*box[2]]
dist = 0.5*(args.sizea + args.sizeb)
success = overlap(curr_index,coordinates,args.nmola,dist,box)
if success == 0:
#print("succees {0}".format(curr_index))
#print("succees {} {} {}".format(coordinates[curr_index][0],coordinates[curr_index][1],coordinates[curr_index][2]))
curr_index = curr_index + 1
ntry = ntry + 1
else:
# try lattice insertion
maxsize = max(args.sizea,args.sizeb)
ncellx = np.int(np.floor(box[0]/maxsize))
ncelly = np.int(np.floor(box[1]/maxsize))
ncellz = np.int(np.floor(box[2]/maxsize))
ncell = ncellx*ncelly*ncellz
if ncell < ntot:
raise ValueError("Not possible to use lattice insertion because #particles > #cells")
occupy_cell = np.zeros((ncellx,ncelly,ncellz),dtype=int)
i = 0
ntry = 1
nmod = 10
print("Try Insertion of A")
while (i < args.nmola):
if ntry%nmod == 0:
print("%d th lattice trials (%s/%s)th particle" %(ntry,i,ntot))
if (ntry/nmod)%nmod == 0:
nmod = nmod*nmod
icx = np.trunc(np.random.random_sample()*box[0]/maxsize)
icy = np.trunc(np.random.random_sample()*box[1]/maxsize)
if args.sep == 'YES' and i < maxa:
icz = np.trunc(np.random.random_sample()*0.50*box[2]/maxsize)
elif args.sep == 'YES' and i >= maxa:
icz = np.trunc((np.random.random_sample()*0.50+0.50)*box[2]/maxsize)
else:
icz = np.trunc(np.random.random_sample()*box[2]/maxsize)
if icx < ncellx and icy < ncelly and icz < ncellz:
randx = (icx+0.5)*maxsize
randy = (icy+0.5)*maxsize
randz = (icz+0.5)*maxsize
coordinates[i] = [randx,randy,randz]
occupy_cell[np.int(icx)][np.int(icy)][np.int(icz)] = 1
i = i + 1
ntry = ntry + 1
curr_index = args.nmola
ntry = 1
nmod = 10
print("Try Insertion of B")
while (curr_index < ntot):
if ntry%nmod == 0:
print("%d th lattice trials (%s/%s)th particle" %(ntry,curr_index,ntot))
if (ntry/nmod)%nmod == 0:
nmod = nmod*nmod
icx = np.trunc(np.random.random_sample()*box[0]/maxsize)
icy = np.trunc(np.random.random_sample()*box[1]/maxsize)
if args.sep == 'YES' and curr_index < args.nmola+maxb:
icz = np.trunc((np.random.random_sample()*0.50+0.50)*box[2]/maxsize)
elif args.sep == 'YES' and curr_index >= args.nmola+maxb:
icz = np.trunc(np.random.random_sample()*0.50*box[2]/maxsize)
else:
icz = np.trunc(np.random.random_sample()*box[2]/maxsize)
randx = (icx+0.5)*maxsize
randy = (icy+0.5)*maxsize
randz = (icz+0.5)*maxsize
coordinates[curr_index] = [randx,randy,randz]
ntry = ntry + 1
if icx >= ncellx or icy >= ncelly or icz >= ncellz:
continue
elif occupy_cell[np.int(icx)][np.int(icy)][np.int(icz)] == 0:
curr_index = curr_index + 1
# save initial coordinates
print("="*30)
print("Saving OutputFile...")
if args.format == 'NPZ':
# array_argument = nmola, nmolb, coord, box
np.savez(args.output,nmola=args.nmola,nmolb=args.nmolb,coord=coordinates,box=box)
elif args.format == 'GRO':
# gromacs version
output_file = open(args.output, 'w')
output_file.write('# generated by initial.py\n')
output_file.write('%d\n' %(ntot))
for i in range(min(curr_index,ntot)):
if i < args.nmola:
output_file.write('{0:5d}{1:<5s}{2:5s}{3:5d}{4:8.3f}{5:8.3f}{6:8.3f}\n'.format(
i+1,'LJA','A',i+1,coordinates[i][0],coordinates[i][1],coordinates[i][2]))
else:
output_file.write('{0:5d}{1:<5s}{2:5s}{3:5d}{4:8.3f}{5:8.3f}{6:8.3f}\n'.format(
i+1,'LJB','B',i+1,coordinates[i][0],coordinates[i][1],coordinates[i][2]))
output_file.write('{0:10.5f}{1:10.5f}{2:10.5f}\n'.format(box[0],box[1],box[2]))
output_file.close()
elif args.format == 'MC':
# fortran MC version 'composite.ic'
output_file = open(args.output, 'w')
output_file.write('{} {} #NUMBER OF A particle \n'.format(args.nmola, 1))
output_file.write('{} {} #NUMBER OF B particle \n'.format(args.nmolb, 1))
output_file.write('{} #SIZE OF A \n'.format(args.sizea))
output_file.write('{} #SIZE OF B \n'.format(args.sizeb))
output_file.write('{} {} {} # BOX SIZE \n'.format(box[0],box[1],box[2]))
# coordinates of A, then B
for i in range(ntot):
if i < args.nmola:
output_file.write('{} {} A {} {} {}\n'.format(i,i,coordinates[i][0],coordinates[i][1],coordinates[i][2]))
else:
output_file.write('{} {} B {} {} {}\n'.format(i,i,coordinates[i][0],coordinates[i][1],coordinates[i][2]))
output_file.close()
else:
raise RuntimeError("Sometime wrong!")
## timer
hjung.time.end_print(start_proc, start_prof)
| jht0664/Utility_python_gromacs | MonteCarlo/python/initial.py | Python | mit | 9,683 |
# Copyright 2015 Red Hat Inc., Durham, North Carolina.
# All Rights Reserved.
#
# openscap-daemon is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 2.1 of the License, or
# (at your option) any later version.
#
# openscap-daemon is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
# You should have received a copy of the GNU Lesser General Public License
# along with openscap-daemon. If not, see <http://www.gnu.org/licenses/>.
#
# Authors:
# Martin Preisler <mpreisle@redhat.com>
from openscap_daemon import system
from openscap_daemon import EvaluationSpec
from openscap_daemon import dbus_utils
from openscap_daemon.cve_scanner.cve_scanner import Worker
from openscap_daemon import version
import dbus
import dbus.service
import threading
from datetime import datetime
import json
# Internal note: Python does not support unsigned long integer while dbus does,
# to avoid weird issues I just use 64bit integer in the interface signatures.
# "2^63-1 IDs should be enough for everyone."
class OpenSCAPDaemonDbus(dbus.service.Object):
def __init__(self, bus, config_file):
super(OpenSCAPDaemonDbus, self).__init__(bus, dbus_utils.OBJECT_PATH)
self.system = system.System(config_file)
self.system.load_tasks()
self.system_worker_thread = threading.Thread(
target=lambda: self.system.schedule_tasks_worker()
)
self.system_worker_thread.daemon = True
self.system_worker_thread.start()
@dbus.service.method(dbus_interface=dbus_utils.DBUS_INTERFACE,
in_signature="", out_signature="(nnn)")
def GetVersion(self):
"""Retrieves OpenSCAP-daemon version in a tuple format, suitable
for version comparisons.
"""
return (
version.VERSION_MAJOR,
version.VERSION_MINOR,
version.VERSION_PATCH
)
@dbus.service.method(dbus_interface=dbus_utils.DBUS_INTERFACE,
in_signature="", out_signature="as")
def GetSSGChoices(self):
"""Retrieves absolute paths of SSG source datastreams that are
available.
"""
return self.system.get_ssg_choices()
@dbus.service.method(dbus_interface=dbus_utils.DBUS_INTERFACE,
in_signature="ss", out_signature="a{ss}")
def GetProfileChoicesForInput(self, input_file, tailoring_file):
"""Figures out profile ID -> profile title mappings of all available
profiles given the input_file and (optionally) the tailoring_file.
"""
return self.system.get_profile_choices_for_input(
input_file, tailoring_file
)
@dbus.service.method(dbus_interface=dbus_utils.DBUS_INTERFACE,
in_signature="", out_signature="a(xsi)")
def GetAsyncActionsStatus(self):
return self.system.async.get_status()
@dbus.service.method(dbus_interface=dbus_utils.DBUS_INTERFACE,
in_signature="s", out_signature="(sssn)")
def EvaluateSpecXML(self, xml_source):
"""Deprecated, use EvaluateSpecXMLAsync instead
"""
spec = EvaluationSpec()
spec.load_from_xml_source(xml_source)
arf, stdout, stderr, exit_code = spec.evaluate(self.system.config)
return (arf, stdout, stderr, exit_code)
@dbus.service.method(dbus_interface=dbus_utils.DBUS_INTERFACE,
in_signature="s", out_signature="n")
def EvaluateSpecXMLAsync(self, xml_source):
spec = EvaluationSpec()
spec.load_from_xml_source(xml_source)
token = self.system.evaluate_spec_async(spec)
return token
@dbus.service.method(dbus_interface=dbus_utils.DBUS_INTERFACE,
in_signature="n", out_signature="(bsssn)")
def GetEvaluateSpecXMLAsyncResults(self, token):
try:
arf, stdout, stderr, exit_code = \
self.system.get_evaluate_spec_async_results(token)
return (True, arf, stdout, stderr, exit_code)
except system.ResultsNotAvailable:
return (False, "", "", "", 1)
@dbus.service.method(dbus_interface=dbus_utils.DBUS_INTERFACE,
in_signature="n", out_signature="")
def CancelEvaluateSpecXMLAsync(self, token):
# TODO
pass
@dbus.service.method(dbus_interface=dbus_utils.DBUS_INTERFACE,
in_signature="", out_signature="ax")
def ListTaskIDs(self):
"""Returns a list of IDs of tasks that System has loaded from config
files.
"""
return self.system.list_task_ids()
@dbus.service.method(dbus_interface=dbus_utils.DBUS_INTERFACE,
in_signature="xs", out_signature="")
def SetTaskTitle(self, task_id, title):
"""Set title of existing task with given ID.
The change is persistent after the function returns.
"""
return self.system.set_task_title(task_id, title)
@dbus.service.method(dbus_interface=dbus_utils.DBUS_INTERFACE,
in_signature="x", out_signature="s")
def GetTaskTitle(self, task_id):
"""Retrieves title of task with given ID.
"""
return self.system.get_task_title(task_id)
@dbus.service.method(dbus_interface=dbus_utils.DBUS_INTERFACE,
in_signature="x", out_signature="s")
def GenerateGuideForTask(self, task_id):
"""Generates and returns HTML guide for a task with given ID.
"""
return self.system.generate_guide_for_task(task_id)
@dbus.service.method(dbus_interface=dbus_utils.DBUS_INTERFACE,
in_signature="x", out_signature="")
def RunTaskOutsideSchedule(self, task_id):
"""Given task will be run as soon as possible without affecting its
schedule. This feature is useful mainly for testing purposes.
"""
return self.system.run_task_outside_schedule(task_id)
@dbus.service.method(dbus_interface=dbus_utils.DBUS_INTERFACE,
in_signature="", out_signature="x")
def CreateTask(self):
"""Creates a new task with empty contents, the task is created
in a disabled state so it won't be run.
The task is not persistent until some of its attributes are changed.
Empty tasks are worthless, so we don't save them until they have at
least some data.
"""
return self.system.create_task()
@dbus.service.method(dbus_interface=dbus_utils.DBUS_INTERFACE,
in_signature="xb", out_signature="")
def RemoveTask(self, task_id, remove_results):
"""Removes task with given ID and deletes its config file. The task has
to be disabled, else the operation fails.
The change is persistent after the function returns.
"""
return self.system.remove_task(task_id, remove_results)
@dbus.service.method(dbus_interface=dbus_utils.DBUS_INTERFACE,
in_signature="xb", out_signature="")
def SetTaskEnabled(self, task_id, enabled):
"""Sets enabled flag of an existing task with given ID.
The change is persistent after the function returns.
"""
return self.system.set_task_enabled(task_id, enabled)
@dbus.service.method(dbus_interface=dbus_utils.DBUS_INTERFACE,
in_signature="x", out_signature="b")
def GetTaskEnabled(self, task_id):
"""Retrieves the enabled flag of an existing task with given ID.
"""
return self.system.get_task_enabled(task_id)
@dbus.service.method(dbus_interface=dbus_utils.DBUS_INTERFACE,
in_signature="xs", out_signature="")
def SetTaskTarget(self, task_id, target):
"""Set target of existing task with given ID.
The change is persistent after the function returns.
"""
return self.system.set_task_target(task_id, target)
@dbus.service.method(dbus_interface=dbus_utils.DBUS_INTERFACE,
in_signature="x", out_signature="s")
def GetTaskTarget(self, task_id):
"""Retrieves target of existing task with given ID.
"""
return self.system.get_task_target(task_id)
@dbus.service.method(dbus_interface=dbus_utils.DBUS_INTERFACE,
in_signature="x", out_signature="x")
def GetTaskCreatedTimestamp(self, task_id):
"""Get timestamp of task creation
"""
return self.system.get_task_created_timestamp(task_id)
@dbus.service.method(dbus_interface=dbus_utils.DBUS_INTERFACE,
in_signature="x", out_signature="x")
def GetTaskModifiedTimestamp(self, task_id):
"""Get timestamp of task modification
"""
return self.system.get_task_modified_timestamp(task_id)
@dbus.service.method(dbus_interface=dbus_utils.DBUS_INTERFACE,
in_signature="xs", out_signature="")
def SetTaskInput(self, task_id, input_):
"""Set input of existing task with given ID.
input can be an absolute file path or the XML source itself; this is
autodetected.
The change is persistent after the function returns.
"""
return self.system.set_task_input(
task_id, input_ if input_ != "" else None
)
@dbus.service.method(dbus_interface=dbus_utils.DBUS_INTERFACE,
in_signature="xs", out_signature="")
def SetTaskTailoring(self, task_id, tailoring):
"""Set tailoring of existing task with given ID.
tailoring can be an absolute file path or the XML source itself; this is
autodetected.
The change is persistent after the function returns.
"""
return self.system.set_task_tailoring(
task_id, tailoring if tailoring != "" else None
)
@dbus.service.method(dbus_interface=dbus_utils.DBUS_INTERFACE,
in_signature="xs", out_signature="")
def SetTaskProfileID(self, task_id, profile_id):
"""Set profile ID of existing task with given ID.
The change is persistent after the function returns.
"""
return self.system.set_task_profile_id(task_id, profile_id)
@dbus.service.method(dbus_interface=dbus_utils.DBUS_INTERFACE,
in_signature="xb", out_signature="")
def SetTaskOnlineRemediation(self, task_id, online_remediation):
"""Sets whether online remediation of existing task with given ID
is enabled.
The change is persistent after the function returns.
"""
return self.system.set_task_online_remediation(
task_id, online_remediation
)
@dbus.service.method(dbus_interface=dbus_utils.DBUS_INTERFACE,
in_signature="xs", out_signature="")
def SetTaskScheduleNotBefore(self, task_id, schedule_not_before_str):
"""Sets time when the task is next scheduled to run. The time is passed
as a string in format YYYY-MM-DDTHH:MM in UTC with no timezone info!
Example: 2015-05-14T13:49
The change is persistent after the function returns.
"""
schedule_not_before = datetime.strptime(
schedule_not_before_str,
"%Y-%m-%dT%H:%M"
)
return self.system.set_task_schedule_not_before(
task_id, schedule_not_before
)
@dbus.service.method(dbus_interface=dbus_utils.DBUS_INTERFACE,
in_signature="xx", out_signature="")
def SetTaskScheduleRepeatAfter(self, task_id, schedule_repeat_after):
"""Sets number of hours after which the task should be repeated.
For example 24 for daily tasks, 24*7 for weekly tasks, ...
The change is persistent after the function returns.
"""
return self.system.set_task_schedule_repeat_after(
task_id, schedule_repeat_after
)
@dbus.service.method(dbus_interface=dbus_utils.DBUS_INTERFACE,
in_signature="x", out_signature="ax")
def GetTaskResultIDs(self, task_id):
"""Retrieves list of available task result IDs.
"""
return self.system.get_task_result_ids(task_id)
@dbus.service.method(dbus_interface=dbus_utils.DBUS_INTERFACE,
in_signature="xx", out_signature="x")
def GetResultCreatedTimestamp(self, task_id, result_id):
"""Return timestamp of result creation
"""
return self.system.get_task_result_created_timestamp(task_id, result_id)
@dbus.service.method(dbus_interface=dbus_utils.DBUS_INTERFACE,
in_signature="xx", out_signature="s")
def GetXMLOfTaskResult(self, task_id, result_id):
"""Retrieves full XML of result of given task.
This can be an ARF or OVAL result file, depending on the task's
evaluation mode.
"""
return self.system.get_xml_of_task_result(task_id, result_id)
@dbus.service.method(dbus_interface=dbus_utils.DBUS_INTERFACE,
in_signature="xx", out_signature="s")
def GetARFOfTaskResult(self, task_id, result_id):
"""Retrieves full ARF of result of given task.
Deprecated, use GetXMLOfTaskResult instead.
"""
return self.system.get_xml_of_task_result(task_id, result_id)
@dbus.service.method(dbus_interface=dbus_utils.DBUS_INTERFACE,
in_signature="x", out_signature="")
def RemoveTaskResults(self, task_id):
"""Remove all results of given task.
"""
return self.system.remove_task_results(task_id)
@dbus.service.method(dbus_interface=dbus_utils.DBUS_INTERFACE,
in_signature="xx", out_signature="")
def RemoveTaskResult(self, task_id, result_id):
"""Remove result of given task.
"""
return self.system.remove_task_result(task_id, result_id)
@dbus.service.method(dbus_interface=dbus_utils.DBUS_INTERFACE,
in_signature="xx", out_signature="s")
def GetStdOutOfTaskResult(self, task_id, result_id):
"""Retrieves full stdout of result of given task.
"""
return self.system.get_stdout_of_task_result(task_id, result_id)
@dbus.service.method(dbus_interface=dbus_utils.DBUS_INTERFACE,
in_signature="xx", out_signature="s")
def GetStdErrOfTaskResult(self, task_id, result_id):
"""Retrieves full stderr of result of given task.
"""
return self.system.get_stderr_of_task_result(task_id, result_id)
@dbus.service.method(dbus_interface=dbus_utils.DBUS_INTERFACE,
in_signature="xx", out_signature="i")
def GetExitCodeOfTaskResult(self, task_id, result_id):
"""Retrieves exit code of result of given task.
"""
return self.system.get_exit_code_of_task_result(task_id, result_id)
@dbus.service.method(dbus_interface=dbus_utils.DBUS_INTERFACE,
in_signature="xx", out_signature="s")
def GenerateReportForTaskResult(self, task_id, result_id):
"""Generates and returns HTML report for report of given task.
"""
return self.system.generate_report_for_task_result(task_id, result_id)
@dbus.service.method(dbus_interface=dbus_utils.DBUS_INTERFACE, in_signature='s',
out_signature='s')
def inspect_container(self, cid):
"""Returns inspect data of a container.
Used by `atomic scan`. Do not break this interface!
"""
import docker
docker_conn = docker.Client()
inspect_data = docker_conn.inspect_container(cid)
return json.dumps(inspect_data)
@dbus.service.method(dbus_interface=dbus_utils.DBUS_INTERFACE, in_signature='s',
out_signature='s')
def inspect_image(self, iid):
"""Returns inspect data of an image.
Used by `atomic scan`. Do not break this interface!
"""
import docker
docker_conn = docker.Client()
inspect_data = docker_conn.inspect_image(iid)
return json.dumps(inspect_data)
@dbus.service.method(dbus_interface=dbus_utils.DBUS_INTERFACE, out_signature='s')
def images(self):
"""Used by `atomic scan`. Do not break this interface!
"""
import docker
docker_conn = docker.Client()
images = docker_conn.images(all=True)
return json.dumps(images)
@dbus.service.method(dbus_interface=dbus_utils.DBUS_INTERFACE, out_signature='s')
def containers(self):
"""Used by `atomic scan`. Do not break this interface!
"""
import docker
docker_conn = docker.Client()
cons = docker_conn.containers(all=True)
return json.dumps(cons)
@staticmethod
def _parse_only_cache(config, fetch_cve):
if fetch_cve == 2:
return config.fetch_cve
elif fetch_cve == 1:
return True
elif fetch_cve == 0:
return False
else:
raise RuntimeError("Invalid value %i for fetch_cve" % (fetch_cve))
@dbus.service.method(dbus_interface=dbus_utils.DBUS_INTERFACE,
in_signature='bbiy', out_signature='s')
def scan_containers(self, onlyactive, allcontainers, number, fetch_cve=2):
"""fetch_cve -
0 to enable CVE fetch
1 to disable CVE fetch
2 to use defaults from oscapd config file
"""
worker = Worker(onlyactive=onlyactive, allcontainers=allcontainers,
number=number,
fetch_cve=self._parse_only_cache(self.system.config, int(fetch_cve)),
fetch_cve_url=self.system.config.fetch_cve_url)
return_json = worker.start_application()
return json.dumps(return_json)
@dbus.service.method(dbus_interface=dbus_utils.DBUS_INTERFACE, in_signature='bbiy',
out_signature='s')
def scan_images(self, allimages, images, number, fetch_cve=2):
"""fetch_cve -
0 to enable CVE fetch
1 to disable CVE fetch
2 to use defaults from oscapd config file
"""
worker = Worker(allimages=allimages, images=images,
number=number,
fetch_cve=self._parse_only_cache(self.system.config, int(fetch_cve)),
fetch_cve_url=self.system.config.fetch_cve_url)
return_json = worker.start_application()
return json.dumps(return_json)
@dbus.service.method(dbus_interface=dbus_utils.DBUS_INTERFACE,
in_signature='asiy', out_signature='s')
def scan_list(self, scan_list, number, fetch_cve=2):
"""fetch_cve -
0 to enable CVE fetch
1 to disable CVE fetch
2 to use defaults from oscapd config file
Used by `atomic scan`. Do not break this interface!
Deprecated, please use CVEScanListAsync
"""
worker = Worker(scan=scan_list, number=number,
fetch_cve=self._parse_only_cache(self.system.config, int(fetch_cve)),
fetch_cve_url=self.system.config.fetch_cve_url)
return_json = worker.start_application()
return json.dumps(return_json)
@dbus.service.method(dbus_interface=dbus_utils.DBUS_INTERFACE,
in_signature='asiy', out_signature='n')
def CVEScanListAsync(self, scan_list, number, fetch_cve):
worker = Worker(
scan=scan_list, number=number,
fetch_cve=self._parse_only_cache(self.system.config, int(fetch_cve)),
fetch_cve_url=self.system.config.fetch_cve_url
)
return self.system.evaluate_cve_scanner_worker_async(worker)
@dbus.service.method(dbus_interface=dbus_utils.DBUS_INTERFACE,
in_signature="n", out_signature="(bs)")
def GetCVEScanListAsyncResults(self, token):
try:
json_results = \
self.system.get_evaluate_cve_scanner_worker_async_results(token)
return (True, json.dumps(json_results))
except system.ResultsNotAvailable:
return (False, "")
@dbus.service.method(dbus_interface=dbus_utils.DBUS_INTERFACE,
in_signature="n", out_signature="")
def CancelCVEScanListAsync(self, token):
# TODO
pass
| pthierry38/openscap-daemon | openscap_daemon/dbus_daemon.py | Python | lgpl-2.1 | 20,907 |
'''
Tests various branching and search strategies. Compares number of LPs solved
for different branching and searching strategies.
The result of the script is as follows (until python decides to change its
random number generator).
problem | branching strategy | search strategy | num lp
---------------------------------------------------------------
(10, 10, 0) | Most Fraction | Depth First | 7
(10, 10, 0) | Most Fraction | Best First | 5
(10, 10, 0) | Most Fraction | Best Estimate | 5
(10, 10, 0) | Fixed Branching | Depth First | 7
(10, 10, 0) | Fixed Branching | Best First | 5
(10, 10, 0) | Fixed Branching | Best Estimate | 5
(10, 10, 0) | Pseudocost Branching | Depth First | 7
(10, 10, 0) | Pseudocost Branching | Best First | 5
(10, 10, 0) | Pseudocost Branching | Best Estimate | 5
(10, 10, 1) | Most Fraction | Depth First | 9
(10, 10, 1) | Most Fraction | Best First | 9
(10, 10, 1) | Most Fraction | Best Estimate | 9
(10, 10, 1) | Fixed Branching | Depth First | 9
(10, 10, 1) | Fixed Branching | Best First | 9
(10, 10, 1) | Fixed Branching | Best Estimate | 9
(10, 10, 1) | Pseudocost Branching | Depth First | 9
(10, 10, 1) | Pseudocost Branching | Best First | 9
(10, 10, 1) | Pseudocost Branching | Best Estimate | 9
(20, 10, 2) | Most Fraction | Depth First | 37
(20, 10, 2) | Most Fraction | Best First | 33
(20, 10, 2) | Most Fraction | Best Estimate | 33
(20, 10, 2) | Fixed Branching | Depth First | 35
(20, 10, 2) | Fixed Branching | Best First | 31
(20, 10, 2) | Fixed Branching | Best Estimate | 31
(20, 10, 2) | Pseudocost Branching | Depth First | 23
(20, 10, 2) | Pseudocost Branching | Best First | 23
(20, 10, 2) | Pseudocost Branching | Best Estimate | 23
(20, 10, 3) | Most Fraction | Depth First | 41
(20, 10, 3) | Most Fraction | Best First | 21
(20, 10, 3) | Most Fraction | Best Estimate | 24
(20, 10, 3) | Fixed Branching | Depth First | 57
(20, 10, 3) | Fixed Branching | Best First | 39
(20, 10, 3) | Fixed Branching | Best Estimate | 39
(20, 10, 3) | Pseudocost Branching | Depth First | 31
(20, 10, 3) | Pseudocost Branching | Best First | 21
(20, 10, 3) | Pseudocost Branching | Best Estimate | 25
(30, 20, 4) | Most Fraction | Depth First | 95
(30, 20, 4) | Most Fraction | Best First | 95
(30, 20, 4) | Most Fraction | Best Estimate | 95
(30, 20, 4) | Fixed Branching | Depth First | 145
(30, 20, 4) | Fixed Branching | Best First | 145
(30, 20, 4) | Fixed Branching | Best Estimate | 145
(30, 20, 4) | Pseudocost Branching | Depth First | 69
(30, 20, 4) | Pseudocost Branching | Best First | 77
(30, 20, 4) | Pseudocost Branching | Best Estimate | 83
(30, 20, 5) | Most Fraction | Depth First | 181
(30, 20, 5) | Most Fraction | Best First | 181
(30, 20, 5) | Most Fraction | Best Estimate | 181
(30, 20, 5) | Fixed Branching | Depth First | 159
(30, 20, 5) | Fixed Branching | Best First | 159
(30, 20, 5) | Fixed Branching | Best Estimate | 159
(30, 20, 5) | Pseudocost Branching | Depth First | 113
(30, 20, 5) | Pseudocost Branching | Best First | 111
(30, 20, 5) | Pseudocost Branching | Best Estimate | 113
(40, 20, 6) | Most Fraction | Depth First | 323
(40, 20, 6) | Most Fraction | Best First | 181
(40, 20, 6) | Most Fraction | Best Estimate | 209
(40, 20, 6) | Fixed Branching | Depth First | 949
(40, 20, 6) | Fixed Branching | Best First | 525
(40, 20, 6) | Fixed Branching | Best Estimate | 562
(40, 20, 6) | Pseudocost Branching | Depth First | 177
(40, 20, 6) | Pseudocost Branching | Best First | 127
(40, 20, 6) | Pseudocost Branching | Best Estimate | 151
(40, 20, 7) | Most Fraction | Depth First | 145
(40, 20, 7) | Most Fraction | Best First | 71
(40, 20, 7) | Most Fraction | Best Estimate | 74
(40, 20, 7) | Fixed Branching | Depth First | 111
(40, 20, 7) | Fixed Branching | Best First | 81
(40, 20, 7) | Fixed Branching | Best Estimate | 81
(40, 20, 7) | Pseudocost Branching | Depth First | 81
(40, 20, 7) | Pseudocost Branching | Best First | 55
(40, 20, 7) | Pseudocost Branching | Best Estimate | 51
(40, 30, 8) | Most Fraction | Depth First | 691
(40, 30, 8) | Most Fraction | Best First | 411
(40, 30, 8) | Most Fraction | Best Estimate | 424
(40, 30, 8) | Fixed Branching | Depth First | 1083
(40, 30, 8) | Fixed Branching | Best First | 393
(40, 30, 8) | Fixed Branching | Best Estimate | 393
(40, 30, 8) | Pseudocost Branching | Depth First | 395
(40, 30, 8) | Pseudocost Branching | Best First | 167
(40, 30, 8) | Pseudocost Branching | Best Estimate | 253
'''
from coinor.grumpy import BBTree
# import branching strategies
from coinor.grumpy import MOST_FRACTIONAL, FIXED_BRANCHING, PSEUDOCOST_BRANCHING
# import searching strategies
from coinor.grumpy import DEPTH_FIRST, BEST_FIRST, BEST_ESTIMATE
import sys
import math
branch_strategy = [MOST_FRACTIONAL, FIXED_BRANCHING, PSEUDOCOST_BRANCHING]
search_strategy = [DEPTH_FIRST, BEST_FIRST, BEST_ESTIMATE]
# branching strategy string dictionary for naming output files
bs_dict = {MOST_FRACTIONAL:"most_fractional", FIXED_BRANCHING:"fixed_b", PSEUDOCOST_BRANCHING:"pseudocost_b"}
# search strategy string dictionary for naming output files
ss_dict ={DEPTH_FIRST:"depthfirst", BEST_FIRST:"bestfirst", BEST_ESTIMATE:"bestestimate"}
# test problem, (num_vars,num_cons,seed)
problem = [(10,10,0),
(10,10,1),
(10,10,2),
(10,10,3),
(20,10,4),
(20,10,5),
(20,10,6),
(20,10,7),
(30,20,8),
(30,20,9),
(30,20,10),
(30,20,11),
(40,20,12),
(40,20,13),
(40,30,14)
]
# number of LPs solved for each problem
num_lp = {}
if __name__=='__main__':
for p in problem:
var, con, seed = p
bt = BBTree()
CONSTRAINTS, VARIABLES, OBJ, MAT, RHS = bt.GenerateRandomMIP(numVars=var,
numCons=con,
rand_seed=seed)
num_lp[p] = {}
for b in branch_strategy:
for s in search_strategy:
# create a new object before each branch and bound call
bt = BBTree()
# solve using BB
solution, bb_optimal = bt.BranchAndBound(CONSTRAINTS, VARIABLES, OBJ,
MAT, RHS,
branch_strategy = b,
search_strategy = s)
# keep number of LPs solved.
num_lp[p][(b,s)] = bt._lp_count
# print table
print 'problem | branching strategy | search strategy | num lp'
print '---------------------------------------------------------------'
for p in problem:
for b in branch_strategy:
for s in search_strategy:
print str(p).ljust(10),
print '|',
print str(b).ljust(20),
print '|',
print str(s).ljust(17),
print '|',
print num_lp[p][(b,s)]
for b in branch_strategy:
for s in search_strategy:
filename = bs_dict[b] + "_" + ss_dict[s]
print "writing output file ", filename, "..."
f = open(filename, "w")
f.write("#problem".ljust(15))
f.write("num lp".ljust(10))
f.write("\n")
for p in problem:
f.write(str(p).ljust(15))
f.write(str(num_lp[p][(b,s)]).ljust(10))
f.write("\n")
f.close()
| tkralphs/GrUMPy | test/test_strategies.py | Python | epl-1.0 | 8,386 |
#!/usr/bin/python2.5
# Copyright 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from datetime import datetime
import simplejson
import sys
from model import *
from utils import *
import reveal
class Admin(Handler):
# After a subdomain is deactivated, we still need the admin page to be
# accessible so we can edit its settings.
ignore_deactivation = True
def get(self):
user = users.get_current_user()
simplejson.encoder.FLOAT_REPR = str
encoder = simplejson.encoder.JSONEncoder(ensure_ascii=False)
config_json = dict((name, encoder.encode(self.config[name]))
for name in self.config.keys())
self.render('templates/admin.html', user=user,
subdomains=Subdomain.all(),
config=self.config, config_json=config_json,
start_url=self.get_start_url(),
login_url=users.create_login_url(self.request.url),
logout_url=users.create_logout_url(self.request.url),
id=self.env.domain + '/person.')
def post(self):
if self.params.operation == 'delete':
# Redirect to the deletion handler with a valid signature.
action = ('delete', str(self.params.id))
self.redirect('/delete', id=self.params.id,
signature=reveal.sign(action))
elif self.params.operation == 'subdomain_create':
Subdomain(key_name=self.params.subdomain_new).put()
config.set_for_subdomain( # Provide some defaults.
self.params.subdomain_new,
language_menu_options=['en', 'fr'],
subdomain_titles={'en': 'Earthquake', 'fr': u'S\xe9isme'},
keywords='person finder, people finder, person, people, ' +
'crisis, survivor, family',
use_family_name=True,
use_postal_code=True,
min_query_word_length=2,
map_default_zoom=6,
map_default_center=[0, 0],
map_size_pixels=[400, 280],
deactivated=False,
deactivation_message_html=''
)
self.redirect('/admin', subdomain=self.params.subdomain_new)
elif self.params.operation == 'subdomain_save':
values = {}
for name in [ # These settings are all entered in JSON.
'language_menu_options', 'subdomain_titles',
'use_family_name', 'family_name_first', 'use_postal_code',
'min_query_word_length', 'map_default_zoom',
'map_default_center', 'map_size_pixels',
'read_auth_key_required', 'deactivated'
]:
try:
values[name] = simplejson.loads(self.request.get(name))
except:
return self.error(
400, 'The setting for %s was not valid JSON.' % name)
for name in ['keywords', 'deactivation_message_html']:
# These settings are literal strings (not JSON).
values[name] = self.request.get(name)
config.set_for_subdomain(self.subdomain, **values)
self.redirect('/admin', subdomain=self.subdomain)
if __name__ == '__main__':
run(('/admin', Admin))
| istevens/personfinder-search-api | app/admin.py | Python | apache-2.0 | 3,881 |
# -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2016-07-04 15:11
# flake8: noqa
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Addon',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=100)),
('version', models.CharField(max_length=32)),
('ftp_url', models.URLField()),
],
),
migrations.CreateModel(
name='AddonGroup',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('channel_name', models.CharField(max_length=100)),
('browser_version', models.CharField(max_length=32)),
('addons', models.ManyToManyField(related_name='groups', to='addons.Addon')),
],
),
]
| rehandalal/morgoth | morgoth/addons/migrations/0001_initial.py | Python | mpl-2.0 | 1,129 |
# Generated by Django 1.11.15 on 2019-01-24 15:49
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('course_metadata', '0146_remove_log_queries_switch'),
]
operations = [
migrations.AddField(
model_name='degree',
name='hubspot_lead_capture_form_id',
field=models.CharField(help_text='The Hubspot form ID for the lead capture form', max_length=128, null=True),
),
]
| edx/course-discovery | course_discovery/apps/course_metadata/migrations/0147_degree_hubspot_lead_capture_form_id.py | Python | agpl-3.0 | 495 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Author: Thomas Beucher
Module: ArmParameters
Description: -Defines all arm parameters
-this 3R arm model uses three joints (shoulder, elbow, hand) and eight muscles
'''
import numpy as np
from GlobalVariables import pathWorkingDirectory
class ArmParameters:
'''
class ArmParameters
'''
def __init__(self):
'''
Initializes the class
'''
self.pathSetupFile = pathWorkingDirectory + "/ArmModel/Setup/Arm.params"
self.readSetupFile()
self.AMatrix()
self.BMatrix()
self.readStops()
def readSetupFile(self):
'''
Reads the setup file
'''
with open(self.pathSetupFile, "r") as file:
alls = file.read()
#Split to read line by line
allsByLign = alls.split("\n")
#line 1, Arm length
self.l1 = float((allsByLign[0].split(":"))[1])
#line 2, ForeArm length
self.l2 = float((allsByLign[1].split(":"))[1])
#line 3, Hand length
self.l3 = float((allsByLign[2].split(":"))[1])
#line 4, Arm mass
self.m1 = float((allsByLign[3].split(":"))[1])
#line 5, ForeArm mass
self.m2 = float((allsByLign[4].split(":"))[1])
#line 6, Hand mass
self.m3 = float((allsByLign[5].split(":"))[1])
#line 7, Distance from the axis of joint 1 to its center of mass
self.s1 = float((allsByLign[6].split(":"))[1])
#line 8, Distance from the axis of joint 2 to its center of mass
self.s2 = float((allsByLign[7].split(":"))[1])
#line 9, Distance from the axis of joint 3 to its center of mass
self.s3 = float((allsByLign[8].split(":"))[1])
#line 10, Arm inertia
self.i1 = float((allsByLign[9].split(":"))[1])
#line 11, ForeArm inertia
self.i2 = float((allsByLign[10].split(":"))[1])
#line 12, Hand inertia
self.i3 = float((allsByLign[11].split(":"))[1])
def BMatrix(self):
'''
Defines the damping matrix B
'''
with open(self.pathSetupFile, "r") as file:
alls = file.read()
allsByLign = alls.split("\n")
#line 13, Damping term b1
b1 = float((allsByLign[12].split(":"))[1])
#line 14, Damping term b2
b2 = float((allsByLign[13].split(":"))[1])
#line 15, Damping term b3
b3 = float((allsByLign[14].split(":"))[1])
#line 16, Damping term b4
b4 = float((allsByLign[15].split(":"))[1])
#line 17, Damping term b5
b5 = float((allsByLign[16].split(":"))[1])
#line 18, Damping term b6
b6 = float((allsByLign[17].split(":"))[1])
#line 19, Damping term b7
b7 = float((allsByLign[18].split(":"))[1])
#line 20, Damping term b8
b8 = float((allsByLign[19].split(":"))[1])
#line 21, Damping term b9
b9 = float((allsByLign[20].split(":"))[1])
#matrix definition
self.B = np.array([[b1,b2,b3],[b4,b5,b6],[b7,b8,b9]])
def AMatrix(self):
'''
Defines the moment arm matrix A
'''
with open(self.pathSetupFile, "r") as file:
alls = file.read()
allsByLign = alls.split("\n")
#line 22, Moment arm matrix, a1
a1 = float((allsByLign[21].split(":"))[1])
#line 23, Moment arm matrix, a2
a2 = float((allsByLign[22].split(":"))[1])
#line 24, Moment arm matrix, a3
a3 = float((allsByLign[23].split(":"))[1])
#line 25, Moment arm matrix, a4
a4 = float((allsByLign[24].split(":"))[1])
#line 26, Moment arm matrix, a5
a5 = float((allsByLign[25].split(":"))[1])
#line 27, Moment arm matrix, a6
a6 = float((allsByLign[26].split(":"))[1])
#line 28, Moment arm matrix, a7
a7 = float((allsByLign[27].split(":"))[1])
#line 29, Moment arm matrix, a8
a8 = float((allsByLign[28].split(":"))[1])
#line 30, Moment arm matrix, a9
a9 = float((allsByLign[29].split(":"))[1])
#line 31, Moment arm matrix, a10
a10 = float((allsByLign[30].split(":"))[1])
#line 32, Moment arm matrix, a11
a11 = float((allsByLign[31].split(":"))[1])
#line 33, Moment arm matrix, a12
a12 = float((allsByLign[32].split(":"))[1])
#line 34, Moment arm matrix, a13
a13 = float((allsByLign[33].split(":"))[1])
#line 35, Moment arm matrix, a14
a14 = float((allsByLign[34].split(":"))[1])
#line 36, Moment arm matrix, a15
a15 = float((allsByLign[35].split(":"))[1])
#line 37, Moment arm matrix, a16
a16 = float((allsByLign[36].split(":"))[1])
#line 38, Moment arm matrix, a17
a17 = float((allsByLign[37].split(":"))[1])
#line 39, Moment arm matrix, a18
a18 = float((allsByLign[38].split(":"))[1])
#line 40, Moment arm matrix, a19
a19 = float((allsByLign[39].split(":"))[1])
#line 41, Moment arm matrix, a20
a20 = float((allsByLign[40].split(":"))[1])
#line 42, Moment arm matrix, a21
a21 = float((allsByLign[41].split(":"))[1])
#line 43, Moment arm matrix, a22
a22 = float((allsByLign[42].split(":"))[1])
#line 44, Moment arm matrix, a23
a23 = float((allsByLign[43].split(":"))[1])
#line 45, Moment arm matrix, a24
a24 = float((allsByLign[44].split(":"))[1])
#matrix definition
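# At is a 3x8 moment arm matrix: rows correspond to the shoulder, elbow and
# hand joints, columns (presumably) to the model's eight muscles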
self.At = np.array([[a1,a2,a3,a4,a5,a6,a7,a8], [a9,a10,a11,a12,a13,a14,a15,a16], [a17,a18,a19,a20,a21,a22,a23,a24]])
def readStops(self):
with open(self.pathSetupFile, "r") as file:
alls = file.read()
allsByLign = alls.split("\n")
#line 46, Shoulder upper bound
self.sub = float((allsByLign[45].split(":"))[1])
#line 47, Shoulder lower bound
self.slb = float((allsByLign[46].split(":"))[1])
#line 48, Elbow upper bound
self.eub = float((allsByLign[47].split(":"))[1])
#line 49, Elbow lower bound
self.elb = float((allsByLign[48].split(":"))[1])
#line 50, Hand upper bound
self.hub = float((allsByLign[49].split(":"))[1])
#line 51, Hand lower bound
self.hlb = float((allsByLign[50].split(":"))[1])
| osigaud/ArmModelPython | 3RModel_Gao2/ArmModel/ArmParameters.py | Python | gpl-2.0 | 6,375 |
#!/usr/bin/env python2
from __future__ import print_function
import smtplib
import uuid
import urllib2
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from email.mime.image import MIMEImage
import os
import sys
from django.template import loader, Context
from django.conf import settings
def post(mail, template_var):
me = mail['user'] + "<" + mail['from_address'] + ">"
msg = MIMEMultipart('related')
msg['Subject'] = mail['subject']
msg['From'] = me
msg['To'] = mail['to_list']
to_list = mail['to_list'].split(';')
msg_alternative = MIMEMultipart('alternative')
msg.attach(msg_alternative)
if 'tmimg' in mail:
tmimg = dict(path=mail['tmimg_dir'] + mail['tmimg'],
cid=str(uuid.uuid4()))
with open(tmimg['path'], 'rb') as file:
msg_tmimage = MIMEImage(file.read(),
name=os.path.basename(tmimg['path']))
msg.attach(msg_tmimage)
template_var['tmimg_cid'] = tmimg['cid']
msg_tmimage.add_header('Content-ID', '<{}>'.format(tmimg['cid']))
if 'hsimg' in mail:
hsimg = dict(path=mail['hsimg_dir'] + mail['hsimg'],
cid=str(uuid.uuid4()))
with open(hsimg['path'], 'rb') as file:
msg_hsimage = MIMEImage(file.read(),
name=os.path.basename(hsimg['path']))
msg.attach(msg_hsimage)
template_var['hsimg_cid'] = hsimg['cid']
msg_hsimage.add_header('Content-ID', '<{}>'.format(hsimg['cid']))
tmp = mail['template'].split('/')
template_dir = '/'.join(tmp[:-1])
try:
settings.configure(TEMPLATE_DIRS=(template_dir
if template_dir else './'))
except:
pass
t = loader.get_template(tmp[-1])
c = Context(template_var)
mail['content'] = t.render(c)
msg_html = MIMEText(mail['content'], 'HTML')
msg_alternative.attach(msg_html)
try:
s = smtplib.SMTP()
s.connect(mail['smtp_server'])
s.login(mail['user'], mail['password'])
s.sendmail(me, to_list, msg.as_string())
s.close()
return True
except Exception, e:
print(str(e), end="")
sys.stdout.flush()
return False
def sendmail(time, data, err_list, cfg, luckyID):
mail = cfg['Email']
mail['user'] = mail['from_address'].split('@')[0]
mail['subject'] = "[Miner Status " + cfg['General']['server_code'] + (
"] Report " + time)
template_var = {}
if luckyID:
template_var['lucky'] = True
template_var['luckyID'] = luckyID
else:
template_var['lucky'] = False
print('Fetching balance data:')
proxy_handler = urllib2.ProxyHandler({})
opener = urllib2.build_opener(proxy_handler)
urllib2.install_opener(opener)
url_list = list(filter(None, (x.strip() for x in
cfg['Pool']['balance_url'].splitlines())))
template_var['balance_list'] = []
b = 0
err = 0
for url in url_list:
print(url + ' ... ', end="")
sys.stdout.flush()
retry = 0
fail = 1
while retry < int(cfg['Pool']['retry']):
try:
s = urllib2.urlopen(url, timeout=10).read()
balance = s.split('Final Balance')[1].split(
' BTC')[0].split('>')[-1]
b += float(balance)
fail = 0
break
except Exception, e:
print(str(e), end="")
sys.stdout.flush()
retry += 1
if fail == 1:
err = 1
balance = 'Connection Failed'
template_var['balance_list'].append({'addr': url.split('/')[-1],
'num': balance, 'url': url})
print("Done.")
if err == 1:
template_var['sum_balance'] = 'N/A'
else:
template_var['sum_balance'] = str(b)
print("Sending email to " + cfg['Email']['to_list'].replace(';', ' & ') +
' ...', end="")
sys.stdout.flush()
template_var['server_code'] = cfg['General']['server_code']
template_var['time'] = time
alivenum = 0
for mminer in data:
alive_flag = False
for miner in mminer[1:]:
if miner[1] == "Alive":
alive_flag = True
if alive_flag:
alivenum += 1
minernum = len(cfg['miner_list'])
for i in cfg['mod_num_list']:
if int(i) == 0:
minernum -= 1
template_var['active_ip_num'] = str(alivenum) + '/' + str(minernum)
template_var['err_miner_list'] = err_list
sum_mod_num = 0
for mminer in data:
for miner in mminer[1:]:
for dev_stat in miner[4]:
sum_mod_num += int(dev_stat[3])
sum_mod_num0 = 0
for mod_num in cfg['mod_num_list']:
sum_mod_num0 += int(mod_num)
template_var['alive_mod_num'] = str(sum_mod_num) + '/' + str(sum_mod_num0)
if 'tmimg' in mail:
template_var['tmimg'] = True
if 'hsimg' in mail:
template_var['hsimg'] = True
if post(mail, template_var):
print(" Successed.")
else:
print(" Failed.")
| archangdcc/avalon-extras | farm-manager/status-report/sendmail.py | Python | unlicense | 5,272 |
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import mock
import grpc
from grpc.experimental import aio
import math
import pytest
from proto.marshal.rules.dates import DurationRule, TimestampRule
from google.api_core import client_options
from google.api_core import exceptions as core_exceptions
from google.api_core import gapic_v1
from google.api_core import grpc_helpers
from google.api_core import grpc_helpers_async
from google.api_core import path_template
from google.auth import credentials as ga_credentials
from google.auth.exceptions import MutualTLSChannelError
from google.cloud.dialogflow_v2beta1.services.environments import (
EnvironmentsAsyncClient,
)
from google.cloud.dialogflow_v2beta1.services.environments import EnvironmentsClient
from google.cloud.dialogflow_v2beta1.services.environments import pagers
from google.cloud.dialogflow_v2beta1.services.environments import transports
from google.cloud.dialogflow_v2beta1.types import audio_config
from google.cloud.dialogflow_v2beta1.types import environment
from google.cloud.dialogflow_v2beta1.types import fulfillment
from google.oauth2 import service_account
from google.protobuf import field_mask_pb2 # type: ignore
from google.protobuf import timestamp_pb2 # type: ignore
import google.auth
def client_cert_source_callback():
return b"cert bytes", b"key bytes"
# If default endpoint is localhost, then default mtls endpoint will be the same.
# This method modifies the default endpoint so the client can produce a different
# mtls endpoint for endpoint testing purposes.
def modify_default_endpoint(client):
return (
"foo.googleapis.com"
if ("localhost" in client.DEFAULT_ENDPOINT)
else client.DEFAULT_ENDPOINT
)
def test__get_default_mtls_endpoint():
api_endpoint = "example.googleapis.com"
api_mtls_endpoint = "example.mtls.googleapis.com"
sandbox_endpoint = "example.sandbox.googleapis.com"
sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com"
non_googleapi = "api.example.com"
assert EnvironmentsClient._get_default_mtls_endpoint(None) is None
assert (
EnvironmentsClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint
)
assert (
EnvironmentsClient._get_default_mtls_endpoint(api_mtls_endpoint)
== api_mtls_endpoint
)
assert (
EnvironmentsClient._get_default_mtls_endpoint(sandbox_endpoint)
== sandbox_mtls_endpoint
)
assert (
EnvironmentsClient._get_default_mtls_endpoint(sandbox_mtls_endpoint)
== sandbox_mtls_endpoint
)
assert EnvironmentsClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi
@pytest.mark.parametrize("client_class", [EnvironmentsClient, EnvironmentsAsyncClient,])
def test_environments_client_from_service_account_info(client_class):
creds = ga_credentials.AnonymousCredentials()
with mock.patch.object(
service_account.Credentials, "from_service_account_info"
) as factory:
factory.return_value = creds
info = {"valid": True}
client = client_class.from_service_account_info(info)
assert client.transport._credentials == creds
assert isinstance(client, client_class)
assert client.transport._host == "dialogflow.googleapis.com:443"
@pytest.mark.parametrize(
"transport_class,transport_name",
[
(transports.EnvironmentsGrpcTransport, "grpc"),
(transports.EnvironmentsGrpcAsyncIOTransport, "grpc_asyncio"),
],
)
def test_environments_client_service_account_always_use_jwt(
transport_class, transport_name
):
with mock.patch.object(
service_account.Credentials, "with_always_use_jwt_access", create=True
) as use_jwt:
creds = service_account.Credentials(None, None, None)
transport = transport_class(credentials=creds, always_use_jwt_access=True)
use_jwt.assert_called_once_with(True)
with mock.patch.object(
service_account.Credentials, "with_always_use_jwt_access", create=True
) as use_jwt:
creds = service_account.Credentials(None, None, None)
transport = transport_class(credentials=creds, always_use_jwt_access=False)
use_jwt.assert_not_called()
@pytest.mark.parametrize("client_class", [EnvironmentsClient, EnvironmentsAsyncClient,])
def test_environments_client_from_service_account_file(client_class):
creds = ga_credentials.AnonymousCredentials()
with mock.patch.object(
service_account.Credentials, "from_service_account_file"
) as factory:
factory.return_value = creds
client = client_class.from_service_account_file("dummy/file/path.json")
assert client.transport._credentials == creds
assert isinstance(client, client_class)
client = client_class.from_service_account_json("dummy/file/path.json")
assert client.transport._credentials == creds
assert isinstance(client, client_class)
assert client.transport._host == "dialogflow.googleapis.com:443"
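# Note: in generated clients, from_service_account_json is an alias of
# from_service_account_file, which is why the test above exercises both entry
# points against the same mocked factory.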
def test_environments_client_get_transport_class():
transport = EnvironmentsClient.get_transport_class()
available_transports = [
transports.EnvironmentsGrpcTransport,
]
assert transport in available_transports
transport = EnvironmentsClient.get_transport_class("grpc")
assert transport == transports.EnvironmentsGrpcTransport
@pytest.mark.parametrize(
"client_class,transport_class,transport_name",
[
(EnvironmentsClient, transports.EnvironmentsGrpcTransport, "grpc"),
(
EnvironmentsAsyncClient,
transports.EnvironmentsGrpcAsyncIOTransport,
"grpc_asyncio",
),
],
)
@mock.patch.object(
EnvironmentsClient, "DEFAULT_ENDPOINT", modify_default_endpoint(EnvironmentsClient)
)
@mock.patch.object(
EnvironmentsAsyncClient,
"DEFAULT_ENDPOINT",
modify_default_endpoint(EnvironmentsAsyncClient),
)
def test_environments_client_client_options(
client_class, transport_class, transport_name
):
    # Check that if a transport instance is provided, we won't create a new one.
with mock.patch.object(EnvironmentsClient, "get_transport_class") as gtc:
transport = transport_class(credentials=ga_credentials.AnonymousCredentials())
client = client_class(transport=transport)
gtc.assert_not_called()
    # Check that if the transport is provided as a string, a new one is created.
with mock.patch.object(EnvironmentsClient, "get_transport_class") as gtc:
client = client_class(transport=transport_name)
gtc.assert_called()
# Check the case api_endpoint is provided.
options = client_options.ClientOptions(api_endpoint="squid.clam.whelk")
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(transport=transport_name, client_options=options)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host="squid.clam.whelk",
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
# "never".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
# "always".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}):
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_MTLS_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has
# unsupported value.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}):
with pytest.raises(MutualTLSChannelError):
client = client_class(transport=transport_name)
# Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}
):
with pytest.raises(ValueError):
client = client_class(transport=transport_name)
# Check the case quota_project_id is provided
options = client_options.ClientOptions(quota_project_id="octopus")
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options, transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id="octopus",
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
@pytest.mark.parametrize(
"client_class,transport_class,transport_name,use_client_cert_env",
[
(EnvironmentsClient, transports.EnvironmentsGrpcTransport, "grpc", "true"),
(
EnvironmentsAsyncClient,
transports.EnvironmentsGrpcAsyncIOTransport,
"grpc_asyncio",
"true",
),
(EnvironmentsClient, transports.EnvironmentsGrpcTransport, "grpc", "false"),
(
EnvironmentsAsyncClient,
transports.EnvironmentsGrpcAsyncIOTransport,
"grpc_asyncio",
"false",
),
],
)
@mock.patch.object(
EnvironmentsClient, "DEFAULT_ENDPOINT", modify_default_endpoint(EnvironmentsClient)
)
@mock.patch.object(
EnvironmentsAsyncClient,
"DEFAULT_ENDPOINT",
modify_default_endpoint(EnvironmentsAsyncClient),
)
@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"})
def test_environments_client_mtls_env_auto(
client_class, transport_class, transport_name, use_client_cert_env
):
    # This tests the endpoint autoswitch behavior: the endpoint is switched to the
    # default mtls endpoint if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and a
    # client cert exists.
    # Check the case where client_cert_source is provided. Whether the client cert
    # is used depends on the GOOGLE_API_USE_CLIENT_CERTIFICATE value.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
):
options = client_options.ClientOptions(
client_cert_source=client_cert_source_callback
)
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options, transport=transport_name)
if use_client_cert_env == "false":
expected_client_cert_source = None
expected_host = client.DEFAULT_ENDPOINT
else:
expected_client_cert_source = client_cert_source_callback
expected_host = client.DEFAULT_MTLS_ENDPOINT
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=expected_host,
scopes=None,
client_cert_source_for_mtls=expected_client_cert_source,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case ADC client cert is provided. Whether client cert is used depends on
# GOOGLE_API_USE_CLIENT_CERTIFICATE value.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
):
with mock.patch.object(transport_class, "__init__") as patched:
with mock.patch(
"google.auth.transport.mtls.has_default_client_cert_source",
return_value=True,
):
with mock.patch(
"google.auth.transport.mtls.default_client_cert_source",
return_value=client_cert_source_callback,
):
if use_client_cert_env == "false":
expected_host = client.DEFAULT_ENDPOINT
expected_client_cert_source = None
else:
expected_host = client.DEFAULT_MTLS_ENDPOINT
expected_client_cert_source = client_cert_source_callback
patched.return_value = None
client = client_class(transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=expected_host,
scopes=None,
client_cert_source_for_mtls=expected_client_cert_source,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case client_cert_source and ADC client cert are not provided.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
):
with mock.patch.object(transport_class, "__init__") as patched:
with mock.patch(
"google.auth.transport.mtls.has_default_client_cert_source",
return_value=False,
):
patched.return_value = None
client = client_class(transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
@pytest.mark.parametrize("client_class", [EnvironmentsClient, EnvironmentsAsyncClient])
@mock.patch.object(
EnvironmentsClient, "DEFAULT_ENDPOINT", modify_default_endpoint(EnvironmentsClient)
)
@mock.patch.object(
EnvironmentsAsyncClient,
"DEFAULT_ENDPOINT",
modify_default_endpoint(EnvironmentsAsyncClient),
)
def test_environments_client_get_mtls_endpoint_and_cert_source(client_class):
mock_client_cert_source = mock.Mock()
# Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "true".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
mock_api_endpoint = "foo"
options = client_options.ClientOptions(
client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint
)
api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source(
options
)
assert api_endpoint == mock_api_endpoint
assert cert_source == mock_client_cert_source
# Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "false".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "false"}):
mock_client_cert_source = mock.Mock()
mock_api_endpoint = "foo"
options = client_options.ClientOptions(
client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint
)
api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source(
options
)
assert api_endpoint == mock_api_endpoint
assert cert_source is None
# Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "never".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source()
assert api_endpoint == client_class.DEFAULT_ENDPOINT
assert cert_source is None
# Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "always".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}):
api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source()
assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT
assert cert_source is None
# Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert doesn't exist.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
with mock.patch(
"google.auth.transport.mtls.has_default_client_cert_source",
return_value=False,
):
api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source()
assert api_endpoint == client_class.DEFAULT_ENDPOINT
assert cert_source is None
# Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert exists.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
with mock.patch(
"google.auth.transport.mtls.has_default_client_cert_source",
return_value=True,
):
with mock.patch(
"google.auth.transport.mtls.default_client_cert_source",
return_value=mock_client_cert_source,
):
(
api_endpoint,
cert_source,
) = client_class.get_mtls_endpoint_and_cert_source()
assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT
assert cert_source == mock_client_cert_source
@pytest.mark.parametrize(
"client_class,transport_class,transport_name",
[
(EnvironmentsClient, transports.EnvironmentsGrpcTransport, "grpc"),
(
EnvironmentsAsyncClient,
transports.EnvironmentsGrpcAsyncIOTransport,
"grpc_asyncio",
),
],
)
def test_environments_client_client_options_scopes(
client_class, transport_class, transport_name
):
# Check the case scopes are provided.
options = client_options.ClientOptions(scopes=["1", "2"],)
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options, transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=["1", "2"],
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
@pytest.mark.parametrize(
"client_class,transport_class,transport_name,grpc_helpers",
[
(
EnvironmentsClient,
transports.EnvironmentsGrpcTransport,
"grpc",
grpc_helpers,
),
(
EnvironmentsAsyncClient,
transports.EnvironmentsGrpcAsyncIOTransport,
"grpc_asyncio",
grpc_helpers_async,
),
],
)
def test_environments_client_client_options_credentials_file(
client_class, transport_class, transport_name, grpc_helpers
):
# Check the case credentials file is provided.
options = client_options.ClientOptions(credentials_file="credentials.json")
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options, transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file="credentials.json",
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
def test_environments_client_client_options_from_dict():
with mock.patch(
"google.cloud.dialogflow_v2beta1.services.environments.transports.EnvironmentsGrpcTransport.__init__"
) as grpc_transport:
grpc_transport.return_value = None
client = EnvironmentsClient(client_options={"api_endpoint": "squid.clam.whelk"})
grpc_transport.assert_called_once_with(
credentials=None,
credentials_file=None,
host="squid.clam.whelk",
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
@pytest.mark.parametrize(
"client_class,transport_class,transport_name,grpc_helpers",
[
(
EnvironmentsClient,
transports.EnvironmentsGrpcTransport,
"grpc",
grpc_helpers,
),
(
EnvironmentsAsyncClient,
transports.EnvironmentsGrpcAsyncIOTransport,
"grpc_asyncio",
grpc_helpers_async,
),
],
)
def test_environments_client_create_channel_credentials_file(
client_class, transport_class, transport_name, grpc_helpers
):
# Check the case credentials file is provided.
options = client_options.ClientOptions(credentials_file="credentials.json")
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options, transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file="credentials.json",
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
    # Test that the credentials loaded from the file are saved and used as the
    # client credentials.
with mock.patch.object(
google.auth, "load_credentials_from_file", autospec=True
) as load_creds, mock.patch.object(
google.auth, "default", autospec=True
) as adc, mock.patch.object(
grpc_helpers, "create_channel"
) as create_channel:
creds = ga_credentials.AnonymousCredentials()
file_creds = ga_credentials.AnonymousCredentials()
load_creds.return_value = (file_creds, None)
adc.return_value = (creds, None)
client = client_class(client_options=options, transport=transport_name)
create_channel.assert_called_with(
"dialogflow.googleapis.com:443",
credentials=file_creds,
credentials_file=None,
quota_project_id=None,
default_scopes=(
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/dialogflow",
),
scopes=None,
default_host="dialogflow.googleapis.com",
ssl_credentials=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
@pytest.mark.parametrize("request_type", [environment.ListEnvironmentsRequest, dict,])
def test_list_environments(request_type, transport: str = "grpc"):
client = EnvironmentsClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_environments), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = environment.ListEnvironmentsResponse(
next_page_token="next_page_token_value",
)
response = client.list_environments(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == environment.ListEnvironmentsRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, pagers.ListEnvironmentsPager)
assert response.next_page_token == "next_page_token_value"
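# Illustrative (hypothetical) caller-side use of the pager returned above; the
# parent value is a placeholder resource name, not taken from this test:
#
#   for env in client.list_environments(parent="projects/<PROJECT_ID>/agent"):
#       print(env.name)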
def test_list_environments_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = EnvironmentsClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_environments), "__call__"
) as call:
client.list_environments()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == environment.ListEnvironmentsRequest()
@pytest.mark.asyncio
async def test_list_environments_async(
transport: str = "grpc_asyncio", request_type=environment.ListEnvironmentsRequest
):
client = EnvironmentsAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_environments), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
environment.ListEnvironmentsResponse(
next_page_token="next_page_token_value",
)
)
response = await client.list_environments(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == environment.ListEnvironmentsRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, pagers.ListEnvironmentsAsyncPager)
assert response.next_page_token == "next_page_token_value"
@pytest.mark.asyncio
async def test_list_environments_async_from_dict():
await test_list_environments_async(request_type=dict)
def test_list_environments_field_headers():
client = EnvironmentsClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = environment.ListEnvironmentsRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_environments), "__call__"
) as call:
call.return_value = environment.ListEnvironmentsResponse()
client.list_environments(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_list_environments_field_headers_async():
client = EnvironmentsAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = environment.ListEnvironmentsRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_environments), "__call__"
) as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
environment.ListEnvironmentsResponse()
)
await client.list_environments(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
def test_list_environments_flattened():
client = EnvironmentsClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_environments), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = environment.ListEnvironmentsResponse()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.list_environments(parent="parent_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].parent
mock_val = "parent_value"
assert arg == mock_val
def test_list_environments_flattened_error():
client = EnvironmentsClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.list_environments(
environment.ListEnvironmentsRequest(), parent="parent_value",
)
@pytest.mark.asyncio
async def test_list_environments_flattened_async():
client = EnvironmentsAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_environments), "__call__"
) as call:
        # Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            environment.ListEnvironmentsResponse()
        )
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.list_environments(parent="parent_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].parent
mock_val = "parent_value"
assert arg == mock_val
@pytest.mark.asyncio
async def test_list_environments_flattened_error_async():
client = EnvironmentsAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.list_environments(
environment.ListEnvironmentsRequest(), parent="parent_value",
)
def test_list_environments_pager(transport_name: str = "grpc"):
client = EnvironmentsClient(
credentials=ga_credentials.AnonymousCredentials, transport=transport_name,
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_environments), "__call__"
) as call:
# Set the response to a series of pages.
call.side_effect = (
environment.ListEnvironmentsResponse(
environments=[
environment.Environment(),
environment.Environment(),
environment.Environment(),
],
next_page_token="abc",
),
environment.ListEnvironmentsResponse(
environments=[], next_page_token="def",
),
environment.ListEnvironmentsResponse(
environments=[environment.Environment(),], next_page_token="ghi",
),
environment.ListEnvironmentsResponse(
environments=[environment.Environment(), environment.Environment(),],
),
RuntimeError,
)
metadata = ()
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)),
)
pager = client.list_environments(request={})
assert pager._metadata == metadata
results = [i for i in pager]
assert len(results) == 6
assert all(isinstance(i, environment.Environment) for i in results)
def test_list_environments_pages(transport_name: str = "grpc"):
client = EnvironmentsClient(
credentials=ga_credentials.AnonymousCredentials, transport=transport_name,
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_environments), "__call__"
) as call:
# Set the response to a series of pages.
call.side_effect = (
environment.ListEnvironmentsResponse(
environments=[
environment.Environment(),
environment.Environment(),
environment.Environment(),
],
next_page_token="abc",
),
environment.ListEnvironmentsResponse(
environments=[], next_page_token="def",
),
environment.ListEnvironmentsResponse(
environments=[environment.Environment(),], next_page_token="ghi",
),
environment.ListEnvironmentsResponse(
environments=[environment.Environment(), environment.Environment(),],
),
RuntimeError,
)
pages = list(client.list_environments(request={}).pages)
for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
assert page_.raw_page.next_page_token == token
@pytest.mark.asyncio
async def test_list_environments_async_pager():
client = EnvironmentsAsyncClient(credentials=ga_credentials.AnonymousCredentials,)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_environments),
"__call__",
new_callable=mock.AsyncMock,
) as call:
# Set the response to a series of pages.
call.side_effect = (
environment.ListEnvironmentsResponse(
environments=[
environment.Environment(),
environment.Environment(),
environment.Environment(),
],
next_page_token="abc",
),
environment.ListEnvironmentsResponse(
environments=[], next_page_token="def",
),
environment.ListEnvironmentsResponse(
environments=[environment.Environment(),], next_page_token="ghi",
),
environment.ListEnvironmentsResponse(
environments=[environment.Environment(), environment.Environment(),],
),
RuntimeError,
)
async_pager = await client.list_environments(request={},)
assert async_pager.next_page_token == "abc"
responses = []
async for response in async_pager:
responses.append(response)
assert len(responses) == 6
assert all(isinstance(i, environment.Environment) for i in responses)
@pytest.mark.asyncio
async def test_list_environments_async_pages():
client = EnvironmentsAsyncClient(credentials=ga_credentials.AnonymousCredentials,)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_environments),
"__call__",
new_callable=mock.AsyncMock,
) as call:
# Set the response to a series of pages.
call.side_effect = (
environment.ListEnvironmentsResponse(
environments=[
environment.Environment(),
environment.Environment(),
environment.Environment(),
],
next_page_token="abc",
),
environment.ListEnvironmentsResponse(
environments=[], next_page_token="def",
),
environment.ListEnvironmentsResponse(
environments=[environment.Environment(),], next_page_token="ghi",
),
environment.ListEnvironmentsResponse(
environments=[environment.Environment(), environment.Environment(),],
),
RuntimeError,
)
pages = []
async for page_ in (await client.list_environments(request={})).pages:
pages.append(page_)
for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
assert page_.raw_page.next_page_token == token
@pytest.mark.parametrize("request_type", [environment.GetEnvironmentRequest, dict,])
def test_get_environment(request_type, transport: str = "grpc"):
client = EnvironmentsClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_environment), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = environment.Environment(
name="name_value",
description="description_value",
agent_version="agent_version_value",
state=environment.Environment.State.STOPPED,
)
response = client.get_environment(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == environment.GetEnvironmentRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, environment.Environment)
assert response.name == "name_value"
assert response.description == "description_value"
assert response.agent_version == "agent_version_value"
assert response.state == environment.Environment.State.STOPPED
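# Illustrative (hypothetical) direct call corresponding to the test above; the
# resource name is a placeholder:
#
#   request = environment.GetEnvironmentRequest(
#       name="projects/<PROJECT_ID>/agent/environments/<ENVIRONMENT_ID>"
#   )
#   env = client.get_environment(request=request)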
def test_get_environment_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = EnvironmentsClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_environment), "__call__") as call:
client.get_environment()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == environment.GetEnvironmentRequest()
@pytest.mark.asyncio
async def test_get_environment_async(
transport: str = "grpc_asyncio", request_type=environment.GetEnvironmentRequest
):
client = EnvironmentsAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_environment), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
environment.Environment(
name="name_value",
description="description_value",
agent_version="agent_version_value",
state=environment.Environment.State.STOPPED,
)
)
response = await client.get_environment(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == environment.GetEnvironmentRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, environment.Environment)
assert response.name == "name_value"
assert response.description == "description_value"
assert response.agent_version == "agent_version_value"
assert response.state == environment.Environment.State.STOPPED
@pytest.mark.asyncio
async def test_get_environment_async_from_dict():
await test_get_environment_async(request_type=dict)
def test_get_environment_field_headers():
client = EnvironmentsClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = environment.GetEnvironmentRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_environment), "__call__") as call:
call.return_value = environment.Environment()
client.get_environment(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_get_environment_field_headers_async():
client = EnvironmentsAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = environment.GetEnvironmentRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_environment), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
environment.Environment()
)
await client.get_environment(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
@pytest.mark.parametrize("request_type", [environment.CreateEnvironmentRequest, dict,])
def test_create_environment(request_type, transport: str = "grpc"):
client = EnvironmentsClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_environment), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = environment.Environment(
name="name_value",
description="description_value",
agent_version="agent_version_value",
state=environment.Environment.State.STOPPED,
)
response = client.create_environment(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == environment.CreateEnvironmentRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, environment.Environment)
assert response.name == "name_value"
assert response.description == "description_value"
assert response.agent_version == "agent_version_value"
assert response.state == environment.Environment.State.STOPPED
def test_create_environment_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = EnvironmentsClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_environment), "__call__"
) as call:
client.create_environment()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == environment.CreateEnvironmentRequest()
@pytest.mark.asyncio
async def test_create_environment_async(
transport: str = "grpc_asyncio", request_type=environment.CreateEnvironmentRequest
):
client = EnvironmentsAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_environment), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
environment.Environment(
name="name_value",
description="description_value",
agent_version="agent_version_value",
state=environment.Environment.State.STOPPED,
)
)
response = await client.create_environment(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == environment.CreateEnvironmentRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, environment.Environment)
assert response.name == "name_value"
assert response.description == "description_value"
assert response.agent_version == "agent_version_value"
assert response.state == environment.Environment.State.STOPPED
@pytest.mark.asyncio
async def test_create_environment_async_from_dict():
await test_create_environment_async(request_type=dict)
def test_create_environment_field_headers():
client = EnvironmentsClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = environment.CreateEnvironmentRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_environment), "__call__"
) as call:
call.return_value = environment.Environment()
client.create_environment(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_create_environment_field_headers_async():
client = EnvironmentsAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = environment.CreateEnvironmentRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_environment), "__call__"
) as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
environment.Environment()
)
await client.create_environment(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
@pytest.mark.parametrize("request_type", [environment.UpdateEnvironmentRequest, dict,])
def test_update_environment(request_type, transport: str = "grpc"):
client = EnvironmentsClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.update_environment), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = environment.Environment(
name="name_value",
description="description_value",
agent_version="agent_version_value",
state=environment.Environment.State.STOPPED,
)
response = client.update_environment(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == environment.UpdateEnvironmentRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, environment.Environment)
assert response.name == "name_value"
assert response.description == "description_value"
assert response.agent_version == "agent_version_value"
assert response.state == environment.Environment.State.STOPPED
def test_update_environment_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = EnvironmentsClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.update_environment), "__call__"
) as call:
client.update_environment()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == environment.UpdateEnvironmentRequest()
@pytest.mark.asyncio
async def test_update_environment_async(
transport: str = "grpc_asyncio", request_type=environment.UpdateEnvironmentRequest
):
client = EnvironmentsAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.update_environment), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
environment.Environment(
name="name_value",
description="description_value",
agent_version="agent_version_value",
state=environment.Environment.State.STOPPED,
)
)
response = await client.update_environment(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == environment.UpdateEnvironmentRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, environment.Environment)
assert response.name == "name_value"
assert response.description == "description_value"
assert response.agent_version == "agent_version_value"
assert response.state == environment.Environment.State.STOPPED
@pytest.mark.asyncio
async def test_update_environment_async_from_dict():
await test_update_environment_async(request_type=dict)
def test_update_environment_field_headers():
client = EnvironmentsClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = environment.UpdateEnvironmentRequest()
request.environment.name = "environment.name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.update_environment), "__call__"
) as call:
call.return_value = environment.Environment()
client.update_environment(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "environment.name=environment.name/value",) in kw[
"metadata"
]
@pytest.mark.asyncio
async def test_update_environment_field_headers_async():
client = EnvironmentsAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = environment.UpdateEnvironmentRequest()
request.environment.name = "environment.name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.update_environment), "__call__"
) as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
environment.Environment()
)
await client.update_environment(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "environment.name=environment.name/value",) in kw[
"metadata"
]
@pytest.mark.parametrize("request_type", [environment.DeleteEnvironmentRequest, dict,])
def test_delete_environment(request_type, transport: str = "grpc"):
client = EnvironmentsClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_environment), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = None
response = client.delete_environment(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == environment.DeleteEnvironmentRequest()
# Establish that the response is the type that we expect.
assert response is None
def test_delete_environment_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = EnvironmentsClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_environment), "__call__"
) as call:
client.delete_environment()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == environment.DeleteEnvironmentRequest()
@pytest.mark.asyncio
async def test_delete_environment_async(
transport: str = "grpc_asyncio", request_type=environment.DeleteEnvironmentRequest
):
client = EnvironmentsAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_environment), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
response = await client.delete_environment(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == environment.DeleteEnvironmentRequest()
# Establish that the response is the type that we expect.
assert response is None
@pytest.mark.asyncio
async def test_delete_environment_async_from_dict():
await test_delete_environment_async(request_type=dict)
def test_delete_environment_field_headers():
client = EnvironmentsClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = environment.DeleteEnvironmentRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_environment), "__call__"
) as call:
call.return_value = None
client.delete_environment(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_delete_environment_field_headers_async():
client = EnvironmentsAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = environment.DeleteEnvironmentRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_environment), "__call__"
) as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
await client.delete_environment(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
@pytest.mark.parametrize(
"request_type", [environment.GetEnvironmentHistoryRequest, dict,]
)
def test_get_environment_history(request_type, transport: str = "grpc"):
client = EnvironmentsClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_environment_history), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = environment.EnvironmentHistory(
parent="parent_value", next_page_token="next_page_token_value",
)
response = client.get_environment_history(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == environment.GetEnvironmentHistoryRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, pagers.GetEnvironmentHistoryPager)
assert response.parent == "parent_value"
assert response.next_page_token == "next_page_token_value"
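# Unlike ListEnvironments, this RPC returns EnvironmentHistory directly as its
# page message (parent, entries, next_page_token), which is why the pager above
# also exposes the parent field.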
def test_get_environment_history_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = EnvironmentsClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_environment_history), "__call__"
) as call:
client.get_environment_history()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == environment.GetEnvironmentHistoryRequest()
@pytest.mark.asyncio
async def test_get_environment_history_async(
transport: str = "grpc_asyncio",
request_type=environment.GetEnvironmentHistoryRequest,
):
client = EnvironmentsAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_environment_history), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
environment.EnvironmentHistory(
parent="parent_value", next_page_token="next_page_token_value",
)
)
response = await client.get_environment_history(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == environment.GetEnvironmentHistoryRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, pagers.GetEnvironmentHistoryAsyncPager)
assert response.parent == "parent_value"
assert response.next_page_token == "next_page_token_value"
@pytest.mark.asyncio
async def test_get_environment_history_async_from_dict():
await test_get_environment_history_async(request_type=dict)
def test_get_environment_history_field_headers():
client = EnvironmentsClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = environment.GetEnvironmentHistoryRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_environment_history), "__call__"
) as call:
call.return_value = environment.EnvironmentHistory()
client.get_environment_history(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_get_environment_history_field_headers_async():
client = EnvironmentsAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = environment.GetEnvironmentHistoryRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_environment_history), "__call__"
) as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
environment.EnvironmentHistory()
)
await client.get_environment_history(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
def test_get_environment_history_pager(transport_name: str = "grpc"):
client = EnvironmentsClient(
credentials=ga_credentials.AnonymousCredentials, transport=transport_name,
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_environment_history), "__call__"
) as call:
# Set the response to a series of pages.
call.side_effect = (
environment.EnvironmentHistory(
entries=[
environment.EnvironmentHistory.Entry(),
environment.EnvironmentHistory.Entry(),
environment.EnvironmentHistory.Entry(),
],
next_page_token="abc",
),
environment.EnvironmentHistory(entries=[], next_page_token="def",),
environment.EnvironmentHistory(
entries=[environment.EnvironmentHistory.Entry(),],
next_page_token="ghi",
),
environment.EnvironmentHistory(
entries=[
environment.EnvironmentHistory.Entry(),
environment.EnvironmentHistory.Entry(),
],
),
RuntimeError,
)
metadata = ()
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)),
)
pager = client.get_environment_history(request={})
assert pager._metadata == metadata
results = [i for i in pager]
assert len(results) == 6
assert all(isinstance(i, environment.EnvironmentHistory.Entry) for i in results)
def test_get_environment_history_pages(transport_name: str = "grpc"):
client = EnvironmentsClient(
credentials=ga_credentials.AnonymousCredentials, transport=transport_name,
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_environment_history), "__call__"
) as call:
# Set the response to a series of pages.
call.side_effect = (
environment.EnvironmentHistory(
entries=[
environment.EnvironmentHistory.Entry(),
environment.EnvironmentHistory.Entry(),
environment.EnvironmentHistory.Entry(),
],
next_page_token="abc",
),
environment.EnvironmentHistory(entries=[], next_page_token="def",),
environment.EnvironmentHistory(
entries=[environment.EnvironmentHistory.Entry(),],
next_page_token="ghi",
),
environment.EnvironmentHistory(
entries=[
environment.EnvironmentHistory.Entry(),
environment.EnvironmentHistory.Entry(),
],
),
RuntimeError,
)
pages = list(client.get_environment_history(request={}).pages)
for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
assert page_.raw_page.next_page_token == token
@pytest.mark.asyncio
async def test_get_environment_history_async_pager():
client = EnvironmentsAsyncClient(credentials=ga_credentials.AnonymousCredentials,)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_environment_history),
"__call__",
new_callable=mock.AsyncMock,
) as call:
# Set the response to a series of pages.
call.side_effect = (
environment.EnvironmentHistory(
entries=[
environment.EnvironmentHistory.Entry(),
environment.EnvironmentHistory.Entry(),
environment.EnvironmentHistory.Entry(),
],
next_page_token="abc",
),
environment.EnvironmentHistory(entries=[], next_page_token="def",),
environment.EnvironmentHistory(
entries=[environment.EnvironmentHistory.Entry(),],
next_page_token="ghi",
),
environment.EnvironmentHistory(
entries=[
environment.EnvironmentHistory.Entry(),
environment.EnvironmentHistory.Entry(),
],
),
RuntimeError,
)
async_pager = await client.get_environment_history(request={},)
assert async_pager.next_page_token == "abc"
responses = []
async for response in async_pager:
responses.append(response)
assert len(responses) == 6
assert all(
isinstance(i, environment.EnvironmentHistory.Entry) for i in responses
)
@pytest.mark.asyncio
async def test_get_environment_history_async_pages():
client = EnvironmentsAsyncClient(credentials=ga_credentials.AnonymousCredentials,)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_environment_history),
"__call__",
new_callable=mock.AsyncMock,
) as call:
# Set the response to a series of pages.
call.side_effect = (
environment.EnvironmentHistory(
entries=[
environment.EnvironmentHistory.Entry(),
environment.EnvironmentHistory.Entry(),
environment.EnvironmentHistory.Entry(),
],
next_page_token="abc",
),
environment.EnvironmentHistory(entries=[], next_page_token="def",),
environment.EnvironmentHistory(
entries=[environment.EnvironmentHistory.Entry(),],
next_page_token="ghi",
),
environment.EnvironmentHistory(
entries=[
environment.EnvironmentHistory.Entry(),
environment.EnvironmentHistory.Entry(),
],
),
RuntimeError,
)
pages = []
async for page_ in (await client.get_environment_history(request={})).pages:
pages.append(page_)
for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
assert page_.raw_page.next_page_token == token
def test_credentials_transport_error():
# It is an error to provide credentials and a transport instance.
transport = transports.EnvironmentsGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
with pytest.raises(ValueError):
client = EnvironmentsClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# It is an error to provide a credentials file and a transport instance.
transport = transports.EnvironmentsGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
with pytest.raises(ValueError):
client = EnvironmentsClient(
client_options={"credentials_file": "credentials.json"},
transport=transport,
)
# It is an error to provide an api_key and a transport instance.
transport = transports.EnvironmentsGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
options = client_options.ClientOptions()
options.api_key = "api_key"
with pytest.raises(ValueError):
client = EnvironmentsClient(client_options=options, transport=transport,)
# It is an error to provide an api_key and a credential.
options = mock.Mock()
options.api_key = "api_key"
with pytest.raises(ValueError):
client = EnvironmentsClient(
client_options=options, credentials=ga_credentials.AnonymousCredentials()
)
# It is an error to provide scopes and a transport instance.
transport = transports.EnvironmentsGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
with pytest.raises(ValueError):
client = EnvironmentsClient(
client_options={"scopes": ["1", "2"]}, transport=transport,
)
def test_transport_instance():
# A client may be instantiated with a custom transport instance.
transport = transports.EnvironmentsGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
client = EnvironmentsClient(transport=transport)
assert client.transport is transport
def test_transport_get_channel():
# A client may be instantiated with a custom transport instance.
transport = transports.EnvironmentsGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
channel = transport.grpc_channel
assert channel
transport = transports.EnvironmentsGrpcAsyncIOTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
channel = transport.grpc_channel
assert channel
@pytest.mark.parametrize(
"transport_class",
[
transports.EnvironmentsGrpcTransport,
transports.EnvironmentsGrpcAsyncIOTransport,
],
)
def test_transport_adc(transport_class):
# Test default credentials are used if not provided.
with mock.patch.object(google.auth, "default") as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transport_class()
adc.assert_called_once()
def test_transport_grpc_default():
# A client should use the gRPC transport by default.
client = EnvironmentsClient(credentials=ga_credentials.AnonymousCredentials(),)
assert isinstance(client.transport, transports.EnvironmentsGrpcTransport,)
def test_environments_base_transport_error():
# Passing both a credentials object and credentials_file should raise an error
with pytest.raises(core_exceptions.DuplicateCredentialArgs):
transport = transports.EnvironmentsTransport(
credentials=ga_credentials.AnonymousCredentials(),
credentials_file="credentials.json",
)
def test_environments_base_transport():
# Instantiate the base transport.
with mock.patch(
"google.cloud.dialogflow_v2beta1.services.environments.transports.EnvironmentsTransport.__init__"
) as Transport:
Transport.return_value = None
transport = transports.EnvironmentsTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
# Every method on the transport should just blindly
# raise NotImplementedError.
methods = (
"list_environments",
"get_environment",
"create_environment",
"update_environment",
"delete_environment",
"get_environment_history",
)
for method in methods:
with pytest.raises(NotImplementedError):
getattr(transport, method)(request=object())
with pytest.raises(NotImplementedError):
transport.close()
def test_environments_base_transport_with_credentials_file():
# Instantiate the base transport with a credentials file
with mock.patch.object(
google.auth, "load_credentials_from_file", autospec=True
) as load_creds, mock.patch(
"google.cloud.dialogflow_v2beta1.services.environments.transports.EnvironmentsTransport._prep_wrapped_messages"
) as Transport:
Transport.return_value = None
load_creds.return_value = (ga_credentials.AnonymousCredentials(), None)
transport = transports.EnvironmentsTransport(
credentials_file="credentials.json", quota_project_id="octopus",
)
load_creds.assert_called_once_with(
"credentials.json",
scopes=None,
default_scopes=(
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/dialogflow",
),
quota_project_id="octopus",
)
def test_environments_base_transport_with_adc():
# Test the default credentials are used if credentials and credentials_file are None.
with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch(
"google.cloud.dialogflow_v2beta1.services.environments.transports.EnvironmentsTransport._prep_wrapped_messages"
) as Transport:
Transport.return_value = None
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transport = transports.EnvironmentsTransport()
adc.assert_called_once()
def test_environments_auth_adc():
# If no credentials are provided, we should use ADC credentials.
with mock.patch.object(google.auth, "default", autospec=True) as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
EnvironmentsClient()
adc.assert_called_once_with(
scopes=None,
default_scopes=(
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/dialogflow",
),
quota_project_id=None,
)
@pytest.mark.parametrize(
"transport_class",
[
transports.EnvironmentsGrpcTransport,
transports.EnvironmentsGrpcAsyncIOTransport,
],
)
def test_environments_transport_auth_adc(transport_class):
# If credentials and host are not provided, the transport class should use
# ADC credentials.
with mock.patch.object(google.auth, "default", autospec=True) as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transport_class(quota_project_id="octopus", scopes=["1", "2"])
adc.assert_called_once_with(
scopes=["1", "2"],
default_scopes=(
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/dialogflow",
),
quota_project_id="octopus",
)
@pytest.mark.parametrize(
"transport_class,grpc_helpers",
[
(transports.EnvironmentsGrpcTransport, grpc_helpers),
(transports.EnvironmentsGrpcAsyncIOTransport, grpc_helpers_async),
],
)
def test_environments_transport_create_channel(transport_class, grpc_helpers):
# If credentials and host are not provided, the transport class should use
# ADC credentials.
with mock.patch.object(
google.auth, "default", autospec=True
) as adc, mock.patch.object(
grpc_helpers, "create_channel", autospec=True
) as create_channel:
creds = ga_credentials.AnonymousCredentials()
adc.return_value = (creds, None)
transport_class(quota_project_id="octopus", scopes=["1", "2"])
create_channel.assert_called_with(
"dialogflow.googleapis.com:443",
credentials=creds,
credentials_file=None,
quota_project_id="octopus",
default_scopes=(
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/dialogflow",
),
scopes=["1", "2"],
default_host="dialogflow.googleapis.com",
ssl_credentials=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
@pytest.mark.parametrize(
"transport_class",
[transports.EnvironmentsGrpcTransport, transports.EnvironmentsGrpcAsyncIOTransport],
)
def test_environments_grpc_transport_client_cert_source_for_mtls(transport_class):
cred = ga_credentials.AnonymousCredentials()
# Check ssl_channel_credentials is used if provided.
with mock.patch.object(transport_class, "create_channel") as mock_create_channel:
mock_ssl_channel_creds = mock.Mock()
transport_class(
host="squid.clam.whelk",
credentials=cred,
ssl_channel_credentials=mock_ssl_channel_creds,
)
mock_create_channel.assert_called_once_with(
"squid.clam.whelk:443",
credentials=cred,
credentials_file=None,
scopes=None,
ssl_credentials=mock_ssl_channel_creds,
quota_project_id=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
# Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls
# is used.
with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()):
with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred:
transport_class(
credentials=cred,
client_cert_source_for_mtls=client_cert_source_callback,
)
expected_cert, expected_key = client_cert_source_callback()
mock_ssl_cred.assert_called_once_with(
certificate_chain=expected_cert, private_key=expected_key
)
def test_environments_host_no_port():
client = EnvironmentsClient(
credentials=ga_credentials.AnonymousCredentials(),
client_options=client_options.ClientOptions(
api_endpoint="dialogflow.googleapis.com"
),
)
assert client.transport._host == "dialogflow.googleapis.com:443"
def test_environments_host_with_port():
client = EnvironmentsClient(
credentials=ga_credentials.AnonymousCredentials(),
client_options=client_options.ClientOptions(
api_endpoint="dialogflow.googleapis.com:8000"
),
)
assert client.transport._host == "dialogflow.googleapis.com:8000"
def test_environments_grpc_transport_channel():
channel = grpc.secure_channel("http://localhost/", grpc.local_channel_credentials())
# Check that channel is used if provided.
transport = transports.EnvironmentsGrpcTransport(
host="squid.clam.whelk", channel=channel,
)
assert transport.grpc_channel == channel
assert transport._host == "squid.clam.whelk:443"
    assert transport._ssl_channel_credentials is None
def test_environments_grpc_asyncio_transport_channel():
channel = aio.secure_channel("http://localhost/", grpc.local_channel_credentials())
# Check that channel is used if provided.
transport = transports.EnvironmentsGrpcAsyncIOTransport(
host="squid.clam.whelk", channel=channel,
)
assert transport.grpc_channel == channel
assert transport._host == "squid.clam.whelk:443"
    assert transport._ssl_channel_credentials is None
# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
# removed from grpc/grpc_asyncio transport constructor.
@pytest.mark.parametrize(
"transport_class",
[transports.EnvironmentsGrpcTransport, transports.EnvironmentsGrpcAsyncIOTransport],
)
def test_environments_transport_channel_mtls_with_client_cert_source(transport_class):
with mock.patch(
"grpc.ssl_channel_credentials", autospec=True
) as grpc_ssl_channel_cred:
with mock.patch.object(
transport_class, "create_channel"
) as grpc_create_channel:
mock_ssl_cred = mock.Mock()
grpc_ssl_channel_cred.return_value = mock_ssl_cred
mock_grpc_channel = mock.Mock()
grpc_create_channel.return_value = mock_grpc_channel
cred = ga_credentials.AnonymousCredentials()
with pytest.warns(DeprecationWarning):
with mock.patch.object(google.auth, "default") as adc:
adc.return_value = (cred, None)
transport = transport_class(
host="squid.clam.whelk",
api_mtls_endpoint="mtls.squid.clam.whelk",
client_cert_source=client_cert_source_callback,
)
adc.assert_called_once()
grpc_ssl_channel_cred.assert_called_once_with(
certificate_chain=b"cert bytes", private_key=b"key bytes"
)
grpc_create_channel.assert_called_once_with(
"mtls.squid.clam.whelk:443",
credentials=cred,
credentials_file=None,
scopes=None,
ssl_credentials=mock_ssl_cred,
quota_project_id=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
assert transport.grpc_channel == mock_grpc_channel
assert transport._ssl_channel_credentials == mock_ssl_cred
# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
# removed from grpc/grpc_asyncio transport constructor.
@pytest.mark.parametrize(
"transport_class",
[transports.EnvironmentsGrpcTransport, transports.EnvironmentsGrpcAsyncIOTransport],
)
def test_environments_transport_channel_mtls_with_adc(transport_class):
mock_ssl_cred = mock.Mock()
with mock.patch.multiple(
"google.auth.transport.grpc.SslCredentials",
__init__=mock.Mock(return_value=None),
ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred),
):
with mock.patch.object(
transport_class, "create_channel"
) as grpc_create_channel:
mock_grpc_channel = mock.Mock()
grpc_create_channel.return_value = mock_grpc_channel
mock_cred = mock.Mock()
with pytest.warns(DeprecationWarning):
transport = transport_class(
host="squid.clam.whelk",
credentials=mock_cred,
api_mtls_endpoint="mtls.squid.clam.whelk",
client_cert_source=None,
)
grpc_create_channel.assert_called_once_with(
"mtls.squid.clam.whelk:443",
credentials=mock_cred,
credentials_file=None,
scopes=None,
ssl_credentials=mock_ssl_cred,
quota_project_id=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
assert transport.grpc_channel == mock_grpc_channel
def test_environment_path():
project = "squid"
environment = "clam"
expected = "projects/{project}/agent/environments/{environment}".format(
project=project, environment=environment,
)
actual = EnvironmentsClient.environment_path(project, environment)
assert expected == actual
def test_parse_environment_path():
expected = {
"project": "whelk",
"environment": "octopus",
}
path = EnvironmentsClient.environment_path(**expected)
# Check that the path construction is reversible.
actual = EnvironmentsClient.parse_environment_path(path)
assert expected == actual
def test_fulfillment_path():
project = "oyster"
expected = "projects/{project}/agent/fulfillment".format(project=project,)
actual = EnvironmentsClient.fulfillment_path(project)
assert expected == actual
def test_parse_fulfillment_path():
expected = {
"project": "nudibranch",
}
path = EnvironmentsClient.fulfillment_path(**expected)
# Check that the path construction is reversible.
actual = EnvironmentsClient.parse_fulfillment_path(path)
assert expected == actual
def test_version_path():
project = "cuttlefish"
version = "mussel"
expected = "projects/{project}/agent/versions/{version}".format(
project=project, version=version,
)
actual = EnvironmentsClient.version_path(project, version)
assert expected == actual
def test_parse_version_path():
expected = {
"project": "winkle",
"version": "nautilus",
}
path = EnvironmentsClient.version_path(**expected)
# Check that the path construction is reversible.
actual = EnvironmentsClient.parse_version_path(path)
assert expected == actual
def test_common_billing_account_path():
billing_account = "scallop"
expected = "billingAccounts/{billing_account}".format(
billing_account=billing_account,
)
actual = EnvironmentsClient.common_billing_account_path(billing_account)
assert expected == actual
def test_parse_common_billing_account_path():
expected = {
"billing_account": "abalone",
}
path = EnvironmentsClient.common_billing_account_path(**expected)
# Check that the path construction is reversible.
actual = EnvironmentsClient.parse_common_billing_account_path(path)
assert expected == actual
def test_common_folder_path():
folder = "squid"
expected = "folders/{folder}".format(folder=folder,)
actual = EnvironmentsClient.common_folder_path(folder)
assert expected == actual
def test_parse_common_folder_path():
expected = {
"folder": "clam",
}
path = EnvironmentsClient.common_folder_path(**expected)
# Check that the path construction is reversible.
actual = EnvironmentsClient.parse_common_folder_path(path)
assert expected == actual
def test_common_organization_path():
organization = "whelk"
expected = "organizations/{organization}".format(organization=organization,)
actual = EnvironmentsClient.common_organization_path(organization)
assert expected == actual
def test_parse_common_organization_path():
expected = {
"organization": "octopus",
}
path = EnvironmentsClient.common_organization_path(**expected)
# Check that the path construction is reversible.
actual = EnvironmentsClient.parse_common_organization_path(path)
assert expected == actual
def test_common_project_path():
project = "oyster"
expected = "projects/{project}".format(project=project,)
actual = EnvironmentsClient.common_project_path(project)
assert expected == actual
def test_parse_common_project_path():
expected = {
"project": "nudibranch",
}
path = EnvironmentsClient.common_project_path(**expected)
# Check that the path construction is reversible.
actual = EnvironmentsClient.parse_common_project_path(path)
assert expected == actual
def test_common_location_path():
project = "cuttlefish"
location = "mussel"
expected = "projects/{project}/locations/{location}".format(
project=project, location=location,
)
actual = EnvironmentsClient.common_location_path(project, location)
assert expected == actual
def test_parse_common_location_path():
expected = {
"project": "winkle",
"location": "nautilus",
}
path = EnvironmentsClient.common_location_path(**expected)
# Check that the path construction is reversible.
actual = EnvironmentsClient.parse_common_location_path(path)
assert expected == actual
def test_client_with_default_client_info():
client_info = gapic_v1.client_info.ClientInfo()
with mock.patch.object(
transports.EnvironmentsTransport, "_prep_wrapped_messages"
) as prep:
client = EnvironmentsClient(
credentials=ga_credentials.AnonymousCredentials(), client_info=client_info,
)
prep.assert_called_once_with(client_info)
with mock.patch.object(
transports.EnvironmentsTransport, "_prep_wrapped_messages"
) as prep:
transport_class = EnvironmentsClient.get_transport_class()
transport = transport_class(
credentials=ga_credentials.AnonymousCredentials(), client_info=client_info,
)
prep.assert_called_once_with(client_info)
@pytest.mark.asyncio
async def test_transport_close_async():
client = EnvironmentsAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc_asyncio",
)
with mock.patch.object(
type(getattr(client.transport, "grpc_channel")), "close"
) as close:
async with client:
close.assert_not_called()
close.assert_called_once()
def test_transport_close():
transports = {
"grpc": "_grpc_channel",
}
for transport, close_name in transports.items():
client = EnvironmentsClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport
)
with mock.patch.object(
type(getattr(client.transport, close_name)), "close"
) as close:
with client:
close.assert_not_called()
close.assert_called_once()
def test_client_ctx():
transports = [
"grpc",
]
for transport in transports:
client = EnvironmentsClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport
)
# Test client calls underlying transport.
with mock.patch.object(type(client.transport), "close") as close:
close.assert_not_called()
with client:
pass
close.assert_called()
@pytest.mark.parametrize(
"client_class,transport_class",
[
(EnvironmentsClient, transports.EnvironmentsGrpcTransport),
(EnvironmentsAsyncClient, transports.EnvironmentsGrpcAsyncIOTransport),
],
)
def test_api_key_credentials(client_class, transport_class):
with mock.patch.object(
google.auth._default, "get_api_key_credentials", create=True
) as get_api_key_credentials:
mock_cred = mock.Mock()
get_api_key_credentials.return_value = mock_cred
options = client_options.ClientOptions()
options.api_key = "api_key"
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options)
patched.assert_called_once_with(
credentials=mock_cred,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
|
googleapis/python-dialogflow
|
tests/unit/gapic/dialogflow_v2beta1/test_environments.py
|
Python
|
apache-2.0
| 97,936
|
#!/usr/bin/env python3
"""
For a given vcf file, compare each variant and its reference sequence to
a set of motifs to determine if any are significantly altered.
Usage:
python motifs.py -i <input.vcf> -r <reference.fa> -m <motif.txt>
-o <output.vcf> [OPTIONS]
Args:
-i (required) <input.vcf>: Name of sorted variant file to process.
-r (required) <reference.fa>: Name of reference sequence
file to get surrounding bases from variant.
-m (required) <motif.txt>: Tab-delimited key file containing a frequency
matrix with each row corresponding to a base and each column
corresponding to a position (JASPAR format).
-o (required) <output.vcf>: Name of output file to be created.
-pc (optional) <0.1>: Pseudocounts value to be added to all positions of
the motif frequency matrix before calculating the probability matrix.
-th (optional) <0>: Motifs are considered a match if they score above a
given threshold. This is the default threshold (used if no threshold is
specified by motif file).
-ws (optional) <50>: Wing size in bp to search for weak homotypic
matches, co-binding tfs, and GC content.
-bp (optional) <baselines.txt>: A file containing a single line with tab
    delimited values for baseline probabilities for A, C, G, T (in order).
Probabilities should all be positive and should sum to 1. If none is
provided then all are assumed to be equally likely.
-fm (optional flag): If -fm (filter with motifs) is included, variants
that do not match any motif will not be included in the output file.
    -ci (optional) <ChIP.bed>: A sorted bed-like file containing tab-delimited
columns of the form:
chr start end TF1;TF2;TF3...
-co (optional) <chip_out.bed> = Name of output bed file to be created.
A new column will be added with motifs that computationally match each
peak.
-fp (optional flag): If -fp (filter with peaks) is included, ChIP peaks
that do not match any motif will not be included in the output (-co).
-sk (optional flag): Use if sorted by karyotype, do not if sorted numerically.
Input vcf file and chip bed file must be sorted the same way
Are input files chr sorted lexicographically (karyotype) or numerically?
Present: lexicographically: i.e. chr1 < chr11 < chr2 < chrX < chrY
Absent: numerically: i.e. chr1 < chr2 < chr11 < chrX < chrY
-fc (optional): filter_chip YYY; can not also be called with fn
-fn (optional): filter_novel YYY; can not also be called with fc
-ht (optional): If included then runs homotypic matches. Absent then not run.
-mv (optional) <ws>: merge adjacent variants for same chromosome and sample
that occur within passed integer distance. Use -1 to set = wing size.
-rf (optional): (boolean) if true then force variant reference bases
to match FASTA reference
"""
from __future__ import print_function # so Ninja IDE will stop complaining & show symbols
import sys
import argparse
#import pdb # necessary for debugger; use pdb.set_trace()
import motif
import sequence
import vcf
from utils import timeString
from pyfaidx import Fasta
parser = argparse.ArgumentParser(usage=__doc__)
# TODO - Remove use of this, will only create headaches later.
class Options_list:
def __init__(self):
# Should lines in the vcf output file be excluded
# if they don't match a motif?
# -fm tag sets this to True
self.filter_vcf_motif = False
# Should lines in the vcf output file be excluded
# if they don't match a ChIP peak?
# -fc tag sets this to True
self.filter_vcf_chip = False
# Should lines in the vcf output file be excluded
# if they do match a ChIP peak? Allows for printing of only
# potentially novel variants
# -fn tag sets this to True
self.filter_vcf_no = False
# Is a ChIP peak bed file present?
# -ci <chip_file.bed> will make this True
self.chip_present = False
def multivar_computation_set(multi_var, default_distance):
"""
    Given an integer argument for the multi-variant search distance and the
    default distance, return a list indicating whether to perform the
    multi-variant search and the search distance to use.
    If multi_var == -1 --> (True, default_distance)
    If multi_var < 1 (or None) and not -1 --> (False, 0)
    If multi_var > 0 --> (True, multi_var)
Args:
multi_var = integer multi_var search distance
also used to determine if should search or not
default_distance = integer default wing size
Returns: A 2 element list (multivar_flag, multivar_distance)
where multivar_flag (boolean) and multivar_distance (integer)
"""
if (multi_var is None):
multivar_computation_flag = False
multivar_distance = 0
else:
if (int(multi_var) == -1):
multivar_distance = default_distance
multivar_computation_flag = True
elif int(multi_var) < 1:
multivar_distance = 0
multivar_computation_flag = False
else:
multivar_distance = int(multi_var)
multivar_computation_flag = True
return ([multivar_computation_flag, multivar_distance])
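# Illustrative mapping sketch for the helper above (assumed default_distance of 50):
#   multivar_computation_set(None, 50) -> [False, 0]   # no merge requested
#   multivar_computation_set(-1, 50)   -> [True, 50]   # -1 means "use wing size"
#   multivar_computation_set(0, 50)    -> [False, 0]   # non-positive disables merge
#   multivar_computation_set(25, 50)   -> [True, 25]   # explicit merge distance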
def update_vcf(line, matches, output_fileHandle, options):
"""
Updates the output file with the output in the correct variant call format
Args:
line = original vcf line (input file line)
matches = list of MotifMatch objects
output_fileHandle = output vcf file handle
options = options list
Returns: Nothing (updates output_fileHandle instead of returning)
"""
# First 8 columns should always be:
# CHROM POS ID REF ALT QUAL FILTER INFO
line = line.strip()
columns = line.split('\t')
# ID=MOTIFN,Type=String,Description="Matched motif names"
# ID=MOTIFV,Type=Float,Description="Motif variant match scores"
# ID=MOTIFR,Type=Float,Description="Motif reference match scores"
# ID=MOTIFC,Type=Character,Description="Motif validated by ChIP (Y/N)"
names = ""
varscores = ""
refscores = ""
chips = ""
varht = ""
refht = ""
vargc = ""
refgc = ""
to_be_printed = 0
for match in matches:
# First, check if the match needs to be filtered
if options.filter_vcf_chip and not match.chip_match:
continue
elif options.filter_vcf_no and match.chip_match:
continue
else:
to_be_printed += 1
if chips != "":
names += ","
varscores += ","
refscores += ","
chips += ","
if vargc != "":
varht += ","
refht += ","
vargc += ","
refgc += ","
names += match.name
varscores += str(round(match.var_score, 4))
refscores += str(round(match.ref_score, 4))
# Sequence environment data
if match.var_gc is not None:
# use ref version because homotypic matches against wings are equal
# QQQ: question why bother exporting twice?!?
varht += sublist_str(match.ref_ht, 4)
refht += sublist_str(match.ref_ht, 4)
vargc += str(round(match.var_gc, 4))
refgc += str(round(match.ref_gc, 4))
if match.chip_match:
chips += "Y"
else:
chips += "N"
# If there are no matches, print the line unchanged or filter it out (return
# without printing)
if to_be_printed == 0:
if (not options.filter_vcf_motif and not options.filter_vcf_chip
and not options.filter_vcf_no):
print(line, file=output_fileHandle)
return
outline = ""
idx = 0
for col in columns:
if outline != "":
outline += "\t"
if idx == 7:
outline = update_vcf_motifs_info(outline, names, varscores, refscores,
varht, refht, vargc, refgc, options, chips, col)
else:
outline += col
idx += 1
if idx < 7:
print("**Error** VCF formatted incorrectly. Less than 8 columns found:\n" + line)
# Add output at the end anyway
outline += "\t"
outline = update_vcf_motifs_info(outline, names, varscores, refscores,
varht, refht, vargc, refgc, options, chips, col)
print(outline, file=output_fileHandle)
return
def update_vcf_motifs_info(outline, names, varscores, refscores, varht, refht,
vargc, refgc, options, chips, col):
"""
Used in conjunction with update_vcf() to add motifs information to the line.
Reduces code maintenance by putting update code in 1 place
Args:
see update_vcf for definition
names set of match strings; matching MOTIF
*scores string of 4 digit decimal, set of scores as string
*ht
*gc string of 4 digit decimal; gc count
options options list
chips Y or N
col current column string
Returns:
Modifies passed outline string and returns modified copy
"""
outline += "MOTIFN=" + names + ";MOTIFV=" + varscores + ";MOTIFR=" + refscores
if refgc != "":
outline += ";MOTIFVH=" + varht + ";MOTIFRH=" + refht
outline += ";MOTIFVG=" + vargc + ";MOTIFRG=" + refgc
if (options.chip_present):
outline += ";MOTIFC=" + chips
if col != '.':
outline += ";" + col
return outline
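# Illustrative sketch of one possible INFO prefix produced above (values are made up):
#   MOTIFN=CTCF;MOTIFV=12.3456;MOTIFR=10.1234;MOTIFC=Y;DP=30
# where DP=30 stands in for the variant's original INFO column (col).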
def sublist_str(sublist, sig_figs):
# Converts a float list within an info field list into a string
output = ""
for float_item in sublist:
if output != "":
output += "/"
# debug output += str(float_item)
output += str(round(float_item, sig_figs))
if output != "":
return "(" + output + ")"
return ""
def chr_less(chr_left, chr_right, sorted_lex):
"""
    Returns True if the left chromosome sorts strictly before the right.
    Args:
        chr_left = (string) the left chromosome being compared
chr_right = (string) the right chromosome being compared
sorted_lex = true if file sorted lexicographically (by string values)
Returns: Whether or not left chromosome comes before the right (boolean).
"""
# True if the file is sorted lexicographically
# i.e. chr1 < chr11 < chr2 < chrX < chrY
if sorted_lex:
return (chr_left < chr_right)
# False if the file is sorted numerically
# i.e. chr1 < chr2 < chr11 < chrX < chrY
else:
left = chr_left[3:] # assumes chromosome name is chr<other bits>
right = chr_right[3:]
try:
l_num = int(left)
try:
r_num = int(right)
return l_num < r_num
# Right chromosome is a string (chrX, chrY, chrTest, etc)
            except ValueError:
# Left is a number and right is a string
# Numbers are sorted before strings (chr1 < chrX)
return True
        # Left chromosome suffix is a string if we get to this ValueError exception
        except ValueError:
            try:
                r_num = int(right)
                # Left is a string and right is a number
                # Numbers are sorted before strings (chrX !< chr1)
                return False
# Both are strings, sort lexicographically
except ValueError:
return chr_left < chr_right
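# Illustrative comparisons for chr_less (chromosome names are examples only):
#   chr_less("chr2", "chr11", sorted_lex=True)  -> False  # lexicographic: "chr11" < "chr2"
#   chr_less("chr2", "chr11", sorted_lex=False) -> True   # numeric: 2 < 11
#   chr_less("chr3", "chrX", sorted_lex=False)  -> True   # numbers sort before letters
#   chr_less("chrX", "chr5", sorted_lex=False)  -> False  # letters sort after numbers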
def match_peaks(chrom, pos, peaks, chip_fh, matches, output_fileHandle, sorted_lex, filter_bed):
"""
Returns an array of peaks that match the current chromosome and position.
Updates the output_fileHandle if not None.
Args:
p_chr = (string) previous chromosome. Needed to know if a new chromosome
is being entered.
chr = (string) chromosome. Chromosomes 1-22, X, and Y are expected.
pos = current position of the variant.
peaks = buffer of peaks. They should all be upstream of or overlapping
variant at chr and pos. Peaks is an array of tuples of the form:
(chr, start pos, end pos, array of ChIP tfs, motif match array)
tfs = transcription factors
chip_fh = input ChIP bed file handle to be read from.
This must have the start positions in order
within each chromosome and must be grouped by chromosome.
matches = list of motif matches as tuples of the form:
(name, variant score, reference score, ChIP match)
output_fileHandle = ChIP output bed file to be printed to.
        sorted_lex = True if sorted lexicographically (by character strings)
        filter_bed = if True, exclude peaks with no matched motif(s) in the ChIP output
Returns: Peak buffer tuple of the form ( overlapping peak array, next peak )
Array of peaks that overlap the current chromosome and position
Next peak (because you must over-read to make sure you don't miss any)
match_peaks also updates output_fileHandle.
"""
if chip_fh is None:
return (peaks, matches)
# Get rid of peaks that are upstream (left of) of the current chromosome
idx = 0
while idx < len(peaks) and chr_less(peaks[idx][0], chrom, sorted_lex):
# If the chromosome doesn't match, output the line and keep searching
print_peak(peaks[idx], output_fileHandle, filter_bed)
idx += 1
# peak at idx will be included and the rest will be removed
peaks = peaks[idx:]
# If previous peaks were not from correct chromosome, get there
if (len(peaks) == 0):
new_peak = get_peak_at(chrom, pos, chip_fh, output_fileHandle, sorted_lex, filter_bed)
# If end of file is reached
if (new_peak is None):
return ([], matches)
else:
peaks.append(new_peak)
idx = 0
# Read through bed file
while True:
# If more peaks are needed
if idx == len(peaks):
n_peak = get_next_peak(chip_fh)
# If end of bed file is reached, then just return current list
if n_peak is None:
return (peaks, matches)
peaks.append(n_peak)
# Current peak (chromosome, start pos, end pos,
# transcription factor array, matrix match array)
# when get_next_peak defines peak, matrix match array is undefined
(pchr, psta, pend, ptfs, pmms) = peaks[idx]
# If next chromosome is reached in bed file [QQQ: should this occur before append?]
if pchr != chrom:
break
if psta <= pos:
if pend >= pos:
motif_idx = 0
for motif_idx in range(len(matches)):
pmms.append(matches[motif_idx]) # defines pmms for peaks
for trans_factor in ptfs:
# If the transcription factor (chip peak) name is the same as
# the matched motif name, note that there is a chip match
if trans_factor == matches[motif_idx].name:
# Motif match is verified by ChIP data
matches[motif_idx].chip_match = True
# Save with new value for pmms
peaks[idx] = (pchr, psta, pend, ptfs, pmms)
# Otherwise both are before pos, so remove that peak and continue
# This should only ever happen when idx is 0... but still
else:
print_peak(peaks[idx], output_fileHandle, filter_bed)
peaks = peaks[0:idx] + peaks[idx + 1:]
idx -= 1
# Otherwise peak start is after the variant position, so stop
else:
break
idx += 1
return (peaks, matches)
def get_next_peak(opened_file):
"""
Reads in the next line of the bed and returns the next peak's information
Args: opened_file = an already open input .bed file handle
Returns: a tuple with the following information (in order) or None
chromosome number as a string e.g. "chr1",
position of the start of the peak
position of the end of the peak
array list containing transcription factors which bind in that area
array list containing motif matches (empty)
"""
# print("Entering get_next_peak")
# sys.stdout.flush()
line = opened_file.readline().strip()
# print("Got: <"+line+">")
sys.stdout.flush()
# input file is empty
if line == "":
return None
line_list = line.split('\t')
chrom = line_list[0]
start = int(line_list[1])
end = int(line_list[2])
tf_array = line_list[3].split(';')
return (chrom, start, end, tf_array, [])
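# Illustrative parse sketch: a bed line such as "chr1\t100\t200\tCTCF;MYC"
# (hypothetical peak) would be returned as ("chr1", 100, 200, ["CTCF", "MYC"], []).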
def get_peak_at(chrom, pos, chip_fh, out_fh, sorted_lex, filter_bed):
"""
Get the first peak where the end point of the peak is past the input pos.
    Requires that chip_fh is sorted the same way as the vcf input file.
    This function will print all intermediate peaks to out_fh if it is not None.
Args:
chr = the new chromosome to get to
pos = the new position to get to
chip_fh = an already open input .bed file handle
out_fh = an already open output file (to be printed to). May be None.
        sorted_lex = True if sorted lexicographically (by character strings)
        filter_bed = if True, exclude peaks with no matched motif(s) in the ChIP output
Returns:
The first peak from chip_fh that is the same chromosome as chr
where the end point of the peak is past the input position (pos).
If the end of file is reached, None is returned.
"""
# print("*Entering get_peak_at")
# sys.stdout.flush()
# Skip ahead until the correct chromosome
peak = get_next_peak(chip_fh)
while peak is not None:
(p_chr, p_sta, p_end, p_tfa, p_mm) = peak
# If the chromosome is correct, skip ahead until the correct position
if p_chr == chrom:
# We have passed the position at the chromosome
if p_end >= pos:
# print("get_peak_at returns: "+p_chr+":"+str(p_sta)+"-"+str(p_end))
# sys.stdout.flush()
return peak
else:
print_peak(peak, out_fh, filter_bed)
# If the chromosome is too low and there is an outfile, print
elif chr_less(p_chr, chrom, sorted_lex):
print_peak(peak, out_fh, filter_bed)
# If we have passed the chromosome
else:
return peak
peak = get_next_peak(chip_fh)
# print("*get_peak_at returns None")
# sys.stdout.flush()
return None
def print_peak(peak, fileHandle, filter_bed):
"""
    Prints the peak to the given file handle (or returns immediately if no file handle is given)
Args:
peak = ChIP peak of the form:
(chr, start, stop, chip tf array, motif match tf array)
chip tf array is an array of tf names
motif match tf array is an array of (motif name, vscore, rscore, strand)
fileHandle = the file to print to. This file should already be opened.
If this file is None, nothing will happen.
filter_bed = if true exclude lines in chip (bed) output file if they
don't match a motif
Returns: Nothing
"""
if fileHandle is None:
return
(chrom, start, end, c_array, mm_array) = peak
# If there are no motif matches and filtering is on, do not print this peak
if filter_bed and len(mm_array) == 0:
return
line = chrom + '\t' + str(start) + '\t' + str(end) + '\t'
# Generate string of chip transcription factors from the peak
chip_string = ""
for tf_name in c_array:
if chip_string != "":
chip_string += ";"
chip_string += tf_name
# Generate string of motif matches that overlap the peak
motif_string = ""
for match in mm_array:
if motif_string != "":
motif_string += ";"
motif_string += match.name + "," + str(round(match.var_score, 4)) + ","
motif_string += str(round(match.ref_score, 4))
line += chip_string + "\t" + motif_string
print(line, file=fileHandle)
return
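# Illustrative output sketch for print_peak (hypothetical peak and match values):
#   a peak ("chr1", 100, 200, ["CTCF", "MYC"], [one CTCF MotifMatch]) prints roughly
#   "chr1\t100\t200\tCTCF;MYC\tCTCF,12.3456,10.1234"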
"""
---- START OF MAIN ----
Requires that vcf file have variants sorted by position within chromosomes
"""
def main(file_input, file_output, file_reference_genome, file_motif, file_baseline_prob,
pc, th, ws, multivar_distance, run_homotypic, force_ref_match,
file_chip, file_output_chip, filter_co, sorted_lex,
filter_chip, filter_motif, filter_novel):
"""
Args:
Unless specified as required, use None to not use.
file_input (-i, required): Name of sorted variant file (*.vcf) to process.
file_output (-o, required): Name of output file (*.vcf) to be created.
file_reference_genome (-r, required): Name of reference sequence file (*.fa)
to get surrounding bases from variant.
file_motif (-m, required): Name of Tab-delimited key file containing a
frequency matrix with each row corresponding to a base and each column
corresponding to a position (JASPAR format). File may or may not
have thresholds identified in the transcription factor (TF) name line.
Grammar Note: motif and transcription factor are used interchangeably.
#
# -- arguments that change scoring of TF against variants in vcf -- #
#
file_baseline_prob (-bp): Name of file containing a single line with tab
    delimited values for baseline probabilities for A, C, G, T (in order).
Probabilities should all be positive and should sum to 1. If None,
all are assumed to be equally likely.
pc (-pc) <0.1>: Float pseudocounts value to be added to all positions of
the motif frequency matrix before calculating the probability matrix.
th (-th) <0>: Motifs are considered a match if they score above a
given threshold. Set the default threshold used if no threshold is
specified by motif file.
ws (-ws) <50>: Integer wing size around variant sequence to search for
weak homotypic matches, co-binding TFs, and GC content. Forced in
code to max of longest TF and ws.
Note: co-binding TF scores only use wings up to the length of the TF
multivar_distance (-mv) <ws>: Integer distance to search for
adjacent variants for the same chromosome and sample to merge.
Expects a positive value. Use -1 to set = wing size.
None, 0 and < -1 mean multi-variant merge is not performed.
run_homotypic (-ht): Boolean, if True runs homotypic matches,
None or False does not.
force_ref_match (-rf): Boolean, if True then force variant reference bases
to match FASTA reference. None or False does not.
#
# -- chip specific arguments -- #
#
file_chip (-ci): Name of a sorted bed-like file (*.bed) containing
    tab-delimited columns of the form: chr start end TF1;TF2;TF3...
Ignored if filter_co not True. No output chip file unless
file_output_chip is defined
file_output_chip (-co): Name of output bed file (*.bed) to be created.
A new column will be added with motifs that computationally match each
peak.
Ignored if filter_co not True or file_chip not defined.
filter_co (-fp): If filter with peaks is True, ChIP peaks that do not
match any motif will not be included in the output chip file = file_output_chip
sorted_lex (-sk): True if sorted by karyotype (means sorted lexicographically)
Are input files chr sorted lexicographically or numerically?
Input vcf file and chip bed file must be sorted the same way
True: lexicographically: i.e. chr1 < chr11 < chr2 < chrX < chrY
False: numerically: i.e. chr1 < chr2 < chr11 < chrX < chrY
#
# -- arguments that filter output vcf -- #
#
filter_chip (-fc): Boolean, if True filter output vcf file for ChIP peak overlap
Can not also be called with filter_novel.
Translated to options.filter_vcf_chip.
filter_motif (-fm): If -fm (filter with motifs) is included, variants
that do not match any motif will not be included in the vcf output file.
Translated to options.filter_vcf_motif
filter_novel (-fn): If True, exclude output to vcf output file
if they do match a ChIP peak. Only print potentially novel variants
Can not also be called with filter_chip
Translated to options.filter_vcf_no
Returns:
output is via files defined in arguments
"""
print("Run started at:" + timeString())
# --------------------------------------------------------------------------------------
# handling arguments --> XXX:change all strings to opt_args print string
fileHan_chip = None
fileHan_out_chip = None
filter_bed = False # true if file_chip and filter_co
if pc is not None:
pc = float(pc)
else:
pc = 0.1
# Output so user can double check options
print(("Input file: {}\nReference file: {}\nMotif file: {}\n" +
"Output file: {}\nOptional arguments:\n Pseudocounts value = {}").
format(
file_input, file_reference_genome, file_motif,
file_output, pc
))
# Optional arguments (set and prepare string to print)
opt_args = ""
if th is not None:
th = float(th)
else:
th = 0.0
opt_args += " Defined match threshold = " + str(th) + "\n"
if ws is not None:
ws = int(ws)
else:
ws = 50
opt_args += " Defined wing size = " + str(ws) + "\n"
if run_homotypic is None:
run_homotypic = False
if run_homotypic:
opt_args += " Homotypic matches code will be run.\n"
else:
opt_args += " Not running homotypic matches code.\n"
if force_ref_match is None:
force_ref_match = False
if force_ref_match:
opt_args += "User requested variant reference string must match genome reference string\n"
if filter_motif is None:
filter_motif = False
if filter_co is None:
filter_co = False
if filter_chip is None:
filter_chip = False
if filter_novel is None:
filter_novel = False
[multivar_computation_flag, multivar_distance] = multivar_computation_set(multivar_distance, ws)
# Options list. Easier to pass in methods or use in code updates.
options = Options_list()
#options.filter_vcf_motif # controlled by filter_motif
#options.filter_vcf_chip # controlled by filter_chip
#options.filter_vcf_no # controlled by filter_novel
#options.chip_present # used by vcf set by file_chip
options.filter_vcf_motif = filter_motif
if (file_chip is not None):
options.chip_present = True
# -- Process chip related options
# Should lines in the chip (bed) output file be excluded
# if they don't match a motif?
# -fp sets this to True
filter_bed = filter_co
options.filter_vcf_chip = filter_chip
options.filter_vcf_no = filter_novel
opt_args += " ChIP file: " + file_chip + "\n"
fileHan_chip = open(file_chip)
if (file_output_chip is not None):
opt_args += " ChIP output file: " + file_output_chip + "\n"
fileHan_out_chip = open(file_output_chip, "w")
if (filter_bed):
opt_args += " Filter output ChIP bed for motif matches? Yes\n"
if (options.filter_vcf_chip):
opt_args += " Filter output vcf for ChIP peak overlap? Yes\n"
if (options.filter_vcf_no):
opt_args += " Filter output vcf for no ChIP peak overlap? Yes\n"
if (options.filter_vcf_chip and options.filter_vcf_no):
# YYY: default to one? or should one override the other?
opt_args += "Err: Cannot have -fn and -fc (two prev options)."
opt_args += " Both will be ignored.\n"
options.filter_vcf_chip = False
options.filter_vcf_no = False
elif (file_output_chip is not None):
opt_args += "No ChIP file given, so no ChIP output file will be created\n"
if (file_baseline_prob is not None):
opt_args += " Baseline probabilities file: " + file_baseline_prob + "\n"
if (options.filter_vcf_motif):
opt_args += " Filter output vcf for motif matches? Yes\n"
if (not sorted_lex):
opt_args += " Input vcf and Input ChIP bed are sorted by karyotype\n"
print(opt_args) # debug outputs
# --------------------------------------------------------------------------------------
if (file_baseline_prob is None):
bp = [0.25, 0.25, 0.25, 0.25]
else:
print("Reading in baseline probabilities (@" + timeString() + "):")
bp = motif.get_baseline_probs(file_baseline_prob)
print(bp)
sys.stdout.flush()
# Grab motif list from motif file.
print("Creating motif list from " + format(file_motif) +
", with pc=" + format(pc) +
", threshold=" + format(th) +
", bp=" + format(bp))
sys.stdout.flush()
#(motif_set, max_motif_l) = get_motifs(file_motif, pc, th, bp) # old call
motif_set = motif.get_motifs(file_motif, pc, th, bp)
# motif_set.max_positions replaces max_motif_l
# motif_set.motifs[index].positions more 'valid'
# debug print("Maximum motif length is "+str(motif_set.max_positions)+".")
# grab extended 'wing' sequence around the reference and variant
    # to accommodate motif size and homotypic matching
# 1. motif matching for individual variants
# trims to only uses individual motif length wing
# therefore wing_l can not be greater than
# the individual motif length being processed
# 2. homotypic match wants larger wing size
# to check for adjacent matches
# only grabs wing sequence from reference genome
# then joins to build full variant length
#
# QQQ: should wing_length be the number of elements to grab or the maximum index?
# ie motif_set.max_positions or motif_set.max_positions - 1
wing_l = max(motif_set.max_positions, ws)
print("Proceeding with wing_size: " + format(wing_l) + " vs defined ws(" + format(ws) + ")")
# Open output file.
fileHan_output = open(file_output, "w") # check unnecessary; import os; os.path.exists()
"""#debug that motifs were calculated correctly
for motif in motifs:
print(motif[0] + " " + motif[1]+" "+str(motif[2]))
sys.stdout.flush()
for i in range(4):
line = "[ "
for p in range ( len( motif[3][i] ) ):
line+= sf_str(motif[3][i][p],3) + " "
print(line+"]")
print("")
print("")"""
# Create index file from input fasta for quick searching
print("Creating index from reference sequence for efficient searching..." +
timeString())
print("This will be slow the first time (takes about 20 seconds on i7.)")
sys.stdout.flush()
fa_ind = Fasta(file_reference_genome) # XXX: need to check, if present skip
print("Completed fasta index @ " + timeString())
print("Importing variants(" + timeString() + "). This may take a while.\n")
sys.stdout.flush()
# Open and Read VCF file: populates SequenceArray, assumes set fits in memory
variant_set = sequence.SequenceArray()
with open(file_input) as vcf_handle:
line = vcf_handle.readline()
# building output vcf info line
info_needed = True
info = "##INFO=<ID=MOTIFN,Number=.,Type=String,Description="
info += "\"Matched motif names\">"
info += "\n##INFO=<ID=MOTIFV,Number=.,Type=Float,Description="
info += "\"Variant match scores\">"
info += "\n##INFO=<ID=MOTIFR,Number=.,Type=Float,Description="
info += "\"Reference match scores\">"
info += "\n##INFO=<ID=MOTIFVH,Number=.,Type=String,Description="
info += "\"Variant homotypic match scores\">"
info += "\n##INFO=<ID=MOTIFRH,Number=.,Type=String,Description="
info += "\"Reference homotypic match scores\">"
info += "\n##INFO=<ID=MOTIFVG,Number=.,Type=String,Description="
info += "\"Variant environment GC content\">"
info += "\n##INFO=<ID=MOTIFRG,Number=.,Type=String,Description="
info += "\"Reference environment GC content\">"
if (fileHan_chip is not None):
info += "\n##INFO=<ID=MOTIFC,Number=.,Type=Character,Description="
info += "\"Motif validated by ChIP (Y/N)\">"
print("\tfinished header read " + timeString())
# Skip info lines
while line.startswith("##"):
# Print new info lines at the top of the ##INFO section
if info_needed and line.startswith("##INFO"):
print(info, file=fileHan_output)
info_needed = False
print(line, file=fileHan_output, end="")
line = vcf_handle.readline()
# Create appropriate header. Presumably reads the first # line
header = line.strip()
print(header, file=fileHan_output)
# QQQ|YYY: push header line to global variable for samples, also modify
# info line as method above to all extension to calls and reduce file
# read?
# YYY: Not entirely sure what your question is asking here, but using a
# method to add the INFO fields metadata in the header would be fine
# and probably look a lot cleaner.
# CCC-WK: note has to do with whether it used again or not, best way to handle
# Process each variant; reads the variant lines in a loop
variant_set = vcf.read_vcf_variant_lines(vcf_handle, False)
print("Finished importing variants(" + timeString() + ")\n")
# XXX: QQQ: split multi-allele input vcf objects?
if multivar_computation_flag:
        # input is expected to be in lexicographic order already, but multivariant_list_build requires a sorted set, so sort to be safe
variant_set.sort() # must sort before multivariant_list_build
print("Start variant merge (" + timeString() + ").\n")
variant_set.multivariant_list_build(multivar_distance, fa_ind)
print("Finished variant merge(" + timeString() + ").\n")
# QQQ: super-variants for processing -- how to annotate in output?
print("Analyzing variants(" + timeString() + "). This may take a while.\n")
# ---------------------------------------------------------------------------
# Queue of ChIP peaks that overlap the current variant
# Will contain peaks as a tuple of the form
# (chr, start, stop, chip tf array, motif match tf array)
# chip tf array is an array of tf names
# motif match tf array is an array of (motif name, vscore, rscore, strand)
peak_buffer = []
chromosome = "" # processed chromosome
for index in range(variant_set.length()):
var_element = variant_set.seq[index] # XXX: WARNING: changes made to element not saved
# QQQ: separate variant and multivariant test sets?
# test for and only analyze if index not in variant_set.multivariant
# YYY: Seems this will likely be a necessity, at least to me. May be a way
# around it, but I can't think of a better option off the top of my head.
# CCC-WK: likely only still an issue with multi-allele and just knowing
# the item processed is a multi-variant (really long sequences?)
# Update previous, next, and current variables
if (chromosome != var_element.name):
chromosome = var_element.name
print("\tStart Analyzing new chromosome: " + chromosome + "(" + timeString() + ")")
# -- prepare the variant for processing
# 1. Get reference sequence surrounding the variant from the reference file
# get_surrounding_seq(chr, pos, len(ref_bases), wing_l, fa_ind)
# self.get_surround_seq(self, wing_length, fasta_object, force_ref_match)
#
print("\t\tpulling " + format(wing_l) +
" base wings from index " + format(fa_ind) +
" arg: " + format(force_ref_match))
var_element.get_surround_seq(wing_l, fa_ind, force_ref_match)
# 2. compute reverse complement
var_element.assign_rev_complement()
# 3. compute int version (faster to process as int)
var_element.assign_int_versions()
print("Calculating:\n" + var_element.print_str() + "at(" + timeString() + ")\n")
# -- building the list of MotifMatch objects
# 4. Calculate motif matches to variant sequence
ref_seq = var_element.return_full_ref_seq_int(wing_l)
var_seq = var_element.return_full_var_seq_int(wing_l)
print("\tref int: " + format(ref_seq) +
"\n\tvar int: " + format(var_seq))
print("start motif_match_int")
plusmatch = motif_set.motif_match_int(bp, ref_seq, var_seq, wing_l)
# plusmatch returns an list of MotifMatch objects
# 5. Add local environment data:
print("start process_local_env_int")
plusmatch = motif_set.process_local_env_int(bp, plusmatch, var_element,
None, var_seq, ref_seq, wing_l, run_homotypic)
# 6. Calculate motif matches to reverse complement
ref_seq_rc = var_element.return_full_ref_seq_reverse_complement_int(wing_l)
var_seq_rc = var_element.return_full_var_seq_reverse_complement_int(wing_l)
print("\tref rc int: " + format(ref_seq_rc) +
"\n\tvar rc int: " + format(var_seq_rc))
print("start motif_match_int reverse complement")
minusmatch = motif_set.motif_match_int(bp, ref_seq_rc, var_seq_rc, wing_l)
# 7. Add local environment data
print("start process_local_env_int reverse complement")
minusmatch = motif_set.process_local_env_int(bp, minusmatch, var_element,
None, var_seq_rc, ref_seq_rc, wing_l, run_homotypic)
# 8. Join two match sets
matches = plusmatch + minusmatch
print(("\t" + format(var_element.name) + ":" + format(var_element.position) +
" +match(" + format(len(plusmatch)) +
") + -match(" + format(len(minusmatch)) +
") = matches(" + format(len(matches)) + ")"))
# Update ChIP buffer for current position
# Update matches array with peak overlap data
# WARNING: XXX: match_peaks has not been heavily reviewed for validity
# note: fileHan_chip is only read by children of match_peaks
print("starting match_peaks")
(peak_buffer, matches) = match_peaks(var_element.name, var_element.position,
peak_buffer, fileHan_chip,
matches, fileHan_out_chip,
sorted_lex, filter_bed)
print(("matches number:" + format(len(matches))))
"""print("match_peaks returned "+str(len(peak_buffer))+" peak(s):")
for peak in peak_buffer:
(pchr, psta, pend, ptfs, pmms) = peak
print(pchr+":"+str(psta)+"-"+str(pend)+" tfs:"+str(len(ptfs))+
" mms:"+str(len(pmms)))
print()"""
# Co-binding transcription factors currently not implemented
cb_dict = None # QQQ: does what? Why was it here originally?
# Create the correct line in VCF format and print to file_output
update_vcf(var_element.vcf_line, matches, fileHan_output, options)
sys.stdout.flush()
# Print remaining peaks
for peak in peak_buffer:
print_peak(peak, fileHan_out_chip, filter_bed)
print("Finished analyzing variants(" + timeString() + ").\n")
# Close output files.
fileHan_output.close()
if fileHan_out_chip is not None:
fileHan_out_chip.close()
if fileHan_chip is not None:
fileHan_chip.close()
if __name__ == '__main__':
parser = argparse.ArgumentParser(usage=__doc__)
# Create arguments and options
parser.add_argument("-i", "--input", dest="input_file", required=True)
parser.add_argument("-r", "--ref", dest="file_reference", required=True)
parser.add_argument("-m", "--motif", dest="file_motif", required=True)
parser.add_argument("-o", "--output", dest="output_file", required=True)
parser.add_argument("-ci", "--chip", dest="chip_file",
required=False, default=None)
parser.add_argument("-co", "--chipout", dest="chip_out_file",
required=False, default=None)
parser.add_argument("-bp", "--baseline", dest="baseline_file",
required=False, default=None)
parser.add_argument("-th", "--threshold", dest="threshold",
required=False, default=0.0)
parser.add_argument("-pc", "--pseudocounts", dest="pseudocounts",
required=False, default=0.1)
parser.add_argument("-ws", "--wing_size", dest="wing_size",
required=False, default=50)
parser.add_argument("-fm", "--filter_motif", action="count", required=False)
parser.add_argument("-fc", "--filter_chip", action="count", required=False)
parser.add_argument("-fn", "--filter_novel", action="count", required=False)
parser.add_argument("-fp", "--filter_co", action="count", required=False)
parser.add_argument("-sk", "--kary_sort", action="count", required=False)
parser.add_argument("-mv", "--multi_variant", dest="multi_var",
required=False) # use -1 and correct to wing_size below
parser.add_argument("-rf", "--force_ref_match", action="count",
required=False)
parser.add_argument("-ht", "--homotypic_run", action="count",
required=False)
args = parser.parse_args()
# Easier to use argument variables (YYY: why track duplicate variable names?)
# YYY: Was probably just copy and pasted, tbh. No specific reason they were duped.
file_input = args.input_file
file_reference_genome = args.file_reference
file_motif = args.file_motif
file_output = args.output_file
file_baseline_prob = args.baseline_file # defaults to None
file_chip = args.chip_file
file_output_chip = args.chip_out_file
pc = float(args.pseudocounts)
ws = int(args.wing_size)
th = float(args.threshold)
multivar_distance = args.multi_var
if args.homotypic_run is None:
run_homotypic = False
else:
run_homotypic = True
if args.force_ref_match is None:
force_ref_match = False
else:
force_ref_match = True
# Are input files chr sorted lexicographically (by karyotype order)?
# Input vcf file and chip bed file must be sorted the same way
# -sk sets this to True (means sorted lexicographically)
sorted_lex = (args.kary_sort is None)
filter_co = (args.filter_co is not None) # sets filter_bed if file_chip
filter_motif = args.filter_motif
filter_chip = args.filter_chip
filter_novel = args.filter_novel
main(file_input, file_output, file_reference_genome, file_motif, file_baseline_prob,
pc, th, ws, multivar_distance, run_homotypic, force_ref_match,
file_chip, file_output_chip, filter_co, sorted_lex,
filter_chip, filter_motif, filter_novel
)
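# Illustrative invocation sketch (file names below are hypothetical, not taken
# from the project's documentation). Required arguments are -i/-r/-m/-o; the
# filter flags (-fm, -fc, -fn, -fp) and -sk are simple on/off switches handled
# via action="count" above.
#   python motifs.py -i variants.vcf -r hg19.fa -m motif_list.txt \
#       -o scored_variants.vcf -ci chip_peaks.bed -co matched_peaks.bed \
#       -th 0.0 -pc 0.1 -ws 50 -fm -fc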
|
j-andrews7/VENUSAR
|
venusar/motifs.py
|
Python
|
mit
| 44,017
|
"""Utilities to manipulate JSON objects.
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2010-2011 The IPython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING.txt, distributed as part of this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# stdlib
import math
import re
import sys
import types
from base64 import encodestring
from datetime import datetime
from IPython.utils import py3compat
from IPython.utils.encoding import DEFAULT_ENCODING
from IPython.utils import text
next_attr_name = '__next__' if py3compat.PY3 else 'next'
#-----------------------------------------------------------------------------
# Globals and constants
#-----------------------------------------------------------------------------
# timestamp formats
ISO8601="%Y-%m-%dT%H:%M:%S.%f"
ISO8601_PAT=re.compile(r"^\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}\.\d+$")
#-----------------------------------------------------------------------------
# Classes and functions
#-----------------------------------------------------------------------------
def rekey(dikt):
"""Rekey a dict that has been forced to use str keys where there should be
ints by json."""
for k in dikt.iterkeys():
if isinstance(k, basestring):
ik=fk=None
try:
ik = int(k)
except ValueError:
try:
fk = float(k)
except ValueError:
continue
if ik is not None:
nk = ik
else:
nk = fk
if nk in dikt:
raise KeyError("already have key %r"%nk)
dikt[nk] = dikt.pop(k)
return dikt
def extract_dates(obj):
"""extract ISO8601 dates from unpacked JSON"""
if isinstance(obj, dict):
obj = dict(obj) # don't clobber
for k,v in obj.iteritems():
obj[k] = extract_dates(v)
elif isinstance(obj, (list, tuple)):
obj = [ extract_dates(o) for o in obj ]
elif isinstance(obj, basestring):
if ISO8601_PAT.match(obj):
obj = datetime.strptime(obj, ISO8601)
return obj
def squash_dates(obj):
"""squash datetime objects into ISO8601 strings"""
if isinstance(obj, dict):
obj = dict(obj) # don't clobber
for k,v in obj.iteritems():
obj[k] = squash_dates(v)
elif isinstance(obj, (list, tuple)):
obj = [ squash_dates(o) for o in obj ]
elif isinstance(obj, datetime):
obj = obj.strftime(ISO8601)
return obj
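# Round-trip sketch (added for illustration, not part of the original module):
# squash_dates and extract_dates are inverses for values in ISO8601 form, e.g.
#   d = {'when': datetime(2013, 1, 2, 3, 4, 5, 6)}
#   squash_dates(d)                      -> {'when': '2013-01-02T03:04:05.000006'}
#   extract_dates(squash_dates(d)) == d  -> True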
def date_default(obj):
"""default function for packing datetime objects in JSON."""
if isinstance(obj, datetime):
return obj.strftime(ISO8601)
else:
raise TypeError("%r is not JSON serializable"%obj)
# constants for identifying png/jpeg data
PNG = b'\x89PNG\r\n\x1a\n'
JPEG = b'\xff\xd8'
def encode_images(format_dict):
"""b64-encodes images in a displaypub format dict
Perhaps this should be handled in json_clean itself?
Parameters
----------
format_dict : dict
A dictionary of display data keyed by mime-type
Returns
-------
format_dict : dict
A copy of the same dictionary,
but binary image data ('image/png' or 'image/jpeg')
is base64-encoded.
"""
encoded = format_dict.copy()
pngdata = format_dict.get('image/png')
if isinstance(pngdata, bytes) and pngdata[:8] == PNG:
encoded['image/png'] = encodestring(pngdata).decode('ascii')
jpegdata = format_dict.get('image/jpeg')
if isinstance(jpegdata, bytes) and jpegdata[:2] == JPEG:
encoded['image/jpeg'] = encodestring(jpegdata).decode('ascii')
return encoded
def json_clean(obj):
"""Clean an object to ensure it's safe to encode in JSON.
Atomic, immutable objects are returned unmodified. Sets and tuples are
converted to lists, lists are copied and dicts are also copied.
Note: dicts whose keys could cause collisions upon encoding (such as a dict
with both the number 1 and the string '1' as keys) will cause a ValueError
to be raised.
Parameters
----------
obj : any python object
Returns
-------
out : object
A version of the input which will not cause an encoding error when
encoded as JSON. Note that this function does not *encode* its inputs,
it simply sanitizes it so that there will be no encoding errors later.
Examples
--------
>>> json_clean(4)
4
>>> json_clean(range(10))
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
>>> sorted(json_clean(dict(x=1, y=2)).items())
[('x', 1), ('y', 2)]
>>> sorted(json_clean(dict(x=1, y=2, z=[1,2,3])).items())
[('x', 1), ('y', 2), ('z', [1, 2, 3])]
>>> json_clean(True)
True
"""
# types that are 'atomic' and ok in json as-is. bool doesn't need to be
# listed explicitly because bools pass as int instances
atomic_ok = (unicode, int, types.NoneType)
# containers that we need to convert into lists
container_to_list = (tuple, set, types.GeneratorType)
if isinstance(obj, float):
# cast out-of-range floats to their reprs
if math.isnan(obj) or math.isinf(obj):
return repr(obj)
return obj
if isinstance(obj, atomic_ok):
return obj
if isinstance(obj, bytes):
return obj.decode(DEFAULT_ENCODING, 'replace')
if isinstance(obj, container_to_list) or (
hasattr(obj, '__iter__') and hasattr(obj, next_attr_name)):
obj = list(obj)
if isinstance(obj, list):
return [json_clean(x) for x in obj]
if isinstance(obj, dict):
# First, validate that the dict won't lose data in conversion due to
# key collisions after stringification. This can happen with keys like
# True and 'true' or 1 and '1', which collide in JSON.
nkeys = len(obj)
nkeys_collapsed = len(set(map(str, obj)))
if nkeys != nkeys_collapsed:
raise ValueError('dict can not be safely converted to JSON: '
'key collision would lead to dropped values')
# If all OK, proceed by making the new dict that will be json-safe
out = {}
for k,v in obj.iteritems():
out[str(k)] = json_clean(v)
return out
# If we get here, we don't know how to handle the object, so we just get
# its repr and return that. This will catch lambdas, open sockets, class
# objects, and any other complicated contraption that json can't encode
return repr(obj)
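# Minimal demo (added for illustration; not part of the original module). It
# shows the conversions json_clean applies before encoding: sets become lists,
# bytes are decoded, and out-of-range floats are replaced by their reprs.
if __name__ == '__main__':
    import json
    sample = {'values': set([3, 1, 2]), 'label': b'raw bytes', 'ratio': float('nan')}
    print(json.dumps(json_clean(sample)))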
|
cloud9ers/gurumate
|
environment/lib/python2.7/site-packages/IPython/utils/jsonutil.py
|
Python
|
lgpl-3.0
| 6,908
|
"""The vimdoc parser."""
from vimdoc import codeline
from vimdoc import docline
from vimdoc import error
from vimdoc import regex
def IsComment(line):
return regex.comment_leader.match(line)
def IsContinuation(line):
return regex.line_continuation.match(line)
def StripContinuator(line):
assert regex.line_continuation.match(line)
return regex.line_continuation.sub('', line)
def EnumerateStripNewlinesAndJoinContinuations(lines):
"""Preprocesses the lines of a vimscript file.
Enumerates the lines, strips the newlines from the end, and joins the
continuations.
Args:
lines: The lines of the file.
Yields:
Each preprocessed line.
"""
lineno, cached = (None, None)
for i, line in enumerate(lines):
line = line.rstrip('\n')
if IsContinuation(line):
if cached is None:
raise error.CannotContinue('No preceding line.', i)
elif IsComment(cached) and not IsComment(line):
raise error.CannotContinue('No comment to continue.', i)
else:
cached += StripContinuator(line)
continue
if cached is not None:
yield lineno, cached
lineno, cached = (i, line)
if cached is not None:
yield lineno, cached
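# Illustrative sketch (not from the original source; assumes the continuation
# regex strips the leading whitespace and backslash): given the two lines
#   let x = foo(
#       \ 1, 2)
# the generator yields a single entry, roughly (0, 'let x = foo( 1, 2)'),
# because the continuation text is appended to the cached first line.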
def EnumerateParsedLines(lines):
vimdoc_mode = False
for i, line in EnumerateStripNewlinesAndJoinContinuations(lines):
if not vimdoc_mode:
if regex.vimdoc_leader.match(line):
vimdoc_mode = True
# There's no need to yield the blank line if it's an empty starter line.
# For example, in:
# ""
# " @usage whatever
# " description
# There's no need to yield the first docline as a blank.
if not regex.empty_vimdoc_leader.match(line):
# A starter line starts with two comment leaders.
# If we strip one of them it's a normal comment line.
yield i, ParseCommentLine(regex.comment_leader.sub('', line))
elif IsComment(line):
yield i, ParseCommentLine(line)
else:
vimdoc_mode = False
yield i, ParseCodeLine(line)
def ParseCodeLine(line):
"""Parses one line of code and creates the appropriate CodeLine."""
if regex.blank_code_line.match(line):
return codeline.Blank()
fmatch = regex.function_line.match(line)
if fmatch:
namespace, name, args = fmatch.groups()
return codeline.Function(name, namespace, regex.function_arg.findall(args))
cmatch = regex.command_line.match(line)
if cmatch:
args, name = cmatch.groups()
flags = {
'bang': '-bang' in args,
'range': '-range' in args,
'count': '-count' in args,
'register': '-register' in args,
'buffer': '-buffer' in args,
'bar': '-bar' in args,
}
return codeline.Command(name, **flags)
smatch = regex.setting_line.match(line)
if smatch:
name, = smatch.groups()
return codeline.Setting('g:' + name)
flagmatch = regex.flag_line.match(line)
if flagmatch:
a, b, default = flagmatch.groups()
return codeline.Flag(a or b, default)
return codeline.Unrecognized(line)
def ParseCommentLine(line):
"""Parses one line of documentation and creates the appropriate DocLine."""
block = regex.block_directive.match(line)
if block:
return ParseBlockDirective(*block.groups())
return docline.Text(regex.comment_leader.sub('', line))
def ParseBlockDirective(name, rest):
if name in docline.BLOCK_DIRECTIVES:
try:
return docline.BLOCK_DIRECTIVES[name](rest)
except ValueError:
raise error.InvalidBlockArgs(rest)
raise error.UnrecognizedBlockDirective(name)
def ParseBlocks(lines, filename):
blocks = []
selection = []
lineno = 0
try:
for lineno, line in EnumerateParsedLines(lines):
for block in line.Affect(blocks, selection):
yield block.Close()
for block in codeline.EndOfFile().Affect(blocks, selection):
yield block.Close()
except error.ParseError as e:
e.lineno = lineno + 1
e.filename = filename
raise
|
google/vimdoc
|
vimdoc/parser.py
|
Python
|
apache-2.0
| 3,977
|
import spirit.spiritlib as spiritlib
import ctypes
from spirit.scalar import scalar  # ctypes scalar type (float/double) used for restype below
### Load Library
_spirit = spiritlib.LoadSpiritLibrary()
# The Bohr Magneton [meV / T]
_mu_B = _spirit.Constants_mu_B
_mu_B.argtypes = None
_mu_B.restype = scalar
def mu_B():
return _mu_B()
# The Boltzmann constant [meV / K]
_k_B = _spirit.Constants_k_B
_k_B.argtypes = None
_k_B.restype = scalar
def k_B():
return _k_B()
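# Usage sketch (illustrative): the wrappers forward straight to the C library,
# so the constants should come back as plain Python floats, e.g.
#   from spirit import constants
#   print(constants.k_B())   # Boltzmann constant in meV / K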
|
GPMueller/spirit
|
core/python/spirit/constants.py
|
Python
|
mit
| 422
|
# -*- coding: utf-8 -*-
"""
Clock Plugin
Provides the datetime
"""
import datetime
from lib.decorators import never_raise
from lib.plugin import BasePlugin, BaseConfig
NAME = 'Clock'
class Plugin(BasePlugin):
DEFAULT_INTERVAL = 5
@never_raise
def update(self):
self.data['datetime'] = datetime.datetime.now()
super(Plugin, self).update()
class Config(BaseConfig):
pass
|
mikesmth/pi-dashboard
|
lib/plugins/clock.py
|
Python
|
bsd-3-clause
| 406
|
# -*- coding: utf-8 -*-
# Copyright (C) 2014 Canonical
#
# Authors:
# Didier Roche
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation; version 3.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
"""Dartlang module"""
from contextlib import suppress
from gettext import gettext as _
import logging
import os
import platform
import re
import umake.frameworks.baseinstaller
from umake.interactions import DisplayMessage
from umake.tools import add_env_to_user
from umake.ui import UI
logger = logging.getLogger(__name__)
_supported_archs = ['i386', 'amd64']
class DartCategory(umake.frameworks.BaseCategory):
def __init__(self):
super().__init__(name="Dart", description=_("Dartlang Development Environment"), logo_path=None)
class DartLangEditorRemoval(umake.frameworks.baseinstaller.BaseInstaller):
def __init__(self, category):
super().__init__(name="Dart Editor", description=_("Dart SDK with editor (not supported upstream anyymore)"),
download_page=None, category=category, only_on_archs=_supported_archs, only_for_removal=True)
class DartLang(umake.frameworks.baseinstaller.BaseInstaller):
def __init__(self, category):
super().__init__(name="Dart SDK", description=_("Dart SDK (default)"), is_category_default=True,
category=category, only_on_archs=_supported_archs,
download_page="https://www.dartlang.org/downloads/linux.html",
dir_to_decompress_in_tarball="dart-sdk")
def parse_download_link(self, line, in_download):
"""Parse Dart Lang download link, expect to find a url"""
tag_machine = '64'
if platform.machine() == 'i686':
tag_machine = '32'
download_re = r'<a data-bits="{}" data-os="linux" data-tool="sdk".*href="(.*)">'.format(tag_machine)
p = re.search(download_re, line)
with suppress(AttributeError):
url = p.group(1)
return ((url, None), True)
return ((None, None), False)
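    # Illustrative example (hypothetical HTML, not captured from dartlang.org):
    # on a 64-bit machine a line such as
    #   <a data-bits="64" data-os="linux" data-tool="sdk" href="https://.../dartsdk-linux-x64-release.zip">
    # would make parse_download_link return ((url, None), True) with the href
    # value as url; lines that do not match yield ((None, None), False).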
def post_install(self):
"""Add go necessary env variables"""
add_env_to_user(self.name, {"PATH": {"value": os.path.join(self.install_path, "bin")}})
UI.delayed_display(DisplayMessage(_("You need to restart a shell session for your installation to work")))
@property
def is_installed(self):
# check path and requirements
if not super().is_installed:
return False
if not os.path.isfile(os.path.join(self.install_path, "bin", "dart")):
logger.debug("{} binary isn't installed".format(self.name))
return False
return True
|
mbkulik/ubuntu-make
|
umake/frameworks/dart.py
|
Python
|
gpl-3.0
| 3,196
|
#!/usr/bin/env python3
# -*- coding: UTF-8 -*-
__author__ = 'RemiZOffAlex'
__copyright__ = '(c) RemiZOffAlex'
__license__ = 'MIT'
__email__ = 'remizoffalex@mail.ru'
__url__ = 'http://remizoffalex.ru'
from lxml import etree, html
class Escape():
def __init__(self, text):
self.text = text
self.error = False
try:
root = html.fromstring('<root>' + self.text + '</root>')
for bad in root.xpath("//script"):
print(bad.text)
bad.getparent().remove(bad)
for bad in root.xpath("//form"):
print(bad.text)
bad.getparent().remove(bad)
for bad in root.xpath("//@onclick"):
print(bad)
print(bad.is_attribute)
print(bad.getparent())
# print(bad.getparent().is_attribute)
                # Remove the onclick attribute
bad.getparent().attrib.pop('onclick')
except etree.XMLSyntaxError as error:
            self.text = 'Invalid data:<br/>'
for item in error.error_log:
msg = ''.join([
                    '<p>Error: ', item.message,
                    ' at line ', str(item.line),
                    ', column ', str(item.column)
])
self.text = self.text + msg
self.error = True
else:
self.text = html.tostring(root, encoding='utf-8').decode('utf-8')
self.text = self.text[6:]
self.text = self.text[:-7]
def __repr__(self):
return self.text
def tostring(self):
return self.text
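# Usage sketch (illustrative): the class strips <script> and <form> elements
# and pops onclick attributes, e.g.
#   Escape('<p onclick="x()">hi</p><script>alert(1)</script>').tostring()
# should return the markup with the script element and the onclick attribute
# removed (roughly '<p>hi</p>'), or an error report if the input is not
# well-formed.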
|
RemiZOffAlex/specialistoff.net
|
Python/crm/crmapp/lib/escape.py
|
Python
|
mit
| 1,697
|
import random
import copy
from fractions import Fraction
def get_matrix(file_name="matrix_new.txt"):
file = open(file_name, "r")
A = []
for line in file:
A.append([Fraction(x) for x in line.split()])
return A
def print_file(A, comment = ""):
file = open("matrix_show.txt", "a")
file.write("%s\n" % comment)
for i in range(len(A)):
for j in range(len(A[i])):
file.write("%6s " % Fraction.limit_denominator(A[i][j]))
file.write("\n")
file.write("\n\n")
def transform_matrix(A):
n = len(A)
a = [[A[i][j] for j in range(n)] for i in range(n)]
cnt = 10
file_report = open("transform_report_new.txt", "w")
for i in range(n):
for count in range(cnt):
j = random.randint(0, n - 1)
if i == j:
break
kk = Fraction(random.randint(-1, 1))
if not kk == 0:
for g in range(n):
a[i][g] += kk * a[j][g]
a[g][j] -= kk * a[g][i]
file_report.write("(line: ) %i += (%i)%i\n" % (i, kk, j))
file_report.write("(column: ) %i -= (%i)%i\n" % (j, kk, i))
return a
def matrix_product(A, B):
n = len(A)
C = []
for i in range(n):
C.append([])
for j in range(n):
C[i].append(Fraction(0, 1))
for g in range(n):
C[i][j] += A[i][g] * B[g][j]
return C
def matrix_plus_k_elementary(A, k):
ans = [[A[i][j] for j in range(len(A))] for i in range(len(A))]
for i in range(len(A)):
ans[i][i] += k
return ans
def matrix_fundamental_system_of_solutions(A):
n = len(A)
a = [[A[i][j] for j in range(n)] for i in range(n)]
used = [[False, -1] for x in range(n)]
# print(a)
ii = 0
for variable in range(n):
index = -1
for i in range(ii, n):
if a[i][variable].numerator != 0:
index = i
break
if index != -1:
used[variable] = [True, ii]
a[ii], a[index] = a[index], a[ii]
k = a[ii][variable]
for g in range(n):
a[ii][g] /= k
for i in range(n):
if i == ii:
continue
if a[i][variable].numerator != 0:
kk = a[i][variable]
for j in range(n):
a[i][j] -= kk * a[ii][j]
ii += 1
ans = []
for i in range(n):
if not used[i][0]:
ans.append([])
for j in range(n):
if i == j:
ans[-1].append(Fraction(1, 1))
else:
ans[-1].append(-a[used[j][1]][i] if used[j][0] else Fraction(0, 1))
pass
return ans
def matrix_addition_to_base():
file = open("addition.txt", "r")
n, m = [int(x) for x in file.readline().split()]
a = [[Fraction(x) for x in line.split()] for line in file]
a_copy = copy.deepcopy(a)
dimV = len(a[0])
used = [False for x in range(len(a))]
for variable in range(dimV):
index = -1
for i in range(len(a)):
if a[i][variable].numerator != 0 and not used[i]:
index = i
break
if index != -1:
used[index] = True
k = a[index][variable]
for g in range(dimV):
a[index][g] /= k
pass
for ii in range(len(a)):
if index == ii:
continue
if a[ii][variable].numerator != 0:
kk = a[ii][variable]
for g in range(dimV):
a[ii][g] -= kk * a[index][g]
file.close()
file = open("addition_answer.txt", "w")
ans = []
file.write("Addition:\n")
for i in range(n, len(used)):
if used[i]:
ans.append(a_copy[i])
file.write(" ".join([str(item) for item in a_copy[i]]))
file.write("\n")
return ans
def matrix_vector_product(A, v):
n = len(A)
C = []
for i in range(n):
C.append(Fraction(0, 1))
for g in range(n):
C[i] += A[i][g] * v[g]
return C
def vector_series(A, alpha):
N = matrix_plus_k_elementary(A, Fraction(-alpha, 1))
addition_to_base = matrix_addition_to_base()
ans = []
for v in addition_to_base:
ans.append([])
curv = copy.copy(v)
for i in range(50):
ans[-1].append(curv)
curv = matrix_vector_product(N, curv)
not_null = False
for it in curv:
if it != 0:
not_null = True
if not not_null:
break
return ans
def matrix_transposed(A):
n = len(A)
aT = [[A[j][i] for j in range(n)] for i in range(n)]
return aT
def matrix_invers(A):
n = len(A)
a = copy.deepcopy(A)
for i in range(n):
for j in range(n):
a[i].append(Fraction(0))
a[i][i + n] = Fraction(1)
used = [False for x in range(n)]
for variable in range(n):
index = -1
for i in range(n):
if a[i][variable].numerator != 0 and not used[i]:
index = i
break
if index != -1:
used[variable] = True
a[variable], a[index] = a[index], a[variable]
kk = a[variable][variable]
for g in range(2*n):
a[variable][g] /= kk
for i in range(n):
if i == variable or a[i][variable].numerator == 0:
continue
kk = a[i][variable]
for g in range(2*n):
a[i][g] -= kk * a[variable][g]
invers = [[a[i][j + n] for j in range(n)] for i in range(n)]
return invers
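# Quick sanity-check sketch (illustrative, not part of the original script):
#   A = [[Fraction(2), Fraction(1)], [Fraction(1), Fraction(1)]]
#   matrix_product(A, matrix_invers(A))
# should give the identity matrix [[Fraction(1), Fraction(0)], [Fraction(0), Fraction(1)]].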
def check_jordan_base():
A = get_matrix("matrix_input.txt")
St = get_matrix()
S = matrix_transposed(St)
print_file(S)
S_invers = matrix_invers(S)
print_file(S_invers)
product = matrix_product(matrix_product(S_invers, A), S)
print_file(product)
def print_in_file(a, file):
for item in a:
file.write("%6s " % item)
file.write("\n")
if __name__ == "__main__":
ff = open("matrix_show.txt", "w")
ff.close()
alpha = [2, 4, -5] # eigenvalues numbers
a = get_matrix()
A = transform_matrix(a)
print_file(a, "Initial matrix")
print_file(A, "Transformed matrix")
for alpha_current in alpha:
print_file([], "alpha = %i" % alpha_current)
Ae0 = matrix_plus_k_elementary(A, Fraction(-alpha_current, 1))
Ae = Ae0
print_file(Ae0, "ker N0")
fund_syst_array = []
for cnt in range(50):
fundamental_system = matrix_fundamental_system_of_solutions(Ae)
if cnt == 0 or cnt > 0 and not len(fund_syst_array[-1]) == len(fundamental_system):
fund_syst_array.append(fundamental_system)
print_file(fundamental_system, "fundamental_system Ker N^%i" % (cnt + 1))
Ae = matrix_product(Ae, Ae0)
else:
                break
chain = []
for i in range(len(fund_syst_array) - 1, 0, -1):
#print("i = %i" % i)
tmp = []
for item_chain in chain:
for item in item_chain:
tmp.append(item[-i-1])
file_addition = open("addition.txt", "w")
file_addition.write("%i %i\n" % (len(fund_syst_array[i - 1]) + len(tmp), len(fund_syst_array[i])))
for item in fund_syst_array[i - 1]:
print_in_file(item, file_addition)
for item in tmp:
print_in_file(item, file_addition)
for item in fund_syst_array[i]:
print_in_file(item, file_addition)
file_addition.close()
chain.append(vector_series(A, alpha_current))
print_file([], "Chain #")
for it in chain[-1]:
print_file(it)
first_vector = []
for chain_vect in chain:
for chain_chain in chain_vect:
first_vector.append(chain_chain[-1])
if first_vector:
file_addition = open("addition.txt", "w")
file_addition.write("%i %i\n" % (len(first_vector), len(fund_syst_array[0])))
for item in first_vector:
print_in_file(item, file_addition)
for item in fund_syst_array[0]:
print_in_file(item, file_addition)
file_addition.close()
addition = matrix_addition_to_base()
print_file([], "Chain #, len = 1")
print_file(addition)
|
Lamzin/myPython
|
Algebra/matrix_extreme.py
|
Python
|
gpl-2.0
| 8,779
|
from warnings import warn
import inspect
from .conflict import ordering, ambiguities, super_signature, AmbiguityWarning
from .utils import expand_tuples
import itertools as itl
class MDNotImplementedError(NotImplementedError):
""" A NotImplementedError for multiple dispatch """
def ambiguity_warn(dispatcher, ambiguities):
""" Raise warning when ambiguity is detected
Parameters
----------
dispatcher : Dispatcher
The dispatcher on which the ambiguity was detected
ambiguities : set
Set of type signature pairs that are ambiguous within this dispatcher
See Also:
Dispatcher.add
warning_text
"""
warn(warning_text(dispatcher.name, ambiguities), AmbiguityWarning)
_unresolved_dispatchers = set()
_resolve = [True]
def halt_ordering():
_resolve[0] = False
def restart_ordering(on_ambiguity=ambiguity_warn):
_resolve[0] = True
while _unresolved_dispatchers:
dispatcher = _unresolved_dispatchers.pop()
dispatcher.reorder(on_ambiguity=on_ambiguity)
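# Usage sketch (illustrative): when many implementations are registered in a
# row (e.g. at import time), ordering can be deferred so that ambiguity
# resolution runs once at the end instead of after every registration:
#   halt_ordering()
#   # ... many @dispatch(...) registrations ...
#   restart_ordering(on_ambiguity=ambiguity_warn)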
class Dispatcher(object):
""" Dispatch methods based on type signature
Use ``dispatch`` to add implementations
Examples
--------
>>> from sympy.multipledispatch import dispatch
>>> @dispatch(int)
... def f(x):
... return x + 1
>>> @dispatch(float)
... def f(x):
... return x - 1
>>> f(3)
4
>>> f(3.0)
2.0
"""
__slots__ = '__name__', 'name', 'funcs', 'ordering', '_cache', 'doc'
def __init__(self, name, doc=None):
self.name = self.__name__ = name
self.funcs = dict()
self._cache = dict()
self.ordering = []
self.doc = doc
def register(self, *types, **kwargs):
""" Register dispatcher with new implementation
>>> from sympy.multipledispatch.dispatcher import Dispatcher
>>> f = Dispatcher('f')
>>> @f.register(int)
... def inc(x):
... return x + 1
>>> @f.register(float)
... def dec(x):
... return x - 1
>>> @f.register(list)
... @f.register(tuple)
... def reverse(x):
... return x[::-1]
>>> f(1)
2
>>> f(1.0)
0.0
>>> f([1, 2, 3])
[3, 2, 1]
"""
def _(func):
self.add(types, func, **kwargs)
return func
return _
@classmethod
def get_func_params(cls, func):
if hasattr(inspect, "signature"):
sig = inspect.signature(func)
return sig.parameters.values()
@classmethod
def get_func_annotations(cls, func):
""" Get annotations of function positional parameters
"""
params = cls.get_func_params(func)
if params:
Parameter = inspect.Parameter
params = (param for param in params
if param.kind in
(Parameter.POSITIONAL_ONLY,
Parameter.POSITIONAL_OR_KEYWORD))
annotations = tuple(
param.annotation
for param in params)
if all(ann is not Parameter.empty for ann in annotations):
return annotations
def add(self, signature, func, on_ambiguity=ambiguity_warn):
""" Add new types/method pair to dispatcher
>>> from sympy.multipledispatch import Dispatcher
>>> D = Dispatcher('add')
>>> D.add((int, int), lambda x, y: x + y)
>>> D.add((float, float), lambda x, y: x + y)
>>> D(1, 2)
3
>>> D(1, 2.0)
Traceback (most recent call last):
...
NotImplementedError: Could not find signature for add: <int, float>
        When ``add`` detects an ambiguity it calls the ``on_ambiguity`` callback
with a dispatcher/itself, and a set of ambiguous type signature pairs
as inputs. See ``ambiguity_warn`` for an example.
"""
# Handle annotations
if not signature:
annotations = self.get_func_annotations(func)
if annotations:
signature = annotations
# Handle union types
if any(isinstance(typ, tuple) for typ in signature):
for typs in expand_tuples(signature):
self.add(typs, func, on_ambiguity)
return
for typ in signature:
if not isinstance(typ, type):
str_sig = ', '.join(c.__name__ if isinstance(c, type)
else str(c) for c in signature)
raise TypeError("Tried to dispatch on non-type: %s\n"
"In signature: <%s>\n"
"In function: %s" %
(typ, str_sig, self.name))
self.funcs[signature] = func
self.reorder(on_ambiguity=on_ambiguity)
self._cache.clear()
def reorder(self, on_ambiguity=ambiguity_warn):
if _resolve[0]:
self.ordering = ordering(self.funcs)
amb = ambiguities(self.funcs)
if amb:
on_ambiguity(self, amb)
else:
_unresolved_dispatchers.add(self)
def __call__(self, *args, **kwargs):
types = tuple([type(arg) for arg in args])
try:
func = self._cache[types]
except KeyError:
func = self.dispatch(*types)
if not func:
raise NotImplementedError(
'Could not find signature for %s: <%s>' %
(self.name, str_signature(types)))
self._cache[types] = func
try:
return func(*args, **kwargs)
except MDNotImplementedError:
funcs = self.dispatch_iter(*types)
next(funcs) # burn first
for func in funcs:
try:
return func(*args, **kwargs)
except MDNotImplementedError:
pass
raise NotImplementedError("Matching functions for "
"%s: <%s> found, but none completed successfully"
% (self.name, str_signature(types)))
def __str__(self):
return "<dispatched %s>" % self.name
__repr__ = __str__
def dispatch(self, *types):
""" Deterimine appropriate implementation for this type signature
This method is internal. Users should call this object as a function.
Implementation resolution occurs within the ``__call__`` method.
>>> from sympy.multipledispatch import dispatch
>>> @dispatch(int)
... def inc(x):
... return x + 1
>>> implementation = inc.dispatch(int)
>>> implementation(3)
4
>>> print(inc.dispatch(float))
None
See Also:
``sympy.multipledispatch.conflict`` - module to determine resolution order
"""
if types in self.funcs:
return self.funcs[types]
try:
return next(self.dispatch_iter(*types))
except StopIteration:
return None
def dispatch_iter(self, *types):
n = len(types)
for signature in self.ordering:
if len(signature) == n and all(map(issubclass, types, signature)):
result = self.funcs[signature]
yield result
def resolve(self, types):
""" Deterimine appropriate implementation for this type signature
.. deprecated:: 0.4.4
Use ``dispatch(*types)`` instead
"""
warn("resolve() is deprecated, use dispatch(*types)",
DeprecationWarning)
return self.dispatch(*types)
def __getstate__(self):
return {'name': self.name,
'funcs': self.funcs}
def __setstate__(self, d):
self.name = d['name']
self.funcs = d['funcs']
self.ordering = ordering(self.funcs)
self._cache = dict()
@property
def __doc__(self):
docs = ["Multiply dispatched method: %s" % self.name]
if self.doc:
docs.append(self.doc)
other = []
for sig in self.ordering[::-1]:
func = self.funcs[sig]
if func.__doc__:
s = 'Inputs: <%s>\n' % str_signature(sig)
s += '-' * len(s) + '\n'
s += func.__doc__.strip()
docs.append(s)
else:
other.append(str_signature(sig))
if other:
docs.append('Other signatures:\n ' + '\n '.join(other))
return '\n\n'.join(docs)
def _help(self, *args):
return self.dispatch(*map(type, args)).__doc__
def help(self, *args, **kwargs):
""" Print docstring for the function corresponding to inputs """
print(self._help(*args))
def _source(self, *args):
func = self.dispatch(*map(type, args))
if not func:
raise TypeError("No function found")
return source(func)
def source(self, *args, **kwargs):
""" Print source code for the function corresponding to inputs """
print(self._source(*args))
def source(func):
s = 'File: %s\n\n' % inspect.getsourcefile(func)
s = s + inspect.getsource(func)
return s
class MethodDispatcher(Dispatcher):
""" Dispatch methods based on type signature
See Also:
Dispatcher
"""
@classmethod
def get_func_params(cls, func):
if hasattr(inspect, "signature"):
sig = inspect.signature(func)
return itl.islice(sig.parameters.values(), 1, None)
def __get__(self, instance, owner):
self.obj = instance
self.cls = owner
return self
def __call__(self, *args, **kwargs):
types = tuple([type(arg) for arg in args])
func = self.dispatch(*types)
if not func:
raise NotImplementedError('Could not find signature for %s: <%s>' %
(self.name, str_signature(types)))
return func(self.obj, *args, **kwargs)
def str_signature(sig):
""" String representation of type signature
>>> from sympy.multipledispatch.dispatcher import str_signature
>>> str_signature((int, float))
'int, float'
"""
return ', '.join(cls.__name__ for cls in sig)
def warning_text(name, amb):
""" The text for ambiguity warnings """
text = "\nAmbiguities exist in dispatched function %s\n\n" % (name)
text += "The following signatures may result in ambiguous behavior:\n"
for pair in amb:
text += "\t" + \
', '.join('[' + str_signature(s) + ']' for s in pair) + "\n"
text += "\n\nConsider making the following additions:\n\n"
text += '\n\n'.join(['@dispatch(' + str_signature(super_signature(s))
+ ')\ndef %s(...)' % name for s in amb])
return text
|
wxgeo/geophar
|
wxgeometrie/sympy/multipledispatch/dispatcher.py
|
Python
|
gpl-2.0
| 10,936
|
# Copyright 2020 The StackStorm Authors.
# Copyright 2019 Extreme Networks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import print_function
import six
import eventlet
import traceback
from st2actions import worker
from st2actions.scheduler import entrypoint as scheduling
from st2actions.scheduler import handler as scheduling_queue
from st2common.constants import action as action_constants
from st2common.models.db.liveaction import LiveActionDB
__all__ = ["MockLiveActionPublisher", "MockLiveActionPublisherNonBlocking"]
class MockLiveActionPublisher(object):
@classmethod
def process(cls, payload):
ex_req = scheduling.get_scheduler_entrypoint().process(payload)
if ex_req is not None:
scheduling_queue.get_handler()._handle_execution(ex_req)
@classmethod
def publish_create(cls, payload):
# The scheduling entry point is only listening for status change and not on create.
# Therefore, no additional processing is required here otherwise this will cause
# duplicate processing in the unit tests.
pass
@classmethod
def publish_state(cls, payload, state):
try:
if isinstance(payload, LiveActionDB):
if state == action_constants.LIVEACTION_STATUS_REQUESTED:
cls.process(payload)
else:
worker.get_worker().process(payload)
except Exception:
traceback.print_exc()
print(payload)
class MockLiveActionPublisherNonBlocking(object):
threads = []
@classmethod
def process(cls, payload):
ex_req = scheduling.get_scheduler_entrypoint().process(payload)
if ex_req is not None:
scheduling_queue.get_handler()._handle_execution(ex_req)
@classmethod
def publish_create(cls, payload):
# The scheduling entry point is only listening for status change and not on create.
# Therefore, no additional processing is required here otherwise this will cause
# duplicate processing in the unit tests.
pass
@classmethod
def publish_state(cls, payload, state):
try:
if isinstance(payload, LiveActionDB):
if state == action_constants.LIVEACTION_STATUS_REQUESTED:
thread = eventlet.spawn(cls.process, payload)
cls.threads.append(thread)
else:
thread = eventlet.spawn(worker.get_worker().process, payload)
cls.threads.append(thread)
except Exception:
traceback.print_exc()
print(payload)
@classmethod
def wait_all(cls):
for thread in cls.threads:
try:
thread.wait()
except Exception as e:
print(six.text_type(e))
finally:
cls.threads.remove(thread)
eventlet.sleep(0.1)
class MockLiveActionPublisherSchedulingQueueOnly(object):
@classmethod
def process(cls, payload):
scheduling.get_scheduler_entrypoint().process(payload)
@classmethod
def publish_create(cls, payload):
# The scheduling entry point is only listening for status change and not on create.
# Therefore, no additional processing is required here otherwise this will cause
# duplicate processing in the unit tests.
pass
@classmethod
def publish_state(cls, payload, state):
try:
if isinstance(payload, LiveActionDB):
if state == action_constants.LIVEACTION_STATUS_REQUESTED:
cls.process(payload)
else:
worker.get_worker().process(payload)
except Exception:
traceback.print_exc()
print(payload)
|
nzlosh/st2
|
st2tests/st2tests/mocks/liveaction.py
|
Python
|
apache-2.0
| 4,369
|
#!/usr/bin/env python
# encoding: utf-8
"""
__init__.py
The MIT License (MIT)
Copyright (c) 2013 Matt Ryan
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import sys
import os
import stomp
import json
import afutils.file_pattern as pattern
from aflib3 import AFLibraryEntry
class AFMQ:
'''Represents a basic connection to an ActiveMQ
service for AudioFile.
'''
def __init__(self, queue_name):
self.queue_name = queue_name
self.queue_handle = stomp.Connection()
self.queue_handle.start()
self.queue_handle.connect()
self.queue_handle.subscribe(destination=queue_name, ack='auto')
def __del__(self):
self.queue_handle.disconnect()
def put(self, msg):
self.queue_handle.send(msg, destination=self.queue_name)
class BasicHandler:
'''Represents an ActiveMQ handler that consumes information
from the queue.
'''
def __init__(self, aflib, queue_name):
self.aflib = aflib
self.queue_name = queue_name
self.queue_handle = stomp.Connection()
self.queue_handle.set_listener(queue_name, self)
self.queue_handle.start()
self.queue_handle.connect()
self.queue_handle.subscribe(destination=queue_name, ack='auto')
def __del__(self):
self.queue_handle.stop()
def on_error(self, headers, message):
print '%s: Received an error: "%s"' % (self.__class__, message)
def on_message(self, headers, message):
print '%s: Received message: "%s"' % (self.__class__, message)
class AddFileHandler(BasicHandler):
'''Adds files to the AudioFile library as the files
are posted into a queue.
'''
def __init__(self, aflib):
BasicHandler.__init__(self, aflib, '/audiofile/library_additions')
def on_message(self, headers, message):
BasicHandler.on_message(self, headers, message)
args = json.loads(message)
self.aflib.add_mp3(args[0], args[1])
class RenameFileHandler(BasicHandler):
'''Renames files from the old path to the new specified
path as the information is put into a queue.
'''
def __init__(self, aflib):
BasicHandler.__init__(self, aflib, '/audiofile/file_renames')
def on_message(self, headers, message):
BasicHandler.on_message(self, headers, message)
args = json.loads(message)
song = AFLibraryEntry()
song.apply_dict(args[0])
newpath = pattern.get_new_path(song, args[1])
print 'Renaming "%s" as "%s"...' % (song.path, newpath)
os.rename(song.path, newpath)
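# Usage sketch (illustrative; the payload layout is an assumption based on the
# handlers above): a producer pushes a JSON-encoded two-element list, e.g.
#   queue = AFMQ('/audiofile/library_additions')
#   queue.put(json.dumps(['/music/new_song.mp3', extra_arg]))
# and an AddFileHandler(aflib) subscribed to the same destination calls
# aflib.add_mp3(path, extra_arg) when the message arrives.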
if __name__ == '__main__':
pass
|
mattvryan/audiofile
|
afmq/__init__.py
|
Python
|
mit
| 3,351
|
#!/usr/bin/env python
# WidgetWindow.py
#
# Copyright (C) 2015-2017 Kano Computing Ltd.
# License: http://www.gnu.org/licenses/gpl-2.0.txt GNU GPL v2
#
# The MainWindow for the Desktop Feedback Widget
from gi.repository import Gtk, Gdk, GObject
from kano.gtk3.application_window import ApplicationWindow
from kano.gtk3.scrolled_window import ScrolledWindow
from kano.gtk3.buttons import OrangeButton
from kano.gtk3.apply_styles import apply_styling_to_screen, \
apply_styling_to_widget
from kano_profile.tracker import track_action
from kano_feedback.DataSender import send_question_response
from kano_feedback.WidgetQuestions import WidgetPrompts
from kano_feedback.Media import media_dir
from kano_feedback.SliderInput import SliderInput
from kano_feedback.RadioInput import RadioInput
from kano_feedback.CheckInput import CheckInput
from kano_feedback.DropdownInput import DropdownInput
from kano_feedback.TextInput import TextInput
from kano.logging import logger
class WidgetWindow(ApplicationWindow):
CLOSE_FEEDBACK = 0
KEEP_OPEN = 1
LAUNCH_WIFI = 2
WIDTH = 500
HEIGHT_COMPACT = 50
HEIGHT_EXPANDED = 200
SUBJECT = 'Kano Desktop Feedback Widget'
def __init__(self):
ApplicationWindow.__init__(self, 'widget', self.WIDTH,
self.HEIGHT_COMPACT)
self.wprompts = WidgetPrompts()
self.wprompts.load_prompts()
self._initialise_window()
if not self.wprompts.get_current_prompt():
self.hide_until_more_questions()
return
self.position_widget()
# Catch the window state event to avoid minimising and losing
# the window
self.connect("window-state-event", self._unminimise_if_minimised)
def hide_until_more_questions(self):
'''
Hide the widget and set a timer to get new questions
'''
delay = 15 * 60 * 1000
self.hide()
GObject.timeout_add(delay, self.timer_fetch_questions)
return
def timer_fetch_questions(self):
'''
This function will periodically call the Questions API
Until we get questions for the user, then show the widget again
'''
self.wprompts.load_prompts()
nextp = self.wprompts.get_current_prompt()
if nextp:
self.set_keep_below(True)
self.show()
self.position_widget()
self._shrink()
self._prompt.set_text(nextp)
# Only change the textbuffer if type is correct
if self.wprompts.get_current_prompt_type() == "text":
self._input_widget.get_buffer().set_text('')
return False
else:
return True
def position_widget(self):
'''
Position the widget window at the top center of the screen
'''
screen = Gdk.Screen.get_default()
widget_x = (screen.get_width() - self.WIDTH) / 2
widget_y = 20
self.move(widget_x, widget_y)
def _initialise_window(self):
'''
Inititlaises the gtk window
'''
self.last_click = 0
self.app_name_opened = 'feedback-widget-opened'
self.typeahead = None
self.help_tip_message = _("Type your feedback here!") # noqa: F821
self.rotating_mode = True
self.in_submit = False
apply_styling_to_screen(media_dir() + 'css/widget.css')
ScrolledWindow.apply_styling_to_screen(wide=False)
self.visible = False
self.set_hexpand(False)
self.set_decorated(False)
self.set_resizable(False)
self.set_keep_above(False)
self.set_property('skip-taskbar-hint', True)
self._grid = grid = Gtk.Grid(hexpand=True, vexpand=True)
qmark = Gtk.Label('?')
qmark.get_style_context().add_class('qmark')
qmark_centering = Gtk.Alignment(xalign=0.5, yalign=0.5)
qmark_centering.add(qmark)
qmark_box = Gtk.EventBox()
qmark_box.get_style_context().add_class('qmark_box')
qmark_box.add(qmark_centering)
qmark_box.set_size_request(self.HEIGHT_COMPACT, self.HEIGHT_COMPACT)
grid.attach(qmark_box, 0, 0, 1, 1)
self._prompt = prompt = Gtk.Label(self.wprompts.get_current_prompt(),
hexpand=False)
prompt.get_style_context().add_class('prompt')
prompt.set_justify(Gtk.Justification.LEFT)
prompt.set_size_request(410, -1)
prompt.set_line_wrap(True)
prompt_align = Gtk.Alignment(xalign=0.5, yalign=0.5)
prompt_align.add(prompt)
prompt_ebox = Gtk.EventBox()
prompt_ebox.add(prompt_align)
grid.attach(prompt_ebox, 1, 0, 2, 1)
self._x_button = x_button = Gtk.Button('x')
x_button.set_size_request(20, 20)
x_button.connect('clicked', self._shrink)
x_button.get_style_context().add_class('x_button')
x_button.set_margin_right(20)
x_button_ebox = Gtk.EventBox()
x_button_ebox.add(x_button)
x_button_ebox.connect("realize", self._set_cursor_to_hand_cb)
x_button_align = Gtk.Alignment(xalign=1, yalign=0.5,
xscale=0, yscale=0)
x_button_align.add(x_button_ebox)
grid.attach(x_button_align, 3, 0, 1, 1)
self._gray_box = gray_box = Gtk.EventBox()
gray_box.get_style_context().add_class('gray_box')
gray_box.set_size_request(-1,
self.HEIGHT_EXPANDED - self.HEIGHT_COMPACT)
gray_box_centering = Gtk.Alignment(xalign=0, yalign=0, xscale=1.0,
yscale=1.0)
gray_box_centering.add(gray_box)
grid.attach(gray_box_centering, 0, 1, 1, 2)
self._ebox = Gtk.EventBox()
self._ebox.get_style_context().add_class('scrolled_win')
grid.attach(self._ebox, 1, 1, 2, 1)
self._pack_input_widget()
self._send = send = OrangeButton('SEND')
apply_styling_to_widget(send.label, media_dir() + 'css/widget.css')
send.set_sensitive(False)
send.connect('clicked', self._send_clicked)
send.set_margin_left(10)
send.set_margin_right(20)
send.set_margin_top(10)
send.set_margin_bottom(15)
send_align = Gtk.Alignment(xalign=1, yalign=0.5, xscale=0, yscale=0)
send_align.add(send)
grid.attach(send_align, 3, 2, 1, 1)
self.set_main_widget(grid)
self.show_all()
self._dont_shrink = False
self._shrink()
self.connect("focus-out-event", self._shrink)
self.connect("button-press-event", self._toggle)
def _pack_input_widget(self):
# Unpack the contents of the scrolled window and replace with
# another widget
# for child in self._scrolledwindow.get_children():
for child in self._ebox.get_children():
self._ebox.remove(child)
        # Guard against the first call, before any input widget has been created
        if getattr(self, '_input_widget', None):
            self._input_widget.destroy()
# This needs to change depending on the type of the question.
# If the type of the question is "text" or "textInput", then pack this,
# otherwise pack other options.
prompt_type = self.wprompts.get_current_prompt_type()
if prompt_type:
if prompt_type == "textInput":
self._input_widget = self._create_text_input()
elif prompt_type == "slider":
start = self.wprompts.get_slider_start_value()
end = self.wprompts.get_slider_end_value()
self._input_widget = self._create_slider_input(start, end)
elif prompt_type == "radio":
radiobutton_labels = self.wprompts.get_current_choices()
radiobutton_labels = map(str, radiobutton_labels)
self._input_widget = self._create_radiobutton_input(radiobutton_labels)
elif prompt_type == "checkbox":
checkbox_labels = self.wprompts.get_current_choices()
checkbox_labels = map(str, checkbox_labels)
maximum = self.wprompts.get_checkbox_max_selected()
minimum = self.wprompts.get_checkbox_min_selected()
self._input_widget = self._create_checkbutton_input(
checkbox_labels, maximum, minimum
)
elif prompt_type == "dropdown":
dropdown_labels = self.wprompts.get_current_choices()
dropdown_labels = map(str, dropdown_labels)
self._input_widget = self._create_dropdown_input(dropdown_labels)
else:
self._input_widget = self._create_text_input()
else:
self._input_widget = self._create_text_input()
self._input_widget.set_margin_left(10)
self._input_widget.set_margin_right(10)
self._input_widget.set_margin_top(10)
self._input_widget.set_margin_bottom(10)
self._ebox.add(self._input_widget)
# Force the widget to be realised
self.show_all()
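    # Summary (added for illustration) of the mapping applied in
    # _pack_input_widget above; the prompt "type" selects the input widget:
    #   "textInput" (or anything unrecognised/missing) -> TextInput
    #   "slider"   -> SliderInput(start, end)
    #   "radio"    -> RadioInput(choices)
    #   "checkbox" -> CheckInput(choices, max_selected, min_selected)
    #   "dropdown" -> DropdownInput(choices)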
def _set_anti_shrink_flag(self, widget, value):
self._dont_shrink = value
def _create_text_input(self):
logger.debug("text input being created")
widget = TextInput()
widget.connect("text-changed", self._set_send_sensitive)
widget.connect("text-not-changed", self._set_send_insensitive)
return widget
def _create_slider_input(self, start, end):
logger.debug("slider being created with start {} and end {}".format(start, end))
widget = SliderInput(start, end)
widget.connect("slider-changed", self._set_send_sensitive)
return widget
def _create_radiobutton_input(self, values):
logger.debug("radiobuttons being created with values {}".format(values))
widget = RadioInput(values)
widget.connect("radio-changed", self._set_send_sensitive)
return widget
def _create_checkbutton_input(self, values, maximum, minimum):
logger.debug("checkbuttons being created with values {}, minimum {}, maximum".format(values, minimum, maximum))
widget = CheckInput(values, maximum, minimum)
widget.connect('min-not-selected', self._set_send_insensitive)
widget.connect('min-selected', self._set_send_sensitive)
return widget
def _create_dropdown_input(self, values):
logger.debug("dropdown being created with values {}".format(values))
widget = DropdownInput(values)
widget.connect("dropdown-changed", self._set_send_sensitive)
widget.connect("popup", self._set_anti_shrink_flag, True)
widget.connect("dropdown-changed", self._set_anti_shrink_flag, False)
return widget
def _set_send_sensitive(self, widget=None):
self._send.set_sensitive(True)
def _set_send_insensitive(self, widget=None):
self._send.set_sensitive(False)
def _get_user_input(self):
return self._input_widget.get_selected_text()
def _shrink(self, widget=None, event=None):
'''
Hides the widget
'''
if not self._dont_shrink:
self._x_button.hide()
self._ebox.hide()
self._gray_box.hide()
self._send.hide()
self._expanded = False
def _expand(self, widget=None, event=None):
'''
Shows the text box
'''
self._x_button.show()
self._ebox.show()
self._gray_box.show()
self._send.show()
self._expanded = True
(focusable, focus_widget) = self._input_widget.get_focusable_widget()
if focusable:
self.set_focus(focus_widget)
# Add metrics to kano tracker
track_action(self.app_name_opened)
def _toggle(self, widget=None, event=None):
'''
Toggles between shrink-expand
'''
if self._expanded:
self._shrink()
else:
self._expand()
def _set_cursor_to_hand_cb(self, widget, data=None):
widget.get_window().set_cursor(Gdk.Cursor(Gdk.CursorType.HAND1))
def _send_clicked(self, window=None, event=None):
self.blur()
self.get_window().set_cursor(Gdk.Cursor(Gdk.CursorType.WATCH))
while Gtk.events_pending():
Gtk.main_iteration()
prompt = self.wprompts.get_current_prompt()
answer = self._get_user_input()
# Don't send! Show an error?
# Otherwise this shows as greyed out and with a spinner indefinitely
if not answer:
print "Not sending, no answer present"
return
qid = self.wprompts.get_current_prompt_id()
# Network send to feedback API
if send_question_response([(qid, answer)]):
# Connection is ok, the answer has been sent
self.wprompts.mark_prompt(prompt, answer, qid, offline=False, rotate=True)
# Also send any pending answers we may have in the cache
for offline in self.wprompts.get_offline_answers():
sent_ok = send_question_response([(offline[2], offline[1])], interactive=False)
if sent_ok:
self.wprompts.mark_prompt(prompt=offline[0], answer=offline[1],
qid=offline[2], offline=False, rotate=False)
else:
# Could not get connection, or user doesn't want to at this time
# Save the answer as offline to send it later
self.wprompts.mark_prompt(prompt, answer, qid, offline=True, rotate=True)
# Get next available question on the queue
nextp = self.wprompts.get_current_prompt()
if nextp:
self._pack_input_widget()
self._prompt.set_text(nextp)
            # Clearing the old text buffer is no longer needed here because the
            # input widget is recreated by _pack_input_widget above
# Disable send button
self._send.set_sensitive(False)
self._shrink()
else:
# There are no more questions available,
# hide the widget until they arrive over the API
self.hide_until_more_questions()
self.get_window().set_cursor(Gdk.Cursor(Gdk.CursorType.ARROW))
self.unblur()
def _unminimise_if_minimised(self, window, event):
# Check if we are attempting to minimise the window
# if so, try to unminimise it
if event.changed_mask & Gdk.WindowState.ICONIFIED:
window.deiconify()
|
KanoComputing/kano-feedback
|
kano_feedback/WidgetWindow.py
|
Python
|
gpl-2.0
| 14,552
|
import json
import io
from os.path import join as pjoin
from config import CONVERSIONS, SIMULATIONS_PATH
def parse_map(name):
""" parses a map from a file. The first line contains the map width
and the second line contains the corresponding height, followed
by the actual map data.
See the example below for a simple map and config.CONVERSIONS for
        the representation. Note: Only square maps are currently supported.
WWWW
W W
W W
WWWW
name -- file name regarding the map (located in SIMULATIONS_PATH)
"""
with io.open(pjoin(SIMULATIONS_PATH, name)) as map_stream:
lines = map_stream.read().splitlines()
width = int(lines[0])
height = int(lines[1])
rows = []
for i, l in enumerate(lines[2:]):
if len(l) != width:
raise ValueError("Line %d has invalid length: %d, should be %d according to header" % (i, len(l), width))
rows.append(list(map(lambda x: int(CONVERSIONS[x]), l)))
if len(rows) != height:
raise ValueError(
"parseMap: Read invalid number of lines: %d should be %d according to header" % (len(rows), height))
return rows, width, height
def parse_simulation_params(name, additional_params):
with io.open(pjoin(SIMULATIONS_PATH, name)) as sim_stream:
params = json.load(sim_stream)
#Override some parameters from additional_params which are given explicitly from the console
for param in additional_params:
k, v = param.split("=")
params[k.strip()] = int(v.strip())
return params
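# Usage sketch (illustrative; the file names and parameter keys are made up):
#   rows, width, height = parse_map('simple.map')
#   params = parse_simulation_params('run.json', ['steps=100', 'cars=5'])
# Each "key=value" entry in additional_params overrides the corresponding
# value loaded from the JSON file, cast to int.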
|
Tooa/trafficSim
|
ioutil.py
|
Python
|
apache-2.0
| 1,654
|
from django.utils.translation import pgettext_lazy
from django_filters.filters import ModelMultipleChoiceFilter
from geotrek.authent.filters import StructureRelatedFilterSet
from .models import SensitiveArea, Species
class SensitiveAreaFilterSet(StructureRelatedFilterSet):
species = ModelMultipleChoiceFilter(
label=pgettext_lazy("Singular", "Species"),
queryset=Species.objects.filter(category=Species.SPECIES)
)
class Meta(StructureRelatedFilterSet.Meta):
model = SensitiveArea
fields = StructureRelatedFilterSet.Meta.fields + [
'species', 'species__category',
]
|
GeotrekCE/Geotrek-admin
|
geotrek/sensitivity/filters.py
|
Python
|
bsd-2-clause
| 633
|
# -*- coding: utf-8 -*-
"""
***************************************************************************
ZonalStatistics.py
---------------------
Date : August 2013
Copyright : (C) 2013 by Alexander Bruy
Email : alexander dot bruy at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Alexander Bruy'
__date__ = 'August 2013'
__copyright__ = '(C) 2013, Alexander Bruy'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import numpy
try:
from scipy.stats.mstats import mode
hasSciPy = True
except ImportError:
hasSciPy = False
from osgeo import gdal, ogr, osr
from qgis.core import QgsRectangle, QgsGeometry, QgsFeature
from processing.core.GeoAlgorithm import GeoAlgorithm
from processing.core.parameters import ParameterVector
from processing.core.parameters import ParameterRaster
from processing.core.parameters import ParameterString
from processing.core.parameters import ParameterNumber
from processing.core.parameters import ParameterBoolean
from processing.core.outputs import OutputVector
from processing.tools.raster import mapToPixel
from processing.tools import dataobjects, vector
class ZonalStatistics(GeoAlgorithm):
INPUT_RASTER = 'INPUT_RASTER'
RASTER_BAND = 'RASTER_BAND'
INPUT_VECTOR = 'INPUT_VECTOR'
COLUMN_PREFIX = 'COLUMN_PREFIX'
GLOBAL_EXTENT = 'GLOBAL_EXTENT'
OUTPUT_LAYER = 'OUTPUT_LAYER'
def defineCharacteristics(self):
self.name, self.i18n_name = self.trAlgorithm('Zonal Statistics')
self.group, self.i18n_group = self.trAlgorithm('Raster tools')
self.addParameter(ParameterRaster(self.INPUT_RASTER,
self.tr('Raster layer')))
self.addParameter(ParameterNumber(self.RASTER_BAND,
self.tr('Raster band'), 1, 999, 1))
self.addParameter(ParameterVector(self.INPUT_VECTOR,
self.tr('Vector layer containing zones'),
[ParameterVector.VECTOR_TYPE_POLYGON]))
self.addParameter(ParameterString(self.COLUMN_PREFIX,
self.tr('Output column prefix'), '_'))
self.addParameter(ParameterBoolean(self.GLOBAL_EXTENT,
self.tr('Load whole raster in memory')))
self.addOutput(OutputVector(self.OUTPUT_LAYER, self.tr('Zonal statistics')))
def processAlgorithm(self, progress):
""" Based on code by Matthew Perry
https://gist.github.com/perrygeo/5667173
"""
layer = dataobjects.getObjectFromUri(self.getParameterValue(self.INPUT_VECTOR))
rasterPath = unicode(self.getParameterValue(self.INPUT_RASTER))
bandNumber = self.getParameterValue(self.RASTER_BAND)
columnPrefix = self.getParameterValue(self.COLUMN_PREFIX)
useGlobalExtent = self.getParameterValue(self.GLOBAL_EXTENT)
rasterDS = gdal.Open(rasterPath, gdal.GA_ReadOnly)
geoTransform = rasterDS.GetGeoTransform()
rasterBand = rasterDS.GetRasterBand(bandNumber)
noData = rasterBand.GetNoDataValue()
cellXSize = abs(geoTransform[1])
cellYSize = abs(geoTransform[5])
rasterXSize = rasterDS.RasterXSize
rasterYSize = rasterDS.RasterYSize
rasterBBox = QgsRectangle(geoTransform[0], geoTransform[3] - cellYSize
* rasterYSize, geoTransform[0] + cellXSize
* rasterXSize, geoTransform[3])
rasterGeom = QgsGeometry.fromRect(rasterBBox)
crs = osr.SpatialReference()
crs.ImportFromProj4(str(layer.crs().toProj4()))
if useGlobalExtent:
xMin = rasterBBox.xMinimum()
xMax = rasterBBox.xMaximum()
yMin = rasterBBox.yMinimum()
yMax = rasterBBox.yMaximum()
(startColumn, startRow) = mapToPixel(xMin, yMax, geoTransform)
(endColumn, endRow) = mapToPixel(xMax, yMin, geoTransform)
width = endColumn - startColumn
height = endRow - startRow
srcOffset = (startColumn, startRow, width, height)
srcArray = rasterBand.ReadAsArray(*srcOffset)
srcArray = srcArray * rasterBand.GetScale() + rasterBand.GetOffset()
newGeoTransform = (
geoTransform[0] + srcOffset[0] * geoTransform[1],
geoTransform[1],
0.0,
geoTransform[3] + srcOffset[1] * geoTransform[5],
0.0,
geoTransform[5],
)
memVectorDriver = ogr.GetDriverByName('Memory')
memRasterDriver = gdal.GetDriverByName('MEM')
fields = layer.fields()
(idxMin, fields) = vector.findOrCreateField(layer, fields,
columnPrefix + 'min', 21, 6)
(idxMax, fields) = vector.findOrCreateField(layer, fields,
columnPrefix + 'max', 21, 6)
(idxSum, fields) = vector.findOrCreateField(layer, fields,
columnPrefix + 'sum', 21, 6)
(idxCount, fields) = vector.findOrCreateField(layer, fields,
columnPrefix + 'count', 21, 6)
(idxMean, fields) = vector.findOrCreateField(layer, fields,
columnPrefix + 'mean', 21, 6)
(idxStd, fields) = vector.findOrCreateField(layer, fields,
columnPrefix + 'std', 21, 6)
(idxUnique, fields) = vector.findOrCreateField(layer, fields,
columnPrefix + 'unique', 21, 6)
(idxRange, fields) = vector.findOrCreateField(layer, fields,
columnPrefix + 'range', 21, 6)
(idxVar, fields) = vector.findOrCreateField(layer, fields,
columnPrefix + 'var', 21, 6)
(idxMedian, fields) = vector.findOrCreateField(layer, fields,
columnPrefix + 'median', 21, 6)
if hasSciPy:
(idxMode, fields) = vector.findOrCreateField(layer, fields,
columnPrefix + 'mode', 21, 6)
writer = self.getOutputFromName(self.OUTPUT_LAYER).getVectorWriter(
fields.toList(), layer.wkbType(), layer.crs())
outFeat = QgsFeature()
outFeat.initAttributes(len(fields))
outFeat.setFields(fields)
features = vector.features(layer)
total = 100.0 / len(features)
for current, f in enumerate(features):
geom = f.geometry()
intersectedGeom = rasterGeom.intersection(geom)
ogrGeom = ogr.CreateGeometryFromWkt(intersectedGeom.exportToWkt())
if not useGlobalExtent:
bbox = intersectedGeom.boundingBox()
xMin = bbox.xMinimum()
xMax = bbox.xMaximum()
yMin = bbox.yMinimum()
yMax = bbox.yMaximum()
(startColumn, startRow) = mapToPixel(xMin, yMax, geoTransform)
(endColumn, endRow) = mapToPixel(xMax, yMin, geoTransform)
width = endColumn - startColumn
height = endRow - startRow
if width == 0 or height == 0:
continue
srcOffset = (startColumn, startRow, width, height)
srcArray = rasterBand.ReadAsArray(*srcOffset)
srcArray = srcArray * rasterBand.GetScale() + rasterBand.GetOffset()
newGeoTransform = (
geoTransform[0] + srcOffset[0] * geoTransform[1],
geoTransform[1],
0.0,
geoTransform[3] + srcOffset[1] * geoTransform[5],
0.0,
geoTransform[5],
)
# Create a temporary vector layer in memory
memVDS = memVectorDriver.CreateDataSource('out')
memLayer = memVDS.CreateLayer('poly', crs, ogr.wkbPolygon)
ft = ogr.Feature(memLayer.GetLayerDefn())
ft.SetGeometry(ogrGeom)
memLayer.CreateFeature(ft)
ft.Destroy()
# Rasterize it
rasterizedDS = memRasterDriver.Create('', srcOffset[2],
srcOffset[3], 1, gdal.GDT_Byte)
rasterizedDS.SetGeoTransform(newGeoTransform)
gdal.RasterizeLayer(rasterizedDS, [1], memLayer, burn_values=[1])
rasterizedArray = rasterizedDS.ReadAsArray()
srcArray = numpy.nan_to_num(srcArray)
masked = numpy.ma.MaskedArray(srcArray,
mask=numpy.logical_or(srcArray == noData,
numpy.logical_not(rasterizedArray)))
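# Cells outside the rasterized polygon or equal to the nodata value are
# masked out; the statistics below are computed on the remaining cells and
# written as attributes (NaN results are stored as NULL)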
outFeat.setGeometry(geom)
attrs = f.attributes()
v = float(masked.min())
attrs.insert(idxMin, None if numpy.isnan(v) else v)
v = float(masked.max())
attrs.insert(idxMax, None if numpy.isnan(v) else v)
v = float(masked.sum())
attrs.insert(idxSum, None if numpy.isnan(v) else v)
attrs.insert(idxCount, int(masked.count()))
v = float(masked.mean())
attrs.insert(idxMean, None if numpy.isnan(v) else v)
v = float(masked.std())
attrs.insert(idxStd, None if numpy.isnan(v) else v)
attrs.insert(idxUnique, numpy.unique(masked.compressed()).size)
v = float(masked.max()) - float(masked.min())
attrs.insert(idxRange, None if numpy.isnan(v) else v)
v = float(masked.var())
attrs.insert(idxVar, None if numpy.isnan(v) else v)
v = float(numpy.ma.median(masked))
attrs.insert(idxMedian, None if numpy.isnan(v) else v)
if hasSciPy:
attrs.insert(idxMode, float(mode(masked, axis=None)[0][0]))
outFeat.setAttributes(attrs)
writer.addFeature(outFeat)
memVDS = None
rasterizedDS = None
progress.setPercentage(int(current * total))
rasterDS = None
del writer
|
alexbruy/QGIS
|
python/plugins/processing/algs/qgis/ZonalStatistics.py
|
Python
|
gpl-2.0
| 11,154
|
# -*- coding: utf-8 -*-
# Copyright 2013,2016 Christoph Reiter
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
from __future__ import absolute_import
import os
import ctypes
from quodlibet.compat import text_type
if os.name == "nt":
from . import winapi
from .winapi import SHGFPType, CSIDLFlag, CSIDL, GUID, \
SHGetFolderPathW, S_OK, MAX_PATH, \
KnownFolderFlag, FOLDERID, SHGetKnownFolderPath, CoTaskMemFree, \
CoInitialize, IShellLinkW, CoCreateInstance, CLSID_ShellLink, \
CLSCTX_INPROC_SERVER, IPersistFile
def open_folder_and_select_items(folder, items=None):
"""Shows a directory and optional files or subdirectories in the
file manager (explorer.exe).
If both folder and items are given the file manager will
display the content of `folder` and highlight all `items`.
If only a directory is given then the content of the parent directory is
shown and the `folder` highlighted.
Might raise WindowsError in case something fails (any of the
files not existing etc.)
"""
if items is None:
items = []
assert isinstance(folder, text_type)
for item in items:
assert isinstance(item, text_type)
assert not os.path.split(item)[0]
desktop = winapi.IShellFolder()
parent = winapi.IShellFolder()
parent_id = winapi.PIDLIST_ABSOLUTE()
child_ids = (winapi.PIDLIST_RELATIVE * len(items))()
try:
winapi.CoInitialize(None)
winapi.SHParseDisplayName(
folder, None, ctypes.byref(parent_id), 0, None)
winapi.SHGetDesktopFolder(ctypes.byref(desktop))
desktop.BindToObject(
parent_id, None, winapi.IShellFolder.IID, ctypes.byref(parent))
for i, item in enumerate(items):
attrs = winapi.ULONG(0)
parent.ParseDisplayName(
None, None, item, None,
ctypes.byref(child_ids[i]), ctypes.byref(attrs))
winapi.SHOpenFolderAndSelectItems(
parent_id, len(child_ids),
winapi.PCUITEMID_CHILD_ARRAY(child_ids), 0)
finally:
for child_id in child_ids:
if child_id:
winapi.CoTaskMemFree(child_id)
if parent_id:
winapi.ILFree(parent_id)
if parent:
parent.Release()
if desktop:
desktop.Release()
def _get_path(folder, default=False, create=False):
"""A path to an directory or None.
Takes a CSIDL instance as folder.
"""
if default:
flags = SHGFPType.DEFAULT
else:
flags = SHGFPType.CURRENT
if create:
folder |= CSIDLFlag.CREATE
# we don't want env vars
folder |= CSIDLFlag.DONT_UNEXPAND
buffer_ = ctypes.create_unicode_buffer(MAX_PATH)
try:
result = SHGetFolderPathW(0, folder, 0, flags, buffer_)
except WindowsError:
return None
if result != S_OK:
return None
return buffer_.value
def _get_known_path(folder, default=False, create=False):
"""A path to an directory or None
Takes a FOLDERID instances as folder.
"""
if default:
flags = KnownFolderFlag.DEFAULT_PATH
else:
flags = 0
if create:
flags |= KnownFolderFlag.CREATE
flags |= KnownFolderFlag.DONT_VERIFY
ptr = ctypes.c_wchar_p()
guid = GUID(folder)
try:
result = SHGetKnownFolderPath(
ctypes.byref(guid), flags, None, ctypes.byref(ptr))
except WindowsError:
return None
if result != S_OK:
return None
path = ptr.value
CoTaskMemFree(ptr)
return path
def get_personal_dir(**kwargs):
r"""e.g. 'C:\Users\<user>\Documents'"""
return _get_path(CSIDL.PERSONAL, **kwargs)
def get_appdata_dir(**kwargs):
r"""e.g. 'C:\Users\<user>\AppData\Roaming'"""
return _get_path(CSIDL.APPDATA, **kwargs)
def get_desktop_dir(**kwargs):
r"""e.g. 'C:\Users\<user>\Desktop'"""
return _get_path(CSIDL.DESKTOP, **kwargs)
def get_music_dir(**kwargs):
r"""e.g. 'C:\Users\<user>\Music'"""
return _get_path(CSIDL.MYMUSIC, **kwargs)
def get_profile_dir(**kwargs):
r"""e.g. 'C:\Users\<user>'"""
return _get_path(CSIDL.PROFILE, **kwargs)
def get_links_dir(**kwargs):
r"""e.g. 'C:\Users\<user>\Links'"""
return _get_known_path(FOLDERID.LINKS, **kwargs)
def get_link_target(path):
"""Takes a path to a .lnk file and returns a path the .lnk file
is targeting.
Might raise WindowsError in case something fails.
"""
assert isinstance(path, text_type)
CoInitialize(None)
pShellLinkW = IShellLinkW()
CoCreateInstance(
ctypes.byref(CLSID_ShellLink), None, CLSCTX_INPROC_SERVER,
ctypes.byref(IShellLinkW.IID), ctypes.byref(pShellLinkW))
try:
pPersistFile = IPersistFile()
pShellLinkW.QueryInterface(ctypes.byref(IPersistFile.IID),
ctypes.byref(pPersistFile))
try:
buffer_ = ctypes.create_unicode_buffer(path, MAX_PATH)
pPersistFile.Load(buffer_, 0)
finally:
pPersistFile.Release()
pShellLinkW.GetPath(buffer_, MAX_PATH, None, 0)
finally:
pShellLinkW.Release()
return ctypes.wstring_at(buffer_)
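# Minimal usage sketch (not part of the original module); the paths below are
# hypothetical and the calls only work on Windows:
#
#     open_folder_and_select_items(u"C:\\Users\\me\\Music", [u"song.flac"])
#     print(get_link_target(u"C:\\Users\\me\\Desktop\\song.lnk"))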
|
elbeardmorez/quodlibet
|
quodlibet/quodlibet/util/windows.py
|
Python
|
gpl-2.0
| 5,464
|
from __future__ import unicode_literals
from moto.core.responses import BaseResponse
from moto.ec2.utils import (
filters_from_querystring,
sequence_from_querystring,
dhcp_configuration_from_querystring)
class DHCPOptions(BaseResponse):
def associate_dhcp_options(self):
dhcp_opt_id = self.querystring.get("DhcpOptionsId", [None])[0]
vpc_id = self.querystring.get("VpcId", [None])[0]
dhcp_opt = self.ec2_backend.describe_dhcp_options([dhcp_opt_id])[0]
vpc = self.ec2_backend.get_vpc(vpc_id)
self.ec2_backend.associate_dhcp_options(dhcp_opt, vpc)
template = self.response_template(ASSOCIATE_DHCP_OPTIONS_RESPONSE)
return template.render()
def create_dhcp_options(self):
dhcp_config = dhcp_configuration_from_querystring(self.querystring)
# TODO validate we only got the options we know about
domain_name_servers = dhcp_config.get("domain-name-servers", None)
domain_name = dhcp_config.get("domain-name", None)
ntp_servers = dhcp_config.get("ntp-servers", None)
netbios_name_servers = dhcp_config.get("netbios-name-servers", None)
netbios_node_type = dhcp_config.get("netbios-node-type", None)
dhcp_options_set = self.ec2_backend.create_dhcp_options(
domain_name_servers=domain_name_servers,
domain_name=domain_name,
ntp_servers=ntp_servers,
netbios_name_servers=netbios_name_servers,
netbios_node_type=netbios_node_type
)
template = self.response_template(CREATE_DHCP_OPTIONS_RESPONSE)
return template.render(dhcp_options_set=dhcp_options_set)
def delete_dhcp_options(self):
dhcp_opt_id = self.querystring.get("DhcpOptionsId", [None])[0]
delete_status = self.ec2_backend.delete_dhcp_options_set(dhcp_opt_id)
template = self.response_template(DELETE_DHCP_OPTIONS_RESPONSE)
return template.render(delete_status=delete_status)
def describe_dhcp_options(self):
dhcp_opt_ids = sequence_from_querystring("DhcpOptionsId", self.querystring)
filters = filters_from_querystring(self.querystring)
dhcp_opts = self.ec2_backend.get_all_dhcp_options(dhcp_opt_ids, filters)
template = self.response_template(DESCRIBE_DHCP_OPTIONS_RESPONSE)
return template.render(dhcp_options=dhcp_opts)
CREATE_DHCP_OPTIONS_RESPONSE = u"""
<CreateDhcpOptionsResponse xmlns="http://ec2.amazonaws.com/doc/2013-10-15/">
<requestId>7a62c49f-347e-4fc4-9331-6e8eEXAMPLE</requestId>
<dhcpOptions>
<dhcpOptionsId>{{ dhcp_options_set.id }}</dhcpOptionsId>
<dhcpConfigurationSet>
{% for key, values in dhcp_options_set.options.items() %}
{{ values }}
{% if values %}
<item>
<key>{{key}}</key>
<valueSet>
{% for value in values %}
<item>
<value>{{ value }}</value>
</item>
{% endfor %}
</valueSet>
</item>
{% endif %}
{% endfor %}
</dhcpConfigurationSet>
<tagSet>
{% for tag in dhcp_options_set.get_tags() %}
<item>
<resourceId>{{ tag.resource_id }}</resourceId>
<resourceType>{{ tag.resource_type }}</resourceType>
<key>{{ tag.key }}</key>
<value>{{ tag.value }}</value>
</item>
{% endfor %}
</tagSet>
</dhcpOptions>
</CreateDhcpOptionsResponse>
"""
DELETE_DHCP_OPTIONS_RESPONSE = u"""
<DeleteDhcpOptionsResponse xmlns="http://ec2.amazonaws.com/doc/2013-10-15/">
<requestId>7a62c49f-347e-4fc4-9331-6e8eEXAMPLE</requestId>
<return>{{delete_status}}</return>
</DeleteDhcpOptionsResponse>
"""
DESCRIBE_DHCP_OPTIONS_RESPONSE = u"""
<DescribeDhcpOptionsResponse xmlns="http://ec2.amazonaws.com/doc/2013-10-15/">
<requestId>7a62c49f-347e-4fc4-9331-6e8eEXAMPLE</requestId>
<dhcpOptionsSet>
{% for dhcp_options_set in dhcp_options %}
<item>
<dhcpOptionsId>{{ dhcp_options_set.id }}</dhcpOptionsId>
<dhcpConfigurationSet>
{% for key, values in dhcp_options_set.options.items() %}
{{ values }}
{% if values %}
<item>
<key>{{ key }}</key>
<valueSet>
{% for value in values %}
<item>
<value>{{ value }}</value>
</item>
{% endfor %}
</valueSet>
</item>
{% endif %}
{% endfor %}
</dhcpConfigurationSet>
<tagSet>
{% for tag in dhcp_options_set.get_tags() %}
<item>
<resourceId>{{ tag.resource_id }}</resourceId>
<resourceType>{{ tag.resource_type }}</resourceType>
<key>{{ tag.key }}</key>
<value>{{ tag.value }}</value>
</item>
{% endfor %}
</tagSet>
</item>
{% endfor %}
</dhcpOptionsSet>
</DescribeDhcpOptionsResponse>
"""
ASSOCIATE_DHCP_OPTIONS_RESPONSE = u"""
<AssociateDhcpOptionsResponse xmlns="http://ec2.amazonaws.com/doc/2013-10-15/">
<requestId>7a62c49f-347e-4fc4-9331-6e8eEXAMPLE</requestId>
<return>true</return>
</AssociateDhcpOptionsResponse>
"""
|
silveregg/moto
|
moto/ec2/responses/dhcp_options.py
|
Python
|
apache-2.0
| 5,169
|
'''
Settings
========
.. versionadded:: 1.0.7
This module is a complete and extensible framework for adding a
Settings interface to your application. By default, the interface uses
a :class:`SettingsWithSpinner`, which consists of a
:class:`~kivy.uix.spinner.Spinner` (top) to switch between individual
settings panels (bottom). See :ref:`differentlayouts` for some
alternatives.
.. image:: images/settingswithspinner_kivy.jpg
:align: center
A :class:`SettingsPanel` represents a group of configurable options. The
:attr:`SettingsPanel.title` property is used by :class:`Settings` when a panel
is added - it determines the name of the sidebar button. SettingsPanel controls
a :class:`~kivy.config.ConfigParser` instance.
The panel can be automatically constructed from a JSON definition file: you
describe the settings you want and corresponding sections/keys in the
ConfigParser instance... and you're done!
Settings are also integrated with the :class:`~kivy.app.App` class. Use
:meth:`Settings.add_kivy_panel` to configure the Kivy core settings in a panel.
.. _settings_json:
Create a panel from JSON
------------------------
To create a panel from a JSON-file, you need two things:
* a :class:`~kivy.config.ConfigParser` instance with default values
* a JSON file
.. warning::
The :class:`kivy.config.ConfigParser` is required. You cannot use the
default ConfigParser from Python libraries.
You must create and handle the :class:`~kivy.config.ConfigParser`
object. SettingsPanel will read the values from the associated
ConfigParser instance. Make sure you have default values for all sections/keys
in your JSON file!
The JSON file contains structured information to describe the available
settings. Here is an example::
[
{
"type": "title",
"title": "Windows"
},
{
"type": "bool",
"title": "Fullscreen",
"desc": "Set the window in windowed or fullscreen",
"section": "graphics",
"key": "fullscreen",
"true": "auto"
}
]
Each element in the root list represents a setting that the user can configure.
Only the "type" key is mandatory: an instance of the associated class will be
created and used for the setting - other keys are assigned to corresponding
properties of that class.
============== =================================================
Type Associated class
-------------- -------------------------------------------------
title :class:`SettingTitle`
bool :class:`SettingBoolean`
numeric :class:`SettingNumeric`
options :class:`SettingOptions`
string :class:`SettingString`
path :class:`SettingPath` (new from 1.1.0)
============== =================================================
In the JSON example above, the first element is of type "title". It will create
a new instance of :class:`SettingTitle` and apply the rest of the key-value
pairs to the properties of that class, i.e. "title": "Windows" sets the
:attr:`SettingTitle.title` property to "Windows".
To load the JSON example to a :class:`Settings` instance, use the
:meth:`Settings.add_json_panel` method. It will automatically instantiate a
:class:`SettingsPanel` and add it to :class:`Settings`::
from kivy.config import ConfigParser
config = ConfigParser()
config.read('myconfig.ini')
s = Settings()
s.add_json_panel('My custom panel', config, 'settings_custom.json')
s.add_json_panel('Another panel', config, 'settings_test2.json')
# then use the s as a widget...
.. _differentlayouts:
Different panel layouts
-----------------------
A kivy :class:`~kivy.app.App` can automatically create and display a
:class:`Settings` instance. See the :attr:`~kivy.app.App.settings_cls`
documentation for details on how to choose which settings class to
display.
Several pre-built settings widgets are available. All except
:class:`SettingsWithNoMenu` include close buttons triggering the
on_close event.
- :class:`Settings`: Displays settings with a sidebar at the left to
switch between json panels.
- :class:`SettingsWithSidebar`: A trivial subclass of
:class:`Settings`.
- :class:`SettingsWithSpinner`: Displays settings with a spinner at
the top, which can be used to switch between json panels. Uses
:class:`InterfaceWithSpinner` as the
:attr:`~Settings.interface_cls`. This is the default behavior from
Kivy 1.8.0.
- :class:`SettingsWithTabbedPanel`: Displays json panels as individual
tabs in a :class:`~kivy.uix.tabbedpanel.TabbedPanel`. Uses
:class:`InterfaceWithTabbedPanel` as the :attr:`~Settings.interface_cls`.
- :class:`SettingsWithNoMenu`: Displays a single json panel, with no
way to switch to other panels and no close button. This makes it
impossible for the user to exit unless
:meth:`~kivy.app.App.close_settings` is overridden with a different
close trigger! Uses :class:`InterfaceWithNoMenu` as the
:attr:`~Settings.interface_cls`.
You can construct your own settings panels with any layout you choose
by setting :attr:`Settings.interface_cls`. This should be a widget
that displays a json settings panel with some way to switch between
panels. An instance will be automatically created by :class:`Settings`.
Interface widgets may be anything you like, but *must* have a method
add_panel that receives newly created json settings panels for the
interface to display. See the documentation for
:class:`InterfaceWithSidebar` for more information. They may
optionally dispatch an on_close event, for instance if a close button
is clicked. This event is used by :class:`Settings` to trigger its own
on_close event.
'''
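# A minimal custom interface sketch (illustrative, not part of Kivy itself):
# any widget exposing add_panel(panel, name, uid), and optionally an
# 'on_close' event, can be assigned to Settings.interface_cls.
#
#     class MyInterface(BoxLayout):
#         __events__ = ('on_close', )
#
#         def add_panel(self, panel, name, uid):
#             self.add_widget(panel)
#
#         def on_close(self, *args):
#             pass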
__all__ = ('Settings', 'SettingsPanel', 'SettingItem', 'SettingString',
'SettingPath', 'SettingBoolean', 'SettingNumeric', 'SettingOptions',
'SettingTitle', 'SettingsWithSidebar', 'SettingsWithSpinner',
'SettingsWithTabbedPanel', 'SettingsWithNoMenu',
'InterfaceWithSidebar', 'ContentPanel')
import json
import os
from kivy.compat import string_types
from kivy.factory import Factory
from kivy.metrics import dp
from kivy.config import ConfigParser
from kivy.animation import Animation
from kivy.compat import string_types, text_type
from kivy.core.window import Window
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.tabbedpanel import TabbedPanelHeader
from kivy.uix.button import Button
from kivy.uix.filechooser import FileChooserListView
from kivy.uix.scrollview import ScrollView
from kivy.uix.floatlayout import FloatLayout
from kivy.uix.gridlayout import GridLayout
from kivy.uix.label import Label
from kivy.uix.popup import Popup
from kivy.uix.textinput import TextInput
from kivy.uix.togglebutton import ToggleButton
from kivy.uix.widget import Widget
from kivy.properties import ObjectProperty, StringProperty, ListProperty, \
BooleanProperty, NumericProperty, DictProperty
class SettingSpacer(Widget):
# Internal class, not documented.
pass
class SettingItem(FloatLayout):
'''Base class for individual settings (within a panel). This class cannot
be used directly; it is used for implementing the other setting classes.
It builds a row with a title/description (left) and a setting control
(right).
Look at :class:`SettingBoolean`, :class:`SettingNumeric` and
:class:`SettingOptions` for usage examples.
:Events:
`on_release`
Fired when the item is touched and then released.
'''
title = StringProperty('<No title set>')
'''Title of the setting, defaults to '<No title set>'.
:attr:`title` is a :class:`~kivy.properties.StringProperty` and defaults to
'<No title set>'.
'''
desc = StringProperty(None, allownone=True)
'''Description of the setting, rendered on the line below the title.
:attr:`desc` is a :class:`~kivy.properties.StringProperty` and defaults to
None.
'''
disabled = BooleanProperty(False)
'''Indicate if this setting is disabled. If True, all touches on the
setting item will be discarded.
:attr:`disabled` is a :class:`~kivy.properties.BooleanProperty` and
defaults to False.
'''
section = StringProperty(None)
'''Section of the token inside the :class:`~kivy.config.ConfigParser`
instance.
:attr:`section` is a :class:`~kivy.properties.StringProperty` and defaults
to None.
'''
key = StringProperty(None)
'''Key of the token inside the :attr:`section` in the
:class:`~kivy.config.ConfigParser` instance.
:attr:`key` is a :class:`~kivy.properties.StringProperty` and defaults to
None.
'''
value = ObjectProperty(None)
'''Value of the token according to the :class:`~kivy.config.ConfigParser`
instance. Any change to this value will trigger a
:meth:`Settings.on_config_change` event.
:attr:`value` is an :class:`~kivy.properties.ObjectProperty` and defaults
to None.
'''
panel = ObjectProperty(None)
'''(internal) Reference to the SettingsPanel for this setting. You don't
need to use it.
:attr:`panel` is an :class:`~kivy.properties.ObjectProperty` and defaults
to None.
'''
content = ObjectProperty(None)
'''(internal) Reference to the widget that contains the real setting.
As soon as the content object is set, any further call to add_widget will
call the content.add_widget. This is automatically set.
:attr:`content` is an :class:`~kivy.properties.ObjectProperty` and defaults
to None.
'''
selected_alpha = NumericProperty(0)
'''(internal) Float value from 0 to 1, used to animate the background when
the user touches the item.
:attr:`selected_alpha` is a :class:`~kivy.properties.NumericProperty` and
defaults to 0.
'''
__events__ = ('on_release', )
def __init__(self, **kwargs):
super(SettingItem, self).__init__(**kwargs)
self.value = self.panel.get_value(self.section, self.key)
def add_widget(self, *largs):
if self.content is None:
return super(SettingItem, self).add_widget(*largs)
return self.content.add_widget(*largs)
def on_touch_down(self, touch):
if not self.collide_point(*touch.pos):
return
if self.disabled:
return
touch.grab(self)
self.selected_alpha = 1
return super(SettingItem, self).on_touch_down(touch)
def on_touch_up(self, touch):
if touch.grab_current is self:
touch.ungrab(self)
self.dispatch('on_release')
Animation(selected_alpha=0, d=.25, t='out_quad').start(self)
return True
return super(SettingItem, self).on_touch_up(touch)
def on_release(self):
pass
def on_value(self, instance, value):
if not self.section or not self.key:
return
# get current value in config
panel = self.panel
if not isinstance(value, string_types):
value = str(value)
panel.set_value(self.section, self.key, value)
class SettingBoolean(SettingItem):
'''Implementation of a boolean setting on top of a :class:`SettingItem`. It
is visualized with a :class:`~kivy.uix.switch.Switch` widget. By default,
0 and 1 are used for values: you can change them by setting :attr:`values`.
'''
values = ListProperty(['0', '1'])
'''Values used to represent the state of the setting. If you want to use
"yes" and "no" in your ConfigParser instance::
SettingBoolean(..., values=['no', 'yes'])
.. warning::
You need a minimum of two values, the index 0 will be used as False,
and index 1 as True
:attr:`values` is a :class:`~kivy.properties.ListProperty` and defaults to
['0', '1']
'''
class SettingString(SettingItem):
'''Implementation of a string setting on top of a :class:`SettingItem`.
It is visualized with a :class:`~kivy.uix.label.Label` widget that, when
clicked, will open a :class:`~kivy.uix.popup.Popup` with a
:class:`~kivy.uix.textinput.Textinput` so the user can enter a custom
value.
'''
popup = ObjectProperty(None, allownone=True)
'''(internal) Used to store the current popup when it's shown.
:attr:`popup` is an :class:`~kivy.properties.ObjectProperty` and defaults
to None.
'''
textinput = ObjectProperty(None)
'''(internal) Used to store the current textinput from the popup and
to listen for changes.
:attr:`textinput` is an :class:`~kivy.properties.ObjectProperty` and
defaults to None.
'''
def on_panel(self, instance, value):
if value is None:
return
self.fbind('on_release', self._create_popup)
def _dismiss(self, *largs):
if self.textinput:
self.textinput.focus = False
if self.popup:
self.popup.dismiss()
self.popup = None
def _validate(self, instance):
self._dismiss()
value = self.textinput.text.strip()
self.value = value
def _create_popup(self, instance):
# create popup layout
content = BoxLayout(orientation='vertical', spacing='5dp')
popup_width = min(0.95 * Window.width, dp(500))
self.popup = popup = Popup(
title=self.title, content=content, size_hint=(None, None),
size=(popup_width, '250dp'))
# create the textinput used for numeric input
self.textinput = textinput = TextInput(
text=self.value, font_size='24sp', multiline=False,
size_hint_y=None, height='42sp')
textinput.bind(on_text_validate=self._validate)
self.textinput = textinput
# construct the content, widgets are used as spacers
content.add_widget(Widget())
content.add_widget(textinput)
content.add_widget(Widget())
content.add_widget(SettingSpacer())
# 2 buttons are created for accept or cancel the current value
btnlayout = BoxLayout(size_hint_y=None, height='50dp', spacing='5dp')
btn = Button(text='Ok')
btn.bind(on_release=self._validate)
btnlayout.add_widget(btn)
btn = Button(text='Cancel')
btn.bind(on_release=self._dismiss)
btnlayout.add_widget(btn)
content.add_widget(btnlayout)
# all done, open the popup !
popup.open()
class SettingPath(SettingItem):
'''Implementation of a Path setting on top of a :class:`SettingItem`.
It is visualized with a :class:`~kivy.uix.label.Label` widget that, when
clicked, will open a :class:`~kivy.uix.popup.Popup` with a
:class:`~kivy.uix.filechooser.FileChooserListView` so the user can enter
a custom value.
.. versionadded:: 1.1.0
'''
popup = ObjectProperty(None, allownone=True)
'''(internal) Used to store the current popup when it is shown.
:attr:`popup` is an :class:`~kivy.properties.ObjectProperty` and defaults
to None.
'''
textinput = ObjectProperty(None)
'''(internal) Used to store the current textinput from the popup and
to listen for changes.
:attr:`textinput` is an :class:`~kivy.properties.ObjectProperty` and
defaults to None.
'''
def on_panel(self, instance, value):
if value is None:
return
self.fbind('on_release', self._create_popup)
def _dismiss(self, *largs):
if self.textinput:
self.textinput.focus = False
if self.popup:
self.popup.dismiss()
self.popup = None
def _validate(self, instance):
self._dismiss()
value = self.textinput.selection
if not value:
return
self.value = os.path.realpath(value[0])
def _create_popup(self, instance):
# create popup layout
content = BoxLayout(orientation='vertical', spacing=5)
popup_width = min(0.95 * Window.width, dp(500))
self.popup = popup = Popup(
title=self.title, content=content, size_hint=(None, 0.9),
width=popup_width)
# create the filechooser
self.textinput = textinput = FileChooserListView(
path=self.value, size_hint=(1, 1), dirselect=True)
textinput.bind(on_path=self._validate)
self.textinput = textinput
# construct the content
content.add_widget(textinput)
content.add_widget(SettingSpacer())
# 2 buttons are created for accept or cancel the current value
btnlayout = BoxLayout(size_hint_y=None, height='50dp', spacing='5dp')
btn = Button(text='Ok')
btn.bind(on_release=self._validate)
btnlayout.add_widget(btn)
btn = Button(text='Cancel')
btn.bind(on_release=self._dismiss)
btnlayout.add_widget(btn)
content.add_widget(btnlayout)
# all done, open the popup !
popup.open()
class SettingNumeric(SettingString):
'''Implementation of a numeric setting on top of a :class:`SettingString`.
It is visualized with a :class:`~kivy.uix.label.Label` widget that, when
clicked, will open a :class:`~kivy.uix.popup.Popup` with a
:class:`~kivy.uix.textinput.Textinput` so the user can enter a custom
value.
'''
def _validate(self, instance):
# we know the type just by checking if there is a '.' in the original
# value
is_float = '.' in str(self.value)
self._dismiss()
try:
if is_float:
self.value = text_type(float(self.textinput.text))
else:
self.value = text_type(int(self.textinput.text))
except ValueError:
return
class SettingOptions(SettingItem):
'''Implementation of an option list on top of a :class:`SettingItem`.
It is visualized with a :class:`~kivy.uix.label.Label` widget that, when
clicked, will open a :class:`~kivy.uix.popup.Popup` with a
list of options from which the user can select.
'''
options = ListProperty([])
'''List of all available options. This must be a list of "string" items.
Otherwise, it will crash. :)
:attr:`options` is a :class:`~kivy.properties.ListProperty` and defaults
to [].
'''
popup = ObjectProperty(None, allownone=True)
'''(internal) Used to store the current popup when it is shown.
:attr:`popup` is an :class:`~kivy.properties.ObjectProperty` and defaults
to None.
'''
def on_panel(self, instance, value):
if value is None:
return
self.fbind('on_release', self._create_popup)
def _set_option(self, instance):
self.value = instance.text
self.popup.dismiss()
def _create_popup(self, instance):
# create the popup
content = BoxLayout(orientation='vertical', spacing='5dp')
popup_width = min(0.95 * Window.width, dp(500))
self.popup = popup = Popup(
content=content, title=self.title, size_hint=(None, None),
size=(popup_width, '400dp'))
popup.height = len(self.options) * dp(55) + dp(150)
# add all the options
content.add_widget(Widget(size_hint_y=None, height=1))
uid = str(self.uid)
for option in self.options:
state = 'down' if option == self.value else 'normal'
btn = ToggleButton(text=option, state=state, group=uid)
btn.bind(on_release=self._set_option)
content.add_widget(btn)
# finally, add a cancel button to return on the previous panel
content.add_widget(SettingSpacer())
btn = Button(text='Cancel', size_hint_y=None, height=dp(50))
btn.bind(on_release=popup.dismiss)
content.add_widget(btn)
# and open the popup !
popup.open()
class SettingTitle(Label):
'''A simple title label, used to organize the settings in sections.
'''
title = Label.text
class SettingsPanel(GridLayout):
'''This class is used to construct panel settings, for use with a
:class:`Settings` instance or subclass.
'''
title = StringProperty('Default title')
'''Title of the panel. The title will be reused by the :class:`Settings` in
the sidebar.
'''
config = ObjectProperty(None, allownone=True)
'''A :class:`kivy.config.ConfigParser` instance. See module documentation
for more information.
'''
settings = ObjectProperty(None)
'''A :class:`Settings` instance that will be used to fire the
`on_config_change` event.
'''
def __init__(self, **kwargs):
kwargs.setdefault('cols', 1)
super(SettingsPanel, self).__init__(**kwargs)
def on_config(self, instance, value):
if value is None:
return
if not isinstance(value, ConfigParser):
raise Exception('Invalid config object, you must use a '
'kivy.config.ConfigParser, not another one!')
def get_value(self, section, key):
'''Return the value of the section/key from the :attr:`config`
ConfigParser instance. This function is used by :class:`SettingItem` to
get the value for a given section/key.
If you don't want to use a ConfigParser instance, you might want to
override this function.
'''
config = self.config
if not config:
return
return config.get(section, key)
def set_value(self, section, key, value):
current = self.get_value(section, key)
if current == value:
return
config = self.config
if config:
config.set(section, key, value)
config.write()
settings = self.settings
if settings:
settings.dispatch('on_config_change',
config, section, key, value)
class InterfaceWithSidebar(BoxLayout):
'''The default Settings interface class. It displays a sidebar menu
with names of available settings panels, which may be used to switch
which one is currently displayed.
See :meth:`~InterfaceWithSidebar.add_panel` for information on the
method you must implement if creating your own interface.
This class also dispatches an event 'on_close', which is triggered
when the sidebar menu's close button is released. If creating your
own interface widget, it should also dispatch such an event which
will automatically be caught by :class:`Settings` and used to
trigger its own 'on_close' event.
'''
menu = ObjectProperty()
'''(internal) A reference to the sidebar menu widget.
:attr:`menu` is an :class:`~kivy.properties.ObjectProperty` and
defaults to None.
'''
content = ObjectProperty()
'''(internal) A reference to the panel display widget (a
:class:`ContentPanel`).
:attr:`content` is an :class:`~kivy.properties.ObjectProperty` and
defaults to None.
'''
__events__ = ('on_close', )
def __init__(self, *args, **kwargs):
super(InterfaceWithSidebar, self).__init__(*args, **kwargs)
self.menu.close_button.bind(
on_release=lambda j: self.dispatch('on_close'))
def add_panel(self, panel, name, uid):
'''This method is used by Settings to add new panels for possible
display. Any replacement for ContentPanel *must* implement
this method.
:param panel: A :class:`SettingsPanel`. It should be stored
and the interface should provide a way to switch
between panels.
:param name: The name of the panel as a string. It
may be used to represent the panel but isn't necessarily
unique.
:param uid: A unique int identifying the panel. It should be
used to identify and switch between panels.
'''
self.menu.add_item(name, uid)
self.content.add_panel(panel, name, uid)
def on_close(self, *args):
pass
class InterfaceWithSpinner(BoxLayout):
'''A settings interface that displays a spinner at the top for
switching between panels.
The workings of this class are considered internal and are not
documented. See :class:`InterfaceWithSidebar` for
information on implementing your own interface class.
'''
__events__ = ('on_close', )
menu = ObjectProperty()
'''(internal) A reference to the sidebar menu widget.
:attr:`menu` is an :class:`~kivy.properties.ObjectProperty` and
defaults to None.
'''
content = ObjectProperty()
'''(internal) A reference to the panel display widget (a
:class:`ContentPanel`).
:attr:`content` is an :class:`~kivy.properties.ObjectProperty` and
defaults to None.
'''
def __init__(self, *args, **kwargs):
super(InterfaceWithSpinner, self).__init__(*args, **kwargs)
self.menu.close_button.bind(
on_release=lambda j: self.dispatch('on_close'))
def add_panel(self, panel, name, uid):
'''This method is used by Settings to add new panels for possible
display. Any replacement for ContentPanel *must* implement
this method.
:param panel: A :class:`SettingsPanel`. It should be stored
and the interface should provide a way to switch
between panels.
:param name: The name of the panel as a string. It
may be used to represent the panel but may not
be unique.
:param uid: A unique int identifying the panel. It should be
used to identify and switch between panels.
'''
self.content.add_panel(panel, name, uid)
self.menu.add_item(name, uid)
def on_close(self, *args):
pass
class ContentPanel(ScrollView):
'''A class for displaying settings panels. It displays a single
settings panel at a time, taking up the full size and shape of the
ContentPanel. It is used by :class:`InterfaceWithSidebar` and
:class:`InterfaceWithSpinner` to display settings.
'''
panels = DictProperty({})
'''(internal) Stores a dictionary mapping settings panels to their uids.
:attr:`panels` is a :class:`~kivy.properties.DictProperty` and
defaults to {}.
'''
container = ObjectProperty()
'''(internal) A reference to the GridLayout that contains the
settings panel.
:attr:`container` is an :class:`~kivy.properties.ObjectProperty` and
defaults to None.
'''
current_panel = ObjectProperty(None)
'''(internal) A reference to the current settings panel.
:attr:`current_panel` is an :class:`~kivy.properties.ObjectProperty` and
defaults to None.
'''
current_uid = NumericProperty(0)
'''(internal) A reference to the uid of the current settings panel.
:attr:`current_uid` is a
:class:`~kivy.properties.NumericProperty` and defaults to 0.
'''
def add_panel(self, panel, name, uid):
'''This method is used by Settings to add new panels for possible
display. Any replacement for ContentPanel *must* implement
this method.
:param panel: A :class:`SettingsPanel`. It should be stored
and displayed when requested.
:param name: The name of the panel as a string. It
may be used to represent the panel.
:param uid: A unique int identifying the panel. It should be
stored and used to identify panels when switching.
'''
self.panels[uid] = panel
if not self.current_uid:
self.current_uid = uid
def on_current_uid(self, *args):
'''The uid of the currently displayed panel. Changing this will
automatically change the displayed panel.
:param uid: A panel uid. It should be used to retrieve and
display a settings panel that has previously been
added with :meth:`add_panel`.
'''
uid = self.current_uid
if uid in self.panels:
if self.current_panel is not None:
self.remove_widget(self.current_panel)
new_panel = self.panels[uid]
self.add_widget(new_panel)
self.current_panel = new_panel
return True
return False # New uid doesn't exist
def add_widget(self, widget):
if self.container is None:
super(ContentPanel, self).add_widget(widget)
else:
self.container.add_widget(widget)
def remove_widget(self, widget):
self.container.remove_widget(widget)
class Settings(BoxLayout):
'''Settings UI. Check module documentation for more information on how
to use this class.
:Events:
`on_config_change`: ConfigParser instance, section, key, value
Fired when the section's key-value pair of a ConfigParser changes.
.. warning:
value will be str/unicode type, regardless of the setting
type (numeric, boolean, etc)
`on_close`
Fired by the default panel when the Close button is pressed.
'''
interface = ObjectProperty(None)
'''(internal) Reference to the widget that will contain, organise and
display the panel configuration panel widgets.
:attr:`interface` is an :class:`~kivy.properties.ObjectProperty` and
defaults to None.
'''
interface_cls = ObjectProperty(InterfaceWithSidebar)
'''The widget class that will be used to display the graphical
interface for the settings panel. By default, it displays one Settings
panel at a time with a sidebar to switch between them.
:attr:`interface_cls` is an
:class:`~kivy.properties.ObjectProperty` and defaults to
:class`InterfaceWithSidebar`.
.. versionchanged:: 1.8.0
If you set a string, the :class:`~kivy.factory.Factory` will be used to
resolve the class.
'''
__events__ = ('on_close', 'on_config_change')
def __init__(self, *args, **kargs):
self._types = {}
super(Settings, self).__init__(*args, **kargs)
self.add_interface()
self.register_type('string', SettingString)
self.register_type('bool', SettingBoolean)
self.register_type('numeric', SettingNumeric)
self.register_type('options', SettingOptions)
self.register_type('title', SettingTitle)
self.register_type('path', SettingPath)
def on_touch_down(self, touch):
if self.collide_point(*touch.pos):
super(Settings, self).on_touch_down(touch)
return True
def register_type(self, tp, cls):
'''Register a new type that can be used in the JSON definition.
'''
self._types[tp] = cls
def on_close(self, *args):
pass
def add_interface(self):
'''(Internal) creates an instance of :attr:`Settings.interface_cls`,
and sets it to :attr:`~Settings.interface`. When json panels are
created, they will be added to this interface which will display them
to the user.
'''
cls = self.interface_cls
if isinstance(cls, string_types):
cls = Factory.get(cls)
interface = cls()
self.interface = interface
self.add_widget(interface)
self.interface.bind(on_close=lambda j: self.dispatch('on_close'))
def on_config_change(self, config, section, key, value):
pass
def add_json_panel(self, title, config, filename=None, data=None):
'''Create and add a new :class:`SettingsPanel` using the configuration
`config` with the JSON definition `filename`.
Check the :ref:`settings_json` section in the documentation for more
information about JSON format and the usage of this function.
'''
panel = self.create_json_panel(title, config, filename, data)
uid = panel.uid
if self.interface is not None:
self.interface.add_panel(panel, title, uid)
def create_json_panel(self, title, config, filename=None, data=None):
'''Create new :class:`SettingsPanel`.
.. versionadded:: 1.5.0
Check the documentation of :meth:`add_json_panel` for more information.
'''
if filename is None and data is None:
raise Exception('You must specify either the filename or data')
if filename is not None:
with open(filename, 'r') as fd:
data = json.loads(fd.read())
else:
data = json.loads(data)
if type(data) != list:
raise ValueError('The first element must be a list')
panel = SettingsPanel(title=title, settings=self, config=config)
for setting in data:
# determine the type and the class to use
if 'type' not in setting:
raise ValueError('One setting is missing the "type" element')
ttype = setting['type']
cls = self._types.get(ttype)
if cls is None:
raise ValueError(
'No class registered to handle the <%s> type' %
setting['type'])
# create an instance of the class, without the type attribute
del setting['type']
str_settings = {}
for key, item in setting.items():
str_settings[str(key)] = item
instance = cls(panel=panel, **str_settings)
# instance created, add to the panel
panel.add_widget(instance)
return panel
def add_kivy_panel(self):
'''Add a panel for configuring Kivy. This panel acts directly on the
kivy configuration. Feel free to include or exclude it in your
configuration.
See :meth:`~kivy.app.App.use_kivy_settings` for information on
enabling/disabling the automatic kivy panel.
'''
from kivy import kivy_data_dir
from kivy.config import Config
from os.path import join
self.add_json_panel('Kivy', Config,
join(kivy_data_dir, 'settings_kivy.json'))
class SettingsWithSidebar(Settings):
'''A settings widget that displays settings panels with a sidebar to
switch between them. This is the default behaviour of
:class:`Settings`, and this widget is a trivial wrapper subclass.
'''
class SettingsWithSpinner(Settings):
'''A settings widget that displays one settings panel at a time with a
spinner at the top to switch between them.
'''
def __init__(self, *args, **kwargs):
self.interface_cls = InterfaceWithSpinner
super(SettingsWithSpinner, self).__init__(*args, **kwargs)
class SettingsWithTabbedPanel(Settings):
'''A settings widget that displays settings panels as pages in a
:class:`~kivy.uix.tabbedpanel.TabbedPanel`.
'''
__events__ = ('on_close', )
def __init__(self, *args, **kwargs):
self.interface_cls = InterfaceWithTabbedPanel
super(SettingsWithTabbedPanel, self).__init__(*args, **kwargs)
def on_close(self, *args):
pass
class SettingsWithNoMenu(Settings):
'''A settings widget that displays a single settings panel with *no*
Close button. It will not accept more than one Settings panel. It
is intended for use in programs with few enough settings that a
full panel switcher is not useful.
.. warning::
This Settings panel does *not* provide a Close
button, and so it is impossible to leave the settings screen
unless you also add other behaviour or override
:meth:`~kivy.app.App.display_settings` and
:meth:`~kivy.app.App.close_settings`.
'''
def __init__(self, *args, **kwargs):
self.interface_cls = InterfaceWithNoMenu
super(SettingsWithNoMenu, self).__init__(*args, **kwargs)
class InterfaceWithNoMenu(ContentPanel):
'''The interface widget used by :class:`SettingsWithNoMenu`. It
stores and displays a single settings panel.
This widget is considered internal and is not documented. See the
:class:`ContentPanel` for information on defining your own content
widget.
'''
def add_widget(self, widget):
if self.container is not None and len(self.container.children) > 0:
raise Exception(
'ContentNoMenu cannot accept more than one settings panel')
super(InterfaceWithNoMenu, self).add_widget(widget)
class InterfaceWithTabbedPanel(FloatLayout):
'''The content widget used by :class:`SettingsWithTabbedPanel`. It
stores and displays Settings panels in tabs of a TabbedPanel.
This widget is considered internal and is not documented. See
:class:`InterfaceWithSidebar` for information on defining your own
interface widget.
'''
tabbedpanel = ObjectProperty()
close_button = ObjectProperty()
__events__ = ('on_close', )
def __init__(self, *args, **kwargs):
super(InterfaceWithTabbedPanel, self).__init__(*args, **kwargs)
self.close_button.bind(on_release=lambda j: self.dispatch('on_close'))
def add_panel(self, panel, name, uid):
scrollview = ScrollView()
scrollview.add_widget(panel)
if not self.tabbedpanel.default_tab_content:
self.tabbedpanel.default_tab_text = name
self.tabbedpanel.default_tab_content = scrollview
else:
panelitem = TabbedPanelHeader(text=name, content=scrollview)
self.tabbedpanel.add_widget(panelitem)
def on_close(self, *args):
pass
class MenuSpinner(BoxLayout):
'''The menu class used by :class:`SettingsWithSpinner`. It provides a
spinner with an entry for each settings panel.
This widget is considered internal and is not documented. See
:class:`MenuSidebar` for information on menus and creating your own menu
class.
'''
selected_uid = NumericProperty(0)
close_button = ObjectProperty(0)
spinner = ObjectProperty()
panel_names = DictProperty({})
spinner_text = StringProperty()
close_button = ObjectProperty()
def add_item(self, name, uid):
values = self.spinner.values
if name in values:
i = 2
while name + ' {}'.format(i) in values:
i += 1
name = name + ' {}'.format(i)
self.panel_names[name] = uid
self.spinner.values.append(name)
if not self.spinner.text:
self.spinner.text = name
def on_spinner_text(self, *args):
text = self.spinner_text
self.selected_uid = self.panel_names[text]
class MenuSidebar(FloatLayout):
'''The menu used by :class:`InterfaceWithSidebar`. It provides a
sidebar with an entry for each settings panel, which the user may
click to select.
'''
selected_uid = NumericProperty(0)
'''The uid of the currently selected panel. This may be used to switch
between displayed panels, e.g. by binding it to the
:attr:`~ContentPanel.current_uid` of a :class:`ContentPanel`.
:attr:`selected_uid` is a
:class`~kivy.properties.NumericProperty` and defaults to 0.
'''
buttons_layout = ObjectProperty(None)
'''(internal) Reference to the GridLayout that contains individual
settings panel menu buttons.
:attr:`buttons_layout` is an
:class:`~kivy.properties.ObjectProperty` and defaults to None.
'''
close_button = ObjectProperty(None)
'''(internal) Reference to the widget's Close button.
:attr:`close_button` is an
:class:`~kivy.properties.ObjectProperty` and defaults to None.
'''
def add_item(self, name, uid):
'''This method is used to add new panels to the menu.
:param name: The name (a string) of the panel. It should be
used to represent the panel in the menu.
:param uid: The uid (an int) of the panel. It should be used
internally to represent the panel and used to set
self.selected_uid when the panel is changed.
'''
label = SettingSidebarLabel(text=name, uid=uid, menu=self)
if len(self.buttons_layout.children) == 0:
label.selected = True
if self.buttons_layout is not None:
self.buttons_layout.add_widget(label)
def on_selected_uid(self, *args):
'''(internal) unselects any currently selected menu buttons, unless
they represent the current panel.
'''
for button in self.buttons_layout.children:
if button.uid != self.selected_uid:
button.selected = False
class SettingSidebarLabel(Label):
# Internal class, not documented.
selected = BooleanProperty(False)
uid = NumericProperty(0)
menu = ObjectProperty(None)
def on_touch_down(self, touch):
if not self.collide_point(*touch.pos):
return
self.selected = True
self.menu.selected_uid = self.uid
if __name__ == '__main__':
from kivy.app import App
class SettingsApp(App):
def build(self):
s = Settings()
s.add_kivy_panel()
s.bind(on_close=self.stop)
return s
SettingsApp().run()
|
jffernandez/kivy
|
kivy/uix/settings.py
|
Python
|
mit
| 41,333
|
#!flask/bin/python
import imp
from migrate.versioning import api
from main import db
from main import SQLALCHEMY_DATABASE_URI
from main import SQLALCHEMY_MIGRATE_REPO
v = api.db_version(SQLALCHEMY_DATABASE_URI, SQLALCHEMY_MIGRATE_REPO)
migration = SQLALCHEMY_MIGRATE_REPO + ('/versions/%03d_migration.py' % (v+1))
tmp_module = imp.new_module('old_model')
old_model = api.create_model(SQLALCHEMY_DATABASE_URI, SQLALCHEMY_MIGRATE_REPO)
exec(old_model, tmp_module.__dict__)
script = api.make_update_script_for_model(SQLALCHEMY_DATABASE_URI,
SQLALCHEMY_MIGRATE_REPO,
tmp_module.meta, db.metadata)
open(migration, "wt").write(script)
api.upgrade(SQLALCHEMY_DATABASE_URI, SQLALCHEMY_MIGRATE_REPO)
v = api.db_version(SQLALCHEMY_DATABASE_URI, SQLALCHEMY_MIGRATE_REPO)
print('New migration saved as ' + migration)
print('Current database version: ' + str(v))
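# Typical invocation (illustrative): running "python db_migrate.py" after
# changing the models writes a new versions/NNN_migration.py script into the
# migrate repository and upgrades the database to that version.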
|
YinYang-Coders/Campus-Canteen
|
db_migrate.py
|
Python
|
mit
| 934
|
#!/usr/bin/env python3
import cmd
import json
import argparse
import requests
def grab_CLA():
"""
Return the command line arguments.
"""
parser = argparse.ArgumentParser()
parser.add_argument('--host', type=str, required=False,
help='Host to connect to.')
parser.add_argument('--port', type=int, required=False,
help='Port of host.')
parsed_args = parser.parse_args()
return parsed_args
class TestCMD(cmd.Cmd):
"""
Send commands to the server.
"""
def __init__(self, args):
cmd.Cmd.__init__(self)
self.prompt = '>> '
self.intro = 'Welcome to fem-gl command line interface version 1.'
self.headers = {'user-agent': 'fem-gl/1--alpha'}
self.host = args.host
self.port = args.port
def post_prototype(self, api_call, data=None):
"""
Prototype for sending a post request with error handling and so on.
"""
try:
url = 'http://{}:{}/{}'.format(self.host, self.port, api_call)
response = requests.post(
url=url,
json=data,
timeout=3.5,
headers=self.headers
)
response.raise_for_status()
except BaseException as e:
return '{}'.format(e)
return response.json()
def do_hi(self, line):
"""
Provoke a request by the server.
"""
api_call = 'request_something'
data = {'argument': 'noooo'}
answer = self.post_prototype(api_call=api_call, data=data)
print(answer)
print(json.loads(answer)['answer'])
def do_append(self, line):
api_call = 'append_to_stack'
data = ''
answer = self.post_prototype(api_call=api_call, data=data)
print(answer)
def do_pop(self, line):
api_call = 'pop_from_stack'
data = ''
answer = self.post_prototype(api_call=api_call, data=data)
print(answer)
def do_exit(self, line):
"""
Exit the CLI.
"""
print('Bye.')
return -1
def do_quit(self, line):
"""
Exit alias.
"""
return(self.do_exit(line))
if __name__ == '__main__':
"""
Start.
"""
ARGS = grab_CLA()
CLI = TestCMD(args=ARGS)
CLI.cmdloop()
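# Example session (hypothetical host and port):
#
#     $ python3 fem-gl-cmd.py --host 127.0.0.1 --port 8008
#     >> hi
#     >> exit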
|
Klump3n/fem-gl
|
fem-gl-cmd.py
|
Python
|
gpl-3.0
| 2,382
|
"""
Tests that deepchem models make deterministic predictions.
"""
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
__author__ = "Bharath Ramsundar"
__copyright__ = "Copyright 2016, Stanford University"
__license__ = "MIT"
import os
import tempfile
import numpy as np
import unittest
import sklearn
import shutil
import tensorflow as tf
import deepchem as dc
from tensorflow.python.framework import test_util
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import RandomForestRegressor
class TestPredict(test_util.TensorFlowTestCase):
"""
Test that models make deterministic predictions
These tests guard against failures like having dropout turned on at
test time.
"""
def setUp(self):
super(TestPredict, self).setUp()
self.current_dir = os.path.dirname(os.path.abspath(__file__))
def test_tf_progressive_regression_predict(self):
"""Test tf progressive multitask makes deterministic predictions."""
np.random.seed(123)
n_tasks = 9
n_samples = 10
n_features = 3
n_classes = 2
# Generate dummy dataset
ids = np.arange(n_samples)
X = np.random.rand(n_samples, n_features)
y = np.zeros((n_samples, n_tasks))
w = np.ones((n_samples, n_tasks))
dataset = dc.data.NumpyDataset(X, y, w, ids)
regression_metric = dc.metrics.Metric(
dc.metrics.mean_squared_error, task_averager=np.mean)
model = dc.models.ProgressiveMultitaskRegressor(
n_tasks,
n_features,
layer_sizes=[50],
bypass_layer_sizes=[10],
dropouts=[.25],
learning_rate=0.003,
weight_init_stddevs=[.1],
alpha_init_stddevs=[.02],
batch_size=n_samples)
# Fit trained model
model.fit(dataset, nb_epoch=25)
model.save()
# Check same predictions are made.
y_pred_first = model.predict(dataset)
y_pred_second = model.predict(dataset)
np.testing.assert_allclose(y_pred_first, y_pred_second)
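# Run directly with pytest (illustrative):
#
#     pytest deepchem/models/tests/test_predict.py -k progressive_regression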
|
joegomes/deepchem
|
deepchem/models/tests/test_predict.py
|
Python
|
mit
| 2,023
|
#
# Copyright 2017-2020 Manuel Barrette
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
""" Initialize windows and make the main window appear """
import sys
from PyQt5 import QtCore
from PyQt5.QtWidgets import QApplication, QMainWindow, QDialog
import onde_stationnaire_main_window
import dialog_onde
import platform
import locale, ctypes
# Initialize windows
app = QApplication(sys.argv)
window_Onde = QMainWindow()
dialog = QDialog()
systeme_exploitation = platform.system()
if systeme_exploitation == 'Windows':
langwin = ctypes.windll.kernel32
langue_sys = locale.windows_locale[langwin.GetUserDefaultUILanguage()]
elif systeme_exploitation in ('Darwin', 'Linux'):
langue_sys = locale.getdefaultlocale()[0]
langue_sys = langue_sys[0:2]
translator = QtCore.QTranslator()
if langue_sys == "fr":
langue = "fr_CA"
else:
langue = "en_CA"
translator.load(langue)
app.installTranslator(translator)
ui_Onde_Sonore_Stat = onde_stationnaire_main_window.Ui_MainWindow()
ui_Dial = dialog_onde.Ui_Dialog()
ui_Dial.setupUi(dialog)
ui_Onde_Sonore_Stat.setupUi(window_Onde, dialog, None)
# Make main window appear
window_Onde.show()
sys.exit(app.exec_())
|
Pattedetable/onde-sonore-stationnaire
|
onde_son_stat.py
|
Python
|
gpl-3.0
| 1,760
|
import sys
__all__ = ['TestCase']
if sys.version_info[:2] == (2, 6):
import contextlib
import collections
from unittest import TestCase as BaseTestCase
class SkipException(Exception):
pass
class ExceptionContext(object):
def __init__(self):
self.exception = None
class TestCase(BaseTestCase):
def assertIs(self, a, b):
self.assertTrue(a is b)
def assertIsNot(self, a, b):
self.assertTrue(a is not b)
def assertIsNone(self, a):
self.assertTrue(a is None)
def assertIsNotNone(self, a):
self.assertTrue(a is not None)
def assertIn(self, a, b):
self.assertTrue(a in b)
def assertNotIn(self, a, b):
self.assertTrue(a not in b)
def assertIsInstance(self, a, b):
self.assertTrue(isinstance(a, b))
def assertNotIsInstance(self, a, b):
self.assertTrue(not isinstance(a, b))
def assertSequenceEqual(self, a, b, msg=None, seq_type=None):
return self.assertEqual(tuple(a), tuple(b), msg=msg)
def assertMultiLineEqual(self, a, b, msg=None):
return self.assertEqual(a, b, msg=msg)
@contextlib.contextmanager
def failUnlessRaises(self, error, *args):
context = ExceptionContext()
if len(args) == 0:
try:
yield context
except error as exception:
context.exception = exception
else:
self.fail('{0} was not raised'.format(error))
else:
super(TestCase, self).failUnlessRaises(error, *args)
assertRaises = failUnlessRaises
def run(self, result=None):
BaseTestCase.run(self, result)
if result is not None:
errors = result.errors
skip_error = (
'in skipTest\n raise SkipException(msg)'
'\nSkipException:')
result.errors = []
for error in errors:
if skip_error in error[1]:
print ('Skipped')
else:
result.errors.append(error)
return result
def skipTest(self, msg):
raise SkipException(msg)
else:
from unittest import TestCase
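# Usage sketch (illustrative): test modules import TestCase from this shim so
# that the newer assert helpers and skipTest also work on Python 2.6:
#
#     from .compat import TestCase
#
#     class ExampleTest(TestCase):
#         def test_membership(self):
#             self.assertIn(1, [1, 2, 3])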
|
b-jesch/service.fritzbox.callmonitor
|
resources/lib/PhoneBooks/pyicloud/vendorlibs/win32ctypes/tests/compat.py
|
Python
|
gpl-2.0
| 2,422
|
#!/usr/bin/env python2
'''
You should normally never use this! Use emcc instead.
This is a small wrapper script around the core JS compiler. This calls that
compiler with the settings given to it. It can also read data from C/C++
header files (so that the JS compiler can see the constants in those
headers, for the libc implementation in JS).
'''
import os, sys, json, optparse, subprocess, re, time, multiprocessing, string, logging
from tools import shared
from tools import jsrun, cache as cache_module, tempfiles
from tools.response_file import read_response_file
from tools.shared import WINDOWS
__rootpath__ = os.path.abspath(os.path.dirname(__file__))
def path_from_root(*pathelems):
"""Returns the absolute path for which the given path elements are
relative to the emscripten root.
"""
return os.path.join(__rootpath__, *pathelems)
def get_configuration():
if hasattr(get_configuration, 'configuration'):
return get_configuration.configuration
configuration = shared.Configuration(environ=os.environ)
get_configuration.configuration = configuration
return configuration
def scan(ll, settings):
# blockaddress(@main, %23)
blockaddrs = []
for blockaddr in re.findall('blockaddress\([^)]*\)', ll):
b = blockaddr.split('(')[1][:-1].split(', ')
blockaddrs.append(b)
if len(blockaddrs) > 0:
settings['NECESSARY_BLOCKADDRS'] = blockaddrs
NUM_CHUNKS_PER_CORE = 1.0
MIN_CHUNK_SIZE = 1024*1024
MAX_CHUNK_SIZE = float(os.environ.get('EMSCRIPT_MAX_CHUNK_SIZE') or 'inf') # configuring this is just for debugging purposes
STDERR_FILE = os.environ.get('EMCC_STDERR_FILE')
if STDERR_FILE:
STDERR_FILE = os.path.abspath(STDERR_FILE)
logging.info('logging stderr in js compiler phase into %s' % STDERR_FILE)
STDERR_FILE = open(STDERR_FILE, 'w')
def process_funcs((i, funcs_file, meta, settings_file, compiler, forwarded_file, libraries, compiler_engine, DEBUG)):
try:
#print >> sys.stderr, 'running', str([settings_file, funcs_file, 'funcs', forwarded_file] + libraries).replace("'/", "'") # can use this in src/compiler_funcs.html arguments,
# # just copy temp dir to under this one
out = jsrun.run_js(
compiler,
engine=compiler_engine,
args=[settings_file, funcs_file, 'funcs', forwarded_file] + libraries,
stdout=subprocess.PIPE,
stderr=STDERR_FILE,
cwd=path_from_root('src'))
except KeyboardInterrupt:
# Python 2.7 seems to lock up when a child process throws KeyboardInterrupt
raise Exception()
if DEBUG: logging.debug('.')
return out
def emscript(infile, settings, outfile, libraries=[], compiler_engine=None,
jcache=None, temp_files=None, DEBUG=None, DEBUG_CACHE=None):
"""Runs the emscripten LLVM-to-JS compiler. We parallelize as much as possible.
Args:
infile: The path to the input LLVM assembly file.
settings: JSON-formatted settings that override the values
defined in src/settings.js.
outfile: The file where the output is written.
"""
compiler = path_from_root('src', 'compiler.js')
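# The settings argument is a plain dict whose keys mirror src/settings.js.
# A hypothetical, minimal example (values here are illustrative only):
#   settings = {'ASM_JS': 1, 'ASSERTIONS': 1, 'EXPORTED_FUNCTIONS': ['_main']}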
# Parallelization: We run 3 phases:
# 1 aka 'pre' : Process types and metadata and so forth, and generate the preamble.
# 2 aka 'funcs': Process functions. We can parallelize this, working on each function independently.
# 3 aka 'post' : Process globals, generate postamble and finishing touches.
if DEBUG: logging.debug('emscript: ll=>js')
if jcache: jcache.ensure()
# Pre-scan ll and alter settings as necessary
if DEBUG: t = time.time()
ll = open(infile).read()
scan(ll, settings)
total_ll_size = len(ll)
if DEBUG: logging.debug(' emscript: scan took %s seconds' % (time.time() - t))
# Split input into the relevant parts for each phase
if DEBUG: t = time.time()
pre = []
funcs = [] # split up functions here, for parallelism later
meta_start = ll.find('\n!')
if meta_start > 0:
meta = ll[meta_start:]
else:
meta = ''
meta_start = -1
start = ll.find('\n') if ll[0] == ';' else 0 # ignore first line, which contains ; ModuleID = '/dir name'
func_start = start
last = func_start
while 1:
last = func_start
func_start = ll.find('\ndefine ', func_start)
if func_start > last:
pre.append(ll[last:min(func_start+1, meta_start) if meta_start > 0 else func_start+1] + '\n')
if func_start < 0:
pre.append(ll[last:meta_start] + '\n')
break
header = ll[func_start+1:ll.find('\n', func_start+1)+1]
end = ll.find('\n}', func_start)
last = end+3
funcs.append((header, ll[func_start+1:last]))
pre.append(header + '}\n')
func_start = last
ll = None
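# Illustrative shape of the split: pre holds the module-level IR plus one
# 'define ... {}' stub per function, while funcs holds (header, full_text)
# tuples such as
#   ('define i32 @main() {\n', 'define i32 @main() {\n ... \n}\n')
# so each function body can be processed independently in phase 2.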
if DEBUG and len(meta) > 1024*1024: logging.debug('emscript warning: large amounts of metadata, will slow things down')
if DEBUG: logging.debug(' emscript: split took %s seconds' % (time.time() - t))
if len(funcs) == 0:
logging.error('No functions to process. Make sure you prevented LLVM from eliminating them as dead (use EXPORTED_FUNCTIONS if necessary, see the FAQ)')
#if DEBUG:
# logging.debug('========= pre ================\n')
# logging.debug(''.join(pre))
# logging.debug('========== funcs ===============\n')
# for func in funcs:
# logging.debug('\n// ===\n\n', ''.join(func))
# logging.debug('=========================\n')
# Save settings to a file to work around v8 issue 1579
settings_file = temp_files.get('.txt').name
def save_settings():
global settings_text
settings_text = json.dumps(settings, sort_keys=True)
s = open(settings_file, 'w')
s.write(settings_text)
s.close()
save_settings()
# Phase 1 - pre
if DEBUG: t = time.time()
pre_file = temp_files.get('.pre.ll').name
pre_input = ''.join(pre) + '\n' + meta
out = None
if jcache:
keys = [pre_input, settings_text, ','.join(libraries)]
shortkey = jcache.get_shortkey(keys)
if DEBUG_CACHE: logging.debug('shortkey', shortkey)
out = jcache.get(shortkey, keys)
if DEBUG_CACHE and not out:
dfpath = os.path.join(get_configuration().TEMP_DIR, "ems_" + shortkey)
dfp = open(dfpath, 'w')
dfp.write(pre_input)
dfp.write("\n\n========================== settings_text\n\n")
dfp.write(settings_text)
dfp.write("\n\n========================== libraries\n\n")
dfp.write("\n".join(libraries))
dfp.close()
logging.debug(' cache miss, key data dumped to %s' % dfpath)
if out and DEBUG: logging.debug(' loading pre from jcache')
if not out:
open(pre_file, 'w').write(pre_input)
#print >> sys.stderr, 'running', str([settings_file, pre_file, 'pre'] + libraries).replace("'/", "'") # see funcs
out = jsrun.run_js(compiler, compiler_engine, [settings_file, pre_file, 'pre'] + libraries, stdout=subprocess.PIPE, stderr=STDERR_FILE,
cwd=path_from_root('src'))
assert '//FORWARDED_DATA:' in out, 'Did not receive forwarded data in pre output - process failed?'
if jcache:
if DEBUG: logging.debug(' saving pre to jcache')
jcache.set(shortkey, keys, out)
pre, forwarded_data = out.split('//FORWARDED_DATA:')
forwarded_file = temp_files.get('.json').name
pre_input = None
open(forwarded_file, 'w').write(forwarded_data)
if DEBUG: logging.debug(' emscript: phase 1 took %s seconds' % (time.time() - t))
indexed_functions = set()
forwarded_json = json.loads(forwarded_data)
for key in forwarded_json['Functions']['indexedFunctions'].iterkeys():
indexed_functions.add(key)
# Phase 2 - func
cores = int(os.environ.get('EMCC_CORES') or multiprocessing.cpu_count())
assert cores >= 1
if cores > 1:
intended_num_chunks = int(round(cores * NUM_CHUNKS_PER_CORE))
chunk_size = max(MIN_CHUNK_SIZE, total_ll_size / intended_num_chunks)
chunk_size += 3*len(meta) # keep ratio of lots of function code to meta (expensive to process, and done in each parallel task)
chunk_size = min(MAX_CHUNK_SIZE, chunk_size)
else:
chunk_size = MAX_CHUNK_SIZE # if 1 core, just use the max chunk size
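# Worked example (illustrative numbers): with cores = 4, 12 MB of function IR
# and 1 MB of metadata, intended_num_chunks = 4 and
#   chunk_size = min(MAX_CHUNK_SIZE, max(MIN_CHUNK_SIZE, 12 MB / 4) + 3 * 1 MB) = 6 MB
# so the functions are compiled in roughly two parallel chunks.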
if DEBUG: t = time.time()
if settings.get('ASM_JS'):
settings['EXPORTED_FUNCTIONS'] = forwarded_json['EXPORTED_FUNCTIONS']
save_settings()
chunks = cache_module.chunkify(
funcs, chunk_size,
jcache.get_cachename('emscript_files') if jcache else None)
#sys.exit(1)
#chunks = [chunks[0]] # pick specific chunks for debugging/profiling
funcs = None
if jcache:
# load chunks from cache where we can (TODO: ignore small chunks)
cached_outputs = []
def load_from_cache(chunk):
keys = [settings_text, forwarded_data, chunk]
shortkey = jcache.get_shortkey(keys) # TODO: share shortkeys with later code
out = jcache.get(shortkey, keys) # this is relatively expensive (pickling?)
if out:
cached_outputs.append(out)
return False
return True
chunks = filter(load_from_cache, chunks)
if len(cached_outputs) > 0:
if out and DEBUG: logging.debug(' loading %d funcchunks from jcache' % len(cached_outputs))
else:
cached_outputs = []
# TODO: minimize size of forwarded data from funcs to what we actually need
if len(chunks) > 0:
if cores == 1 and total_ll_size < MAX_CHUNK_SIZE:
assert len(chunks) == 1, 'no point in splitting up without multiple cores'
if DEBUG: logging.debug(' emscript: phase 2 working on %d chunks %s (intended chunk size: %.2f MB, meta: %.2f MB, forwarded: %.2f MB, total: %.2f MB)' % (len(chunks), ('using %d cores' % cores) if len(chunks) > 1 else '', chunk_size/(1024*1024.), len(meta)/(1024*1024.), len(forwarded_data)/(1024*1024.), total_ll_size/(1024*1024.)))
commands = []
for i in range(len(chunks)):
funcs_file = temp_files.get('.func_%d.ll' % i).name
f = open(funcs_file, 'w')
f.write(chunks[i])
if not jcache:
chunks[i] = None # leave chunks array alive (need its length later)
f.write('\n')
f.write(meta)
f.close()
commands.append(
(i, funcs_file, meta, settings_file, compiler, forwarded_file, libraries, compiler_engine,# + ['--prof'],
DEBUG)
)
if len(chunks) > 1:
pool = multiprocessing.Pool(processes=cores)
outputs = pool.map(process_funcs, commands, chunksize=1)
elif len(chunks) == 1:
outputs = [process_funcs(commands[0])]
commands = None
else:
outputs = []
if jcache:
# save chunks to cache
for i in range(len(chunks)):
chunk = chunks[i]
keys = [settings_text, forwarded_data, chunk]
shortkey = jcache.get_shortkey(keys)
jcache.set(shortkey, keys, outputs[i])
if out and DEBUG and len(chunks) > 0: logging.debug(' saving %d funcchunks to jcache' % len(chunks))
chunks = None
if jcache: outputs += cached_outputs # TODO: preserve order
outputs = [output.split('//FORWARDED_DATA:') for output in outputs]
for output in outputs:
assert len(output) == 2, 'Did not receive forwarded data in an output - process failed? We only got: ' + output[0][-3000:]
if DEBUG: logging.debug(' emscript: phase 2 took %s seconds' % (time.time() - t))
if DEBUG: t = time.time()
# merge forwarded data
if settings.get('ASM_JS'):
all_exported_functions = set(settings['EXPORTED_FUNCTIONS']) # both asm.js and otherwise
for additional_export in settings['DEFAULT_LIBRARY_FUNCS_TO_INCLUDE']: # additional functions to export from asm, if they are implemented
all_exported_functions.add('_' + additional_export)
exported_implemented_functions = set()
for func_js, curr_forwarded_data in outputs:
curr_forwarded_json = json.loads(curr_forwarded_data)
forwarded_json['Types']['hasInlineJS'] = forwarded_json['Types']['hasInlineJS'] or curr_forwarded_json['Types']['hasInlineJS']
forwarded_json['Types']['usesSIMD'] = forwarded_json['Types']['usesSIMD'] or curr_forwarded_json['Types']['usesSIMD']
forwarded_json['Types']['preciseI64MathUsed'] = forwarded_json['Types']['preciseI64MathUsed'] or curr_forwarded_json['Types']['preciseI64MathUsed']
for key, value in curr_forwarded_json['Functions']['blockAddresses'].iteritems():
forwarded_json['Functions']['blockAddresses'][key] = value
for key in curr_forwarded_json['Functions']['indexedFunctions'].iterkeys():
indexed_functions.add(key)
if settings.get('ASM_JS'):
export_bindings = settings['EXPORT_BINDINGS']
export_all = settings['EXPORT_ALL']
for key in curr_forwarded_json['Functions']['implementedFunctions'].iterkeys():
if key in all_exported_functions or export_all or (export_bindings and key.startswith('_emscripten_bind')):
exported_implemented_functions.add(key)
for key, value in curr_forwarded_json['Functions']['unimplementedFunctions'].iteritems():
forwarded_json['Functions']['unimplementedFunctions'][key] = value
for key, value in curr_forwarded_json['Functions']['neededTables'].iteritems():
forwarded_json['Functions']['neededTables'][key] = value
if settings.get('ASM_JS'):
parts = pre.split('// ASM_LIBRARY FUNCTIONS\n')
if len(parts) > 1:
pre = parts[0]
outputs.append([parts[1]])
funcs_js = [output[0] for output in outputs]
outputs = None
if DEBUG: logging.debug(' emscript: phase 2b took %s seconds' % (time.time() - t))
if DEBUG: t = time.time()
# calculations on merged forwarded data
forwarded_json['Functions']['indexedFunctions'] = {}
i = settings['FUNCTION_POINTER_ALIGNMENT'] # universal counter
if settings['ASM_JS']: i += settings['RESERVED_FUNCTION_POINTERS']*settings['FUNCTION_POINTER_ALIGNMENT']
base_fp = i
table_counters = {} # table-specific counters
alias = settings['ASM_JS'] and settings['ALIASING_FUNCTION_POINTERS']
sig = None
for indexed in indexed_functions:
if alias:
sig = forwarded_json['Functions']['implementedFunctions'].get(indexed) or forwarded_json['Functions']['unimplementedFunctions'].get(indexed)
assert sig, indexed
if sig not in table_counters:
table_counters[sig] = base_fp
curr = table_counters[sig]
table_counters[sig] += settings['FUNCTION_POINTER_ALIGNMENT']
else:
curr = i
i += settings['FUNCTION_POINTER_ALIGNMENT']
#logging.debug('function indexing ' + str([indexed, curr, sig]))
forwarded_json['Functions']['indexedFunctions'][indexed] = curr # make sure not to modify this python object later - we use it in indexize
def split_32(x):
x = int(x)
return '%d,%d,%d,%d' % (x&255, (x >> 8)&255, (x >> 16)&255, (x >> 24)&255)
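# Example: split_32(0x12345678) -> '120,86,52,18', i.e. the bytes
# 0x78,0x56,0x34,0x12 in little-endian order.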
indexing = forwarded_json['Functions']['indexedFunctions']
def indexize_mem(js):
return re.sub(r"\"?'?{{ FI_([\w\d_$]+) }}'?\"?,0,0,0", lambda m: split_32(indexing.get(m.groups(0)[0]) or 0), js)
def indexize(js):
return re.sub(r"'{{ FI_([\w\d_$]+) }}'", lambda m: str(indexing.get(m.groups(0)[0]) or 0), js)
blockaddrs = forwarded_json['Functions']['blockAddresses']
def blockaddrsize_mem(js):
return re.sub(r'"?{{{ BA_([\w\d_$]+)\|([\w\d_$]+) }}}"?,0,0,0', lambda m: split_32(blockaddrs[m.groups(0)[0]][m.groups(0)[1]]), js)
def blockaddrsize(js):
return re.sub(r'"?{{{ BA_([\w\d_$]+)\|([\w\d_$]+) }}}"?', lambda m: str(blockaddrs[m.groups(0)[0]][m.groups(0)[1]]), js)
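# Illustrative behaviour of the rewriters above: a quoted placeholder such as
# '{{ FI__main }}' is replaced by that function's numeric table index (0 if
# unknown), and {{{ BA_func|label }}} by the recorded block address; the *_mem
# variants emit the same value as four comma-separated bytes for the memory
# initializer.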
pre = blockaddrsize(blockaddrsize_mem(indexize(indexize_mem(pre))))
if settings.get('ASM_JS'):
# move postsets into the asm module
class PostSets: js = ''
def handle_post_sets(m):
PostSets.js = m.group(0)
return '\n'
pre = re.sub(r'function runPostSets[^}]+}', handle_post_sets, pre)
#if DEBUG: outfile.write('// pre\n')
outfile.write(pre)
pre = None
#if DEBUG: outfile.write('// funcs\n')
# forward
forwarded_data = json.dumps(forwarded_json)
forwarded_file = temp_files.get('.2.json').name
open(forwarded_file, 'w').write(indexize(forwarded_data))
if DEBUG: logging.debug(' emscript: phase 2c took %s seconds' % (time.time() - t))
# Phase 3 - post
if DEBUG: t = time.time()
post_file = temp_files.get('.post.ll').name
open(post_file, 'w').write('\n') # no input, just processing of forwarded data
out = jsrun.run_js(compiler, compiler_engine, [settings_file, post_file, 'post', forwarded_file] + libraries, stdout=subprocess.PIPE, stderr=STDERR_FILE,
cwd=path_from_root('src'))
post, last_forwarded_data = out.split('//FORWARDED_DATA:') # if this fails, perhaps the process failed prior to printing forwarded data?
last_forwarded_json = json.loads(last_forwarded_data)
if settings.get('ASM_JS'):
post_funcs, post_rest = post.split('// EMSCRIPTEN_END_FUNCS\n')
post = post_rest
# Move preAsms to their right place
def move_preasm(m):
contents = m.groups(0)[0]
outfile.write(contents + '\n')
return ''
post_funcs = re.sub(r'/\* PRE_ASM \*/(.*)\n', lambda m: move_preasm(m), post_funcs)
funcs_js += ['\n' + post_funcs + '// EMSCRIPTEN_END_FUNCS\n']
simple = os.environ.get('EMCC_SIMPLE_ASM')
class Counter:
i = 0
j = 0
pre_tables = last_forwarded_json['Functions']['tables']['pre']
del last_forwarded_json['Functions']['tables']['pre']
def make_table(sig, raw):
i = Counter.i
Counter.i += 1
bad = 'b' + str(i)
params = ','.join(['p%d' % p for p in range(len(sig)-1)])
coercions = ';'.join(['p%d = %s' % (p, shared.JS.make_coercion('p%d' % p, sig[p+1], settings)) for p in range(len(sig)-1)]) + ';'
ret = '' if sig[0] == 'v' else ('return %s' % shared.JS.make_initializer(sig[0], settings))
start = raw.index('[')
end = raw.rindex(']')
body = raw[start+1:end].split(',')
for j in range(settings['RESERVED_FUNCTION_POINTERS']):
body[settings['FUNCTION_POINTER_ALIGNMENT'] * (1 + j)] = 'jsCall_%s_%s' % (sig, j)
Counter.j = 0
def fix_item(item):
Counter.j += 1
newline = Counter.j % 30 == 29
if item == '0': return bad if not newline else (bad + '\n')
return item if not newline else (item + '\n')
body = ','.join(map(fix_item, body))
return ('function %s(%s) { %s %s(%d); %s }' % (bad, params, coercions, 'abort' if not settings['ASSERTIONS'] else 'nullFunc', i, ret), ''.join([raw[:start+1], body, raw[end:]]))
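# Illustrative output of make_table for a hypothetical 'ii' signature: the
# first tuple element is a bad-pointer stub such as
#   function b0(p0) { p0 = p0|0; abort(0); return 0 }
# and the second is the table text with every 0 entry replaced by that stub
# (and, when reserved function pointers are enabled, slots pointing at
# jsCall_ii_N), e.g. var FUNCTION_TABLE_ii = [b0,jsCall_ii_0,_foo,b0];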
infos = [make_table(sig, raw) for sig, raw in last_forwarded_json['Functions']['tables'].iteritems()]
function_tables_defs = '\n'.join([info[0] for info in infos]) + '\n// EMSCRIPTEN_END_FUNCS\n' + '\n'.join([info[1] for info in infos])
asm_setup = ''
maths = ['Math.' + func for func in ['floor', 'abs', 'sqrt', 'pow', 'cos', 'sin', 'tan', 'acos', 'asin', 'atan', 'atan2', 'exp', 'log', 'ceil', 'imul']]
fundamentals = ['Math', 'Int8Array', 'Int16Array', 'Int32Array', 'Uint8Array', 'Uint16Array', 'Uint32Array', 'Float32Array', 'Float64Array']
if settings['ALLOW_MEMORY_GROWTH']: fundamentals.append('byteLength')
math_envs = ['Math.min'] # TODO: move min to maths
asm_setup += '\n'.join(['var %s = %s;' % (f.replace('.', '_'), f) for f in math_envs])
if settings['PRECISE_F32']: maths += ['Math.fround']
basic_funcs = ['abort', 'assert', 'asmPrintInt', 'asmPrintFloat'] + [m.replace('.', '_') for m in math_envs]
if settings['RESERVED_FUNCTION_POINTERS'] > 0: basic_funcs.append('jsCall')
if settings['SAFE_HEAP']: basic_funcs += ['SAFE_HEAP_LOAD', 'SAFE_HEAP_STORE', 'SAFE_FT_MASK']
if settings['CHECK_HEAP_ALIGN']: basic_funcs += ['CHECK_ALIGN_2', 'CHECK_ALIGN_4', 'CHECK_ALIGN_8']
if settings['ASSERTIONS']:
basic_funcs += ['nullFunc']
asm_setup += 'function nullFunc(x) { Module["printErr"]("Invalid function pointer called. Perhaps a miscast function pointer (check compilation warnings) or bad vtable lookup (maybe due to derefing a bad pointer, like NULL)?"); abort(x) }\n'
basic_vars = ['STACKTOP', 'STACK_MAX', 'tempDoublePtr', 'ABORT']
basic_float_vars = ['NaN', 'Infinity']
if forwarded_json['Types']['preciseI64MathUsed'] or \
forwarded_json['Functions']['libraryFunctions'].get('_llvm_cttz_i32') or \
forwarded_json['Functions']['libraryFunctions'].get('_llvm_ctlz_i32'):
basic_vars += ['cttz_i8', 'ctlz_i8']
if settings.get('DLOPEN_SUPPORT'):
for sig in last_forwarded_json['Functions']['tables'].iterkeys():
basic_vars.append('F_BASE_%s' % sig)
asm_setup += ' var F_BASE_%s = %s;\n' % (sig, 'FUNCTION_TABLE_OFFSET' if settings.get('SIDE_MODULE') else '0') + '\n'
asm_runtime_funcs = ['stackAlloc', 'stackSave', 'stackRestore', 'setThrew'] + ['setTempRet%d' % i for i in range(10)] + ['getTempRet%d' % i for i in range(10)]
# function tables
function_tables = ['dynCall_' + table for table in last_forwarded_json['Functions']['tables']]
function_tables_impls = []
for sig in last_forwarded_json['Functions']['tables'].iterkeys():
args = ','.join(['a' + str(i) for i in range(1, len(sig))])
arg_coercions = ' '.join(['a' + str(i) + '=' + shared.JS.make_coercion('a' + str(i), sig[i], settings) + ';' for i in range(1, len(sig))])
coerced_args = ','.join([shared.JS.make_coercion('a' + str(i), sig[i], settings) for i in range(1, len(sig))])
ret = ('return ' if sig[0] != 'v' else '') + shared.JS.make_coercion('FUNCTION_TABLE_%s[index&{{{ FTM_%s }}}](%s)' % (sig, sig, coerced_args), sig[0], settings)
function_tables_impls.append('''
function dynCall_%s(index%s%s) {
index = index|0;
%s
%s;
}
''' % (sig, ',' if len(sig) > 1 else '', args, arg_coercions, ret))
for i in range(settings['RESERVED_FUNCTION_POINTERS']):
jsret = ('return ' if sig[0] != 'v' else '') + shared.JS.make_coercion('jsCall(%d%s%s)' % (i, ',' if coerced_args else '', coerced_args), sig[0], settings)
function_tables_impls.append('''
function jsCall_%s_%s(%s) {
%s
%s;
}
''' % (sig, i, args, arg_coercions, jsret))
shared.Settings.copy(settings)
asm_setup += '\n' + shared.JS.make_invoke(sig) + '\n'
basic_funcs.append('invoke_%s' % sig)
if settings.get('DLOPEN_SUPPORT'):
asm_setup += '\n' + shared.JS.make_extcall(sig) + '\n'
basic_funcs.append('extCall_%s' % sig)
# calculate exports
exported_implemented_functions = list(exported_implemented_functions)
exported_implemented_functions.append('runPostSets')
exports = []
if not simple:
for export in exported_implemented_functions + asm_runtime_funcs + function_tables:
exports.append("%s: %s" % (export, export))
exports = '{ ' + ', '.join(exports) + ' }'
else:
exports = '_main'
# calculate globals
try:
del forwarded_json['Variables']['globals']['_llvm_global_ctors'] # not a true variable
except KeyError:
pass
# If no named globals, only need externals
global_vars = map(lambda g: g['name'], filter(lambda g: settings['NAMED_GLOBALS'] or g.get('external') or g.get('unIndexable'), forwarded_json['Variables']['globals'].values()))
global_funcs = [key for key, value in forwarded_json['Functions']['libraryFunctions'].iteritems() if value != 2]
def math_fix(g):
return g if not g.startswith('Math_') else g.split('_')[1]
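# Illustrative: math_fix('Math_floor') -> 'floor', math_fix('abort') -> 'abort'.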
asm_global_funcs = ''.join([' var ' + g.replace('.', '_') + '=global.' + g + ';\n' for g in maths]) + \
''.join([' var ' + g + '=env.' + math_fix(g) + ';\n' for g in basic_funcs + global_funcs])
asm_global_vars = ''.join([' var ' + g + '=env.' + g + '|0;\n' for g in basic_vars + global_vars]) + \
''.join([' var ' + g + '=+env.' + g + ';\n' for g in basic_float_vars])
# In linkable modules, we need to add some explicit globals for global variables that can be linked and used across modules
if settings.get('MAIN_MODULE') or settings.get('SIDE_MODULE'):
assert settings.get('TARGET_ASMJS_UNKNOWN_EMSCRIPTEN'), 'TODO: support x86 target when linking modules (needs offset of 4 and not 8 here)'
for key, value in forwarded_json['Variables']['globals'].iteritems():
if value.get('linkable'):
init = forwarded_json['Variables']['indexedGlobals'][key] + 8 # 8 is Runtime.GLOBAL_BASE / STATIC_BASE
if settings.get('SIDE_MODULE'): init = '(H_BASE+' + str(init) + ')|0'
asm_global_vars += ' var %s=%s;\n' % (key, str(init))
# sent data
the_global = '{ ' + ', '.join(['"' + math_fix(s) + '": ' + s for s in fundamentals]) + ' }'
sending = '{ ' + ', '.join(['"' + math_fix(s) + '": ' + s for s in basic_funcs + global_funcs + basic_vars + basic_float_vars + global_vars]) + ' }'
# received
if not simple:
receiving = ';\n'.join(['var ' + s + ' = Module["' + s + '"] = asm["' + s + '"]' for s in exported_implemented_functions + function_tables])
else:
receiving = 'var _main = Module["_main"] = asm;'
# finalize
if DEBUG: logging.debug('asm text sizes' + str([map(len, funcs_js), len(asm_setup), len(asm_global_vars), len(asm_global_funcs), len(pre_tables), len('\n'.join(function_tables_impls)), len(function_tables_defs.replace('\n', '\n ')), len(exports), len(the_global), len(sending), len(receiving)]))
funcs_js = ['''
%s
function asmPrintInt(x, y) {
Module.print('int ' + x + ',' + y);// + ' ' + new Error().stack);
}
function asmPrintFloat(x, y) {
Module.print('float ' + x + ',' + y);// + ' ' + new Error().stack);
}
// EMSCRIPTEN_START_ASM
var asm = (function(global, env, buffer) {
%s
%s
''' % (asm_setup, "'use asm';" if not forwarded_json['Types']['hasInlineJS'] and not settings['SIDE_MODULE'] and settings['ASM_JS'] == 1 else "'almost asm';",
'''
var HEAP8 = new global.Int8Array(buffer);
var HEAP16 = new global.Int16Array(buffer);
var HEAP32 = new global.Int32Array(buffer);
var HEAPU8 = new global.Uint8Array(buffer);
var HEAPU16 = new global.Uint16Array(buffer);
var HEAPU32 = new global.Uint32Array(buffer);
var HEAPF32 = new global.Float32Array(buffer);
var HEAPF64 = new global.Float64Array(buffer);
''' if not settings['ALLOW_MEMORY_GROWTH'] else '''
var Int8View = global.Int8Array;
var Int16View = global.Int16Array;
var Int32View = global.Int32Array;
var Uint8View = global.Uint8Array;
var Uint16View = global.Uint16Array;
var Uint32View = global.Uint32Array;
var Float32View = global.Float32Array;
var Float64View = global.Float64Array;
var HEAP8 = new Int8View(buffer);
var HEAP16 = new Int16View(buffer);
var HEAP32 = new Int32View(buffer);
var HEAPU8 = new Uint8View(buffer);
var HEAPU16 = new Uint16View(buffer);
var HEAPU32 = new Uint32View(buffer);
var HEAPF32 = new Float32View(buffer);
var HEAPF64 = new Float64View(buffer);
var byteLength = global.byteLength;
''') + '\n' + asm_global_vars + '''
var __THREW__ = 0;
var threwValue = 0;
var setjmpId = 0;
var undef = 0;
var tempInt = 0, tempBigInt = 0, tempBigIntP = 0, tempBigIntS = 0, tempBigIntR = 0.0, tempBigIntI = 0, tempBigIntD = 0, tempValue = 0, tempDouble = 0.0;
''' + ''.join(['''
var tempRet%d = 0;''' % i for i in range(10)]) + '\n' + asm_global_funcs] + [' var tempFloat = %s;\n' % ('Math_fround(0)' if settings.get('PRECISE_F32') else '0.0')] + ([' const f0 = Math_fround(0);\n'] if settings.get('PRECISE_F32') else []) + ['''
// EMSCRIPTEN_START_FUNCS
function stackAlloc(size) {
size = size|0;
var ret = 0;
ret = STACKTOP;
STACKTOP = (STACKTOP + size)|0;
''' + ('STACKTOP = (STACKTOP + 3)&-4;' if settings['TARGET_X86'] else 'STACKTOP = (STACKTOP + 7)&-8;') + '''
return ret|0;
}
function stackSave() {
return STACKTOP|0;
}
function stackRestore(top) {
top = top|0;
STACKTOP = top;
}
function setThrew(threw, value) {
threw = threw|0;
value = value|0;
if ((__THREW__|0) == 0) {
__THREW__ = threw;
threwValue = value;
}
}
function copyTempFloat(ptr) {
ptr = ptr|0;
HEAP8[tempDoublePtr>>0] = HEAP8[ptr>>0];
HEAP8[tempDoublePtr+1>>0] = HEAP8[ptr+1>>0];
HEAP8[tempDoublePtr+2>>0] = HEAP8[ptr+2>>0];
HEAP8[tempDoublePtr+3>>0] = HEAP8[ptr+3>>0];
}
function copyTempDouble(ptr) {
ptr = ptr|0;
HEAP8[tempDoublePtr>>0] = HEAP8[ptr>>0];
HEAP8[tempDoublePtr+1>>0] = HEAP8[ptr+1>>0];
HEAP8[tempDoublePtr+2>>0] = HEAP8[ptr+2>>0];
HEAP8[tempDoublePtr+3>>0] = HEAP8[ptr+3>>0];
HEAP8[tempDoublePtr+4>>0] = HEAP8[ptr+4>>0];
HEAP8[tempDoublePtr+5>>0] = HEAP8[ptr+5>>0];
HEAP8[tempDoublePtr+6>>0] = HEAP8[ptr+6>>0];
HEAP8[tempDoublePtr+7>>0] = HEAP8[ptr+7>>0];
}
''' + ''.join(['''
function setTempRet%d(value) {
value = value|0;
tempRet%d = value;
}
''' % (i, i) for i in range(10)]) + ''.join(['''
function getTempRet%d() {
return tempRet%d|0;
}
''' % (i, i) for i in range(10)])] + [PostSets.js + '\n'] + funcs_js + ['''
%s
return %s;
})
// EMSCRIPTEN_END_ASM
(%s, %s, buffer);
%s;
''' % (pre_tables + '\n'.join(function_tables_impls) + '\n' + function_tables_defs.replace('\n', '\n '), exports, the_global, sending, receiving)]
if not settings.get('SIDE_MODULE'):
funcs_js.append('''
Runtime.stackAlloc = asm['stackAlloc'];
Runtime.stackSave = asm['stackSave'];
Runtime.stackRestore = asm['stackRestore'];
Runtime.setTempRet0 = asm['setTempRet0'];
Runtime.getTempRet0 = asm['getTempRet0'];
''')
# Set function table masks
masks = {}
max_mask = 0
for sig, table in last_forwarded_json['Functions']['tables'].iteritems():
mask = table.count(',')
masks[sig] = str(mask)
max_mask = max(mask, max_mask)
def function_table_maskize(js, masks):
def fix(m):
sig = m.groups(0)[0]
return masks[sig]
return re.sub(r'{{{ FTM_([\w\d_$]+) }}}', lambda m: fix(m), js) # masks[m.groups(0)[0]]
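# Illustrative: a call site emitted as FUNCTION_TABLE_ii[index&{{{ FTM_ii }}}](...)
# gets its {{{ FTM_ii }}} placeholder replaced by that table's mask, i.e. its
# comma count (table length minus one) as computed above.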
funcs_js = map(lambda js: function_table_maskize(js, masks), funcs_js)
if settings.get('DLOPEN_SUPPORT'):
funcs_js.append('''
asm.maxFunctionIndex = %(max_mask)d;
DLFCN.registerFunctions(asm, %(max_mask)d+1, %(sigs)s, Module);
Module.SYMBOL_TABLE = SYMBOL_TABLE;
''' % { 'max_mask': max_mask, 'sigs': str(map(str, last_forwarded_json['Functions']['tables'].keys())) })
else:
function_tables_defs = '\n'.join([table for table in last_forwarded_json['Functions']['tables'].itervalues()])
outfile.write(function_tables_defs)
funcs_js = ['''
// EMSCRIPTEN_START_FUNCS
'''] + funcs_js + ['''
// EMSCRIPTEN_END_FUNCS
''']
# Create symbol table for self-dlopen
if settings.get('DLOPEN_SUPPORT'):
symbol_table = {}
for k, v in forwarded_json['Variables']['indexedGlobals'].iteritems():
if forwarded_json['Variables']['globals'][k]['named']:
symbol_table[k] = str(v + forwarded_json['Runtime']['GLOBAL_BASE'])
for raw in last_forwarded_json['Functions']['tables'].itervalues():
if raw == '': continue
table = map(string.strip, raw[raw.find('[')+1:raw.find(']')].split(","))
for i in range(len(table)):
value = table[i]
if value != '0':
if settings.get('SIDE_MODULE'):
symbol_table[value] = 'FUNCTION_TABLE_OFFSET+' + str(i)
else:
symbol_table[value] = str(i)
outfile.write("var SYMBOL_TABLE = %s;" % json.dumps(symbol_table).replace('"', ''))
for i in range(len(funcs_js)): # do this loop carefully to save memory
funcs_js_item = funcs_js[i]
funcs_js[i] = None
funcs_js_item = indexize(funcs_js_item)
funcs_js_item = blockaddrsize(funcs_js_item)
if WINDOWS: funcs_js_item = funcs_js_item.replace('\r\n', '\n') # Normalize to UNIX line endings, otherwise writing to text file will duplicate \r\n to \r\r\n!
outfile.write(funcs_js_item)
funcs_js = None
indexized = indexize(post)
if WINDOWS: indexized = indexized.replace('\r\n', '\n') # Normalize to UNIX line endings, otherwise writing to text file will duplicate \r\n to \r\r\n!
outfile.write(indexized)
outfile.close()
if DEBUG: logging.debug(' emscript: phase 3 took %s seconds' % (time.time() - t))
# emscript_fast: emscript'en code using the 'fast' compilation path, using
# an LLVM backend
# FIXME: this is just a copy-paste of normal emscript(), and we trample it
# if the proper env var is set (see below). we should refactor to
# share code between the two, once emscript_fast stabilizes (or,
# leaving it separate like it is will make it trivial to rip out
# if the experiment fails)
def emscript_fast(infile, settings, outfile, libraries=[], compiler_engine=None,
jcache=None, temp_files=None, DEBUG=None, DEBUG_CACHE=None):
"""Runs the emscripten LLVM-to-JS compiler.
Args:
infile: The path to the input LLVM assembly file.
settings: JSON-formatted settings that override the values
defined in src/settings.js.
outfile: The file where the output is written.
"""
assert settings['ASM_JS'], 'fastcomp is asm.js-only (mode 1 or 2)'
success = False
try:
# Overview:
# * Run LLVM backend to emit JS. JS includes function bodies, memory initializer,
# and various metadata
# * Run compiler.js on the metadata to emit the shell js code, pre/post-ambles,
# JS library dependencies, etc.
temp_js = temp_files.get('.4.js').name
backend_compiler = os.path.join(shared.LLVM_ROOT, 'llc')
backend_args = [backend_compiler, infile, '-march=js', '-filetype=asm', '-o', temp_js]
if settings['PRECISE_F32']:
backend_args += ['-emscripten-precise-f32']
if settings['WARN_UNALIGNED']:
backend_args += ['-emscripten-warn-unaligned']
if settings['RESERVED_FUNCTION_POINTERS'] > 0:
backend_args += ['-emscripten-reserved-function-pointers=%d' % settings['RESERVED_FUNCTION_POINTERS']]
if settings['ASSERTIONS'] > 0:
backend_args += ['-emscripten-assertions=%d' % settings['ASSERTIONS']]
if settings['ALIASING_FUNCTION_POINTERS'] == 0:
backend_args += ['-emscripten-no-aliasing-function-pointers']
if settings['GLOBAL_BASE'] >= 0:
backend_args += ['-emscripten-global-base=%d' % settings['GLOBAL_BASE']]
backend_args += ['-O' + str(settings['OPT_LEVEL'])]
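# Illustrative backend command line assembled above (paths and optimization
# level hypothetical; the -emscripten-* flags appear only when the
# corresponding settings are enabled):
#   <LLVM_ROOT>/llc input.ll -march=js -filetype=asm -o /tmp/xxx.4.js -O2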
if DEBUG:
logging.debug('emscript: llvm backend: ' + ' '.join(backend_args))
t = time.time()
shared.jsrun.timeout_run(subprocess.Popen(backend_args, stdout=subprocess.PIPE))
if DEBUG:
logging.debug(' emscript: llvm backend took %s seconds' % (time.time() - t))
t = time.time()
# Split up output
backend_output = open(temp_js).read()
#if DEBUG: print >> sys.stderr, backend_output
start_funcs_marker = '// EMSCRIPTEN_START_FUNCTIONS'
end_funcs_marker = '// EMSCRIPTEN_END_FUNCTIONS'
metadata_split_marker = '// EMSCRIPTEN_METADATA'
start_funcs = backend_output.index(start_funcs_marker)
end_funcs = backend_output.rindex(end_funcs_marker)
metadata_split = backend_output.rindex(metadata_split_marker)
funcs = backend_output[start_funcs+len(start_funcs_marker):end_funcs]
metadata_raw = backend_output[metadata_split+len(metadata_split_marker):]
#if DEBUG: print >> sys.stderr, "METAraw", metadata_raw
metadata = json.loads(metadata_raw)
mem_init = backend_output[end_funcs+len(end_funcs_marker):metadata_split]
#if DEBUG: print >> sys.stderr, "FUNCS", funcs
#if DEBUG: print >> sys.stderr, "META", metadata
#if DEBUG: print >> sys.stderr, "meminit", mem_init
# if emulating pointer casts, force all tables to the size of the largest
if settings['EMULATE_FUNCTION_POINTER_CASTS']:
max_size = 0
for k, v in metadata['tables'].iteritems():
max_size = max(max_size, v.count(',')+1)
for k, v in metadata['tables'].iteritems():
curr = v.count(',')+1
if curr < max_size:
metadata['tables'][k] = v.replace(']', (',0'*(max_size - curr)) + ']')
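# Illustrative: if the largest table has 4 entries, a smaller (hypothetical)
# table such as
#   var FUNCTION_TABLE_vi = [b0,_bar];
# is padded to
#   var FUNCTION_TABLE_vi = [b0,_bar,0,0];
# so all signatures share one table length and indices survive pointer casts.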
# function table masks
table_sizes = {}
for k, v in metadata['tables'].iteritems():
table_sizes[k] = str(v.count(',')) # undercounts by one, but that is what we want
#if settings['ASSERTIONS'] >= 2 and table_sizes[k] == 0:
# print >> sys.stderr, 'warning: no function pointers with signature ' + k + ', but there is a call, which will abort if it occurs (this can result from undefined behavior, check for compiler warnings on your source files and consider -Werror)'
funcs = re.sub(r"#FM_(\w+)#", lambda m: table_sizes[m.groups(0)[0]], funcs)
# fix +float into float.0, if not running js opts
if not settings['RUNNING_JS_OPTS']:
def fix_dot_zero(m):
num = m.group(3)
# TODO: handle 0x floats?
if num.find('.') < 0:
e = num.find('e');
if e < 0:
num += '.0'
else:
num = num[:e] + '.0' + num[e:]
return m.group(1) + m.group(2) + num
funcs = re.sub(r'([(=,+\-*/%<>:?] *)\+(-?)((0x)?[0-9a-f]*\.?[0-9]+([eE][-+]?[0-9]+)?)', lambda m: fix_dot_zero(m), funcs)
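# Illustrative rewrites performed above when the JS optimizer will not run:
#   'x = +5;'   -> 'x = 5.0;'
#   'y = +1e3;' -> 'y = 1.0e3;'
# i.e. the asm.js '+' double coercion becomes an explicit float literal.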
# js compiler
if DEBUG: logging.debug('emscript: js compiler glue')
# Settings changes
assert settings['TARGET_ASMJS_UNKNOWN_EMSCRIPTEN'] == 1
settings['TARGET_ASMJS_UNKNOWN_EMSCRIPTEN'] = 2
i64_funcs = ['i64Add', 'i64Subtract', '__muldi3', '__divdi3', '__udivdi3', '__remdi3', '__uremdi3']
for i64_func in i64_funcs:
if i64_func in metadata['declares']:
settings['PRECISE_I64_MATH'] = 2
break
metadata['declares'] = filter(lambda i64_func: i64_func not in ['getHigh32', 'setHigh32', '__muldi3', '__divdi3', '__remdi3', '__udivdi3', '__uremdi3'], metadata['declares']) # FIXME: do these one by one as normal js lib funcs
# Integrate info from backend
settings['DEFAULT_LIBRARY_FUNCS_TO_INCLUDE'] = list(
set(settings['DEFAULT_LIBRARY_FUNCS_TO_INCLUDE'] + map(shared.JS.to_nice_ident, metadata['declares'])).difference(
map(lambda x: x[1:], metadata['implementedFunctions'])
)
) + map(lambda x: x[1:], metadata['externs'])
if metadata['simd']:
settings['SIMD'] = 1
if metadata['cantValidate'] and settings['ASM_JS'] != 2:
logging.warning('disabling asm.js validation due to use of non-supported features: ' + metadata['cantValidate'])
settings['ASM_JS'] = 2
# Save settings to a file to work around v8 issue 1579
settings_file = temp_files.get('.txt').name
def save_settings():
global settings_text
settings_text = json.dumps(settings, sort_keys=True)
s = open(settings_file, 'w')
s.write(settings_text)
s.close()
save_settings()
# Call js compiler
if DEBUG: t = time.time()
out = jsrun.run_js(path_from_root('src', 'compiler.js'), compiler_engine,
[settings_file, ';', 'glue'] + libraries, stdout=subprocess.PIPE, stderr=STDERR_FILE,
cwd=path_from_root('src'), error_limit=300)
assert '//FORWARDED_DATA:' in out, 'Did not receive forwarded data in pre output - process failed?'
glue, forwarded_data = out.split('//FORWARDED_DATA:')
if DEBUG:
logging.debug(' emscript: glue took %s seconds' % (time.time() - t))
t = time.time()
last_forwarded_json = forwarded_json = json.loads(forwarded_data)
# merge in information from llvm backend
last_forwarded_json['Functions']['tables'] = metadata['tables']
'''indexed_functions = set()
for key in forwarded_json['Functions']['indexedFunctions'].iterkeys():
indexed_functions.add(key)'''
pre, post = glue.split('// EMSCRIPTEN_END_FUNCS')
#print >> sys.stderr, 'glue:', pre, '\n\n||||||||||||||||\n\n', post, '...............'
# memory and global initializers
global_initializers = ', '.join(map(lambda i: '{ func: function() { %s() } }' % i, metadata['initializers']))
if settings['SIMD'] == 1:
pre = open(path_from_root(os.path.join('src', 'ecmascript_simd.js'))).read() + '\n\n' + pre
staticbump = mem_init.count(',')+1
while staticbump % 16 != 0: staticbump += 1
pre = pre.replace('STATICTOP = STATIC_BASE + 0;', '''STATICTOP = STATIC_BASE + %d;
/* global initializers */ __ATINIT__.push(%s);
%s''' % (staticbump, global_initializers, mem_init)) # XXX wrong size calculation!
funcs_js = [funcs]
parts = pre.split('// ASM_LIBRARY FUNCTIONS\n')
if len(parts) > 1:
pre = parts[0]
funcs_js.append(parts[1])
# merge forwarded data
settings['EXPORTED_FUNCTIONS'] = forwarded_json['EXPORTED_FUNCTIONS']
all_exported_functions = set(settings['EXPORTED_FUNCTIONS']) # both asm.js and otherwise
for additional_export in settings['DEFAULT_LIBRARY_FUNCS_TO_INCLUDE']: # additional functions to export from asm, if they are implemented
all_exported_functions.add('_' + additional_export)
if settings['EXPORT_FUNCTION_TABLES']:
for table in last_forwarded_json['Functions']['tables'].values():
for func in table.split('[')[1].split(']')[0].split(','):
if func[0] == '_':
all_exported_functions.add(func)
exported_implemented_functions = set(metadata['exports'])
export_bindings = settings['EXPORT_BINDINGS']
export_all = settings['EXPORT_ALL']
all_implemented = metadata['implementedFunctions'] + forwarded_json['Functions']['implementedFunctions'].keys() # XXX perf?
for key in all_implemented:
if key in all_exported_functions or export_all or (export_bindings and key.startswith('_emscripten_bind')):
exported_implemented_functions.add(key)
implemented_functions = set(metadata['implementedFunctions'])
if settings['ASSERTIONS'] and settings.get('ORIGINAL_EXPORTED_FUNCTIONS'):
original_exports = settings['ORIGINAL_EXPORTED_FUNCTIONS']
if original_exports[0] == '@': original_exports = json.loads(open(original_exports[1:]).read())
for requested in original_exports:
if requested not in all_implemented and \
requested != '_malloc': # special-case malloc, EXPORTED by default for internal use, but we bake in a trivial allocator and warn at runtime if used in ASSERTIONS
logging.warning('function requested to be exported, but not implemented: "%s"', requested)
# Add named globals
named_globals = '\n'.join(['var %s = %s;' % (k, v) for k, v in metadata['namedGlobals'].iteritems()])
pre = pre.replace('// === Body ===', '// === Body ===\n' + named_globals + '\n')
#if DEBUG: outfile.write('// pre\n')
outfile.write(pre)
pre = None
#if DEBUG: outfile.write('// funcs\n')
# when emulating function pointer casts, we need to know what is the target of each pointer
if settings['EMULATE_FUNCTION_POINTER_CASTS']:
function_pointer_targets = {}
for sig, table in last_forwarded_json['Functions']['tables'].iteritems():
start = table.index('[')
end = table.rindex(']')
body = table[start+1:end].split(',')
parsed = map(lambda x: x.strip(), body)
for i in range(len(parsed)):
if parsed[i] != '0':
assert i not in function_pointer_targets
function_pointer_targets[i] = [sig, str(parsed[i])]
# Move preAsms to their right place
def move_preasm(m):
contents = m.groups(0)[0]
outfile.write(contents + '\n')
return ''
if not settings['BOOTSTRAPPING_STRUCT_INFO']:
funcs_js[1] = re.sub(r'/\* PRE_ASM \*/(.*)\n', lambda m: move_preasm(m), funcs_js[1])
class Counter:
i = 0
j = 0
if 'pre' in last_forwarded_json['Functions']['tables']:
pre_tables = last_forwarded_json['Functions']['tables']['pre']
del last_forwarded_json['Functions']['tables']['pre']
else:
pre_tables = ''
def unfloat(s):
return 'd' if s == 'f' else s # lower float to double for ffis
if settings['ASSERTIONS'] >= 2:
debug_tables = {}
def make_params(sig): return ','.join(['p%d' % p for p in range(len(sig)-1)])
def make_coerced_params(sig): return ','.join([shared.JS.make_coercion('p%d', unfloat(sig[p+1]), settings) % p for p in range(len(sig)-1)])
def make_coercions(sig): return ';'.join(['p%d = %s' % (p, shared.JS.make_coercion('p%d' % p, sig[p+1], settings)) for p in range(len(sig)-1)]) + ';'
def make_func(name, code, params, coercions): return 'function %s(%s) { %s %s }' % (name, params, coercions, code)
def make_table(sig, raw):
params = make_params(sig)
coerced_params = make_coerced_params(sig)
coercions = make_coercions(sig)
def make_bad(target=None):
i = Counter.i
Counter.i += 1
if target is None: target = i
name = 'b' + str(i)
if not settings['ASSERTIONS']:
code = 'abort(%s);' % target
else:
code = 'nullFunc_' + sig + '(%d);' % target
if sig[0] != 'v':
code += 'return %s' % shared.JS.make_initializer(sig[0], settings) + ';'
return name, make_func(name, code, params, coercions)
bad, bad_func = make_bad() # the default bad func
if settings['ASSERTIONS'] <= 1:
Counter.pre = [bad_func]
else:
Counter.pre = []
start = raw.index('[')
end = raw.rindex(']')
body = raw[start+1:end].split(',')
for j in range(settings['RESERVED_FUNCTION_POINTERS']):
curr = 'jsCall_%s_%s' % (sig, j)
body[settings['FUNCTION_POINTER_ALIGNMENT'] * (1 + j)] = curr
implemented_functions.add(curr)
Counter.j = 0
def fix_item(item):
j = Counter.j
Counter.j += 1
newline = Counter.j % 30 == 29
if item == '0':
if j > 0 and settings['EMULATE_FUNCTION_POINTER_CASTS'] and j in function_pointer_targets: # emulate all non-null pointer calls, if asked to
proper_sig, proper_target = function_pointer_targets[j]
def make_emulated_param(i):
if i >= len(sig): return shared.JS.make_initializer(proper_sig[i], settings) # extra param, just send a zero
return shared.JS.make_coercion('p%d' % (i-1), proper_sig[i], settings, convert_from=sig[i])
proper_code = proper_target + '(' + ','.join(map(lambda i: make_emulated_param(i+1), range(len(proper_sig)-1))) + ')'
if proper_sig[0] != 'v':
# proper sig has a return, which the wrapper may or may not use
proper_code = shared.JS.make_coercion(proper_code, proper_sig[0], settings)
if sig[0] != 'v':
proper_code = 'return ' + proper_code
else:
# proper sig has no return, we may need a fake return
if sig[0] != 'v':
proper_code = 'return ' + shared.JS.make_initializer(sig[0], settings)
name = 'fpemu_%s_%d' % (sig, j)
wrapper = make_func(name, proper_code, params, coercions)
Counter.pre.append(wrapper)
return name if not newline else (name + '\n')
if settings['ASSERTIONS'] <= 1:
return bad if not newline else (bad + '\n')
else:
specific_bad, specific_bad_func = make_bad(j)
Counter.pre.append(specific_bad_func)
return specific_bad if not newline else (specific_bad + '\n')
if item not in implemented_functions:
# this is imported into asm, we must wrap it
call_ident = item
if call_ident in metadata['redirects']: call_ident = metadata['redirects'][call_ident]
if not call_ident.startswith('_') and not call_ident.startswith('Math_'): call_ident = '_' + call_ident
code = call_ident + '(' + coerced_params + ')'
if sig[0] != 'v':
# ffis cannot return float
if sig[0] == 'f': code = '+' + code
code = 'return ' + shared.JS.make_coercion(code, sig[0], settings)
code += ';'
Counter.pre.append(make_func(item + '__wrapper', code, params, coercions))
return item + '__wrapper'
return item if not newline else (item + '\n')
if settings['ASSERTIONS'] >= 2:
debug_tables[sig] = body
body = ','.join(map(fix_item, body))
return ('\n'.join(Counter.pre), ''.join([raw[:start+1], body, raw[end:]]))
infos = [make_table(sig, raw) for sig, raw in last_forwarded_json['Functions']['tables'].iteritems()]
Counter.pre = []
function_tables_defs = '\n'.join([info[0] for info in infos]) + '\n\n// EMSCRIPTEN_END_FUNCS\n' + '\n'.join([info[1] for info in infos])
asm_setup = ''
maths = ['Math.' + func for func in ['floor', 'abs', 'sqrt', 'pow', 'cos', 'sin', 'tan', 'acos', 'asin', 'atan', 'atan2', 'exp', 'log', 'ceil', 'imul', 'min', 'clz32']]
simdfloattypes = ['float32x4']
simdinttypes = ['int32x4']
simdtypes = simdfloattypes + simdinttypes
simdfuncs = ['check', 'add', 'sub', 'neg', 'mul',
'equal', 'lessThan', 'greaterThan',
'notEqual', 'lessThanOrEqual', 'greaterThanOrEqual',
'select', 'and', 'or', 'xor', 'not',
'splat', 'swizzle', 'shuffle',
'withX', 'withY', 'withZ', 'withW',
'load', 'store', 'loadX', 'storeX', 'loadXY', 'storeXY', 'loadXYZ', 'storeXYZ']
simdfloatfuncs = simdfuncs + ['div', 'min', 'max', 'minNum', 'maxNum', 'sqrt',
'abs', 'fromInt32x4', 'fromInt32x4Bits',
'reciprocalApproximation', 'reciprocalSqrtApproximation'];
simdintfuncs = simdfuncs + ['fromFloat32x4', 'fromFloat32x4Bits',
'shiftRightArithmeticByScalar',
'shiftRightLogicalByScalar',
'shiftLeftByScalar'];
fundamentals = ['Math', 'Int8Array', 'Int16Array', 'Int32Array', 'Uint8Array', 'Uint16Array', 'Uint32Array', 'Float32Array', 'Float64Array', 'NaN', 'Infinity']
if metadata['simd']:
fundamentals += ['SIMD']
if settings['ALLOW_MEMORY_GROWTH']: fundamentals.append('byteLength')
math_envs = []
provide_fround = settings['PRECISE_F32'] or settings['SIMD']
if provide_fround: maths += ['Math.fround']
basic_funcs = ['abort', 'assert'] + [m.replace('.', '_') for m in math_envs]
if settings['RESERVED_FUNCTION_POINTERS'] > 0: basic_funcs.append('jsCall')
if settings['SAFE_HEAP']: basic_funcs += ['SAFE_HEAP_LOAD', 'SAFE_HEAP_STORE', 'SAFE_FT_MASK']
if settings['CHECK_HEAP_ALIGN']: basic_funcs += ['CHECK_ALIGN_2', 'CHECK_ALIGN_4', 'CHECK_ALIGN_8']
if settings['ASSERTIONS']:
if settings['ASSERTIONS'] >= 2: import difflib
for sig in last_forwarded_json['Functions']['tables'].iterkeys():
basic_funcs += ['nullFunc_' + sig]
if settings['ASSERTIONS'] <= 1:
extra = ' Module["printErr"]("Build with ASSERTIONS=2 for more info.");'
pointer = ' '
else:
pointer = ' \'" + x + "\' '
asm_setup += '\nvar debug_table_' + sig + ' = ' + json.dumps(debug_tables[sig]) + ';'
extra = ' Module["printErr"]("This pointer might make sense in another type signature: '
# sort signatures, attempting to show most likely related ones first
sigs = last_forwarded_json['Functions']['tables'].keys()
def keyfunc(other):
ret = 0
minlen = min(len(other), len(sig))
maxlen = max(len(other), len(sig))
if other.startswith(sig) or sig.startswith(other): ret -= 1000 # prioritize prefixes, could be dropped params
ret -= 133*difflib.SequenceMatcher(a=other, b=sig).ratio() # prioritize on diff similarity
ret += 15*abs(len(other) - len(sig))/float(maxlen) # deprioritize the bigger the length difference is
for i in range(minlen):
if other[i] == sig[i]: ret -= 5/float(maxlen) # prioritize on identically-placed params
ret += 20*len(other) # deprioritize on length
return ret
sigs.sort(key=keyfunc)
for other in sigs:
if other != sig:
extra += other + ': " + debug_table_' + other + '[x] + " '
extra += '"); '
asm_setup += '\nfunction nullFunc_' + sig + '(x) { Module["printErr"]("Invalid function pointer' + pointer + 'called with signature \'' + sig + '\'. ' + \
'Perhaps this is an invalid value (e.g. caused by calling a virtual method on a NULL pointer)? ' + \
'Or calling a function with an incorrect type, which will fail? ' + \
'(it is worth building your source files with -Werror (warnings are errors), as warnings can indicate undefined behavior which can cause this)' + \
'"); ' + extra + ' abort(x) }\n'
basic_vars = ['STACKTOP', 'STACK_MAX', 'tempDoublePtr', 'ABORT']
basic_float_vars = []
if metadata.get('preciseI64MathUsed'):
basic_vars += ['cttz_i8']
else:
if forwarded_json['Functions']['libraryFunctions'].get('_llvm_cttz_i32'):
basic_vars += ['cttz_i8']
if settings.get('DLOPEN_SUPPORT'):
for sig in last_forwarded_json['Functions']['tables'].iterkeys():
basic_vars.append('F_BASE_%s' % sig)
asm_setup += ' var F_BASE_%s = %s;\n' % (sig, 'FUNCTION_TABLE_OFFSET' if settings.get('SIDE_MODULE') else '0') + '\n'
asm_runtime_funcs = ['stackAlloc', 'stackSave', 'stackRestore', 'setThrew', 'setTempRet0', 'getTempRet0']
# See if we need ASYNCIFY functions
# We might not need them even if ASYNCIFY is enabled
need_asyncify = '_emscripten_alloc_async_context' in exported_implemented_functions
if need_asyncify:
basic_vars += ['___async', '___async_unwind', '___async_retval', '___async_cur_frame']
asm_runtime_funcs += ['setAsync']
if settings.get('EMTERPRETIFY'):
asm_runtime_funcs += ['emterpret']
if settings.get('EMTERPRETIFY_ASYNC'):
asm_runtime_funcs += ['setAsyncState', 'emtStackSave']
# function tables
function_tables = ['dynCall_' + table for table in last_forwarded_json['Functions']['tables']]
function_tables_impls = []
for sig in last_forwarded_json['Functions']['tables'].iterkeys():
args = ','.join(['a' + str(i) for i in range(1, len(sig))])
arg_coercions = ' '.join(['a' + str(i) + '=' + shared.JS.make_coercion('a' + str(i), sig[i], settings) + ';' for i in range(1, len(sig))])
coerced_args = ','.join([shared.JS.make_coercion('a' + str(i), sig[i], settings) for i in range(1, len(sig))])
ret = ('return ' if sig[0] != 'v' else '') + shared.JS.make_coercion('FUNCTION_TABLE_%s[index&{{{ FTM_%s }}}](%s)' % (sig, sig, coerced_args), sig[0], settings)
function_tables_impls.append('''
function dynCall_%s(index%s%s) {
index = index|0;
%s
%s;
}
''' % (sig, ',' if len(sig) > 1 else '', args, arg_coercions, ret))
ffi_args = ','.join([shared.JS.make_coercion('a' + str(i), sig[i], settings, ffi_arg=True) for i in range(1, len(sig))])
for i in range(settings['RESERVED_FUNCTION_POINTERS']):
jsret = ('return ' if sig[0] != 'v' else '') + shared.JS.make_coercion('jsCall(%d%s%s)' % (i, ',' if ffi_args else '', ffi_args), sig[0], settings, ffi_result=True)
function_tables_impls.append('''
function jsCall_%s_%s(%s) {
%s
%s;
}
''' % (sig, i, args, arg_coercions, jsret))
shared.Settings.copy(settings)
asm_setup += '\n' + shared.JS.make_invoke(sig) + '\n'
basic_funcs.append('invoke_%s' % sig)
if settings.get('DLOPEN_SUPPORT'):
asm_setup += '\n' + shared.JS.make_extcall(sig) + '\n'
basic_funcs.append('extCall_%s' % sig)
def quote(prop):
if settings['CLOSURE_COMPILER'] == 2:
return "'" + prop + "'"
else:
return prop
def access_quote(prop):
if settings['CLOSURE_COMPILER'] == 2:
return "['" + prop + "']"
else:
return '.' + prop
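# Illustrative: under CLOSURE_COMPILER == 2, quote('stackAlloc') -> "'stackAlloc'"
# and access_quote('Math') -> "['Math']", so generated code reads e.g.
# global['Math'] / Module['asmGlobalArg'] and closure cannot rename the
# properties; otherwise plain identifiers and .Math-style access are emitted.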
# calculate exports
exported_implemented_functions = list(exported_implemented_functions) + metadata['initializers']
exported_implemented_functions.append('runPostSets')
if settings['ALLOW_MEMORY_GROWTH']:
exported_implemented_functions.append('_emscripten_replace_memory')
exports = []
for export in exported_implemented_functions + asm_runtime_funcs + function_tables:
exports.append(quote(export) + ": " + export)
exports = '{ ' + ', '.join(exports) + ' }'
# calculate globals
try:
del forwarded_json['Variables']['globals']['_llvm_global_ctors'] # not a true variable
except KeyError:
pass
# If no named globals, only need externals
global_vars = metadata['externs'] #+ forwarded_json['Variables']['globals']
global_funcs = list(set([key for key, value in forwarded_json['Functions']['libraryFunctions'].iteritems() if value != 2]).difference(set(global_vars)).difference(implemented_functions))
def math_fix(g):
return g if not g.startswith('Math_') else g.split('_')[1]
asm_global_funcs = ''.join([' var ' + g.replace('.', '_') + '=global' + access_quote(g) + ';\n' for g in maths]);
asm_global_funcs += ''.join([' var ' + g + '=env' + access_quote(math_fix(g)) + ';\n' for g in basic_funcs + global_funcs])
if metadata['simd']:
asm_global_funcs += ''.join([' var SIMD_' + ty + '=global' + access_quote('SIMD') + access_quote(ty) + ';\n' for ty in simdtypes])
asm_global_funcs += ''.join([' var SIMD_' + ty + '_' + g + '=SIMD_' + ty + access_quote(g) + ';\n' for ty in simdinttypes for g in simdintfuncs])
asm_global_funcs += ''.join([' var SIMD_' + ty + '_' + g + '=SIMD_' + ty + access_quote(g) + ';\n' for ty in simdfloattypes for g in simdfloatfuncs])
asm_global_vars = ''.join([' var ' + g + '=env' + access_quote(g) + '|0;\n' for g in basic_vars + global_vars])
# In linkable modules, we need to add some explicit globals for global variables that can be linked and used across modules
if settings.get('MAIN_MODULE') or settings.get('SIDE_MODULE'):
assert settings.get('TARGET_ASMJS_UNKNOWN_EMSCRIPTEN'), 'TODO: support x86 target when linking modules (needs offset of 4 and not 8 here)'
for key, value in forwarded_json['Variables']['globals'].iteritems():
if value.get('linkable'):
init = forwarded_json['Variables']['indexedGlobals'][key] + 8 # 8 is Runtime.GLOBAL_BASE / STATIC_BASE
if settings.get('SIDE_MODULE'): init = '(H_BASE+' + str(init) + ')|0'
asm_global_vars += ' var %s=%s;\n' % (key, str(init))
if settings['POINTER_MASKING']:
for i in [0, 1, 2, 3]:
if settings['POINTER_MASKING_DYNAMIC']:
asm_global_vars += ' const MASK%d=env' % i + access_quote('MASK%d' % i) + '|0;\n';
basic_vars += ['MASK%d' %i]
else:
asm_global_vars += ' const MASK%d=%d;\n' % (i, (settings['TOTAL_MEMORY']-1) & (~((2**i)-1)));
# sent data
the_global = '{ ' + ', '.join(['"' + math_fix(s) + '": ' + s for s in fundamentals]) + ' }'
sending = '{ ' + ', '.join(['"' + math_fix(s) + '": ' + s for s in basic_funcs + global_funcs + basic_vars + basic_float_vars + global_vars]) + ' }'
# received
receiving = ''
if settings['ASSERTIONS']:
# assert on the runtime being in a valid state when calling into compiled code. The only exceptions are
# some support code, like malloc. TODO: verify that malloc is actually safe to use that way.
receiving = '\n'.join(['var real_' + s + ' = asm["' + s + '"]; asm["' + s + '''"] = function() {
assert(runtimeInitialized, 'you need to wait for the runtime to be ready (e.g. wait for main() to be called)');
assert(!runtimeExited, 'the runtime was exited (use NO_EXIT_RUNTIME to keep it alive after main() exits)');
return real_''' + s + '''.apply(null, arguments);
};
''' for s in exported_implemented_functions if s not in ['_malloc', '_free', '_memcpy', '_memset']])
if not settings['SWAPPABLE_ASM_MODULE']:
receiving += ';\n'.join(['var ' + s + ' = Module["' + s + '"] = asm["' + s + '"]' for s in exported_implemented_functions + function_tables])
else:
receiving += 'Module["asm"] = asm;\n' + ';\n'.join(['var ' + s + ' = Module["' + s + '"] = function() { return Module["asm"]["' + s + '"].apply(null, arguments) }' for s in exported_implemented_functions + function_tables])
if settings['EXPORT_FUNCTION_TABLES']:
receiving += '\n'
for table in last_forwarded_json['Functions']['tables'].values():
tableName = table.split()[1]
table = table.replace('var ' + tableName, 'var ' + tableName + ' = Module["' + tableName + '"]')
receiving += table + '\n'
# finalize
if DEBUG: logging.debug('asm text sizes' + str([map(len, funcs_js), len(asm_setup), len(asm_global_vars), len(asm_global_funcs), len(pre_tables), len('\n'.join(function_tables_impls)), len(function_tables_defs.replace('\n', '\n ')), len(exports), len(the_global), len(sending), len(receiving)]))
funcs_js = ['''
%s
Module%s = %s;
Module%s = %s;
// EMSCRIPTEN_START_ASM
var asm = (function(global, env, buffer) {
%s
%s
''' % (asm_setup,
access_quote('asmGlobalArg'), the_global,
access_quote('asmLibraryArg'), sending,
"'use asm';" if not metadata.get('hasInlineJS') and not settings['SIDE_MODULE'] and settings['ASM_JS'] == 1 else "'almost asm';", '''
var HEAP8 = new global%s(buffer);
var HEAP16 = new global%s(buffer);
var HEAP32 = new global%s(buffer);
var HEAPU8 = new global%s(buffer);
var HEAPU16 = new global%s(buffer);
var HEAPU32 = new global%s(buffer);
var HEAPF32 = new global%s(buffer);
var HEAPF64 = new global%s(buffer);
''' % (access_quote('Int8Array'),
access_quote('Int16Array'),
access_quote('Int32Array'),
access_quote('Uint8Array'),
access_quote('Uint16Array'),
access_quote('Uint32Array'),
access_quote('Float32Array'),
access_quote('Float64Array')) if not settings['ALLOW_MEMORY_GROWTH'] else '''
var Int8View = global%s;
var Int16View = global%s;
var Int32View = global%s;
var Uint8View = global%s;
var Uint16View = global%s;
var Uint32View = global%s;
var Float32View = global%s;
var Float64View = global%s;
var HEAP8 = new Int8View(buffer);
var HEAP16 = new Int16View(buffer);
var HEAP32 = new Int32View(buffer);
var HEAPU8 = new Uint8View(buffer);
var HEAPU16 = new Uint16View(buffer);
var HEAPU32 = new Uint32View(buffer);
var HEAPF32 = new Float32View(buffer);
var HEAPF64 = new Float64View(buffer);
var byteLength = global.byteLength;
''' % (access_quote('Int8Array'),
access_quote('Int16Array'),
access_quote('Int32Array'),
access_quote('Uint8Array'),
access_quote('Uint16Array'),
access_quote('Uint32Array'),
access_quote('Float32Array'),
access_quote('Float64Array'))) + '\n' + asm_global_vars + ('''
var __THREW__ = 0;
var threwValue = 0;
var setjmpId = 0;
var undef = 0;
var nan = global%s, inf = global%s;
var tempInt = 0, tempBigInt = 0, tempBigIntP = 0, tempBigIntS = 0, tempBigIntR = 0.0, tempBigIntI = 0, tempBigIntD = 0, tempValue = 0, tempDouble = 0.0;
''' % (access_quote('NaN'), access_quote('Infinity'))) + ''.join(['''
var tempRet%d = 0;''' % i for i in range(10)]) + '\n' + asm_global_funcs] + \
[' var tempFloat = %s;\n' % ('Math_fround(0)' if provide_fround else '0.0')] + \
[' var asyncState = 0;\n' if settings.get('EMTERPRETIFY_ASYNC') else ''] + \
([' const f0 = Math_fround(0);\n'] if provide_fround else []) + \
['' if not settings['ALLOW_MEMORY_GROWTH'] else '''
function _emscripten_replace_memory(newBuffer) {
if ((byteLength(newBuffer) & 0xffffff || byteLength(newBuffer) <= 0xffffff) || byteLength(newBuffer) > 0x80000000) return false;
HEAP8 = new Int8View(newBuffer);
HEAP16 = new Int16View(newBuffer);
HEAP32 = new Int32View(newBuffer);
HEAPU8 = new Uint8View(newBuffer);
HEAPU16 = new Uint16View(newBuffer);
HEAPU32 = new Uint32View(newBuffer);
HEAPF32 = new Float32View(newBuffer);
HEAPF64 = new Float64View(newBuffer);
buffer = newBuffer;
return true;
}
'''] + \
['' if not settings['POINTER_MASKING'] or settings['POINTER_MASKING_DYNAMIC'] else '''
function _declare_heap_length() {
return HEAP8[%s] | 0;
}
''' % (settings['TOTAL_MEMORY'] + settings['POINTER_MASKING_OVERFLOW'] - 1)] + ['''
// EMSCRIPTEN_START_FUNCS
function stackAlloc(size) {
size = size|0;
var ret = 0;
ret = STACKTOP;
STACKTOP = (STACKTOP + size)|0;
''' + ('STACKTOP = (STACKTOP + 3)&-4;' if settings['TARGET_X86'] else 'STACKTOP = (STACKTOP + 15)&-16;\n') +
('if ((STACKTOP|0) >= (STACK_MAX|0)) abort();\n' if settings['ASSERTIONS'] else '') + '''
return ret|0;
}
function stackSave() {
return STACKTOP|0;
}
function stackRestore(top) {
top = top|0;
STACKTOP = top;
}
''' + ('''
function setAsync() {
___async = 1;
}''' if need_asyncify else '') + ('''
function emterpret(pc) { // this will be replaced when the emterpreter code is generated; adding it here allows validation until then
pc = pc | 0;
assert(0);
}
''' if settings['EMTERPRETIFY'] else '') + ('''
function setAsyncState(x) {
x = x | 0;
asyncState = x;
}
function emtStackSave() {
return EMTSTACKTOP|0;
}
''' if settings['EMTERPRETIFY_ASYNC'] else '') + '''
function setThrew(threw, value) {
threw = threw|0;
value = value|0;
if ((__THREW__|0) == 0) {
__THREW__ = threw;
threwValue = value;
}
}
function copyTempFloat(ptr) {
ptr = ptr|0;
HEAP8[tempDoublePtr>>0] = HEAP8[ptr>>0];
HEAP8[tempDoublePtr+1>>0] = HEAP8[ptr+1>>0];
HEAP8[tempDoublePtr+2>>0] = HEAP8[ptr+2>>0];
HEAP8[tempDoublePtr+3>>0] = HEAP8[ptr+3>>0];
}
function copyTempDouble(ptr) {
ptr = ptr|0;
HEAP8[tempDoublePtr>>0] = HEAP8[ptr>>0];
HEAP8[tempDoublePtr+1>>0] = HEAP8[ptr+1>>0];
HEAP8[tempDoublePtr+2>>0] = HEAP8[ptr+2>>0];
HEAP8[tempDoublePtr+3>>0] = HEAP8[ptr+3>>0];
HEAP8[tempDoublePtr+4>>0] = HEAP8[ptr+4>>0];
HEAP8[tempDoublePtr+5>>0] = HEAP8[ptr+5>>0];
HEAP8[tempDoublePtr+6>>0] = HEAP8[ptr+6>>0];
HEAP8[tempDoublePtr+7>>0] = HEAP8[ptr+7>>0];
}
function setTempRet0(value) {
value = value|0;
tempRet0 = value;
}
function getTempRet0() {
return tempRet0|0;
}
'''] + funcs_js + ['''
%s
return %s;
})
// EMSCRIPTEN_END_ASM
(%s, %s, buffer);
%s;
''' % (pre_tables + '\n'.join(function_tables_impls) + '\n' + function_tables_defs, exports,
'Module' + access_quote('asmGlobalArg'),
'Module' + access_quote('asmLibraryArg'),
receiving)]
if not settings.get('SIDE_MODULE'):
funcs_js.append('''
Runtime.stackAlloc = asm['stackAlloc'];
Runtime.stackSave = asm['stackSave'];
Runtime.stackRestore = asm['stackRestore'];
Runtime.setTempRet0 = asm['setTempRet0'];
Runtime.getTempRet0 = asm['getTempRet0'];
''')
# Set function table masks
masks = {}
max_mask = 0
for sig, table in last_forwarded_json['Functions']['tables'].iteritems():
mask = table.count(',')
masks[sig] = str(mask)
max_mask = max(mask, max_mask)
def function_table_maskize(js, masks):
def fix(m):
sig = m.groups(0)[0]
return masks[sig]
return re.sub(r'{{{ FTM_([\w\d_$]+) }}}', lambda m: fix(m), js) # masks[m.groups(0)[0]]
funcs_js = map(lambda js: function_table_maskize(js, masks), funcs_js)
if settings.get('DLOPEN_SUPPORT'):
funcs_js.append('''
asm.maxFunctionIndex = %(max_mask)d;
DLFCN.registerFunctions(asm, %(max_mask)d+1, %(sigs)s, Module);
Module.SYMBOL_TABLE = SYMBOL_TABLE;
''' % { 'max_mask': max_mask, 'sigs': str(map(str, last_forwarded_json['Functions']['tables'].keys())) })
# Create symbol table for self-dlopen
if settings.get('DLOPEN_SUPPORT'):
symbol_table = {}
for k, v in forwarded_json['Variables']['indexedGlobals'].iteritems():
if forwarded_json['Variables']['globals'][k]['named']:
symbol_table[k] = str(v + forwarded_json['Runtime']['GLOBAL_BASE'])
for raw in last_forwarded_json['Functions']['tables'].itervalues():
if raw == '': continue
table = map(string.strip, raw[raw.find('[')+1:raw.find(']')].split(","))
for i in range(len(table)):
value = table[i]
if value != '0':
if settings.get('SIDE_MODULE'):
symbol_table[value] = 'FUNCTION_TABLE_OFFSET+' + str(i)
else:
symbol_table[value] = str(i)
outfile.write("var SYMBOL_TABLE = %s;" % json.dumps(symbol_table).replace('"', ''))
for i in range(len(funcs_js)): # do this loop carefully to save memory
if WINDOWS: funcs_js[i] = funcs_js[i].replace('\r\n', '\n') # Normalize to UNIX line endings, otherwise writing to text file will duplicate \r\n to \r\r\n!
outfile.write(funcs_js[i])
funcs_js = None
if WINDOWS: post = post.replace('\r\n', '\n') # Normalize to UNIX line endings, otherwise writing to text file will duplicate \r\n to \r\r\n!
outfile.write(post)
outfile.close()
if DEBUG: logging.debug(' emscript: final python processing took %s seconds' % (time.time() - t))
success = True
finally:
if not success:
outfile.close()
shared.try_delete(outfile.name) # remove partial output
if os.environ.get('EMCC_FAST_COMPILER') != '0':
emscript = emscript_fast
else:
logging.critical('Non-fastcomp compiler is no longer available, please use fastcomp or an older version of emscripten')
sys.exit(1)
def main(args, compiler_engine, cache, jcache, relooper, temp_files, DEBUG, DEBUG_CACHE):
# Prepare settings for serialization to JSON.
settings = {}
for setting in args.settings:
name, value = setting.strip().split('=', 1)
settings[name] = json.loads(value)
# libraries
libraries = args.libraries[0].split(',') if len(args.libraries) > 0 else []
# Compile the assembly to Javascript.
if settings.get('RELOOP'):
if not relooper:
relooper = settings.get('RELOOPER')
if not relooper:
relooper = cache.get_path('relooper.js')
settings.setdefault('RELOOPER', relooper)
if not os.path.exists(relooper):
shared.Building.ensure_relooper(relooper)
settings.setdefault('STRUCT_INFO', cache.get_path('struct_info.compiled.json'))
struct_info = settings.get('STRUCT_INFO')
if not os.path.exists(struct_info) and not settings.get('BOOTSTRAPPING_STRUCT_INFO'):
if DEBUG: logging.debug(' emscript: bootstrapping struct info...')
shared.Building.ensure_struct_info(struct_info)
if DEBUG: logging.debug(' emscript: bootstrapping struct info complete')
emscript(args.infile, settings, args.outfile, libraries, compiler_engine=compiler_engine,
jcache=jcache, temp_files=temp_files, DEBUG=DEBUG, DEBUG_CACHE=DEBUG_CACHE)
def _main(environ):
response_file = True
while response_file:
response_file = None
for index in range(1, len(sys.argv)):
if sys.argv[index][0] == '@':
# found one, loop again next time
response_file = True
response_file_args = read_response_file(sys.argv[index])
# slice in extra_args in place of the response file arg
sys.argv[index:index+1] = response_file_args
break
parser = optparse.OptionParser(
usage='usage: %prog [-h] [-H HEADERS] [-o OUTFILE] [-c COMPILER_ENGINE] [-s FOO=BAR]* infile',
description=('You should normally never use this! Use emcc instead. '
'This is a wrapper around the JS compiler, converting .ll to .js.'),
epilog='')
parser.add_option('-H', '--headers',
default=[],
action='append',
help='System headers (comma separated) whose #defines should be exposed to the compiled code.')
parser.add_option('-L', '--libraries',
default=[],
action='append',
help='Library files (comma separated) to use in addition to those in emscripten src/library_*.')
parser.add_option('-o', '--outfile',
default=sys.stdout,
help='Where to write the output; defaults to stdout.')
parser.add_option('-c', '--compiler',
default=None,
help='Which JS engine to use to run the compiler; defaults to the one in ~/.emscripten.')
parser.add_option('--relooper',
default=None,
help='Which relooper file to use if RELOOP is enabled.')
parser.add_option('-s', '--setting',
dest='settings',
default=[],
action='append',
metavar='FOO=BAR',
help=('Overrides for settings defined in settings.js. '
'May occur multiple times.'))
parser.add_option('-j', '--jcache',
action='store_true',
default=False,
help=('Enable jcache (ccache-like caching of compilation results, for faster incremental builds).'))
parser.add_option('-T', '--temp-dir',
default=None,
help=('Where to create temporary files.'))
parser.add_option('-v', '--verbose',
action='store_true',
dest='verbose',
help='Displays debug output')
parser.add_option('-q', '--quiet',
action='store_false',
dest='verbose',
help='Hides debug output')
parser.add_option('--suppressUsageWarning',
action='store_true',
default=environ.get('EMSCRIPTEN_SUPPRESS_USAGE_WARNING'),
help=('Suppress usage warning'))
# Convert to the same format that argparse would have produced.
keywords, positional = parser.parse_args()
if not keywords.suppressUsageWarning:
logging.warning('''
==============================================================
WARNING: You should normally never use this! Use emcc instead.
==============================================================
''')
if len(positional) != 1:
raise RuntimeError('Must provide exactly one positional argument. Got ' + str(len(positional)) + ': "' + '", "'.join(positional) + '"')
keywords.infile = os.path.abspath(positional[0])
if isinstance(keywords.outfile, basestring):
keywords.outfile = open(keywords.outfile, 'w')
if keywords.relooper:
relooper = os.path.abspath(keywords.relooper)
else:
relooper = None # use the cache
if keywords.temp_dir is None:
temp_files = get_configuration().get_temp_files()
temp_dir = get_configuration().TEMP_DIR
else:
temp_dir = os.path.abspath(keywords.temp_dir)
if not os.path.exists(temp_dir):
os.makedirs(temp_dir)
temp_files = tempfiles.TempFiles(temp_dir)
if keywords.compiler is None:
keywords.compiler = shared.COMPILER_ENGINE
if keywords.verbose is None:
DEBUG = get_configuration().DEBUG
DEBUG_CACHE = get_configuration().DEBUG_CACHE
else:
DEBUG = keywords.verbose
DEBUG_CACHE = keywords.verbose
cache = cache_module.Cache()
temp_files.run_and_clean(lambda: main(
keywords,
compiler_engine=keywords.compiler,
cache=cache,
jcache=cache_module.JCache(cache) if keywords.jcache else None,
relooper=relooper,
temp_files=temp_files,
DEBUG=DEBUG,
DEBUG_CACHE=DEBUG_CACHE,
))
if __name__ == '__main__':
_main(environ=os.environ)
|
PopCap/GameIdea
|
Engine/Source/ThirdParty/HTML5/emsdk/emscripten/1.30.0/emscripten.py
|
Python
|
bsd-2-clause
| 75,271
|
###############################################################################
##
## Copyright 2011 Tavendo GmbH
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
##
###############################################################################
try:
import pkg_resources
version = pkg_resources.require("Autobahn")[0].version
except:
## i.e. no setuptools or no package installed ..
version = "?.?.?"
import util
import httpstatus
import utf8validator
import websocket
import case
import report
import fuzzing
import prefixmap
import wamp
|
frivoal/presto-testo
|
wpt/websockets/autobahn/oberstet-Autobahn-643d2ee/lib/python/autobahn/__init__.py
|
Python
|
bsd-3-clause
| 1,107
|
import seven_segment
import myhdl
from myhdl import *
code = [None] * 10
for key, val in seven_segment.encoding.items():
if 0 <= key <= 9:
code[key] = int(val, 2)
code = tuple(code)
def bcd2led(led, bcd, clock):
""" bcd to seven segment led convertor.
led: seven segment led output
bcd: bcd input
clock: clock input
"""
@always(clock.posedge)
def logic():
led.next = code[int(bcd)]
return logic
def convert():
led = Signal(intbv(0)[7:])
bcd = Signal(intbv(0)[4:])
clock = Signal(False)
toVerilog(bcd2led, led, bcd, clock)
toVHDL(bcd2led, led, bcd, clock)
convert()
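# Usage sketch (illustrative; mirrors convert() above): instantiate the
# converter with myhdl signals and drive bcd with a digit -- on the next
# rising clock edge led takes code[int(bcd)], e.g. bcd == 7 -> code[7].
#
#   led, bcd, clock = Signal(intbv(0)[7:]), Signal(intbv(0)[4:]), Signal(False)
#   seg = bcd2led(led, bcd, clock)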
|
cfelton/myhdl
|
example/cookbook/stopwatch/bcd2led.py
|
Python
|
lgpl-2.1
| 649
|
# Orthographic Spanish
public_name='Orthographic Spanish'
default_data='orthographic_spanish.txt'
default_neighbor_lexicon='orthographic_spanish.txt'
default_word_lexicon='orthographic_spanish.txt'
default_lookup_lexicon='orthographic_spanish.txt'
from subsyllabic_common import *
import orth.es as language
def transform(input_sequence, frequency=1):
return pre_transform(input_sequence, frequency=frequency, language=language)
|
crr-ugent/wuggy
|
plugins/orthographic_spanish.py
|
Python
|
gpl-2.0
| 433
|
import requests
import time
from bs4 import BeautifulSoup
from Crawler.HduCrawler.HduConfig import HduUser
class HduScanner:
s = requests.session()
scan_url = 'http://acm.hdu.edu.cn/status.php?first=&pid=&user={}&lang=0&status=0'
def Analyse(self, html):
'''
f = open('/tmp/status.html','w')
f.write(html)
f.close()
'''
soup = BeautifulSoup(html, 'html5lib')
# print('-'*30)
L = list()
for i in range(2, 20):
td = soup.select('#fixed_table > table > tbody > tr:nth-of-type({})'.format(i))
if len(td) == 0: break
dt = dict()
dt['originOJ'] = 'HDU'
titles = ['realrunid', 'realsubmittime', 'status', 'originProb', 'runtime',
'runmemory', 'codelenth', 'language', 'nickname']
for con in td[0].contents:
dt[titles[0]] = con.text
if titles[0] == 'codelenth':
dt[titles[0]] = dt[titles[0]][:-1]
titles = titles[1:]
L.append(dt)
return L
def Scanner(self):
L = list()
for x in HduUser:
url = self.scan_url.format(x.get('username'))
r = self.s.get(url, timeout=5)
r.encoding = 'gb2312'
tL = self.Analyse(r.text)
L += tL
return L
def UpdateToDB(self):
pass
def scann_test():
hs = HduScanner()
L = hs.Scanner()
return L
if __name__ == '__main__':
scann_test()
|
CKboss/VirtualJudgePY
|
Crawler/HduCrawler/HduScanner.py
|
Python
|
gpl-2.0
| 1,551
|
import json
from housepy import util, log, config, strings
from ingest import ingest_json_body, ingest_data
def parse(request):
log.info("sensornet.parse")
data = ingest_json_body(request)
try:
t_local = strings.as_numeric(data['t_local'])
data = data['data']
log.debug(json.dumps(data, indent=4, default=lambda x: str(x)))
if 'gps_long' in data and 'gps_lat' in data:
data['Longitude'] = data['gps_long']
del data['gps_long']
data['Latitude'] = data['gps_lat']
del data['gps_lat']
data['FeatureType'] = "sensor"
data['FeatureSubType'] = "hybrid"
data['SensorName'] = "sensornet"
data['t_utc'] = util.delocalize_timestamp(t_local, tz=config['local_tz'])
except Exception as e:
log.error("--> failed: %s" % log.exc(e))
return None, "Unexpected format"
return data
"""
{ t_local: '1429805254',
data:
{ dissolved_oxygen: '9.79',
orp: '202.2',
salinity: '0.00',
ph: '3.747',
conductivity: '0',
'water temp': '0' },
t_utc: 1429805257 }
"""
|
brianhouse/okavango
|
ingest/sensornet.py
|
Python
|
mit
| 1,122
|
#!/usr/bin/env python3
import sys
from subprocess import run, PIPE
from urllib.parse import urljoin
args = sys.argv[1:]
hostenv = args.pop(0) if args else 'dev'
user = args.pop(0) if args else 'whelk'
host = ('pgsql01-{}.libris.kb.se'.format(hostenv)
if '.' not in hostenv else hostenv)
new_ids, old_ids = [], []
for l in sys.stdin:
s = l.strip()
if len(s) > 12:
new_ids.append(s)
else:
old_ids.append(s)
def to_sql_collection(seq):
return '({})'.format(', '.join(
"'{}'".format(s) for s in seq))
select = 'SELECT id FROM lddb__identifiers WHERE iri in {};'.format(
to_sql_collection(
urljoin('http://libris.kb.se/resource/bib/', s)
for s in old_ids))
cmd = ['psql', '-h', host, '-U', user, '-t']
result = run(cmd, input=select, check=True, encoding='ascii',
stdout=PIPE)
ids = result.stdout.split() + new_ids
print(to_sql_collection(ids))
|
libris/librisxl
|
librisxl-tools/scripts/canonical-lddb-ids.py
|
Python
|
apache-2.0
| 942
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2015-2015: Alignak team, see AUTHORS.txt file for contributors
#
# This file is part of Alignak.
#
# Alignak is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Alignak is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Alignak. If not, see <http://www.gnu.org/licenses/>.
#
#
# This file incorporates work covered by the following copyright and
# permission notice:
#
# Copyright (C) 2009-2014:
# Hartmut Goebel, h.goebel@goebel-consult.de
# aviau, alexandre.viau@savoirfairelinux.com
# Nicolas Dupeux, nicolas@dupeux.net
# Grégory Starck, g.starck@gmail.com
# Sebastien Coavoux, s.coavoux@free.fr
# Jean Gabes, naparuba@gmail.com
# Zoran Zaric, zz@zoranzaric.de
# Gerhard Lausser, gerhard.lausser@consol.de
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
import time
from action import Action
from alignak.property import IntegerProp, StringProp, FloatProp, BoolProp
from alignak.autoslots import AutoSlots
""" TODO: Add some comment about this class for the doc"""
class EventHandler(Action):
# AutoSlots create the __slots__ with properties and
# running_properties names
__metaclass__ = AutoSlots
my_type = 'eventhandler'
properties = {
'is_a': StringProp(default='eventhandler'),
'type': StringProp(default=''),
'_in_timeout': StringProp(default=False),
'status': StringProp(default=''),
'exit_status': StringProp(default=3),
'output': StringProp(default=''),
'long_output': StringProp(default=''),
't_to_go': StringProp(default=0),
'execution_time': FloatProp(default=0),
'u_time': FloatProp(default=0.0),
's_time': FloatProp(default=0.0),
'env': StringProp(default={}),
'perf_data': StringProp(default=''),
'sched_id': IntegerProp(default=0),
'timeout': IntegerProp(default=10),
'check_time': IntegerProp(default=0),
'command': StringProp(default=''),
'module_type': StringProp(default='fork'),
'worker': StringProp(default='none'),
'reactionner_tag': StringProp(default='None'),
'is_snapshot': BoolProp(default=False),
}
# id = 0 #Is common to Actions
def __init__(self, command, id=None, ref=None, timeout=10, env={},
module_type='fork', reactionner_tag='None', is_snapshot=False):
self.is_a = 'eventhandler'
self.type = ''
self.status = 'scheduled'
if id is None: # id != None is for copy call only
self.id = Action.id
Action.id += 1
self.ref = ref
self._in_timeout = False
self.timeout = timeout
self.exit_status = 3
self.command = command
self.output = ''
self.long_output = ''
self.t_to_go = time.time()
self.check_time = 0
self.execution_time = 0
self.u_time = 0
self.s_time = 0
self.perf_data = ''
self.env = {}
self.module_type = module_type
self.worker = 'none'
self.reactionner_tag = reactionner_tag
self.is_snapshot = is_snapshot
# return a copy of the check but just what is important for execution
# So we remove the ref and all
def copy_shell(self):
# We create a dummy check with nothing in it, just defaults values
return self.copy_shell__(EventHandler('', id=self.id, is_snapshot=self.is_snapshot))
def get_return_from(self, e):
self.exit_status = e.exit_status
self.output = e.output
self.long_output = getattr(e, 'long_output', '')
self.check_time = e.check_time
self.execution_time = getattr(e, 'execution_time', 0.0)
self.perf_data = getattr(e, 'perf_data', '')
def get_outputs(self, out, max_plugins_output_length):
self.output = out
def is_launchable(self, t):
return t >= self.t_to_go
def __str__(self):
return "Check %d status:%s command:%s" % (self.id, self.status, self.command)
def get_id(self):
return self.id
# Call by pickle to dataify the comment
# because we DO NOT WANT REF in this pickleisation!
def __getstate__(self):
cls = self.__class__
# id is not in *_properties
res = {'id': self.id}
for prop in cls.properties:
if hasattr(self, prop):
res[prop] = getattr(self, prop)
return res
# Inverted function of getstate
def __setstate__(self, state):
cls = self.__class__
self.id = state['id']
for prop in cls.properties:
if prop in state:
setattr(self, prop, state[prop])
if not hasattr(self, 'worker'):
self.worker = 'none'
if not getattr(self, 'module_type', None):
self.module_type = 'fork'
# s_time and u_time are added between 1.2 and 1.4
if not hasattr(self, 'u_time'):
self.u_time = 0
self.s_time = 0
|
ddurieux/alignak
|
alignak/eventhandler.py
|
Python
|
agpl-3.0
| 6,318
|
from __future__ import absolute_import, division, print_function, unicode_literals
import sys
from . import source as pythonparser_source, lexer as pythonparser_lexer, parser as pythonparser_parser, diagnostic as pythonparser_diagnostic
def parse_buffer(buffer, mode="exec", flags=[], version=None, engine=None):
"""
Like :meth:`parse`, but accepts a :class:`source.Buffer` instead of
source and filename, and returns comments as well.
:see: :meth:`parse`
:return: (:class:`ast.AST`, list of :class:`source.Comment`)
Abstract syntax tree and comments
"""
if version is None:
version = sys.version_info[0:2]
if engine is None:
engine = pythonparser_diagnostic.Engine()
lexer = pythonparser_lexer.Lexer(buffer, version, engine)
if mode in ("single", "eval"):
lexer.interactive = True
parser = pythonparser_parser.Parser(lexer, version, engine)
parser.add_flags(flags)
if mode == "exec":
return parser.file_input(), lexer.comments
elif mode == "single":
return parser.single_input(), lexer.comments
elif mode == "eval":
return parser.eval_input(), lexer.comments
def parse(source, filename="<unknown>", mode="exec",
flags=[], version=None, engine=None):
"""
Parse a string into an abstract syntax tree.
This is the replacement for the built-in :meth:`..ast.parse`.
:param source: (string) Source code in the correct encoding
:param filename: (string) Filename of the source (used in diagnostics)
:param mode: (string) Execution mode. Pass ``"exec"`` to parse a module,
``"single"`` to parse a single (interactive) statement,
and ``"eval"`` to parse an expression. In the last two cases,
``source`` must be terminated with an empty line
(i.e. end with ``"\\n\\n"``).
:param flags: (list of string) Future flags.
Equivalent to ``from __future__ import <flags>``.
:param version: (2-tuple of int) Major and minor version of Python
syntax to recognize, ``sys.version_info[0:2]`` by default.
:param engine: (:class:`diagnostic.Engine`) Diagnostic engine,
a fresh one is created by default
:return: (:class:`ast.AST`) Abstract syntax tree
:raise: :class:`diagnostic.Error`
if the source code is not well-formed
"""
ast, comments = parse_buffer(pythonparser_source.Buffer(source, filename),
mode, flags, version, engine)
return ast
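# Example usage (illustrative sketch; assumes the package's relative imports
# resolve as installed). Mode strings follow the docstring above: "single"
# and "eval" sources must end with "\n\n".
#
#   module_ast = parse("x = 1 + 2\n", filename="example.py", mode="exec")
#   stmt_ast = parse("x\n\n", mode="single")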
|
google/grumpy
|
third_party/pythonparser/__init__.py
|
Python
|
apache-2.0
| 2,511
|
# Padding needed if length of input is not a multiple of BLOCK_SIZE
BS = 16
pad = lambda s: s + (BS - len(s) % BS) * chr(BS - len(s) % BS)
unpad = lambda s : s[:-ord(s[len(s)-1:])]
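# Illustrative usage sketch: PKCS#7-style round trip. pad() always appends
# 1..BS bytes encoding the pad length, so unpad() can strip them again.
if __name__ == '__main__':
    msg = 'secret message'
    padded = pad(msg)
    assert len(padded) % BS == 0 and unpad(padded) == msg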
|
TheShellLand/pies
|
v3/Libraries/Crypto/AES_Padding_BLOCKSIZE.py
|
Python
|
mit
| 182
|
from __future__ import division
import re
import cStringIO
from symbol import Symbol
from token import Token
class Parser(object):
tokenizer = r"""\s*(#`|#,@|#,|#'|,@|[('`,)]|"(?:[\\].|;|[^\\"])*"|;.*|[^\s('"`,;)]*)(.*)"""
eof_object = Symbol('#<eof-object>')
eol_object = Symbol('#<eol-object>')
pause_object = Symbol('#<pause-object>')
@classmethod
def stringParser(cls, string):
return cls(cStringIO.StringIO(string))
def __init__(self, _file):
self.file = _file;
self.line = u''
self.line_number = 0
self.iga=self.igetast()
def gettokens(self):
"""Return the next token, reading new text into line buffer if needed."""
while True:
if self.line == '\n' or self.line == '':
self.line = self.file.readline().decode('utf-8')
self.line_number += 1
if (self.line_number == 1 or self.line_number == 2) and self.line.startswith('#!'):
self.line = self.file.readline().decode('utf-8')
self.line_number+=1
if self.line == '':
yield self.eof_object
continue
# noinspection PyUnresolvedReferences
token, self.line = re.match(self.tokenizer, self.line).groups()
if token != '' and not token.startswith(';'):
yield Token(token).setLine(self.line_number)
if self.line == '\n' or self.line == '':
yield self.eol_object
#yield self.eof_object
tokens=property(gettokens)
def igetast(self):
tokens = self.gettokens()
o = []
for t in tokens:
if t is self.eof_object:
yield self.pause_object
continue
if t is self.eol_object:
if o:
yield o
o=[]
continue
ra=self.read_ahead(t,tokens)
n=ra.next()
while n is self.pause_object:
yield n
n=ra.next()
o.append(n)
def getast(self):
n = self.iga.next()
if n is self.pause_object:
return []
return n
def read_ahead(self, token, tokens):
if '(' == token:
L = []
while True:
token = tokens.next()
if token is self.eof_object:
yield self.pause_object
continue
if token is self.eol_object:
continue
if token == ')':
yield L
else:
ra = self.read_ahead(token, tokens)
n = ra.next()
while n is self.pause_object:
yield n
n = ra.next()
L.append(n)
elif ')' == token:
raise SyntaxError('unexpected )')
elif token is self.eol_object:
raise SyntaxError('unexpected eol')
else:
yield token.symbol
ast=property(getast)
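# Example usage (illustrative; the exact return shape depends on the
# package's Symbol and Token classes):
#   p = Parser.stringParser("(+ 1 2)\n")
#   form = p.ast   # first complete form, e.g. a list of parsed symbols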
|
perkinslr/pypyjs
|
addedLibraries/scheme/parser.py
|
Python
|
mit
| 3,126
|
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "website_django.settings")
try:
from django.core.management import execute_from_command_line
except ImportError:
# The above import may fail for some other reason. Ensure that the
# issue is really that Django is missing to avoid masking other
# exceptions on Python 2.
try:
import django
except ImportError:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
)
raise
execute_from_command_line(sys.argv)
|
fanchunke1991/website_django
|
manage.py
|
Python
|
mit
| 812
|
#! /usr/bin/env python3
bdf = 'wenquanyi_cjk_basic_9pt.bdf'
cpp = '../../src/bitmapfont_wqy.h'
##cpp = 'bitmapfont_wqy.h'
# FONTBOUNDINGBOX 12 14 0 -3
control_point = 14 - 3
h = 12
w = 12
w_half = 12 // 2
chars = 0
glyphs = []
def getKey(item):
return item[0]
if __name__ == '__main__':
with open(bdf) as f:
new_glyph = False
glyph_line_count = 0
glyph = []
        xoff = 0 # number of positions to shift left
for line in f:
#print(line)
if line.find('CHARS ') == 0: #CHARS 30503
s = line.split()
#print('CHARS: %s' % s)
chars = int(s[1])
#print(chars)
elif line.find('STARTCHAR ') == 0:
glyph = [0, 'true', ['0']*w]
#print(glyph)
elif line.find('ENCODING ') == 0:
s = line.split()
glyph[0] = int(s[1])
elif line.find('BBX ') == 0:
bbx = line.split()
#print(bbx)
                # Compute the offset and shift the byte left accordingly.
x_padding = int(bbx[1]) + int(bbx[3])
if x_padding <= w_half:
glyph[1] = 'false'
xoff = w_half - x_padding
else:
xoff = w - x_padding
                # Compute the height and pad the leading/trailing rows.
                y_padding = int(bbx[4]) + 2 # bottom padding
                y_padding_first = h - int(bbx[2]) - y_padding # top padding = total height (12px) - glyph height - bottom padding
#if xoff < 0 or y_padding_first < 0 or y_padding < 0: print(xoff, y_padding, y_padding_first, glyph[0])
#if xoff != 1 and y_padding_first != 0 and y_padding != 1: print(xoff, y_padding, y_padding_first, glyph[0])
#print(glyph[0], ' BBX gen offsite :', xoff, y_padding_first, y_padding)
elif line.find('BITMAP') == 0:
new_glyph = True
glyph_line_count = 0
if y_padding_first > 0:
glyph_line_count = glyph_line_count + y_padding_first
else:
pass # < 0, need skip some lines.
elif line.find('ENDCHAR') == 0:
new_glyph = False
glyph_line_count = 0
#print('Final Glyph is: %s.' % glyph)
glyphs.append(glyph)
elif new_glyph:
# pack line
line_final = ''
line_s = line.strip()
#print(line_s)
i = int('0x%s' % line_s, 16) >> 4
i_str = '{0:0>12b}'.format(i)
                i_str_new = i_str[::-1]
i_int_new = int('0b%s' % i_str_new, 2)
#print('{0:0>12b}'.format(i_int_new))
#if xoff > 0: pass
#if xoff < 0: pass
#print('glyph_line_count', glyph_line_count)
glyph[2][glyph_line_count] = str(i_int_new)
glyph_line_count = glyph_line_count + 1
glyphs.sort(key=getKey)
fw = open(cpp, 'w')
fw.write('''/* !!!! GENERATED FILE - DO NOT EDIT !!!!
* --------------------------------------
*/
#ifndef EP_BITMAPFONT_WQY_H
#define EP_BITMAPFONT_WQY_H
#include <array>
#include "bitmapfont_glyph.h"
constexpr const std::array<BitmapFontGlyph,%s> BITMAPFONT_WQY = {{
''' % len(glyphs))
for x in glyphs:
#print(x[0])
#print(', '.join(x[2]))
s = '\t{ %s, %s, { %s } },\n' % (x[0], x[1], ', '.join(x[2]))
#print(s)
fw.write(s)
fw.write('''}};
#endif
''')
fw.close()
|
BlisterB/Player
|
resources/wenquanyi/gen_wqy_font_cpp.py
|
Python
|
gpl-3.0
| 3,721
|
from django.conf.urls import url
from . import views
urlpatterns = [
url(r'^$', views.index, name='index'),
url(r'^update/', views.update, name='update')
]
|
njustesen/coep-starcraft
|
http_server/sup/urls.py
|
Python
|
gpl-3.0
| 165
|
#!/usr/bin/env python
# Read mouse events if X isn't running -- for instance, on a Raspberry Pi.
# Needs root, or at least read access to /dev/input/*
import evdev
import select
import time
import sys
class MouseReader:
def __init__(self):
self.mousedevice = None
devices = map(evdev.InputDevice, evdev.list_devices())
for dev in devices:
caps = dev.capabilities()
keys = caps.keys()
# 1L is "EV_KEY" events (mouse buttons);
# 2L is 'EV_REL' for the wheel.
if evdev.ecodes.EV_KEY in keys and evdev.ecodes.EV_REL in keys:
if evdev.ecodes.BTN_LEFT in caps[evdev.ecodes.EV_KEY] and \
evdev.ecodes.BTN_RIGHT in caps[evdev.ecodes.EV_KEY] \
and evdev.ecodes.REL_WHEEL in caps[evdev.ecodes.EV_REL]:
# Quacks like a mouse. Use it.
self.mousedevice = dev
return
        if not self.mousedevice:
print "Didn't see a mouse device"
def pval(self, code, val):
try:
codes = evdev.ecodes.BTN[code]
if type(codes) is list:
print codes[0],
else:
print codes,
except:
try:
print evdev.ecodes.REL[code],
except:
print "Unknown code", code
if val == 1:
if code == evdev.ecodes.REL_WHEEL:
print "scroll up"
else:
print "press"
elif val == 0:
print "release"
elif val == -1:
print "scroll down"
else:
print "unknown value", val
def read_mouse(self, timeout=None):
"""Returns an evdev event.
timeout is specified in floating-point seconds.
timeout=None will block until there's something to read.
"""
r,w,x = select.select([self.mousedevice], [], [], timeout)
events = []
for event in self.mousedevice.read():
# event.value will be 1 for button down, 0 for button up,
# 1 for wheel up, -1 for wheel down.
if event.code in (evdev.ecodes.REL_WHEEL,
evdev.ecodes.BTN_LEFT,
evdev.ecodes.BTN_RIGHT,
evdev.ecodes.BTN_MIDDLE):
events.append((event.code, event.value))
return events
if __name__ == '__main__':
mousereader = MouseReader()
if not mousereader.mousedevice:
sys.exit(1)
while True:
try:
events = mousereader.read_mouse(.1)
for ev in events:
mousereader.pval(ev[0], ev[1])
except IOError:
pass
print "\n==================\nSleeping"
time.sleep(5)
|
akkana/scripts
|
mouseevent.py
|
Python
|
gpl-2.0
| 2,847
|
"""Source estimate functions."""
# Authors: Annalisa Pascarella <a.pascarella@iac.cnr.it>
#
# License: BSD (3-clause)
import mne
import numpy as np
import os.path as op
from mne import get_volume_labels_from_src
from .import_data import write_hdf5
from .source_space import _create_MNI_label_files
def _process_stc(stc, basename, sbj_id, subjects_dir, parc, forward,
aseg, is_fixed, all_src_space=False, ROIs_mean=True):
if not isinstance(stc, list):
print('***')
print(('stc dim ' + str(stc.shape)))
print('***')
stc = [stc]
else:
print('***')
print(('len stc %d' % len(stc)))
print('***')
print('**************************************************************')
print('all_src_space: {}'.format(all_src_space))
print('ROIs_mean: {}'.format(ROIs_mean))
print('**************************************************************')
if all_src_space:
stc_data = list()
stc_file = op.abspath(basename + '_stc.hdf5')
for i in range(len(stc)):
stc_data.append(stc[i].data)
write_hdf5(stc_file, stc_data, dataset_name='stc_data')
if ROIs_mean:
label_ts, labels_file, label_names_file, label_coords_file = \
_compute_mean_ROIs(stc, sbj_id, subjects_dir, parc,
forward, aseg, is_fixed)
ts_file = op.abspath(basename + '_ROI_ts.npy')
np.save(ts_file, label_ts)
else:
ts_file = stc_file
labels_file = ''
label_names_file = ''
label_coords_file = ''
return ts_file, labels_file, label_names_file, label_coords_file
def _compute_mean_ROIs(stc, sbj_id, subjects_dir, parc,
forward, aseg, is_fixed):
# these coo are in MRI space and we have to convert them to MNI space
labels_cortex = mne.read_labels_from_annot(sbj_id, parc=parc,
subjects_dir=subjects_dir)
print(('\n*** %d ***\n' % len(labels_cortex)))
src = forward['src']
# allow_empty : bool -> Instead of emitting an error, return all-zero time
# courses for labels that do not have any vertices in the source estimate
if is_fixed:
mode = 'mean_flip'
else:
mode = 'mean'
label_ts = mne.extract_label_time_course(stc, labels_cortex, src,
mode=mode,
allow_empty=True,
return_generator=False)
# save results in .npy file that will be the input for spectral node
print('\n*** SAVE ROI TS ***\n')
print((len(label_ts)))
if aseg:
print(sbj_id)
labels_aseg = get_volume_labels_from_src(src, sbj_id, subjects_dir)
labels = labels_cortex + labels_aseg
else:
labels = labels_cortex
labels_aseg = None
print((labels[0].pos))
print((len(labels)))
labels_file, label_names_file, label_coords_file = \
_create_MNI_label_files(forward, labels_cortex, labels_aseg,
sbj_id, subjects_dir)
return label_ts, labels_file, label_names_file, label_coords_file
|
neuropycon/ephypype
|
ephypype/source_estimate.py
|
Python
|
bsd-3-clause
| 3,229
|
# -*- coding: utf-8 -*-
"""
flaskbb.utils.decorators
~~~~~~~~~~~~~~~~~~~~~~~~
A place for our decorators.
:copyright: (c) 2014 by the FlaskBB Team.
:license: BSD, see LICENSE for more details.
"""
from functools import wraps
from flask import abort
from flask_login import current_user
from openspending.auth.perms import check_perm
from openspending.auth import *
def can_access_forum(func):
"""
If you are logged in you can view the forum
"""
    @wraps(func)
    def decorated(*args, **kwargs):
if not is_authenticated(current_user):
abort(403)
return func(*args, **kwargs)
# forum_id = kwargs['forum_id'] if 'forum_id' in kwargs else args[1]
# from openspending.forum.forum.models import Forum
# user_forums = Forum.query.all()
# if len(user_forums) < 1:
# abort(403)
# return func(*args, **kwargs)
return decorated
def can_access_topic(func):
    @wraps(func)
    def decorated(*args, **kwargs):
if not is_authenticated(current_user):
abort(403)
return func(*args, **kwargs)
# topic_id = kwargs['topic_id'] if 'topic_id' in kwargs else args[1]
# from openspending.forum.forum.models import Forum, Topic
# topic = Topic.query.filter_by(id=topic_id).first()
# user_forums = Forum.query.all()
# if len(user_forums) < 1:
# abort(403)
# return func(*args, **kwargs)
return decorated
|
USStateDept/FPA_Core
|
openspending/forum/utils/decorators.py
|
Python
|
agpl-3.0
| 1,481
|
# Generated by Django 1.11.5 on 2017-11-07 13:33
import django.db.models.deletion
from django.db import migrations
from django.db import models
class Migration(migrations.Migration):
dependencies = [
('devices', '0005_auto_20171026_1835'),
]
operations = [
migrations.AddField(
model_name='device',
name='used_in',
field=models.ForeignKey(blank=True, default=None, null=True, on_delete=django.db.models.deletion.SET_NULL, to='devices.Device'),
),
]
|
MPIB/Lagerregal
|
devices/migrations/0006_device_used_in.py
|
Python
|
bsd-3-clause
| 529
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
''' Update handler, gets new pictures from picasa
only admins should be allowed to access this.'''
# Copyright (C) 2009 Florian Heinle
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import datetime
import logging
import conf
from model import Picture
from utils import EXIF_FMTS, render
from google.appengine.ext import db
from google.appengine.ext.webapp.util import run_wsgi_app
from google.appengine.ext import webapp
from google.appengine.api import memcache
import gdata.photos.service
from gdata.alt.appengine import run_on_appengine
run_on_appengine(gdata.photos.service.PhotosService())
class Update(webapp.RequestHandler):
'''update the datastore with pictures from picasa when called'''
def __init__(self, *args, **kwargs):
'''login to google picasa api services'''
self.gd_client = gdata.photos.service.PhotosService()
self.gd_client.email = conf.picasaweb_username
self.gd_client.password = conf.picasaweb_password
self.gd_client.source = 'photoblog of %s' % conf.picasaweb_username
self.gd_client.ProgrammaticLogin()
super(Update, self).__init__(*args, **kwargs)
def get(self):
'''no arguments'''
photos = self.gd_client.GetFeed(
'/data/feed/api/user/%s/albumid/%s?kind=photo' %
(conf.picasaweb_username, conf.picasaweb_album_id)
)
updated_pics, old_pics = [], []
for photo in photos.entry:
new_pic = Picture()
new_pic.public = True
new_pic.gphoto_id = int(photo.gphoto_id.text)
new_pic.width = int(photo.width.text)
new_pic.height = int(photo.height.text)
new_pic.title = str(photo.title.text)
# size 64 is cropped to square, which we need for the mosaic
thumbnail = photo.media.thumbnail[0].url.replace('/s72/', '/s64-c/')
new_pic.thumbnail = str(thumbnail)
new_pic.content = str(photo.media.content[0].url)
new_pic.uploaded = datetime.datetime.strptime(
photo.published.text[:-5],
'%Y-%m-%dT%H:%M:%S'
)
try:
new_pic.taken = photo.exif.time.datetime()
except AttributeError: # exif time missing
new_pic.taken = new_pic.uploaded
# gdata api provides values as strings. Missing attributes
# are not empty strings but None, hence need to be taken care of
for attr in EXIF_FMTS.keys():
try:
exif_value = getattr(photo.exif, attr).text
conv_func = EXIF_FMTS[attr]
setattr(new_pic,
attr,
conv_func(exif_value)
)
except (AttributeError, ValueError):
# set an empty value for missing keys
setattr(new_pic, attr, EXIF_FMTS[attr]())
try:
new_pic.flash = bool(photo.exif.flash.text.replace('false',''))
except AttributeError:
new_pic.flash = False
try:
new_pic.put()
except db.BadValueError, e:
if e.args[0].startswith('Picture already stored'):
old_pics.append(new_pic)
logging.info(e.args[0])
else:
raise
else:
updated_pics.append(new_pic)
logging.info('New picture: %s' % new_pic.gphoto_id)
logging.debug('Memcache entry for mosaic deleted')
memcache.delete('mosaic')
self.response.out.write(
render('updated.html', {'updated':updated_pics,
'old':old_pics
}))
application = webapp.WSGIApplication(
[('/update', Update)],
debug=conf.debug
)
if __name__ == '__main__':
run_wsgi_app(application)
|
fheinle/Photoblog
|
update.py
|
Python
|
gpl-3.0
| 4,607
|
# Copyright 2013, Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log as logging
from six.moves import http_client
import webob
from cinder.api import extensions
from cinder.api.openstack import wsgi
from cinder.i18n import _, _LI
from cinder import objects
from cinder.objects import fields
LOG = logging.getLogger(__name__)
def authorize(context, action_name):
action = 'snapshot_actions:%s' % action_name
extensions.extension_authorizer('snapshot', action)(context)
class SnapshotActionsController(wsgi.Controller):
def __init__(self, *args, **kwargs):
super(SnapshotActionsController, self).__init__(*args, **kwargs)
LOG.debug("SnapshotActionsController initialized")
@wsgi.action('os-update_snapshot_status')
def _update_snapshot_status(self, req, id, body):
"""Update database fields related to status of a snapshot.
Intended for creation of snapshots, so snapshot state
must start as 'creating' and be changed to 'available',
'creating', or 'error'.
"""
context = req.environ['cinder.context']
authorize(context, 'update_snapshot_status')
LOG.debug("body: %s", body)
try:
status = body['os-update_snapshot_status']['status']
except KeyError:
msg = _("'status' must be specified.")
raise webob.exc.HTTPBadRequest(explanation=msg)
# Allowed state transitions
status_map = {fields.SnapshotStatus.CREATING:
[fields.SnapshotStatus.CREATING,
fields.SnapshotStatus.AVAILABLE,
fields.SnapshotStatus.ERROR],
fields.SnapshotStatus.DELETING:
[fields.SnapshotStatus.DELETING,
fields.SnapshotStatus.ERROR_DELETING]}
current_snapshot = objects.Snapshot.get_by_id(context, id)
if current_snapshot.status not in status_map:
msg = _("Snapshot status %(cur)s not allowed for "
"update_snapshot_status") % {
'cur': current_snapshot.status}
raise webob.exc.HTTPBadRequest(explanation=msg)
if status not in status_map[current_snapshot.status]:
msg = _("Provided snapshot status %(provided)s not allowed for "
"snapshot with status %(current)s.") % \
{'provided': status,
'current': current_snapshot.status}
raise webob.exc.HTTPBadRequest(explanation=msg)
update_dict = {'id': id,
'status': status}
progress = body['os-update_snapshot_status'].get('progress', None)
if progress:
# This is expected to be a string like '73%'
msg = _('progress must be an integer percentage')
try:
integer = int(progress[:-1])
except ValueError:
raise webob.exc.HTTPBadRequest(explanation=msg)
if integer < 0 or integer > 100 or progress[-1] != '%':
raise webob.exc.HTTPBadRequest(explanation=msg)
update_dict.update({'progress': progress})
LOG.info(_LI("Updating snapshot %(id)s with info %(dict)s"),
{'id': id, 'dict': update_dict})
current_snapshot.update(update_dict)
current_snapshot.save()
return webob.Response(status_int=http_client.ACCEPTED)
class Snapshot_actions(extensions.ExtensionDescriptor):
"""Enable snapshot manager actions."""
name = "SnapshotActions"
alias = "os-snapshot-actions"
updated = "2013-07-16T00:00:00+00:00"
def get_controller_extensions(self):
controller = SnapshotActionsController()
extension = extensions.ControllerExtension(self,
'snapshots',
controller)
return [extension]
|
ge0rgi/cinder
|
cinder/api/contrib/snapshot_actions.py
|
Python
|
apache-2.0
| 4,496
|
# Copyright 2019 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Presubmit script for changes affecting tools/style_variable_generator/
See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts
for more details about the presubmit API built into depot_tools.
"""
WHITELIST = [r'.+_test.py$']
def CheckChangeOnUpload(input_api, output_api):
return input_api.canned_checks.RunUnitTestsInDirectory(
input_api, output_api, '.', whitelist=WHITELIST)
def CheckChangeOnCommit(input_api, output_api):
return input_api.canned_checks.RunUnitTestsInDirectory(
input_api, output_api, '.', whitelist=WHITELIST)
|
endlessm/chromium-browser
|
tools/style_variable_generator/PRESUBMIT.py
|
Python
|
bsd-3-clause
| 746
|
# This file is part of project Sverchok. It's copyrighted by the contributors
# recorded in the version control history of the file, available from
# its original location https://github.com/nortikin/sverchok/commit/master
#
# SPDX-License-Identifier: GPL3
# License-Filename: LICENSE
import numpy as np
from collections import defaultdict
from math import sqrt
import bmesh
import mathutils
from mathutils.bvhtree import BVHTree
from sverchok.data_structure import repeat_last_for_length
from sverchok.utils.sv_mesh_utils import polygons_to_edges
from sverchok.utils.sv_bmesh_utils import pydata_from_bmesh, bmesh_from_pydata
from sverchok.utils.geom import center, linear_approximation
NONE = 'NONE'
BVH = 'BVH'
LINEAR = 'LINEAR'
NORMAL = 'NORMAL'
MINIMUM = 'MIN'
MAXIMUM = 'MAX'
AVERAGE = 'MEAN'
def mask_axes(src_vert, dst_vert, axes):
if axes == {0,1,2}:
return dst_vert
result = []
for axis in range(3):
if axis in axes:
result.append(dst_vert[axis])
else:
result.append(src_vert[axis])
return result
def map_mask_axes(src_verts, dst_verts, axes):
if axes == {0,1,2}:
return dst_verts
result = np.asarray(src_verts).copy()
dst = np.asarray(dst_verts)
for i in range(3):
if i in axes:
result[:,i] = dst[:,i]
return result.tolist()
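# Example (illustrative): mask_axes((0, 0, 0), (1, 2, 3), {0, 2}) -> [1, 0, 3],
# i.e. only the X and Z coordinates are taken from the destination vertex.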
def lloyd_relax(vertices, faces, iterations, mask=None, method=NORMAL, skip_boundary=True, use_axes={0,1,2}):
"""
supported shape preservation methods: NONE, NORMAL, LINEAR, BVH
"""
def do_iteration(bvh, bm):
verts_out = []
face_centers = np.array([face.calc_center_median() for face in bm.faces])
for bm_vert in bm.verts:
co = bm_vert.co
if (skip_boundary and bm_vert.is_boundary) or (mask is not None and not mask[bm_vert.index]):
new_vert = tuple(co)
else:
normal = bm_vert.normal
cs = np.array([face_centers[face.index] for face in bm_vert.link_faces])
if method == NONE:
new_vert = cs.mean(axis=0)
elif method == NORMAL:
median = mathutils.Vector(cs.mean(axis=0))
dv = median - co
dv = dv - dv.project(normal)
new_vert = co + dv
elif method == LINEAR:
approx = linear_approximation(cs)
median = mathutils.Vector(approx.center)
plane = approx.most_similar_plane()
dist = plane.distance_to_point(bm_vert.co)
new_vert = median + plane.normal.normalized() * dist
elif method == BVH:
median = mathutils.Vector(cs.mean(axis=0))
new_vert, normal, idx, dist = bvh.find_nearest(median)
else:
raise Exception("Unsupported volume preservation method")
new_vert = tuple(new_vert)
new_vert = mask_axes(tuple(co), new_vert, use_axes)
verts_out.append(new_vert)
return verts_out
if mask is not None:
mask = repeat_last_for_length(mask, len(vertices))
bvh = BVHTree.FromPolygons(vertices, faces)
for i in range(iterations):
bm = bmesh_from_pydata(vertices, [], faces, normal_update=True)
vertices = do_iteration(bvh, bm)
bm.free()
return vertices
def edges_relax(vertices, edges, faces, iterations, k, mask=None, method=NONE, target=AVERAGE, skip_boundary=True, use_axes={0,1,2}):
"""
supported shape preservation methods: NONE, NORMAL, BVH
"""
def do_iteration(bvh, bm, verts):
verts = np.asarray(verts)
v1s = verts[edges[:,0]]
v2s = verts[edges[:,1]]
edge_vecs = v2s - v1s
edge_lens = np.linalg.norm(edge_vecs, axis=1)
if target == MINIMUM:
target_len = np.min(edge_lens)
elif target == MAXIMUM:
target_len = np.max(edge_lens)
elif target == AVERAGE:
target_len = np.mean(edge_lens)
else:
raise Exception("Unsupported target edge length type")
forces = defaultdict(lambda: np.zeros((3,)))
counts = defaultdict(int)
for edge_idx, (v1_idx, v2_idx) in enumerate(edges):
edge_vec = edge_vecs[edge_idx]
edge_len = edge_lens[edge_idx]
d_len = (edge_len - target_len)/2.0
dv1 = d_len * edge_vec
dv2 = - d_len * edge_vec
forces[v1_idx] += dv1
forces[v2_idx] += dv2
counts[v1_idx] += 1
counts[v2_idx] += 1
target_verts = verts.copy()
for v_idx in range(len(verts)):
if skip_boundary and bm.verts[v_idx].is_boundary:
continue
if mask is not None and not mask[v_idx]:
continue
count = counts[v_idx]
if count:
forces[v_idx] /= count
target_verts[v_idx] += k*forces[v_idx]
if method == NONE:
verts_out = target_verts.tolist()
elif method == NORMAL:
verts_out = []
for bm_vert in bm.verts:
normal = bm_vert.normal
dv = mathutils.Vector(target_verts[bm_vert.index]) - bm_vert.co
dv = dv - dv.project(normal)
new_vert = tuple(bm_vert.co + dv)
verts_out.append(new_vert)
elif method == BVH:
verts_out = []
for vert in target_verts:
new_vert, normal, idx, dist = bvh.find_nearest(vert)
verts_out.append(tuple(new_vert))
else:
raise Exception("Unsupported shape preservation method")
return map_mask_axes(verts, verts_out, use_axes)
if not edges or not edges[0]:
edges = polygons_to_edges([faces], unique_edges=True)[0]
edges = np.array(edges)
if mask is not None:
mask = repeat_last_for_length(mask, len(vertices))
bvh = BVHTree.FromPolygons(vertices, faces)
for i in range(iterations):
bm = bmesh_from_pydata(vertices, edges, faces, normal_update=True)
vertices = do_iteration(bvh, bm, vertices)
bm.free()
return vertices
def faces_relax(vertices, edges, faces, iterations, k, mask=None, method=NONE, target=AVERAGE, skip_boundary=True, use_axes={0,1,2}):
"""
supported shape preservation methods: NONE, NORMAL, BVH
"""
def do_iteration(bvh, bm):
areas = np.array([face.calc_area() for face in bm.faces])
vert_cos = np.array([tuple(vert.co) for vert in bm.verts])
if target == MINIMUM:
target_area = areas.min()
elif target == MAXIMUM:
target_area = areas.max()
elif target == AVERAGE:
target_area = areas.mean()
else:
raise Exception("Unsupported target face area type")
forces = defaultdict(lambda: np.zeros((3,)))
counts = defaultdict(int)
for bm_face in bm.faces:
face_vert_idxs = [vert.index for vert in bm_face.verts]
face_verts = vert_cos[face_vert_idxs]
mean = face_verts.mean(axis=0)
face_verts_0 = face_verts - mean
src_area = areas[bm_face.index]
scale = sqrt(target_area / src_area)
dvs = (scale - 1) * face_verts_0
for vert_idx, dv in zip(face_vert_idxs, dvs):
forces[vert_idx] += dv
counts[vert_idx] += 1
target_verts = vert_cos.copy()
for bm_vert in bm.verts:
idx = bm_vert.index
if skip_boundary and bm_vert.is_boundary:
continue
if mask is not None and not mask[idx]:
continue
count = counts[idx]
if count:
forces[idx] /= count
force = forces[idx]
target_verts[idx] += k*force
if method == NONE:
verts_out = target_verts.tolist()
elif method == NORMAL:
verts_out = []
for bm_vert in bm.verts:
idx = bm_vert.index
dv = mathutils.Vector(target_verts[idx]) - bm_vert.co
normal = bm_vert.normal
dv = dv - dv.project(normal)
new_vert = tuple(bm_vert.co + dv)
verts_out.append(new_vert)
elif method == BVH:
verts_out = []
for bm_vert in bm.verts:
new_vert, normal, idx, dist = bvh.find_nearest(bm_vert.co)
verts_out.append(tuple(new_vert))
else:
raise Exception("Unsupported shape preservation method")
return map_mask_axes(vert_cos, verts_out, use_axes)
if mask is not None:
mask = repeat_last_for_length(mask, len(vertices))
if not edges or not edges[0]:
edges = polygons_to_edges([faces], unique_edges=True)[0]
bvh = BVHTree.FromPolygons(vertices, faces)
for i in range(iterations):
bm = bmesh_from_pydata(vertices, edges, faces, normal_update=True)
vertices = do_iteration(bvh, bm)
bm.free()
return vertices
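# Usage sketch (illustrative; requires Blender's bmesh/mathutils, so shown as
# a comment). Relax a mesh for 5 Lloyd iterations, keeping vertices near the
# original surface via the BVH method and leaving boundary vertices fixed:
#
#   new_verts = lloyd_relax(verts, faces, 5, method=BVH, skip_boundary=True)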
|
DolphinDream/sverchok
|
utils/relax_mesh.py
|
Python
|
gpl-3.0
| 9,318
|
# ----------------------------------------------------------------------------
# pyglet
# Copyright (c) 2006-2008 Alex Holkner
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of pyglet nor the names of its
# contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------------
'''This file is currently hand-coded; I don't have a MESA header file to build
off.
'''
__docformat__ = 'restructuredtext'
__version__ = '$Id: glxext_mesa.py 1579 2008-01-15 14:47:19Z Alex.Holkner $'
import ctypes
from ctypes import *
from pyglet.gl.lib import link_GLX as _link_function
glXSwapIntervalMESA = _link_function('glXSwapIntervalMESA', c_int, [c_int], 'MESA_swap_control')
|
Codlydodly/python-client
|
venv/lib/python2.7/site-packages/pyglet/gl/glxext_mesa.py
|
Python
|
mit
| 2,103
|
# -*- coding: utf-8 -*-
"""Family module for Wikidata."""
#
# (C) Pywikibot team, 2012-2018
#
# Distributed under the terms of the MIT license.
#
from __future__ import absolute_import, unicode_literals
from pywikibot import config
from pywikibot import family
# The Wikidata family
class Family(family.WikimediaFamily):
"""Family class for Wikidata."""
name = 'wikidata'
test_codes = ('test', 'beta')
langs = {
'wikidata': 'www.wikidata.org',
'test': 'test.wikidata.org',
'beta': 'wikidata.beta.wmflabs.org',
}
interwiki_forward = 'wikipedia'
category_redirect_templates = {
'wikidata': (
'Category redirect',
),
}
# Subpages for documentation.
doc_subpages = {
'_default': (('/doc', ), ['wikidata']),
}
# Disable cosmetic changes
config.cosmetic_changes_disable.update({
'wikidata': ('wikidata', 'test', 'beta')
})
def interface(self, code):
"""Return 'DataSite'."""
return 'DataSite'
def calendarmodel(self, code):
"""Default calendar model for WbTime datatype."""
return 'http://www.wikidata.org/entity/Q1985727'
def shared_geo_shape_repository(self, code):
"""Return Wikimedia Commons as the repository for geo-shapes."""
# Per geoShapeStorageFrontendUrl settings in Wikibase
return ('commons', 'commons')
def shared_tabular_data_repository(self, code):
"""Return Wikimedia Commons as the repository for tabular-datas."""
# Per tabularDataStorageFrontendUrl settings in Wikibase
return ('commons', 'commons')
def default_globe(self, code):
"""Default globe for Coordinate datatype."""
return 'earth'
def globes(self, code):
"""Supported globes for Coordinate datatype."""
return {
'ariel': 'http://www.wikidata.org/entity/Q3343',
'callisto': 'http://www.wikidata.org/entity/Q3134',
'ceres': 'http://www.wikidata.org/entity/Q596',
'deimos': 'http://www.wikidata.org/entity/Q7548',
'dione': 'http://www.wikidata.org/entity/Q15040',
'earth': 'http://www.wikidata.org/entity/Q2',
'enceladus': 'http://www.wikidata.org/entity/Q3303',
'eros': 'http://www.wikidata.org/entity/Q16711',
'europa': 'http://www.wikidata.org/entity/Q3143',
'ganymede': 'http://www.wikidata.org/entity/Q3169',
'gaspra': 'http://www.wikidata.org/entity/Q158244',
'hyperion': 'http://www.wikidata.org/entity/Q15037',
'iapetus': 'http://www.wikidata.org/entity/Q17958',
'io': 'http://www.wikidata.org/entity/Q3123',
'jupiter': 'http://www.wikidata.org/entity/Q319',
'lutetia': 'http://www.wikidata.org/entity/Q107556',
'mars': 'http://www.wikidata.org/entity/Q111',
'mercury': 'http://www.wikidata.org/entity/Q308',
'mimas': 'http://www.wikidata.org/entity/Q15034',
'miranda': 'http://www.wikidata.org/entity/Q3352',
'moon': 'http://www.wikidata.org/entity/Q405',
'oberon': 'http://www.wikidata.org/entity/Q3332',
'phobos': 'http://www.wikidata.org/entity/Q7547',
'phoebe': 'http://www.wikidata.org/entity/Q17975',
'pluto': 'http://www.wikidata.org/entity/Q339',
'rhea': 'http://www.wikidata.org/entity/Q15050',
'steins': 'http://www.wikidata.org/entity/Q150249',
'tethys': 'http://www.wikidata.org/entity/Q15047',
'titan': 'http://www.wikidata.org/entity/Q2565',
'titania': 'http://www.wikidata.org/entity/Q3322',
'triton': 'http://www.wikidata.org/entity/Q3359',
'umbriel': 'http://www.wikidata.org/entity/Q3338',
'venus': 'http://www.wikidata.org/entity/Q313',
'vesta': 'http://www.wikidata.org/entity/Q3030',
}
|
Wesalius/EloBot
|
pywikibot/families/wikidata_family.py
|
Python
|
gpl-3.0
| 3,979
|
#!/usr/bin/env python3
# Copyright (c) 2014-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test running dashd with -reindex and -reindex-chainstate options.
- Start a single node and generate 3 blocks.
- Stop the node and restart it with -reindex. Verify that the node has reindexed up to block 3.
- Stop the node and restart it with -reindex-chainstate. Verify that the node has reindexed up to block 3.
"""
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import wait_until
class ReindexTest(BitcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 1
def reindex(self, justchainstate=False):
self.nodes[0].generate(3)
blockcount = self.nodes[0].getblockcount()
self.stop_nodes()
extra_args = [["-reindex-chainstate" if justchainstate else "-reindex", "-checkblockindex=1"]]
self.start_nodes(extra_args)
wait_until(lambda: self.nodes[0].getblockcount() == blockcount)
self.log.info("Success")
def run_test(self):
self.reindex(False)
self.reindex(True)
self.reindex(False)
self.reindex(True)
if __name__ == '__main__':
ReindexTest().main()
|
dashpay/dash
|
test/functional/feature_reindex.py
|
Python
|
mit
| 1,372
|
# Copyright 2018 Rackspace US Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import datetime
import random
from oslo_utils import uuidutils
from octavia.common import constants
from octavia.common import data_models
import octavia.tests.unit.base as base
class TestDataModels(base.TestCase):
def setUp(self):
self.LB_ID = uuidutils.generate_uuid()
self.LISTENER_ID = uuidutils.generate_uuid()
self.PROJECT_ID = uuidutils.generate_uuid()
self.SERVER_GROUP_ID = uuidutils.generate_uuid()
self.CREATED_AT = datetime.datetime.now()
self.UPDATED_AT = datetime.datetime.utcnow()
self.VIP_IP = '192.0.2.10'
self.VIP_SUBNET_ID = uuidutils.generate_uuid()
self.VIP_NETWORK_ID = uuidutils.generate_uuid()
self.VIP_PORT_ID = uuidutils.generate_uuid()
self.VIP_QOS_ID = uuidutils.generate_uuid()
self.POOL_ID = uuidutils.generate_uuid()
self.AMP_ID = uuidutils.generate_uuid()
self.COMPUTE_ID = uuidutils.generate_uuid()
self.IMAGE_ID = uuidutils.generate_uuid()
self.COMPUTE_FLAVOR = uuidutils.generate_uuid()
self.LB_obj = data_models.LoadBalancer(
id=self.LB_ID,
project_id=self.PROJECT_ID,
name='test-lb',
description='test-lb-description',
provisioning_status='great',
operating_status='even-better',
enabled=True,
vip=None,
vrrp_group=1,
topology='infinite',
listeners=[],
amphorae=[],
pools=[],
server_group_id=self.SERVER_GROUP_ID,
created_at=self.CREATED_AT,
updated_at=self.UPDATED_AT)
self.VIP_obj = data_models.Vip(
load_balancer_id=self.LB_ID,
ip_address=self.VIP_IP,
subnet_id=self.VIP_SUBNET_ID,
network_id=self.VIP_NETWORK_ID,
port_id=self.VIP_PORT_ID,
qos_policy_id=self.VIP_QOS_ID)
self.POOL_obj = data_models.Pool(
id=self.POOL_ID,
project_id=self.PROJECT_ID,
name='test-pool',
description='test-pool-description',
load_balancer_id=self.LB_ID,
load_balancer=None,
protocol='avian',
lb_algorithm='UseAllofThem',
enabled=True,
provisioning_status='great',
operating_status='even-better',
members=[],
health_monitor=None,
session_persistence=None,
listeners=[],
l7policies=[],
created_at=self.CREATED_AT,
updated_at=self.UPDATED_AT)
self.SP_obj = data_models.SessionPersistence(
pool_id=self.POOL_ID,
type='adhesive',
cookie_name='chocolate',
pool=None)
self.AMP_obj = data_models.Amphora(
id=self.AMP_ID,
load_balancer_id=self.LB_ID,
compute_id=self.COMPUTE_ID,
status=constants.ACTIVE,
lb_network_ip=None,
vrrp_ip=None,
ha_ip=None,
vrrp_port_id=None,
ha_port_id=self.VIP_PORT_ID,
load_balancer=self.LB_obj,
role=constants.ROLE_MASTER,
cert_expiration=None,
cert_busy=False,
vrrp_interface=None,
vrrp_id=None,
vrrp_priority=constants.ROLE_MASTER_PRIORITY,
cached_zone=None,
created_at=self.CREATED_AT,
updated_at=self.UPDATED_AT,
image_id=self.IMAGE_ID,
compute_flavor=self.COMPUTE_FLAVOR
)
self.QUOTA_obj = data_models.Quotas(
project_id=self.PROJECT_ID,
load_balancer=None,
listener=None,
pool=None,
health_monitor=None,
member=None,
l7policy=None,
l7rule=None,
in_use_health_monitor=None,
in_use_listener=None,
in_use_load_balancer=None,
in_use_member=None,
in_use_pool=None,
in_use_l7policy=None,
in_use_l7rule=None
)
super().setUp()
def test_LoadBalancer_update(self):
new_id = uuidutils.generate_uuid()
new_project_id = uuidutils.generate_uuid()
new_server_group_id = uuidutils.generate_uuid()
new_created_at = self.CREATED_AT + datetime.timedelta(minutes=5)
new_updated_at = self.UPDATED_AT + datetime.timedelta(minutes=10)
new_name = 'new-test-lb'
new_description = 'new-test-lb-description'
new_provisioning_status = 'new-great'
new_operating_status = 'new-even-better'
new_enabled = False
new_vrrp_group = 2
new_topology = 'new-infinite'
reference_LB_obj = data_models.LoadBalancer(
id=new_id,
project_id=new_project_id,
name=new_name,
description=new_description,
provisioning_status=new_provisioning_status,
operating_status=new_operating_status,
enabled=new_enabled,
vip=None,
vrrp_group=new_vrrp_group,
topology=new_topology,
listeners=[],
amphorae=[],
pools=[],
server_group_id=new_server_group_id,
created_at=new_created_at,
updated_at=new_updated_at)
update_dict = {
'id': new_id,
'project_id': new_project_id,
'name': new_name,
'description': new_description,
'provisioning_status': new_provisioning_status,
'operating_status': new_operating_status,
'enabled': new_enabled,
'vrrp_group': new_vrrp_group,
'topology': new_topology,
'server_group_id': new_server_group_id,
'created_at': new_created_at,
'updated_at': new_updated_at
}
test_LB_obj = copy.deepcopy(self.LB_obj)
test_LB_obj.update(update_dict)
self.assertEqual(reference_LB_obj, test_LB_obj)
def test_LoadBalancer_update_add_vip(self):
new_ip = '192.0.2.44'
new_subnet_id = uuidutils.generate_uuid()
new_network_id = uuidutils.generate_uuid()
new_port_id = uuidutils.generate_uuid()
new_qos_id = uuidutils.generate_uuid()
reference_VIP_obj = data_models.Vip(
load_balancer_id=self.LB_ID,
ip_address=new_ip,
subnet_id=new_subnet_id,
network_id=new_network_id,
port_id=new_port_id,
load_balancer=None,
qos_policy_id=new_qos_id
)
update_dict = {
'vip': {
'ip_address': new_ip,
'subnet_id': new_subnet_id,
'network_id': new_network_id,
'port_id': new_port_id,
'load_balancer': None,
'qos_policy_id': new_qos_id
}
}
test_LB_obj = copy.deepcopy(self.LB_obj)
test_LB_obj.update(update_dict)
self.assertEqual(reference_VIP_obj, test_LB_obj.vip)
def test_LoadBalancer_update_vip_update(self):
new_id = uuidutils.generate_uuid()
new_ip = '192.0.2.44'
new_subnet_id = uuidutils.generate_uuid()
new_network_id = uuidutils.generate_uuid()
new_port_id = uuidutils.generate_uuid()
new_qos_id = uuidutils.generate_uuid()
reference_VIP_obj = data_models.Vip(
load_balancer_id=new_id,
ip_address=new_ip,
subnet_id=new_subnet_id,
network_id=new_network_id,
port_id=new_port_id,
qos_policy_id=new_qos_id
)
update_dict = {
'vip': {
'load_balancer_id': new_id,
'ip_address': new_ip,
'subnet_id': new_subnet_id,
'network_id': new_network_id,
'port_id': new_port_id,
'qos_policy_id': new_qos_id
}
}
test_LB_obj = copy.deepcopy(self.LB_obj)
test_LB_obj.vip = copy.deepcopy(self.VIP_obj)
test_LB_obj.update(update_dict)
self.assertEqual(reference_VIP_obj, test_LB_obj.vip)
def test_Pool_update(self):
new_id = uuidutils.generate_uuid()
new_project_id = uuidutils.generate_uuid()
new_name = 'new-test-pool'
new_description = 'new-test-pool-description'
new_lb_id = uuidutils.generate_uuid()
new_protocol = 'sneaker'
new_lb_algorithm = 'JustOne'
new_enabled = False
new_provisioning_status = 'new-great'
new_operating_status = 'new-even-better'
new_created_at = self.CREATED_AT + datetime.timedelta(minutes=5)
new_updated_at = self.UPDATED_AT + datetime.timedelta(minutes=10)
reference_Pool_obj = data_models.Pool(
id=new_id,
project_id=new_project_id,
name=new_name,
description=new_description,
load_balancer_id=new_lb_id,
protocol=new_protocol,
lb_algorithm=new_lb_algorithm,
enabled=new_enabled,
provisioning_status=new_provisioning_status,
operating_status=new_operating_status,
members=[],
health_monitor=None,
session_persistence=None,
listeners=[],
l7policies=[],
created_at=new_created_at,
updated_at=new_updated_at)
update_dict = {
'id': new_id,
'project_id': new_project_id,
'name': new_name,
'description': new_description,
'load_balancer_id': new_lb_id,
'protocol': new_protocol,
'lb_algorithm': new_lb_algorithm,
'enabled': new_enabled,
'provisioning_status': new_provisioning_status,
'operating_status': new_operating_status,
'created_at': new_created_at,
'updated_at': new_updated_at}
test_Pool_obj = copy.deepcopy(self.POOL_obj)
test_Pool_obj.update(update_dict)
self.assertEqual(reference_Pool_obj, test_Pool_obj)
def test_Pool_update_add_SP(self):
new_type = 'glue'
new_cookie_name = 'chip'
reference_SP_obj = data_models.SessionPersistence(
pool_id=self.POOL_ID,
type=new_type,
cookie_name=new_cookie_name,
pool=None)
update_dict = {
'session_persistence': {
'type': new_type,
'cookie_name': new_cookie_name
}
}
test_Pool_obj = copy.deepcopy(self.POOL_obj)
test_Pool_obj.update(update_dict)
self.assertEqual(reference_SP_obj, test_Pool_obj.session_persistence)
def test_Pool_update_delete_SP(self):
update_dict = {'session_persistence': {}}
test_Pool_obj = copy.deepcopy(self.POOL_obj)
test_Pool_obj.session_persistence = copy.deepcopy(self.SP_obj)
test_Pool_obj.session_persistence.pool = test_Pool_obj
test_Pool_obj.update(update_dict)
self.assertIsNone(test_Pool_obj.session_persistence)
def test_Pool_update_SP_update(self):
new_type = 'glue'
new_cookie_name = 'chip'
update_dict = {
'session_persistence': {
'type': new_type,
'cookie_name': new_cookie_name
}
}
test_Pool_obj = copy.deepcopy(self.POOL_obj)
reference_SP_obj = data_models.SessionPersistence(
pool_id=self.POOL_ID,
type=new_type,
cookie_name=new_cookie_name,
pool=test_Pool_obj)
test_Pool_obj.session_persistence = copy.deepcopy(self.SP_obj)
test_Pool_obj.session_persistence.pool = test_Pool_obj
test_Pool_obj.update(update_dict)
self.assertEqual(reference_SP_obj, test_Pool_obj.session_persistence)
def test_Amphora_update(self):
new_id = uuidutils.generate_uuid()
new_status = constants.ERROR
new_role = constants.ROLE_BACKUP
new_vrrp_priority = constants.ROLE_BACKUP_PRIORITY
new_created_at = self.CREATED_AT + datetime.timedelta(minutes=5)
new_updated_at = self.UPDATED_AT + datetime.timedelta(minutes=10)
new_image_id = uuidutils.generate_uuid()
new_compute_flavor = uuidutils.generate_uuid()
update_dict = {
'id': new_id,
'status': new_status,
'role': new_role,
'vrrp_priority': new_vrrp_priority,
'created_at': new_created_at,
'updated_at': new_updated_at,
'image_id': new_image_id,
'compute_flavor': new_compute_flavor
}
test_Amp_obj = copy.deepcopy(self.AMP_obj)
reference_Amp_obj = data_models.Amphora(
id=new_id,
load_balancer_id=self.LB_ID,
compute_id=self.COMPUTE_ID,
status=new_status,
lb_network_ip=None,
vrrp_ip=None,
ha_ip=None,
vrrp_port_id=None,
ha_port_id=self.VIP_PORT_ID,
load_balancer=self.LB_obj,
role=new_role,
cert_expiration=None,
cert_busy=False,
vrrp_interface=None,
vrrp_id=None,
vrrp_priority=constants.ROLE_BACKUP_PRIORITY,
cached_zone=None,
created_at=new_created_at,
updated_at=new_updated_at,
image_id=new_image_id,
compute_flavor=new_compute_flavor
)
test_Amp_obj.update(update_dict)
self.assertEqual(reference_Amp_obj, test_Amp_obj)
def test_Quota_update(self):
new_loadbalancer_quota = 10
new_listener_quota = 11
new_pool_quota = 12
new_healthmonitor_quota = 13
new_member_quota = 14
new_l7policy_quota = 15
new_l7rule_quota = 16
update_dict = {
'load_balancer': new_loadbalancer_quota,
'listener': new_listener_quota,
'pool': new_pool_quota,
'health_monitor': new_healthmonitor_quota,
'member': new_member_quota,
'l7policy': new_l7policy_quota,
'l7rule': new_l7rule_quota
}
test_Quota_obj = copy.deepcopy(self.QUOTA_obj)
reference_Quota_obj = data_models.Quotas(
project_id=self.PROJECT_ID,
load_balancer=new_loadbalancer_quota,
listener=new_listener_quota,
pool=new_pool_quota,
health_monitor=new_healthmonitor_quota,
member=new_member_quota,
l7policy=new_l7policy_quota,
l7rule=new_l7rule_quota,
in_use_health_monitor=None,
in_use_listener=None,
in_use_load_balancer=None,
in_use_member=None,
in_use_pool=None,
in_use_l7policy=None,
in_use_l7rule=None
)
test_Quota_obj.update(update_dict)
self.assertEqual(reference_Quota_obj, test_Quota_obj)
def test_ListenerStatistics_iadd(self):
# test incrementing add function
bytes_in1 = random.randrange(1000000000)
bytes_out1 = random.randrange(1000000000)
active_conns1 = random.randrange(1000000000)
total_conns1 = random.randrange(1000000000)
request_errors1 = random.randrange(1000000000)
stats_1 = data_models.ListenerStatistics(
listener_id=self.LISTENER_ID,
amphora_id=self.AMP_ID,
bytes_in=bytes_in1,
bytes_out=bytes_out1,
active_connections=active_conns1,
total_connections=total_conns1,
request_errors=request_errors1
)
bytes_in2 = random.randrange(1000000000)
bytes_out2 = random.randrange(1000000000)
active_conns2 = random.randrange(1000000000)
total_conns2 = random.randrange(1000000000)
request_errors2 = random.randrange(1000000000)
stats_2 = data_models.ListenerStatistics(
listener_id="listener 2",
amphora_id="amphora 2",
bytes_in=bytes_in2,
bytes_out=bytes_out2,
active_connections=active_conns2,
total_connections=total_conns2,
request_errors=request_errors2
)
# test successful +=
stats_1 += stats_2
# not a delta, so it won't be incremented
self.assertEqual(stats_1.active_connections, active_conns1)
self.assertEqual(stats_1.listener_id, self.LISTENER_ID)
self.assertEqual(stats_1.amphora_id, self.AMP_ID)
# deltas will be incremented
self.assertEqual(stats_1.bytes_in, bytes_in1 + bytes_in2)
self.assertEqual(stats_1.bytes_out, bytes_out1 + bytes_out2)
self.assertEqual(stats_1.total_connections,
total_conns1 + total_conns2)
self.assertEqual(stats_1.request_errors,
request_errors1 + request_errors2)
# test incrementing an incompatible object
self.assertRaises(TypeError, stats_1.__iadd__, "boom")
|
openstack/octavia
|
octavia/tests/unit/common/test_data_models.py
|
Python
|
apache-2.0
| 17,864
|
from sklearn.datasets import load_digits
from sklearn.preprocessing import scale
from sklearn.decomposition import PCA
from sklearn.cluster import KMeans
import matplotlib.pyplot as plt
from sklearn import metrics
import numpy as np
# ---------------------------------------------------------------------------------------------------------- #
# A script using K-Means Clustering to classify handwritten digits. #
# Written by @tobinatore #
# #
# This script uses the following dataset: #
# Sklearn's own written digits dataset #
# ---------------------------------------------------------------------------------------------------------- #
def bench_k_means(estimator, name, data):
estimator.fit(data)
# A short explanation for every score:
# homogeneity: each cluster contains only members of a single class (range 0 - 1)
# completeness: all members of a given class are assigned to the same cluster (range 0 - 1)
# v_measure: harmonic mean of homogeneity and completeness
# adjusted_rand: similarity of the actual values and their predictions,
# ignoring permutations and with chance normalization
# (range -1 to 1, -1 being bad, 1 being perfect and 0 being random)
# adjusted_mutual_info: agreement of the actual values and predictions, ignoring permutations
# (range 0 - 1, with 0 being random agreement and 1 being perfect agreement)
# silhouette: uses the mean distance between a sample and all other points in the same class,
# as well as the mean distance between a sample and all other points in the nearest cluster
    #                 to calculate a score (range: -1 to 1, where -1 indicates incorrect
    #                 clustering, 1 indicates highly dense clustering, and
    #                 0 indicates overlapping clusters).
print('%-9s \t%i \thomogeneity: %.3f \tcompleteness: %.3f \tv-measure: %.3f \tadjusted-rand: %.3f \t'
'adjusted-mutual-info: %.3f \tsilhouette: %.3f'
% (name, estimator.inertia_,
metrics.homogeneity_score(y, estimator.labels_),
metrics.completeness_score(y, estimator.labels_),
metrics.v_measure_score(y, estimator.labels_),
metrics.adjusted_rand_score(y, estimator.labels_),
metrics.adjusted_mutual_info_score(y, estimator.labels_),
metrics.silhouette_score(data, estimator.labels_,
metric='euclidean')))
def plot(kmeans, data):
reduced_data = PCA(n_components=2).fit_transform(data)
kmeans.fit(reduced_data)
# Step size of the mesh. Decrease to increase the quality of the VQ.
h = .01 # point in the mesh [x_min, x_max]x[y_min, y_max].
    # Plot the decision boundary. For that, we will assign a color to each point in the mesh.
x_min, x_max = reduced_data[:, 0].min() - 1, reduced_data[:, 0].max() + 1
y_min, y_max = reduced_data[:, 1].min() - 1, reduced_data[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
# Obtain labels for each point in mesh. Use last trained model.
Z = kmeans.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.figure(1)
plt.clf()
plt.imshow(Z, interpolation='nearest',
extent=(xx.min(), xx.max(), yy.min(), yy.max()),
cmap=plt.cm.Paired,
aspect='auto', origin='lower')
plt.plot(reduced_data[:, 0], reduced_data[:, 1], 'k.', markersize=2)
# Plot the centroids as a white X
centroids = kmeans.cluster_centers_
plt.scatter(centroids[:, 0], centroids[:, 1],
marker='x', s=169, linewidths=3,
color='w', zorder=10)
plt.title('K-means clustering on the digits dataset (PCA-reduced data)\n'
'Centroids are marked with white cross')
plt.xlim(x_min, x_max)
plt.ylim(y_min, y_max)
plt.xticks(())
plt.yticks(())
plt.show()
# Loading and preparing the data
digits = load_digits()
data = scale(digits.data)
y = digits.target
# Number of clusters
k = len(np.unique(y))
samples, features = data.shape
# Defining the classifier
classifier = KMeans(n_clusters=k, init='k-means++', n_init=10, max_iter=300)
# Computing the score of this classifier
bench_k_means(classifier, "kmeans++", data)
plot(classifier, data)
|
HoussemCharf/FunUtils
|
machine learning/k_means_clustering.py
|
Python
|
mit
| 4,867
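Editor's aside: the script above only benchmarks the k-means++ initialisation. A minimal, illustrative sketch (not part of the repository file) of how bench_k_means could also score a random-initialisation run, assuming the script's data, y and k globals are still in scope:

# Illustrative follow-up: benchmark a second initialisation strategy
# with the same helper, data and cluster count as the script above.
random_init = KMeans(n_clusters=k, init='random', n_init=10, max_iter=300)
bench_k_means(random_init, "random", data)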
|
#!/usr/bin/python
# Begin sequences.py
#---------------------------------------------------------------------------
"""
Originally written by Jesse Bloom, 2004.
Updated by Zach Sailer, 2017."""
#---------------------------------------------------------------------------
import random, shelve, os
#---------------------------------------------------------------------------
class SequenceError(Exception):
"""Error with a lattice protein sequence."""
pass
#---------------------------------------------------------------------------
# codes for all residues
_residues = ['A', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'K', 'L', 'M', 'N', 'P',
'Q', 'R', 'S', 'T', 'V', 'W', 'Y']
assert len(_residues) == 20
def hamming_distance(seq1, seq2):
"""Returns the Hamming distance between two sequences."""
if len(seq1) != len(seq2):
raise SequenceError("Sequences differ in length.")
d = 0
for i in range(len(seq1)):
if seq1[i] != seq2[i]:
d += 1
return d
def find_differences(s1, s2):
"""Return the index of differences between two sequences."""
indices = list()
for i in range(len(s1)):
if s1[i] != s2[i]:
indices.append(i)
return indices
def random_sequence(length):
"""Returns a random sequence of the specified length."""
if not (isinstance(length, int) and length > 0):
raise SequenceError("Invalid sequence length of %r." % length)
s = [random.choice(_residues) for i in range(length)]
return s
def mutate_sequence(seq, mutrate):
"""Mutates a protein sequence.
Parameters
----------
seq :
is a protein sequence, specified as either a string or a list.
mutrate :
Mutates each residue in 'seq' to some different residue with
probability 'mutrate'. So 'mutrate' is the per residue
mutation rate.
Returns
-------
newseq :
the new sequence as a list."""
mutated = False
for ires in range(len(seq)):
if random.random() < mutrate:
if not mutated:
mutated = True
newseq = list(seq)
newres = random.choice(_residues)
while newres == seq[ires]:
newres = random.choice(_residues)
newseq[ires] = newres
if mutated:
return newseq
else:
return seq
def n_mutants(seq, nmutations, nsequences):
"""Returns sequences with a specified number of mutations.
Parameters
----------
seq :
is a string or list specifying the protein we wish to mutate.
nmutations :
is the number of mutations each mutant of 'seq' should
have. It must be <= 'len(seq)' and > 0.
nsequences :
is the number of mutant sequences to make. It can be
'ALL', in which case we make all possible mutants with 'nmutations',
or it can be some positive integer in which case we make this
many randomly chosen mutants with 'nmutations' mutations.
        'ALL' is a valid option only when 'nmutations' is 1 or 2.
    Returns
    -------
seqlist : list
List of mutant sequences n mutations away.
"""
if not (0 < nmutations <= len(seq)):
raise SequenceError("Invalid 'nmutations' of %r." % nmutations)
seqlist = []
if nsequences == 'ALL':
if nmutations == 1:
for ires in range(len(seq)):
for mutres in _residues:
if mutres != seq[ires]:
newseq = list(seq)
newseq[ires] = mutres
seqlist.append(newseq)
elif nmutations == 2:
for ires in range(len(seq)):
for imutres in _residues:
if imutres != seq[ires]:
for jres in range(ires + 1, len(seq)):
for jmutres in _residues:
if jmutres != seq[jres]:
newseq = list(seq)
newseq[ires] = imutres
newseq[jres] = jmutres
seqlist.append(newseq)
else:
raise SequenceError("'nsequences' cannot be 'ALL' when 'nmutations' is %r." % nmutations)
elif isinstance(nsequences, int) and nsequences > 0:
for imutant in range(nsequences):
newseq = list(seq)
for imut in range(nmutations):
ires = random.choice(range(len(seq)))
while newseq[ires] != seq[ires]:
ires = random.choice(range(len(seq)))
mutres = random.choice(_residues)
while mutres == seq[ires]:
mutres = random.choice(_residues)
newseq[ires] = mutres
seqlist.append(newseq)
else:
raise SequenceError("Invalid 'nsequences' of %r." % nsequences)
return seqlist
|
Zsailer/latticeproteins
|
latticeproteins/sequences.py
|
Python
|
gpl-3.0
| 4,953
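A short usage sketch for the sequence helpers above. It is illustrative only and assumes the module is importable as latticeproteins.sequences, matching the repository path shown:

# Hypothetical usage of the lattice-protein sequence helpers above.
from latticeproteins.sequences import (random_sequence, mutate_sequence,
                                       hamming_distance, n_mutants)

wt = random_sequence(20)                   # random 20-residue sequence (list)
mut = mutate_sequence(wt, 0.05)            # each site mutates with p = 0.05
print(hamming_distance(wt, mut))           # number of differing positions
singles = n_mutants(wt, 1, 'ALL')          # every single-point mutant
print(len(singles))                        # 20 sites x 19 alternatives = 380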
|
# -*- coding: utf-8 -*-
"""
Created on Fri Mar 17 18:39:09 2017
@author: rstreet
"""
from . import query_db
from . import observation_classes
from . import utilities
from . import rome_fields_dict
from . import observing_tools
import copy
from . import obs_conditions_tolerances
def build_rea_obs(script_config,log=None,tap_list=None):
"""Function to define the observations to be taken for the REA
microlensing program, based on the targets extracted from the database
as recommended by TAP"""
if tap_list == None:
tap_list = query_db.get_tap_list(log=log)
(default_obs_sequence, tolerances) = rea_obs_sequence()
rea_obs = []
for s in range(0,len(default_obs_sequence['sites']),1):
site_code = default_obs_sequence['sites'][s]
(site_obs_sequence, tolerances) = rea_obs_sequence(site_code)
if log != None:
log.info('Building observation requests for site '+site_code+ ':')
for target in tap_list:
if 'Outside' not in str(target.field):
rome_field=rome_fields_dict.field_dict[str(target.field)]
(site_obs_sequence, tolerances) = rea_obs_sequence(site_code)
site_obs_sequence['filters'] = [ str(target.passband) ]
(ts_submit, ts_expire) = observation_classes.get_obs_dates(site_obs_sequence['TTL_'+str(target.priority)+'_days'])
if log!=None:
log.info('Site observing sequence: '+repr(site_obs_sequence))
target_obs_sequence = observing_tools.review_filters_for_observing_conditions(site_obs_sequence,rome_field,
ts_submit, ts_expire, tolerances,
log=log)
if log!=None:
log.info('Target observing sequence: '+repr(target_obs_sequence))
if len(target_obs_sequence['filters']) > 0:
obs = observation_classes.ObsRequest()
obs.name = str(target.field)
obs.ra = rome_field[2]
obs.dec = rome_field[3]
obs.site = target_obs_sequence['sites'][0]
obs.observatory= target_obs_sequence['domes'][0]
obs.tel = target_obs_sequence['tels'][0]
obs.instrument = target_obs_sequence['instruments'][0]
obs.instrument_class = '1M0-SCICAM-SINISTRO'
obs.set_aperture_class()
obs.moon_sep_min = target_obs_sequence['moon_sep_min']
obs.filters = [ str(target.passband) ]
obs.exposure_times = [ float(target.texp) ]
obs.exposure_counts = [ int(target.nexp) ]
obs.cadence = float(target.tsamp)
obs.jitter = target_obs_sequence['jitter_hrs']
obs.priority = float(target.ipp)
obs.ttl = target_obs_sequence['TTL_'+str(target.priority)+'_days']
obs.user_id = script_config['user_id']
obs.proposal_id = script_config['proposal_id']
obs.token = script_config['token']
obs.focus_offset = [ 0.0 ]
#obs.request_type = str(target.priority)
obs.request_type = 'M'
obs.req_origin = 'obscontrol'
obs.get_group_id()
rea_obs.append(obs)
if log != None:
log.info(obs.summary())
else:
if log != None:
log.info('WARNING: No observations possible')
else:
if log != None:
log.info('WARNING: TAP selected a target outside the ROME fields')
if log != None:
log.info('\n')
return rea_obs
def rea_obs_sequence(site_code=None):
"""Function to define the observation sequence to be taken for all ROME
survey pointings"""
obs_sequence = {
'filters': [ [ 'SDSS-i'],[ 'SDSS-i'],[ 'SDSS-i'] ],
'defocus': [ 0.0, 0.0, 0.0 ],
'sites': ['lsc', 'cpt', 'coj'],
'domes': ['domc', 'domb', 'domb'],
'tels': [ '1m0', '1m0', '1m0' ],
'instruments': ['fa03', 'fa14', 'fa11'],
'moon_sep_min': [ 30.0, 30.0, 30.0 ],
'TTL_N_days': 1.0,
'TTL_A_days': 0.5,
'TTL_L_days': 1.0,
'TTL_B_days': 2.0,
'jitter_hrs': 1.0,
}
tolerances = obs_conditions_tolerances.get_obs_tolerances()
if site_code != None:
s = obs_sequence['sites'].index(site_code)
site_obs_sequence = {}
for key, value in obs_sequence.items():
if type(value) == type([]):
if type(value[s]) == type([]):
site_obs_sequence[key] = value[s]
else:
site_obs_sequence[key] = [ value[s] ]
else:
site_obs_sequence[key] = value
return site_obs_sequence, tolerances
else:
return obs_sequence, tolerances
def get_rea_tsamp(priority):
"""Function to return the sampling rate in decimal hours defined by
the REA strategy, for a given priority.
"""
tsamp_strategy = { 'A': 0.25, # REA High
'L': 1.0, # REA Low
'B': 1.0, # REA Post-High
'N': 0.0, # None
}
if priority in tsamp_strategy.keys():
return tsamp_strategy[priority]
else:
return 0.0
def build_rea_hi_request(script_config, field, exptime, t_sample, log=None):
"""Function to compose a triggered observation in REA-HI mode for a
specific field"""
(default_obs_sequence, tolerances) = rea_obs_sequence()
if log != None:
log.info(' -> Received default_obs_sequence')
rea_obs = []
for s in range(0,len(default_obs_sequence['sites']),1):
site_code = default_obs_sequence['sites'][s]
if log != None:
log.info(' -> Preparing observations for site '+site_code)
(site_obs_sequence, tolerances) = rea_obs_sequence(site_code)
site_obs_sequence['filters'] = [ 'SDSS-i' ]
(ts_submit, ts_expire) = observation_classes.get_obs_dates(1.0)
rome_field=rome_fields_dict.field_dict[field.name]
target_obs_sequence = observing_tools.review_filters_for_observing_conditions(site_obs_sequence,rome_field,
ts_submit, ts_expire, tolerances,
log=log)
if log != None:
log.info(' -> Derived target_obs_sequence')
obs = observation_classes.ObsRequest()
obs.name = str(field.name)
obs.ra = field.field_ra
obs.dec = field.field_dec
obs.site = target_obs_sequence['sites'][0]
obs.observatory= target_obs_sequence['domes'][0]
obs.tel = target_obs_sequence['tels'][0]
obs.instrument = target_obs_sequence['instruments'][0]
obs.instrument_class = '1M0-SCICAM-SINISTRO'
obs.set_aperture_class()
obs.moon_sep_min = target_obs_sequence['moon_sep_min']
obs.filters = [ 'SDSS-i' ]
obs.exposure_times = [ exptime ]
obs.exposure_counts = [ 1 ]
obs.cadence = float(t_sample)
obs.jitter = target_obs_sequence['jitter_hrs']
obs.priority = 1.1
obs.ttl = 1.0
obs.user_id = script_config['user_id']
obs.proposal_id = script_config['proposal_id']
obs.token = script_config['token']
obs.focus_offset = [ 0.0 ]
#obs.request_type = str(target.priority)
obs.request_type = 'A'
obs.req_origin = 'www'
obs.get_group_id()
rea_obs.append(obs)
if log != None:
log.info(' -> Completed obs request for site '+site_code)
return rea_obs
|
ytsapras/robonet_site
|
scripts/rea_obs.py
|
Python
|
gpl-2.0
| 8,387
|
#!/usr/bin/env python
"""Imaging library"""
from setuptools import find_packages, setup
setup(name = 'imaginglib',
version = '0.1',
description = "Imaging Library",
long_description = "For resizing, cropping, rotating and converting images",
platforms = ["Linux"],
author="Kumari Shalini",
author_email="shaliniroy012@gmail.com",
url="https://shaliniroy012.wordpress.com/",
license = "MIT",
packages=find_packages()
)
|
shiminsh/imaginglib
|
setup.py
|
Python
|
mit
| 456
|
import unittest
import string
import base64
import random
from sneakers.encoders.b64 import B64
class TestB64(unittest.TestCase):
def setUp(self):
self.randText = ''.join([random.choice(string.letters) for i in range(10)])
self.b64 = B64()
def test_encode(self):
encoded = self.b64.encode(self.randText)
self.assertEqual(encoded, base64.b64encode(self.randText))
def test_decode(self):
encoded = base64.b64encode(self.randText)
decoded = self.b64.decode(encoded)
self.assertEqual(decoded, self.randText)
|
davinerd/sneaky-creeper
|
sneakers/tests/test_encoders/test_b64.py
|
Python
|
mit
| 578
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2008-2009 Ido Abramovich <ido.deluge@gmail.com>
# Copyright (C) 2009 Andrew Resch <andrewresch@gmail.com>
#
# This file is part of Deluge and is licensed under GNU General Public License 3.0, or later, with
# the additional special exception to link portions of this program with the OpenSSL library.
# See LICENSE for more details.
#
from optparse import make_option
from twisted.internet import defer
import deluge.component as component
from deluge.ui.client import client
from deluge.ui.console.main import BaseCommand
torrent_options = {
"max_download_speed": float,
"max_upload_speed": float,
"max_connections": int,
"max_upload_slots": int,
"private": bool,
"prioritize_first_last": bool,
"is_auto_managed": bool,
"stop_at_ratio": bool,
"stop_ratio": float,
"remove_at_ratio": bool,
"move_on_completed": bool,
"move_on_completed_path": str
}
class Command(BaseCommand):
"""Show and manage per-torrent options"""
option_list = BaseCommand.option_list + (
make_option("-s", "--set", action="store", nargs=2, dest="set", help="set value for key"),
)
usage = "Usage: manage <torrent-id> [<key1> [<key2> ...]]\n"\
" manage <torrent-id> --set <key> <value>"
def handle(self, *args, **options):
self.console = component.get("ConsoleUI")
if options['set']:
return self._set_option(*args, **options)
else:
return self._get_option(*args, **options)
def _get_option(self, *args, **options):
def on_torrents_status(status):
for torrentid, data in status.items():
self.console.write('')
if 'name' in data:
self.console.write('{!info!}Name: {!input!}%s' % data.get('name'))
self.console.write('{!info!}ID: {!input!}%s' % torrentid)
for k, v in data.items():
if k != 'name':
self.console.write('{!info!}%s: {!input!}%s' % (k, v))
def on_torrents_status_fail(reason):
self.console.write('{!error!}Failed to get torrent data.')
torrent_ids = []
torrent_ids.extend(self.console.match_torrent(args[0]))
request_options = []
for opt in args[1:]:
if opt not in torrent_options:
self.console.write('{!error!}Unknown torrent option: %s' % opt)
return
request_options.append(opt)
if not request_options:
request_options = [opt for opt in torrent_options.keys()]
request_options.append('name')
d = client.core.get_torrents_status({"id": torrent_ids}, request_options)
d.addCallback(on_torrents_status)
d.addErrback(on_torrents_status_fail)
return d
def _set_option(self, *args, **options):
deferred = defer.Deferred()
torrent_ids = []
torrent_ids.extend(self.console.match_torrent(args[0]))
key = options["set"][0]
val = options["set"][1] + " " .join(args[1:])
if key not in torrent_options:
self.console.write("{!error!}The key '%s' is invalid!" % key)
return
val = torrent_options[key](val)
def on_set_config(result):
self.console.write("{!success!}Torrent option successfully updated.")
deferred.callback(True)
self.console.write("Setting %s to %s for torrents %s.." % (key, val, torrent_ids))
for tid in torrent_ids:
if key == "move_on_completed_path":
client.core.set_torrent_move_completed_path(tid, val).addCallback(on_set_config)
elif key == "move_on_completed":
client.core.set_torrent_move_completed(tid, val).addCallback(on_set_config)
elif key == "is_auto_managed":
client.core.set_torrent_auto_managed(tid, val).addCallback(on_set_config)
elif key == "remove_at_ratio":
client.core.set_torrent_remove_at_ratio(tid, val).addCallback(on_set_config)
elif key == "prioritize_first_last":
client.core.set_torrent_prioritize_first_last(tid, val).addCallback(on_set_config)
else:
client.core.set_torrent_options(torrent_ids, {key: val}).addCallback(on_set_config)
break
return deferred
def complete(self, line):
# We use the ConsoleUI torrent tab complete method
return component.get("ConsoleUI").tab_complete_torrent(line)
|
rkokkelk/Gulliver
|
deluge/ui/console/commands/manage.py
|
Python
|
gpl-3.0
| 4,581
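Editor's note: the torrent_options table above doubles as a type map — _set_option looks up the constructor registered for a key and uses it to cast the raw console string. A tiny illustration, assuming the torrent_options dictionary from the file above is in scope:

# Illustration only: casting a raw console value via the option table.
key, raw = "max_download_speed", "512.0"
typed = torrent_options[key](raw)   # float("512.0") -> 512.0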
|
import json
from django.http import HttpResponse, JsonResponse
from django.core.mail import send_mail
from django.conf import settings
from django.contrib.sites.models import Site
from django.contrib.auth.models import User
from django.views.generic import CreateView
from django.utils.translation import ugettext as _
import forms
class AjaxableResponseMixin(object):
"""
Mixin to add AJAX support to a form.
Must be used with an object-based FormView (e.g. CreateView)
"""
def form_invalid(self, form):
response = super(AjaxableResponseMixin, self).form_invalid(form)
if self.request.is_ajax():
return JsonResponse(form.errors, status=400)
else:
return response
def form_valid(self, form):
# We make sure to call the parent's form_valid() method because
# it might do some processing (in the case of CreateView, it will
# call form.save() for example).
response = super(AjaxableResponseMixin, self).form_valid(form)
if self.request.is_ajax():
data = {
'pk': self.object.pk,
}
return JsonResponse(data)
else:
return response
class FeedbackFormView(CreateView):
"""
View for creating a new feedback
"""
template_name = "feedback/feedback.html"
form_class = forms.FeedbackForm
def get_form_kwargs(self):
"""
Returns the keyword arguments for instantiating the form.
"""
kwargs = super(FeedbackFormView, self).get_form_kwargs()
if isinstance(self.request.user, object):
kwargs['user'] = self.request.user
if 'data' in kwargs.keys():
post = kwargs['data'].copy()
if isinstance(self.request.user, User):
post['user'] = self.request.user.pk
post['url'] = self.request.get_full_path()
kwargs['data'] = post
if getattr(settings, 'FEEDBACK_FORM_URL', False):
path = settings.FEEDBACK_FORM_URL
else:
path = self.request.get_full_path()
if self.request.META['HTTP_REFERER'] != '%s://%s%s' % (
self.request.is_secure() and 'https' or 'http',
Site.objects.get_current().domain,
path):
kwargs['is_right_url'] = False
return kwargs
def get_success_url(self):
"""
Returns the supplied URL.
"""
return self.request.get_full_path()
def form_valid(self, form):
"""
If the form is valid, save the model.
"""
def send_notification():
"""
Sends feedback notification email
"""
result = {}
data = form.cleaned_data
try:
send_mail(
'Feedback received: {}'.format(data['subject']),
'email: {} \n\n {}'.format(data['email'], data['text']),
settings.SERVER_EMAIL,
[settings.FEEDBACK_EMAIL],
fail_silently=False, )
except:
result = {'error': _('Failed to send email')}
return result
super(FeedbackFormView, self).form_valid(form)
result = {}
if hasattr(settings, 'FEEDBACK_EMAIL'):
result = send_notification()
if self.request.is_ajax():
response = HttpResponse(json.dumps(result))
else:
response = self.render_to_response(result)
return response
def form_invalid(self, form):
"""
If the form is invalid, re-render the context data with the
data-filled form and errors.
"""
if self.request.is_ajax():
response = HttpResponse(json.dumps({'errors': form.errors}))
else:
response = super(FeedbackFormView, self).form_invalid(form)
return response
|
eugena/django-mere-feedback
|
feedback/views.py
|
Python
|
bsd-3-clause
| 3,945
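The AjaxableResponseMixin above closely follows the example in the Django documentation: it is meant to wrap an object-based CreateView. A minimal sketch of how it might be reused, with a hypothetical Suggestion model and field names chosen purely for illustration:

from django.db import models
from django.views.generic import CreateView

class Suggestion(models.Model):          # hypothetical model for illustration
    email = models.EmailField()
    text = models.TextField()

class SuggestionCreateView(AjaxableResponseMixin, CreateView):
    # The mixin is listed first so its form_valid/form_invalid overrides
    # wrap CreateView's implementations.
    model = Suggestion
    fields = ['email', 'text']
    success_url = '/thanks/'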
|
# coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import os
import shutil
from pants.backend.jvm.targets.benchmark import Benchmark
from pants.backend.jvm.tasks.jvm_task import JvmTask
from pants.backend.jvm.tasks.jvm_tool_task_mixin import JvmToolTaskMixin
from pants.base.exceptions import TaskError
from pants.base.workunit import WorkUnitLabel
from pants.java.distribution.distribution import DistributionLocator
from pants.java.executor import SubprocessExecutor
from pants.java.jar.jar_dependency import JarDependency
from pants.java.util import execute_java
class BenchmarkRun(JvmToolTaskMixin, JvmTask):
_CALIPER_MAIN = 'com.google.caliper.Runner'
@classmethod
def register_options(cls, register):
super(BenchmarkRun, cls).register_options(register)
register('--target', help='Name of the benchmark class. This is a mandatory argument.')
register('--memory', type=bool, help='Enable memory profiling.')
register('--debug', type=bool,
help='Run the benchmark tool with in process debugging.')
cls.register_jvm_tool(register,
'benchmark-tool',
classpath=[
# TODO (Eric Ayers) Caliper is old. Add jmh support?
# The caliper tool is shaded, and so shouldn't interfere with Guava 16.
JarDependency(org='com.google.caliper', name='caliper', rev='0.5-rc1'),
],
classpath_spec='//:benchmark-caliper-0.5',
main=cls._CALIPER_MAIN)
cls.register_jvm_tool(register,
'benchmark-agent',
classpath=[
JarDependency(org='com.google.code.java-allocation-instrumenter',
name='java-allocation-instrumenter',
rev='2.1',
intransitive=True),
],
classpath_spec='//:benchmark-java-allocation-instrumenter-2.1')
@classmethod
def subsystem_dependencies(cls):
return super(BenchmarkRun, cls).subsystem_dependencies() + (DistributionLocator,)
def __init__(self, *args, **kwargs):
super(BenchmarkRun, self).__init__(*args, **kwargs)
# TODO(Steve Gury):
# Find all the target classes from the Benchmark target itself
# https://jira.twitter.biz/browse/AWESOME-1938
if not self.get_options().target:
raise ValueError('Mandatory argument --target must be specified.')
self.args.insert(0, self.get_options().target)
if self.get_options().memory:
self.args.append('--measureMemory')
if self.get_options().debug:
self.args.append('--debug')
def _is_benchmark(self, target):
return isinstance(target, Benchmark)
def execute(self):
targets = self.context.targets(predicate=self._is_benchmark)
if not targets:
raise TaskError('No jvm targets specified for benchmarking.')
# For rewriting JDK classes to work, the JAR file has to be listed specifically in
# the JAR manifest as something that goes in the bootclasspath.
    # The MANIFEST lists a jar 'allocation.jar', which is why we have to rename it.
agent_tools_classpath = self.tool_classpath('benchmark-agent')
agent_jar = agent_tools_classpath[0]
allocation_jar = os.path.join(os.path.dirname(agent_jar), "allocation.jar")
# TODO(Steve Gury): Find a solution to avoid copying the jar every run and being resilient
# to version upgrade
shutil.copyfile(agent_jar, allocation_jar)
os.environ['ALLOCATION_JAR'] = str(allocation_jar)
benchmark_tools_classpath = self.tool_classpath('benchmark-tool')
# Collect a transitive classpath for the benchmark targets.
classpath = self.classpath(targets, benchmark_tools_classpath)
java_executor = SubprocessExecutor(DistributionLocator.cached())
exit_code = execute_java(classpath=classpath,
main=self._CALIPER_MAIN,
jvm_options=self.jvm_options,
args=self.args,
workunit_factory=self.context.new_workunit,
workunit_name='caliper',
workunit_labels=[WorkUnitLabel.RUN],
executor=java_executor,
create_synthetic_jar=self.synthetic_classpath)
if exit_code != 0:
raise TaskError('java {} ... exited non-zero ({})'.format(self._CALIPER_MAIN, exit_code))
|
landism/pants
|
src/python/pants/backend/jvm/tasks/benchmark_run.py
|
Python
|
apache-2.0
| 4,864
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
__version__ = '0.1.0'
|
kezabelle/django-thadminjones
|
thadminjones/__init__.py
|
Python
|
gpl-2.0
| 69
|
from django import forms
class SearchForm(forms.Form):
params = forms.CharField(
label='Search field',
widget=forms.TextInput(
attrs={
"class": "form-control input-lg",
"autofocus": "true",
"placeholder": "Enter disease or disease area (e.g., cancer and asthma)",
}))
from_year = forms.IntegerField(
initial="1982", max_value=2014, min_value=1455,
widget=forms.TextInput(
attrs={
"class": "form-control input_year",
}))
to_year = forms.IntegerField(
initial="2014", max_value=2014, min_value=1455,
widget=forms.TextInput(
attrs={
"class": "form-control input_year",
}))
|
rflbras/lmtt
|
stats/forms.py
|
Python
|
mit
| 783
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2017-03-28 15:27
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('laboratory', '0020_auto_20170116_1147'),
]
operations = [
migrations.AddField(
model_name='object',
name='imdg_code',
field=models.CharField(blank=True, choices=[('1', 'Explosives'), ('2', 'Gases'), ('3', 'Flammable liquids'), ('4', 'Flammable solids'), ('5', 'Oxidizing substances and organic peroxides'), ('6', 'Toxic and infectious substances'), ('7', 'Radioactive material'), ('8', 'Corrosive substances'), ('9', 'Miscellaneous dangerous substances and articles')], max_length=1, null=True),
),
]
|
solvo/organilab
|
src/laboratory/migrations/0021_object_imdg_code.py
|
Python
|
gpl-3.0
| 792
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2016 Taifxx
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
########## CALL:
### Import modules ...
from ext import *
import sync
## Get handle ...
try : HANDLE = int(sys.argv[1])
except : HANDLE = -1
PBTYPES_LIST = [tl(TAG_DLG_PBTT1), tl(TAG_DLG_PBTT2), tl(TAG_DLG_PBTT3), tl(TAG_DLG_PBTT4), tl(TAG_DLG_PBTTRAN)]
## Player ...
class CPlayer(xbmc.Player):
def __init__(self):
Core = addon.PCORE
if Core == 'Auto' : xbmc.Player.__init__( self, xbmc.PLAYER_CORE_AUTO )
elif Core == 'VideoPlayer' : xbmc.Player.__init__( self, xbmc.PLAYER_CORE_VideoPlayer )
elif Core == 'DVDPlayer' : xbmc.Player.__init__( self, xbmc.PLAYER_CORE_DVDPLAYER )
elif Core == 'MPlayer' : xbmc.Player.__init__( self, xbmc.PLAYER_CORE_MPLAYER )
elif Core == 'Custom' : xbmc.Player.__init__( self, addon.PCOREVAL )
else : xbmc.Player.__init__( self )
def wait_openlink(self, splash):
spath = setLower(splash)
wtime = 0
errc = 0
while True:
if self.isPlaying() and spath != setLower(self.getPlayingFile()) : break
wait(1); wtime += 1
if wtime > addon.DEDLPTIME : return False
return True
def wait_buffering(self):
wait(1)
buflevel = 0
while buflevel < 3:
_oldpos = self.getTime() if self.isPlaying() else 0
while self.isPlaying() and _oldpos == self.getTime(): wait(1)
buflevel += 1
def seek(self, pos):
if addon.WAITBSEEK : wait(addon.WAITBSEEK)
if self.isPlaying() : self.seekTime(pos)
#GUI.seekPlay(pos)
def try_ISP(self, strmurl, currentCont, skipgo):
if not skipgo : GUI.goTarget(strmurl)
wtime = 0
errorn = 0
while True:
if self.isPlaying() : break
if currentCont != LI.getCpath():
if currentCont != Empty : errorn = 1; break
wait(1); wtime += 1
if wtime > addon.DEDLPTIME : errorn = 2; break
if errorn : wait(1); GUI.back();
return errorn
def wait_folder(self, currentcont):
wtime = 0
lopen = False
while True:
if wtime > addon.DEDLPTIME : return 2
wait(1); wtime += 1
if not lopen:
if currentcont == LI.getCpath() : continue
else:
currentcont = LI.getCpath()
lopen = True
if not LI.itemsCount() : return 1
if self.isPlaying() : break
if currentcont != LI.getCpath() : return 2
return 0
def wait_or_stay(self, currentcont):
wtime = 0
lopen = False
while True:
if wtime > addon.DEDLPTIME : return 1
wait(1); wtime += 1
if not lopen:
if currentcont == LI.getCpath() : continue
else:
currentcont = LI.getCpath()
lopen = True
if self.isPlaying() : break
if currentcont != LI.getCpath() : return 1
return 0
## Get playback type (manual) ...
def pbTypeSelector(self, strmurl, url_prefix):
#xbmcplugin.endOfDirectory(HANDLE, True, False, False)
PBTYPES_LIST_L = PBTYPES_LIST + [tl(TAG_DLG_PBTT5)]
autodetect = False
tName = prefixToName(url_prefix)
GUI.dlgOk(tl(TAG_DLG_PBT1), title=tName)
result = GUI.dlgSel(PBTYPES_LIST_L, tl(TAG_DLG_PBT2))
if result == 5:
autodetect = True
GUI.msg(tl(TAG_DLG_PBTAD1), tl(TAG_DLG_PBTAD2))
wait(2)
currentCont = LI.getCpath()
GUI.goTarget(strmurl)
PTYPE = 0
wtime = 0
wplayback = False
while True:
if self.isPlaying() : wplayback = True; break
if currentCont != LI.getCpath():
if currentCont != Empty:
if LI.itemsCount() > 0 : PTYPE = 3; break
else : break
wait(1); wtime += 1
if wtime > 30:
if GUI.dlgYn(tl(TAG_DLG_PBTADTIMEO), title=tName) : wtime = 0
else : break
if not PTYPE:
if not wplayback:
GUI.back()
PTYPE = 1
else : PTYPE = 2
else : GUI.back()
else : PTYPE = result + 1
wait(1)
if PTYPE == 1 : GUI.dlgOk(tl(TAG_DLG_PBTADTCLAS), title=tName)
elif PTYPE == 2 :
if autodetect == False : GUI.goTarget(strmurl)
GUI.dlgOk(tl(TAG_DLG_PBTADTISP), title=tName)
elif PTYPE == 3 : GUI.dlgOk(tl(TAG_DLG_PBTADTFOLD), title=tName)
elif PTYPE == 4 : GUI.dlgOk(tl(TAG_DLG_PBTALT), title=tName)
elif PTYPE == 5 : GUI.dlgOk(tl(TAG_DLG_PBTTRANI), title=tName)
return PTYPE
## Emergency running ...
def simplerun(strmurl):
listitem = xbmcgui.ListItem (path=strmurl)
listitem.setProperty('IsPlayable', 'true')
xbmcplugin.setResolvedUrl(HANDLE, True, listitem)
GUI.msg(tl(TAG_ERR_DEFEPS))
## Run splash video ...
def runsplash(medinfo, infpar):
splashPath = DOS.join(addon.path, *TAG_PAR_SPLASH_FILE)
splashLI = xbmcgui.ListItem (path=splashPath)
splashLI.setProperty('IsPlayable', 'true')
splashLI.setArt(medinfo.art)
splashLI.setIconImage(medinfo.img)
splashLI.setThumbnailImage(medinfo.img)
splashLI.setInfo('video', infpar)
splashLI.setInfo('video', {'Title':Space})
xbmcplugin.setResolvedUrl(HANDLE, True, splashLI)
del splashLI
return splashPath
## Main function ...
def callSTRM(strmtype, strmurl, strmfile):
## Stop theme ...
GUI.stopPlay()
wait(1)
## Exit if empty URL ...
if not strmurl : GUI.dlgOk(tl(TAG_ERR_DEDLINK)); return
## Get playback main type (0 - predefined) ...
SPBMETHOD = addon.PBMETHOD
if SPBMETHOD == 'Classic only' : PBMETOD = 1
elif SPBMETHOD == 'Alternate only (CORE dependent)' : PBMETOD = 2
else : PBMETOD = 0
## Init ...
PTYPE = 0
player = CPlayer()
prePath = LIB.tvsf if strmtype == str(TAG_TYP_TVS) else LIB.mov
#print (' @@@@@ '+prePath+' :: '+strmfile+' :: '+DOS.join(prePath, strmfile))
## Check playback launching from Kodi library ...
if not DOS.exists(DOS.join(prePath, strmfile)):
## If not from library ...
simplerun(strmurl)
del player
return
else:
## Get strm file ...
strmfileS = DOS.getdir(strmfile)
strmfldrS = DOS.getdir(DOS.gettail(strmfile))
## PreInit values ...
listitem = None
pli = Empty
try : playlist = xbmc.PlayList(xbmc.PLAYLIST_VIDEO)
except : playlist = Empty
## Try get media info ...
try:
## Get medinfo ...
medinfo = CMedInfo(strmfldrS, strmfileS, strmtype)
## Init main listitem (playable element) ...
listitem = xbmcgui.ListItem (path=strmurl)
listitem.setProperty('IsPlayable', 'true')
listitem.setArt(medinfo.art)
listitem.setIconImage(medinfo.img)
listitem.setThumbnailImage(medinfo.img)
## PreInit values ...
splashPath = Empty
skipgo = False
keepcontainer = LI.getCpath()
## Transmit information to player ...
if strmtype == str(TAG_TYP_TVS):
infpar = {'Title': medinfo.title, 'Genre': medinfo.genre, 'Year': medinfo.year, 'Rating': medinfo.rating, 'Plot': medinfo.plot,
'Country': medinfo.country, 'tvshowtitle': medinfo.showtitle, 'director': medinfo.director,
'votes': medinfo.votes, 'mpaa': medinfo.mpaa, 'studio': medinfo.studio, 'writer': medinfo.writer, 'season': medinfo.season,
'episode': medinfo.episode, 'originaltitle': medinfo.originaltitle, 'premiered': medinfo.date, 'aired': medinfo.date,
'date': medinfo.date, 'cast': medinfo.cast, 'castandrole': medinfo.castandrole}
else:
infpar = {'Title': medinfo.title, 'Genre': medinfo.genre, 'Year': medinfo.year, 'Rating': medinfo.rating, 'Plot': medinfo.plot,
'Country': medinfo.country, 'director': medinfo.director,
'votes': medinfo.votes, 'mpaa': medinfo.mpaa, 'studio': medinfo.studio, 'writer': medinfo.writer,
'originaltitle': medinfo.originaltitle, 'premiered': medinfo.date, 'aired': medinfo.date,
'date': medinfo.date, 'cast': medinfo.cast, 'castandrole': medinfo.castandrole}
## Clear playlist if movie was running ...
if playlist : playlist.clear()
listitem.setInfo('video', infpar)
## Emergency running ...
except:
simplerun(strmurl)
del player
return
## PLAYBACK TYPES:
url_prefix = getURLPrefix(strmurl)
### Predefined type ...
if PBMETOD == 0:
if url_prefix:
pTable = playersTable(strmurl)
PTYPE = pTable.getPType()
if PTYPE == -1:
splashPath = runsplash(medinfo, infpar)
wait(1); player.stop()
PTYPE = player.pbTypeSelector(strmurl, url_prefix)
pTable.setPType(url_prefix, PTYPE)
if PTYPE == 1 : del listitem, player; return
elif PTYPE == 2 : skipgo = True
else : PTYPE = 1
### Classic type ...
if PBMETOD == 1 or PTYPE == 1:
xbmcplugin.setResolvedUrl(HANDLE, True, listitem)
### ISP type ...
if PTYPE == 2:
if not splashPath:
splashPath = runsplash(medinfo, infpar)
wait(1); player.stop()
wait(1); errorn = player.try_ISP(strmurl, keepcontainer, skipgo)
if errorn:
if errorn == 1 : GUI.dlgOk(tl(TAG_ERR_INCPBTYPE) % (prefixToName(url_prefix) if url_prefix else tl(TAG_DLG_NPDIRL)), title=medinfo.title)
if errorn == 2 : GUI.dlgOk(tl(TAG_ERR_DEDLINK), title=medinfo.title)
del listitem, player
return
### Folder type ...
if PTYPE == 3:
if not splashPath:
splashPath = runsplash(medinfo, infpar)
wait(1); player.stop()
wait(1); GUI.goTarget(strmurl)
### Alternate type ...
if PBMETOD == 2 or PTYPE == 4:
if not splashPath:
splashPath = runsplash(medinfo, infpar)
wait(1); player.stop()
wait(1); player.play(strmurl, listitem)
### ISPSAW
if PTYPE == 5:
pli = getPLI(playlist)
cwnd = xbmcgui.getCurrentWindowId()
if not splashPath:
splashPath = runsplash(medinfo, infpar)
wait(1); player.stop()
xbmc.executebuiltin('ActivateWindow(%s, %s)' % (cwnd, strmurl))
## Wait link opening for classic and alternate types ...
if PTYPE not in [2, 3, 5] and not player.wait_openlink(splashPath):
GUI.dlgOk(tl(TAG_ERR_DEDLINK), title=medinfo.title)
del listitem, player
return
## Wait link opening for folder type ...
elif PTYPE == 3:
errorn = player.wait_folder(keepcontainer)
if errorn:
if errorn == 1:
GUI.dlgOk(tl(TAG_ERR_INCPBTYPE) % (prefixToName(url_prefix) if url_prefix else tl(TAG_DLG_NPDIRL)), title=medinfo.title)
if player.isPlaying() : player.stop()
if errorn == 2 : GUI.dlgOk(tl(TAG_ERR_DEDLINK), title=medinfo.title)
del listitem, player
return
## Auto back to video library ...
elif PTYPE == 5:
errorn = player.wait_or_stay(keepcontainer)
xbmc.executebuiltin('ActivateWindow(%s, %s)' % (cwnd, keepcontainer))
setPLI(playlist, pli)
if errorn:
GUI.dlgOk(tl(TAG_ERR_DEDLINK), title=medinfo.title)
del listitem, player
return
'''
## Forced closing dialogs ...
eodgenMethod = addon.EODGENM
if eodgenMethod == 'Suppression' : wait(3); GUI.dlgOk(tl(TAG_DLG_SUPPRES)); wait(3); GUI.closeDlgs(); wait(3)
'''
## Set focus to player ...
wait(2); GUI.FocusPayer()
## Run Playback Control ...
wtime1 = 0
possleep = True
if addon.PLAYBCONT:
pnTimer = 5
### Resume ...
pbSet = False
fargs = timefromsec(medinfo.pos, TAG_PAR_TIMENUMFORMAT, TAG_PAR_TIMESEP)
if addon.RESDLG and medinfo.pos and not addon.AUTORES:
pbm = GUI.dlgResume([tl(TAG_MNU_SFRBEGIN), tl(TAG_MNU_RFROM) % (fargs[0],fargs[1],fargs[2],fargs[3],fargs[4]), tl(TAG_MNU_CLOSEDLG)], title=medinfo.title)
if pbm == 0 : pbSet = True; medinfo.resetpos()
elif pbm == 1 : pbSet = True
elif pbm == 2 : pbSet = False
pnTimer = 0
if (addon.AUTORES and medinfo.pos) or pbSet : player.seek(medinfo.pos)
### Buffering ...
player.wait_buffering()
### Get plaing file for control ...
if player.isPlaying() : playing_file = player.getPlayingFile()
else : del listitem, player; return
if addon.USENOWPLAY:
PBM = [PBTYPES_LIST[0], PBTYPES_LIST[3]]
inf = tl(TAG_DLG_NPINFO).replace('**', NewLine) % (medinfo.title, medinfo.year, tl(TAG_DLG_NPINFRAT), medinfo.rating, tl(TAG_DLG_NPINFSRC),
prefixToName(url_prefix) if url_prefix else tl(TAG_DLG_NPDIRL),
tl(TAG_DLG_NPINFPBT), PBTYPES_LIST[PTYPE-1] if PTYPE else PBM[PBMETOD-1])
nowPlay(inf, medinfo.img, addon.NOWPLAYTIME, pnTimer, player.isPlaying)
### Keep playback control while playback ...
cp_totime = 0; cp_time = 0; cp_watched = 0
while player.isPlaying():
### Get playback position (skip if skipping by time is ON) ...
if wtime1 >= addon.POSUPD and not possleep:
### Cancel if file name was changed ...
if playing_file != player.getPlayingFile() : break
cp_time = player.getTime()
cp_totime = player.getTotalTime()
cp_watched = medinfo.setpos(cp_time, cp_totime, addon.WPERC)
wtime1 = 0
### Inc. timer ...
wait(1); wtime1 += 1
### Turn off skipping by time ...
if wtime1 > addon.POSSLEEP : possleep = False
## End operations ...
if PTYPE == 5 and playlist and cp_totime-cp_time < addon.POSUPD : player.play(playlist)
wait(1); del listitem, player
if PTYPE == 3:
wait(1)
if keepcontainer != LI.getCpath() : GUI.back()
#if addon.USEWS and addon.ACSSTKN and cp_time : sync.dbxSync().watchSync(upload=True)
if addon.USEWS and addon.ACSSTKN and (cp_time or cp_watched):
dbx = sync.dbxSync()
dbx.putChangesRecord(strmfile, cp_watched, cp_time, cp_totime, True if strmtype == str(TAG_TYP_MOV) else False)
dbx.sendWatchedInfo()
del dbx
#GUI.msg('>> END')
## Playlist restoring ...
def getPLI(playlist):
if not playlist : return Empty
vidItems = LI.vidItems()
return vidItems.getOnlyNexts()
def setPLI(playlist, pli):
if not pli : return
playlist.clear()
for itm in pli:
playlist.add(itm)
def nowPlay(text, img=Empty, showtime=5, pretime=0, stopIf=None):
GUI.Thrd(GUI.dlgNowPlayX, text, img, showtime, pretime, stopIf)
## Arguments Parsing ...
def parseArgs():
try : argv1 = sys.argv[1]
except : argv1 = Empty
argv1 = int(argv1.replace(TAG_PAR_ACTION, Empty)) if argv1 and argv1.startswith(TAG_PAR_ACTION) else Empty
if argv1 : return [argv1]
kwargs = get_params()
if not kwargs : return [TAG_CND_NOACTION]
callSTRM(**kwargs)
return [TAG_CND_PLAY]
## Get add-on params (as link) ...
def get_params():
param=[]
try : paramstring=sys.argv[2]
except : paramstring=Empty
if len(paramstring)>=2:
params=sys.argv[2]
cleanedparams=params.replace('?#','')
if (params[len(params)-1]=='/'):
params=params[0:len(params)-2]
pairsofparams=cleanedparams.split('&#')
param={}
for i in range(len(pairsofparams)):
splitparams={}
splitparams=pairsofparams[i].split('=#')
if (len(splitparams))==2:
param[splitparams[0]]=splitparams[1]
return param
## Media info class ...
class CMedInfo:
def __init__(self, fname, fname2, stype):
self.fname = fname
self.fname2 = fname2
self.type = int(stype)
self.id = 0
self.fid = 0
self.w = 0
self.pos = 0
self.path = Empty
self.patheps = Empty
self.title = Empty
self.year = Empty
self.rating = Empty
self.genre = Empty
self.plot = Empty
self.img = Empty
self.season = Empty
self.votes = Empty
self.episode = Empty
self.showtitle = Empty
self.writer = Empty
self.originaltitle = Empty
self.studio = Empty
self.mpaa = Empty
self.director = Empty
self.date = Empty
self.art = Empty
self.cast = []
self.castandrole = []
self.cmd_mov = '{"jsonrpc": "2.0", "method": "VideoLibrary.GetMovies", "params": { "properties": ["file", "thumbnail", "title", "year", "rating", "genre", "plot", "country", "director", "originaltitle", "writer", "studio", "mpaa", "votes", "cast", "art"] }, "id": 1}'
self.cmd_tvs_tvs = '{"jsonrpc": "2.0", "method": "VideoLibrary.GetTVShows", "params": { "properties": ["file", "genre", "year", "studio", "mpaa"] }, "id": 1}'
self.cmd_tvs_eps = '{"jsonrpc": "2.0", "method": "VideoLibrary.GetEpisodes", "params": { "properties": ["season", "plot", "file", "rating", "votes", "episode", "showtitle", "writer", "originaltitle", "director", "firstaired", "art", "cast"], "tvshowid":%s }, "id": 1}'
self.cmd_tvs_sea = '{"jsonrpc": "2.0", "method": "VideoLibrary.GetSeasons", "params": { "properties": ["season", "thumbnail"], "tvshowid":%s }, "id": 1}'
self.cmd_mov_w = '{"jsonrpc": "2.0", "method": "VideoLibrary.GetMovieDetails", "params": { "properties": ["playcount", "resume"], "movieid":%s }, "id": 1}'
self.cmd_tvs_w = '{"jsonrpc": "2.0", "method": "VideoLibrary.GetEpisodeDetails", "params": { "properties": ["playcount", "resume"], "episodeid": %s }, "id": 1}'
self.cmd_set_mov = '{"jsonrpc": "2.0", "method": "VideoLibrary.SetMovieDetails", "params": {"movieid":%s, "resume": {"position":%s, "total":%s} }, "id": 1}'
self.cmd_set_tvs = '{"jsonrpc": "2.0", "method": "VideoLibrary.SetEpisodeDetails", "params": {"episodeid":%s, "resume": {"position":%s, "total":%s} }, "id": 1}'
self.cmd_set_movw = '{"jsonrpc": "2.0", "method": "VideoLibrary.SetMovieDetails", "params": {"movieid":%s, "playcount": %s }, "id": 1}'
self.cmd_set_tvsw = '{"jsonrpc": "2.0", "method": "VideoLibrary.SetEpisodeDetails", "params": {"episodeid":%s, "playcount": %s }, "id": 1}'
if self.type == TAG_TYP_TVS : self.gettvs()
elif self.type == TAG_TYP_MOV : self.getmov()
def getmov(self):
self.path = DOS.join(LIB.mov, self.fname2)
movList = self.js(self.cmd_mov, 'movies')
for mov in movList:
if DOS.compath ( mov['file'], self.path ) :
self.img = mov['thumbnail']
self.title = mov['title']
self.year = mov['year']
self.rating = mov['rating']
self.genre = mov['genre']
self.plot = mov['plot']
self.fid = mov['movieid']
self.country = mov['country']
self.director = mov['director']
self.originaltitle = mov['originaltitle']
self.writer = mov['writer']
self.studio = mov['studio']
self.mpaa = mov['mpaa']
self.votes = mov['votes']
self.art = mov['art']
cast = mov['cast']
for itm in cast:
self.cast.append(itm['name'])
if itm['role'] : self.castandrole.append(itm['name'] + tl(TAG_TTL_CAST) % (itm['role']))
break
mvd = self.js(self.cmd_mov_w % self.fid, 'moviedetails')
self.w = mvd['playcount']
self.pos = mvd['resume']['position']
def gettvs(self):
self.path = LIB.tvs(self.fname)
self.patheps = DOS.join(self.path, self.fname2)
tvsList = self.js(self.cmd_tvs_tvs, 'tvshows')
for tvs in tvsList:
if DOS.compath ( tvs['file'], self.path ) :
self.id = tvs['tvshowid']
self.genre = tvs['genre']
self.year = tvs['year']
self.country = Empty
self.studio = tvs['studio']
self.mpaa = tvs['mpaa']
break
epsList = self.js(self.cmd_tvs_eps % (self.id), 'episodes')
for eps in epsList:
if DOS.compath ( eps['file'], self.patheps ) :
self.title = eps['label']
self.rating = eps['rating']
self.plot = eps['plot']
self.season = eps['season']
self.fid = eps['episodeid']
self.votes = eps['votes']
self.episode = eps['episode']
self.showtitle = eps['showtitle']
self.writer = eps['writer']
self.originaltitle = eps['originaltitle']
self.director = eps['director']
self.date = eps['firstaired']
self.art = eps['art']
cast = eps['cast']
for itm in cast:
self.cast.append(itm['name'])
if itm['role'] : self.castandrole.append(itm['name'] + tl(TAG_TTL_CAST) % (itm['role']))
break
seaList = self.js(self.cmd_tvs_sea % (str(self.id)), 'seasons')
for sea in seaList:
if sea['season'] == self.season : self.img = sea['thumbnail']
epd = self.js(self.cmd_tvs_w % self.fid, 'episodedetails')
self.w = epd['playcount']
self.pos = epd['resume']['position']
def js(self, cmd, key):
return eval(xbmc.executeJSONRPC(cmd))['result'][key]
def setpos(self, pos, total, wperc=0):
if self.w : return 1
if self.type == TAG_TYP_TVS : cmd = self.cmd_set_tvs; cmd2 = self.cmd_set_tvsw
elif self.type == TAG_TYP_MOV : cmd = self.cmd_set_mov; cmd2 = self.cmd_set_movw
watched = 0
if addon.WCHF and wperc:
if total > 0 and 1.0*pos/total*100 > wperc :
xbmc.executeJSONRPC(cmd2 % (str(self.fid), '1'))
pos = total = 0
watched = 1
#else : xbmc.executeJSONRPC(cmd2 % (str(self.fid), '0'))
self._setpos(cmd, pos, total)
return watched
def _setpos(self, cmd, pos, total):
xbmc.executeJSONRPC(cmd % (str(self.fid), str(pos), str(total)))
def resetpos(self):
self.pos = 0
self.setpos(0, 0, 0)
class playersTable():
def __init__(self, strmurl=Empty):
self._SEP1 = TAG_PAR_TVSPACK_LSEP
self._SEP2 = TAG_PAR_TVSPACK_SSEP + NewLine
self._file_name = TAG_PAR_PTYPETABLE_FILE
self._path = addon.libpath
self.strmurl = strmurl
self.pTable = []
self.loadTable()
def getPType(self):
if not self.strmurl : return -2
if not self.pTable : return -1
for plug, num in self.pTable:
if self.strmurl.startswith(plug) : return int(num)
return -1
def setPType(self, plug, ptype):
newType = True
if self.pTable:
for idx, rec in enumerate(self.pTable):
if rec[0] == plug:
self.pTable[idx][1] = str(ptype)
newType = False
break
if newType : self.pTable.append([plug, str(ptype)])
self.saveTable()
def removePType(self, plug):
if self.pTable:
for idx, rec in enumerate(self.pTable):
if rec[0] == plug:
self.pTable.pop(idx)
break
self.saveTable()
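    # Records are persisted as '<plugin><_SEP1><player_type>' entries joined by _SEP2 (see saveTable below)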
def loadTable(self):
unpack = DOS.file(self._file_name, self._path, fType=FRead)
if unpack == -1 or not unpack : return
self.pTable = [rec.split(self._SEP1) for rec in unpack.split(self._SEP2)]
def saveTable(self):
pack = self._SEP2.join([self._SEP1.join([rec1, rec2]) for rec1, rec2 in self.pTable])
DOS.file(self._file_name, self._path, pack, FWrite)
def getall(self):
if self.pTable : return [rec[0] for rec in self.pTable], [rec[1] for rec in self.pTable]
else : return Empty, Empty
## Get source add-on name (URL prefix) ...
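## e.g. 'plugin://plugin.video.example/play?id=1' -> 'plugin://plugin.video.example/' ('plugin.video.example' is just an illustrative name)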
def getURLPrefix(strmurl):
prefix = Empty
PPREFIX = 'plugin://'
if strmurl.startswith(PPREFIX):
tmpURL = strmurl.replace(PPREFIX, Empty)
fidx = tmpURL.find('/')
if fidx > 0 : prefix = PPREFIX + tmpURL[:fidx+1]
return prefix
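## Turn a plugin URL prefix into a readable name, e.g. 'plugin://plugin.video.example/' -> 'Example' (assuming setCapAll capitalizes each word)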
def prefixToName(url_prefix):
turl = url_prefix
turl = turl.replace('plugin://plugin.', Empty)
turl = turl.replace('video.', Empty)
turl = turl.replace('/', Empty)
turl = turl.replace('.', Space)
turl = setCapAll(turl)
return turl
|
Taifxx/xxtrep
|
context.addtolib/resources/lib/call.py
|
Python
|
gpl-3.0
| 28,359
|
import factory
from django.utils import timezone
from edc_constants.constants import YES, NO
from td_maternal.models import MaternalEligibility
class MaternalEligibilityFactory(factory.DjangoModelFactory):
class Meta:
model = MaternalEligibility
report_datetime = timezone.now()
age_in_years = 26
|
botswana-harvard/tshilo-dikotla
|
td_maternal/tests/factories/maternal_eligibility_factory.py
|
Python
|
gpl-2.0
| 324
|
def test_thisIsAFunction_first_test(self):
# Given: these special conditions
obj = aClass()
parameter1 = "aString"
parameter2 = "anotherString"
# When: this specific thing happens
result = obj.thisIsAFunction(parameter1, parameter2)
# Then: this thing happens
expected = "expected String"
self.assertEqual(expected, result)
def test_thisIsAFunction_testing_this_other_thing(self):
/** Given: this other circumstance,
* and when the thing can't be controlled
*/
obj = aClass()
/** When: thisIsAFunction is called,
* but the main thing was not initialized
*/
result = obj.thisIsAFunction()
/** Then: we are in a pickle, because
* we have no idea if this other thing happened
*/
expected = False
self.assertEqual(expected, result)
def test_thisIsAFunction_third_test (self):
# Given: other one line conditions
obj = aClass()
parameter1 = "aString"
parameter2 = "anotherString"
# When: this setting is not set
result = obj.thisIsAFunction(parameter1, parameter2)
# Then: this other thing happens
expected = "expected String"
self.assertEqual(expected, result)
public function test_thisIsAFunction_php_fct($parameter1) {
// Given: One Line php comment
$obj = aClass();
$parameter1 = "aString";
$parameter2 = "anotherString";
// When: php second comment
$result = $obj->thisIsAFunction($parameter1, $parameter2);
// Then: php thired comment
$expected = "expected String";
$this->assertEqual($expected, $result);
}
|
anconaesselmann/ClassesAndTests
|
classes_and_testsTest/DocumentationFromUnitTestsTestData/TestData.py
|
Python
|
mit
| 1,992
|
#!/usr/bin/python
#
# Copyright 2008-2010 WebDriver committers
# Copyright 2008-2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from selenium import webdriver
from selenium.test.selenium.webdriver.common import visibility_tests
from selenium.test.selenium.webdriver.common.webserver import SimpleWebServer
def setup_module(module):
webserver = SimpleWebServer()
webserver.start()
IeVisibilityTests.webserver = webserver
IeVisibilityTests.driver = webdriver.Ie()
class IeVisibilityTests(visibility_tests.VisibilityTests):
pass
def teardown_module(module):
IeVisibilityTests.driver.quit()
IeVisibilityTests.webserver.stop()
|
gx1997/chrome-loongson
|
third_party/webdriver/python/test/selenium/webdriver/ie/test_ie_visibility_tests.py
|
Python
|
bsd-3-clause
| 1,172
|
'''implements some basic number theory stuff: gcd, euler-phi function, inverts numbers modulo n.
'''
import functools
big_primes = 512927357,674506081
def find_a_factor(n):
for i in xrange(2, n):
if n%i == 0:
return n/i, i
return n, "Prime!"
def invert(a,n):
# multiplicative inverse of a mod n
return get_generator(a,n)[1] % n
def phi(n):
    # euler-phi function: the number of 0<k<n s.t. gcd(k,n)=1
return sum(gcd(i,n)==1
for i in xrange(1,n))
# This memoize decorator is from the PythonDecoratorLibrary:
# https://wiki.python.org/moin/PythonDecoratorLibrary
#
# It caches the results of coprime. This could lead us into memory
# issues, but probably speeds up the code. (Might no longer be
# relevant with the switch from old_coprime to coprime....)
def memoize(obj):
cache = obj.cache = {}
@functools.wraps(obj)
def memoizer(*args, **kwargs):
key = str(args) + str(kwargs)
if key not in cache:
cache[key] = obj(*args, **kwargs)
return cache[key]
return memoizer
@memoize
def old_coprime(a,b):
for i in xrange(2,min(a,b)+1):
if not a%i and not b%i:
return False
return True
@memoize
def coprime(a,b):
return gcd(a,b)==1
def divide(a,b):
return a/b, a%b
def euclidean_alg(a,b):
out = []
r = 1
while r>0:
q,r = divide(a,b)
out.append((a,q,b,r))
a,b = b,r
out.pop()
return out
def get_generator(a,b):
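    # extended Euclidean algorithm: returns (a, x, y, b) with a*x + y*b == gcd(a, b)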
eqs = euclidean_alg(a,b)
if not eqs:
return 0
greatest_cd = eqs[-1][-1]
eq = set_eq2remainder(eqs.pop())
while eqs:
eq2 = eqs.pop()
eq = resort(eq2,eq)
# assert lin_comb(*eq)==greatest_cd
return eq
def set_eq2remainder(eq):
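    # rewrite the division step (a, q, b, r), i.e. a = q*b + r, as the linear combination a*1 + (-q)*b == r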
eq = list(eq)
eq = eq[:3]
eq.insert(1,1)
eq[2]*=-1
return eq
def resort((a,q,b,r),(b1,s,t,r1)):
assert b==b1
assert r==r1
return a,t,(s-t*q),b
def lin_comb(a,b,c,d):
return a*b + c*d
def gcd(*a):
return reduce(gcd_inner, a)
def gcd_inner(a,b):
while b!=0:
t = b
b = a%b
a = t
return a
def run_tests():
assert lin_comb(*get_generator(173,101))==1
assert (invert(101,173)*101)%173==1
import random
for _ in xrange(1000):
a,b = random.randint(1,100),random.randint(1,100)
if a%b==0: continue
assert (invert(a,b)*a)%b == gcd(a,b)
def r(): return random.randint(1,10**4),random.randint(1,10**4)
random.seed(0)
s = sum( coprime(*r()) for _ in xrange(10**4))
random.seed(0)
t = sum(old_coprime(*r()) for _ in xrange(10**4))
assert s==t
if __name__=="__main__":
run_tests()
|
gabegaster/connectedness
|
number_theory.py
|
Python
|
mit
| 2,698
|
#!/usr/bin/python
# --------------------------------------------------------------------------
#
# MIT License
#
# --------------------------------------------------------------------------
import pytest
import pickle
from cybld import cybld_ipc_message
# --------------------------------------------------------------------------
class TestCyBldIpcMessage:
def test_cybld_cmd_exec(self):
sut = cybld_ipc_message.CyBldIpcMessage()
assert(sut.cmd_type == cybld_ipc_message.CyBldIpcMessageType.unknown)
assert(sut.codeword is None)
assert(sut.cmd_number is None)
assert(sut.setcmd_param is None)
sut.cmd_type = cybld_ipc_message.CyBldIpcMessageType.exec_cmd
assert(sut.cmd_type == cybld_ipc_message.CyBldIpcMessageType.exec_cmd)
sut.codeword = "mysecret"
assert(sut.codeword == "mysecret")
def test_cybld_cmd_set(self):
sut = cybld_ipc_message.CyBldIpcMessage()
assert(sut.cmd_type == cybld_ipc_message.CyBldIpcMessageType.unknown)
assert(sut.codeword is None)
assert(sut.cmd_number is None)
assert(sut.setcmd_param is None)
sut.cmd_type = cybld_ipc_message.CyBldIpcMessageType.set_cmd
assert(sut.cmd_type == cybld_ipc_message.CyBldIpcMessageType.set_cmd)
sut.setcmd_param = "newcmd"
assert(sut.setcmd_param == "newcmd")
sut.codeword = "mysecret"
assert(sut.codeword == "mysecret")
    def test_cybld_ipc_message_pickle(self):
sut = cybld_ipc_message.CyBldIpcMessage()
sut.cmd_type = cybld_ipc_message.CyBldIpcMessageType.set_cmd
sut.cmd_number = 1
sut.setcmd_param = "param"
sut.codeword = "mysecret"
assert(sut.cmd_type == cybld_ipc_message.CyBldIpcMessageType.set_cmd)
assert(sut.cmd_number == 1)
assert(sut.setcmd_param == "param")
assert(sut.codeword == "mysecret")
pickled_sut = pickle.dumps(sut)
restored_sut = pickle.loads(pickled_sut)
assert(restored_sut.cmd_type == cybld_ipc_message.CyBldIpcMessageType.set_cmd)
assert(restored_sut.cmd_number == 1)
assert(restored_sut.setcmd_param == "param")
assert(restored_sut.codeword == "mysecret")
def test_cybld_cmd_sanity_checks(self):
with pytest.raises(Exception) as ex1:
sut = cybld_ipc_message.CyBldIpcMessage()
sut.cmd_type = cybld_ipc_message.CyBldIpcMessageType.exec_cmd
sut.cmd_type = cybld_ipc_message.CyBldIpcMessageType.exec_cmd
assert 'AssertionError' in str(ex1)
with pytest.raises(Exception) as ex2:
sut = cybld_ipc_message.CyBldIpcMessage()
sut.cmd_type = cybld_ipc_message.CyBldIpcMessageType.exec_cmd
sut.setcmd_param = "newcmd"
assert 'AssertionError' in str(ex2)
with pytest.raises(Exception) as ex3:
sut = cybld_ipc_message.CyBldIpcMessage()
sut.codeword = "mysecret"
sut.codeword = "myothersecret"
assert 'AssertionError' in str(ex3)
|
dcvetko/cybld
|
tests/test_cybld_ipc_message.py
|
Python
|
mit
| 3,079
|
import os
import jsonschema
import pytest
import numpy as np
import numpy.testing as npt
from io import StringIO
import logging
import pandas as pd
from pyxrf.xanes_maps.xanes_maps_api import (
_build_xanes_map_api,
_build_xanes_map_param_default,
_build_xanes_map_param_schema,
build_xanes_map,
check_elines_activation_status,
adjust_incident_beam_energies,
subtract_xanes_pre_edge_baseline,
_save_spectrum_as_csv,
)
from pyxrf.core.yaml_param_files import (
_parse_docstring_parameters,
_verify_parsed_docstring,
create_yaml_parameter_file,
read_yaml_parameter_file,
)
def _get_xanes_energy_axis():
r"""
Generates a reasonable range of energy values for testing of XANES processing.
The set of energies contains values in pre- and post-edge regions.
"""
eline = "Fe_K"
    eline_activation_energy = 7.1115  # keV, an approximate value that makes the test succeed
e_min, e_max, de = 7.05, 7.15, 0.001
incident_energies = np.mgrid[e_min : e_max : ((e_max - e_min) / de + 1) * 1j]
incident_energies = np.round(incident_energies, 5) # Nicer view for debugging
return incident_energies, eline, eline_activation_energy
def test_check_elines_activation_status():
r"""Tests for ``check_elines_activation_status``"""
incident_energies, eline, eline_activation_energy = _get_xanes_energy_axis()
activation_status = [_ >= eline_activation_energy for _ in incident_energies]
# Send incident energies as an array
activation_status_output = check_elines_activation_status(np.asarray(incident_energies), eline)
assert (
activation_status == activation_status_output
), "Activation status of some energy values is determined incorrectly"
# Send incident energies as a list
activation_status_output = check_elines_activation_status(list(incident_energies), eline)
assert (
activation_status == activation_status_output
), "Activation status of some energy values is determined incorrectly"
# Empty list of energy should yield empty list of flags
activation_status_output = check_elines_activation_status([], eline)
assert not activation_status_output, "Empty list of incident energies is processed incorrectly"
def test_adjust_incident_beam_energies():
r"""Tests for ``adjust_incident_beam_energies``"""
incident_energies, eline, eline_activation_energy = _get_xanes_energy_axis()
incident_energies = np.random.permutation(incident_energies)
threshold = np.min(incident_energies[incident_energies >= eline_activation_energy])
ie_adjusted = np.clip(incident_energies, a_min=threshold, a_max=None)
# Send incident energies as an array
ie_adjusted_output = adjust_incident_beam_energies(np.asarray(incident_energies), eline)
np.testing.assert_almost_equal(
ie_adjusted_output, ie_adjusted, err_msg="Incident energies are adjusted incorrectly"
)
# Send incident energies as a list
ie_adjusted_output = adjust_incident_beam_energies(list(incident_energies), eline)
np.testing.assert_almost_equal(
ie_adjusted_output, ie_adjusted, err_msg="Incident energies are adjusted incorrectly"
)
def _get_sim_pre_edge_spectrum(incident_energies, eline_activation_energy, pre_edge_upper_keV, img_dims):
r"""
Generate spectrum for testing baseline removal function
``img_dims`` is the size of the image, e.g. [5, 10] is 5x10 pixel image.
The spectrum points are always placed along axis 0.
"""
n_pts = incident_energies.shape[0]
n_pixels = np.prod(img_dims)
n_pre_edge = np.sum(incident_energies < eline_activation_energy + pre_edge_upper_keV)
spectrum = np.random.rand(n_pts, n_pixels)
spectrum_no_base = spectrum
for n in range(n_pixels):
spectrum[n_pre_edge:n_pts, n] += 2
v_bs = np.median(spectrum[0:n_pre_edge, n])
spectrum_no_base[:, n] = spectrum[:, n] - v_bs
if n_pixels == 1:
spectrum = np.squeeze(spectrum, axis=1)
spectrum_no_base = np.squeeze(spectrum_no_base, axis=1)
else:
spectrum = np.reshape(spectrum, np.insert(img_dims, 0, n_pts))
spectrum_no_base = np.reshape(spectrum_no_base, np.insert(img_dims, 0, n_pts))
return spectrum, spectrum_no_base
def test_subtract_xanes_pre_edge_baseline1():
r"""Tests for ``subtract_xanes_pre_edge_baseline``"""
pre_edge_upper_keV_default = -0.01 # Relative location of the pre-edge upper boundary
pre_edge_upper_keV = pre_edge_upper_keV_default
incident_energies, eline, eline_activation_energy = _get_xanes_energy_axis()
# Tests with a single spectrum with default 'pre_edge_upper_keV'
# 1. Allow negative output values in the results with subtracted baseline
spectrum, spectrum_no_base = _get_sim_pre_edge_spectrum(
incident_energies, eline_activation_energy, pre_edge_upper_keV, [3, 5]
)
spectrum_out = subtract_xanes_pre_edge_baseline(spectrum, incident_energies, eline, non_negative=False)
np.testing.assert_almost_equal(
spectrum_out, spectrum_no_base, err_msg="Baseline subtraction from 1D XANES spectrum failed"
)
# 2. Non-negative value only (default)
spectrum_no_base = np.clip(spectrum_no_base, a_min=0, a_max=None)
spectrum_out = subtract_xanes_pre_edge_baseline(spectrum, incident_energies, eline)
np.testing.assert_almost_equal(
spectrum_out, spectrum_no_base, err_msg="Baseline subtraction from 1D XANES spectrum failed"
)
# Test for the case when no pre-edge points are detected (RuntimeError)
# Add 1 keV to the incident energy, in this case all energy values activate the line
spectrum, spectrum_no_base = _get_sim_pre_edge_spectrum(
incident_energies, eline_activation_energy, pre_edge_upper_keV, [3, 7]
)
with pytest.raises(RuntimeError, match="No pre-edge points were found"):
subtract_xanes_pre_edge_baseline(spectrum, incident_energies + 1, eline)
# fmt: off
@pytest.mark.parametrize("p_generate, p_test", [
({"pre_edge_upper_keV": -0.01, "img_dims": [1]}, {"pre_edge_upper_keV": -0.01}),
({"pre_edge_upper_keV": -0.008, "img_dims": [1]}, {"pre_edge_upper_keV": -0.008}),
({"pre_edge_upper_keV": -0.01, "img_dims": [7]}, {"pre_edge_upper_keV": -0.01}),
({"pre_edge_upper_keV": -0.01, "img_dims": [3, 7]}, {"pre_edge_upper_keV": -0.01}),
({"pre_edge_upper_keV": -0.01, "img_dims": [5, 3, 7]}, {"pre_edge_upper_keV": -0.01}),
({"pre_edge_upper_keV": -0.013, "img_dims": [5, 3, 7]}, {"pre_edge_upper_keV": -0.013}),
])
# fmt: on
def test_subtract_xanes_pre_edge_baseline2(p_generate, p_test):
r"""
Tests for 'subtract_xanes_pre_edge_baseline':
Successful tests for different combination of parameters
"""
incident_energies, eline, eline_activation_energy = _get_xanes_energy_axis()
spectrum, spectrum_no_base = _get_sim_pre_edge_spectrum(
incident_energies, eline_activation_energy, **p_generate
)
spectrum_no_base = np.clip(spectrum_no_base, a_min=0, a_max=None)
spectrum_out = subtract_xanes_pre_edge_baseline(spectrum, incident_energies, eline, **p_test)
np.testing.assert_almost_equal(
spectrum_out, spectrum_no_base, err_msg="Baseline subtraction from 1D XANES spectrum failed"
)
def test_parse_docstring_parameters__build_xanes_map_api():
"""Test that the docstring of ``build_xanes_map_api`` and ``_build_xanes_map_param_default``
are consistent: parse the docstring and match with the dictionary"""
parameters = _parse_docstring_parameters(_build_xanes_map_api.__doc__)
_verify_parsed_docstring(parameters, _build_xanes_map_param_default)
def test_create_yaml_parameter_file__build_xanes_map_api(tmp_path):
# Some directory
yaml_dirs = ["param", "file", "directory"]
yaml_fln = "parameter.yaml"
file_path = os.path.join(tmp_path, *yaml_dirs, yaml_fln)
create_yaml_parameter_file(
file_path=file_path,
function_docstring=_build_xanes_map_api.__doc__,
param_value_dict=_build_xanes_map_param_default,
dir_create=True,
)
param_dict_recovered = read_yaml_parameter_file(file_path=file_path)
# Validate the schema of the recovered data
jsonschema.validate(instance=param_dict_recovered, schema=_build_xanes_map_param_schema)
assert (
_build_xanes_map_param_default == param_dict_recovered
), "Parameter dictionary read from YAML file is different from the original parameter dictionary"
def test_build_xanes_map_1(tmp_path):
"""Basic test: creating new parameter file"""
# Successful test
yaml_dir = "param"
yaml_fln = "parameter.yaml"
yaml_fln2 = "parameter2.yaml"
# Create the directory
os.makedirs(os.path.join(tmp_path, yaml_dir), exist_ok=True)
file_path = os.path.join(tmp_path, yaml_dir, yaml_fln)
file_path2 = os.path.join(tmp_path, yaml_dir, yaml_fln2)
# The function will raise an exception if it fails
build_xanes_map(parameter_file_path=file_path, create_parameter_file=True, allow_exceptions=True)
# Also check if the file is there
assert os.path.isfile(file_path), f"File '{file_path}' was not created"
# Try creating file with specifying 'file_path' as the first argument (not kwarg)
build_xanes_map(file_path2, create_parameter_file=True, allow_exceptions=True)
# Also check if the file is there
assert os.path.isfile(file_path2), f"File '{file_path2}' was not created"
# Try to create file that already exists
with pytest.raises(IOError, match=r"File .* already exists"):
build_xanes_map(parameter_file_path=file_path, create_parameter_file=True, allow_exceptions=True)
# Try to create file that already exists with disabled exceptions
# (the function is expected to exit without raising the exception)
build_xanes_map(parameter_file_path=file_path, create_parameter_file=True)
# Try to create file in non-existing directory
file_path3 = os.path.join(tmp_path, "some_directory", "yaml_fln")
with pytest.raises(IOError, match=r"Directory .* does not exist"):
build_xanes_map(parameter_file_path=file_path3, create_parameter_file=True, allow_exceptions=True)
# Try creating the parameter file without specifying the path
with pytest.raises(RuntimeError, match="parameter file path is not specified"):
build_xanes_map(create_parameter_file=True, allow_exceptions=True)
# Specify scan ID instead of the path as the first argument
with pytest.raises(RuntimeError, match="parameter file path is not specified"):
build_xanes_map(1000, create_parameter_file=True, allow_exceptions=True)
def test_build_xanes_map_2():
"""Try calling the function with invalid (not supported) argument"""
with pytest.raises(RuntimeError, match=r"The function is called with invalid arguments:.*\n.*some_arg1"):
build_xanes_map(some_arg1=65, allow_exceptions=True)
with pytest.raises(
RuntimeError, match=r"The function is called with invalid arguments:.*\n.*some_arg1.*\n.*some_arg2"
):
build_xanes_map(some_arg1=65, some_arg2="abc", allow_exceptions=True)
def test_build_xanes_map_3():
"""Test passing arguments to ``_build_xanes_map_api``"""
# The function should fail, because the emission line is not specified
with pytest.raises(ValueError):
build_xanes_map(allow_exceptions=True)
# The function is supposed to fail, because 'xrf_subdir' is not specified
with pytest.raises(ValueError, match="The parameter 'xrf_subdir' is None or contains an empty string"):
build_xanes_map(emission_line="Fe_K", xrf_subdir="", allow_exceptions=True)
# The function should succeed if exceptions are not allowed
build_xanes_map(emission_line="Fe_K", xrf_subdir="")
def test_build_xanes_map_4(tmp_path):
"""Load parameters from YAML file"""
# Successful test
yaml_dir = "param"
yaml_fln = "parameter.yaml"
# Create the directory
os.makedirs(os.path.join(tmp_path, yaml_dir), exist_ok=True)
file_path = os.path.join(tmp_path, yaml_dir, yaml_fln)
# Create YAML file
build_xanes_map(parameter_file_path=file_path, create_parameter_file=True, allow_exceptions=True)
# Now start the program and load the file, the call should fail, because 'xrf_subdir' is empty str
with pytest.raises(ValueError, match="The parameter 'xrf_subdir' is None or contains an empty string"):
build_xanes_map(emission_line="Fe_K", parameter_file_path=file_path, xrf_subdir="", allow_exceptions=True)
# Repeat the same operation with exceptions disabled. The operation should succeed.
build_xanes_map(emission_line="Fe_K", parameter_file_path=file_path, xrf_subdir="")
# fmt: off
@pytest.mark.parametrize("kwargs", [
{},
{"wd": None, "msg_info": "header line 1"},
{"wd": ".", "msg_info": "line1\nline2"},
{"wd": "test_dir", "msg_info": "line1\n line2"},
{"wd": ("test_dir1", "test_dir2"), "msg_info": "line1\n line2"}])
# fmt: on
def test_save_spectrum_as_csv_1(tmp_path, caplog, kwargs):
"""Save data file, then read it and verify that the data match"""
fln = "output.csv"
os.chdir(tmp_path) # Make 'tmp_path' current directory
fln_full = fln
if ("wd" in kwargs) and (kwargs["wd"] is not None):
if isinstance(kwargs["wd"], tuple):
kwargs["wd"] = os.path.join(*kwargs["wd"])
fln_full = os.path.join(kwargs["wd"], fln)
fln_full = os.path.abspath(fln_full)
n_pts = 50
energy = np.random.rand(n_pts)
spectrum = np.random.rand(n_pts)
caplog.set_level(logging.INFO)
# Save CSV file
_save_spectrum_as_csv(fln=fln, energy=energy, spectrum=spectrum, **kwargs)
assert f"Selected spectrum was saved to file '{fln_full}'" in str(
caplog.text
), "Incorrect reporting of the event of the correctly saved file"
caplog.clear()
# Now read the CSV file as a string
with open(fln_full, "r") as f:
s = f.read()
# Check if the comment lines were written in the file
if "msg_info" in kwargs:
# Find the match for each line in 'msg_info'
for s_msg in kwargs["msg_info"].split("\n"):
assert f"# {s_msg}" in s, "Mismatch between original and loaded comment lines"
# Remove comments (lines that start with #, may contain spaces at the beginning of the string)
s = "\n".join([_ for _ in s.split("\n") if not _.strip().startswith("#")])
dframe = pd.read_csv(StringIO(s))
assert tuple(dframe.columns) == (
"Incident Energy, keV",
"XANES spectrum",
), f"Incorrect column labels: {tuple(dframe.columns)}"
data = dframe.values
energy2, spectrum2 = data[:, 0], data[:, 1]
npt.assert_array_almost_equal(energy, energy2, err_msg="Recovered energy array is different from the original")
npt.assert_array_almost_equal(
spectrum, spectrum2, err_msg="Recovered spectrum array is different from the original"
)
def test_save_spectrum_as_csv_2(tmp_path, caplog):
"""Failing cases"""
fln = "output.csv"
os.chdir(tmp_path) # Make 'tmp_path' current directory
n_pts = 50
energy = np.random.rand(n_pts)
spectrum = np.random.rand(n_pts)
caplog.set_level(logging.INFO)
_save_spectrum_as_csv(fln=fln, energy=None, spectrum=spectrum)
assert "The array 'energy' is None" in str(caplog.text)
caplog.clear()
_save_spectrum_as_csv(fln=fln, spectrum=spectrum)
assert "The array 'energy' is None" in str(caplog.text)
caplog.clear()
_save_spectrum_as_csv(fln=fln, energy=energy, spectrum=None)
assert "The array 'spectrum' is None" in str(caplog.text)
caplog.clear()
_save_spectrum_as_csv(fln=fln, energy=energy)
assert "The array 'spectrum' is None" in str(caplog.text)
caplog.clear()
spectrum = spectrum[:-1]
_save_spectrum_as_csv(fln=fln, energy=energy, spectrum=spectrum)
assert "Arrays 'energy' and 'spectrum' have different size:" in str(caplog.text)
caplog.clear()
|
NSLS-II-HXN/PyXRF
|
pyxrf/xanes_maps/tests/test_xanes_maps_api.py
|
Python
|
bsd-3-clause
| 16,067
|
"""
This file is part of a contribution to the OAD Data Science Toolkit.
This is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
The software is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this software. If not, see <http://www.gnu.org/licenses/>.
File name: EvaluationStats.py
Created: April 11th, 2018
Author: Dr. Rob Lyon
Contact: rob@scienceguyrob.com or robert.lyon@manchester.ac.uk
Web: <http://www.scienceguyrob.com>
Computes and stores the performance statistics of a classifier
given a confusion matrix containing its true positive, false positive,
true negative and false negative rates.
Designed to run on python 2.4 or later.
"""
from numpy import sqrt
# ******************************
#
# CLASS DEFINITION
#
# ******************************
class ClassifierStats:
"""
Computes the performance statistics of a BINARY classifier only.
"""
# ****************************************************************************************************
#
# Constructor.
#
# ****************************************************************************************************
def __init__(self,confusionMatrix):
"""
Default constructor.
Parameters:
confusionMatrix - a matrix containing the performance of a given BINARY classifier on
a training set. For example given the matrix,
[[15528 731]
[ 249 1390]]
Where,
TrueNegatives = confusionMatrix[0][0] # Negatives correctly receiving negative label.
FalseNegatives = confusionMatrix[0][1] # Positives incorrectly receiving negative label.
FalsePositives = confusionMatrix[1][0] # Negatives incorrectly receiving positive label.
TruePositives = confusionMatrix[1][1] # Positives correctly receiving positive label.
Will not evaluate the performance of multi-class classifiers.
"""
# The accuracy of the classifier.
self.accuracy = 0.0
# The precision of the classifier. Precision is the fraction of retrieved instances that are relevant.
self.precision = 0
# The recall of the classifier. Recall is the fraction of relevant instances that are retrieved.
self.recall = 0.0
        # The specificity of the classifier. Specificity relates to the ability of the classifier to identify negative results.
self.specificity = 0.0
# The negative predictive value (NPV) is a summary statistic
# defined as the proportion of input patterns identified as negative,
# that are correctly identified as such. A high NPV means that when the
# classifier yields a negative result, it is most likely correct in its assessment.
self.negativePredictiveValue = 0.0
# The Matthews correlation coefficient is used in machine learning
# as a measure of the quality of binary (two-class) classifications.
# It takes into account true and false positives and negatives and
# is generally regarded as a balanced measure which can be used even
# if the classes are of very different sizes. The MCC is in essence a
# correlation coefficient between the observed and predicted binary
# classifications; it returns a value between -1 and +1. A coefficient
# of +1 represents a perfect prediction, 0 no better than random prediction
# and -1 indicates total disagreement between prediction and observation.
# The statistic is also known as the phi coefficient.
self.matthewsCorrelation = 0.0
# The F1 score (also F-score or F-measure) is a measure of a classifier's accuracy.
# It considers both the precision p and the recall r of the classifier to compute
# the score: p is the number of correct results divided by the number of all
# returned results and r is the number of correct results divided by the
# number of results that should have been returned.
self.fScore = 0.0
        # The g-mean is a measure useful for data sets with skewed class distributions.
        # In other words, when most examples belong to one class (say the negative), this
        # metric helps assess performance irrespective of the imbalance. It evaluates the
# inductive bias in terms of the ratio between positive and negative accuracy.
self.gmean = 0.0
# The kappa statistic. Cohen's kappa coefficient is a statistical measure of inter-rater
# agreement or inter-annotator agreement for qualitative (categorical) items. It is
# generally thought to be a more robust measure than simple percent agreement
# calculation since k takes into account the agreement occurring by chance.
self.kappa = 0.0
# The true positives.
self.TP = 0.0
# The true negatives.
self.TN = 0.0
# The false positives.
self.FP = 0.0
# The false negatives.
self.FN = 0.0
# The area under the roc curve.
self.auroc = float('NaN')
# The area under the precision-recall curve.
self.auprc = float('NaN')
self.load(confusionMatrix)
self.calculate()
# ****************************************************************************************************
def load(self,confusionMatrix):
"""
Loads data from the confusion matrix into the correct variables.
Parameters:
confusionMatrix - a matrix containing the performance of a given BINARY classifier on
a training set. For example given the matrix,
[[15528 731]
[ 249 1390]]
Where,
TrueNegatives = confusionMatrix[0][0] # Negatives correctly receiving negative label.
FalseNegatives = confusionMatrix[0][1] # Positives incorrectly receiving negative label.
FalsePositives = confusionMatrix[1][0] # Negatives incorrectly receiving positive label.
TruePositives = confusionMatrix[1][1] # Positives correctly receiving positive label.
Will not evaluate the performance of multi-class classifiers.
Returns: N/A.
"""
self.TN = float(confusionMatrix[0][0]) # Negatives correctly receiving negative label.
self.FN = float(confusionMatrix[0][1]) # Positives incorrectly receiving negative label.
self.FP = float(confusionMatrix[1][0]) # Negatives incorrectly receiving positive label.
self.TP = float(confusionMatrix[1][1]) # Positives correctly receiving positive label.
# ****************************************************************************************************
def calculate(self):
"""
Computes the values of the statistics describing classifier performance.
Parameters: None.
Returns: N/A.
"""
try:
self.accuracy = (self.TP + self.TN) / (self.TP + self.FP + self.FN + self.TN)
except ZeroDivisionError as error:
self.accuracy = float('Nan')
try:
self.precision = (self.TP) / (self.TP + self.FP)
except ZeroDivisionError as error:
self.precision = float('Nan')
try:
self.recall = (self.TP) / (self.TP + self.FN)
except ZeroDivisionError as error:
self.recall = float('Nan')
try:
self.specificity = (self.TN) / (self.FP+self.TN)
except ZeroDivisionError as error:
self.specificity = float('Nan')
try:
self.negativePredictiveValue = (self.TN) / (self.FN + self.TN)
except ZeroDivisionError as error:
self.negativePredictiveValue = float('Nan')
try:
self.matthewsCorrelation = ((self.TP * self.TN) - (self.FP * self.FN)) /\
sqrt((self.TP+self.FP) * (self.TP+self.FN) * (self.TN+self.FP) * (self.TN+self.FN))
except ZeroDivisionError as error:
self.matthewsCorrelation = float('Nan')
try:
self.fScore = 2 * ((self.precision * self.recall) / (self.precision + self.recall))
except ZeroDivisionError as error:
            self.fScore = float('Nan')
# Kappa = (totalAccuracy - randomAccuracy) / (1 - randomAccuracy)
#
# where,
#
# totalAccuracy = (TP + TN) / (TP + TN + FP + FN)
#
# and
#
# randomAccuracy = (TN + FP) * (TN + FN) + (FN + TP) * (FP + TP) / (Total*Total).
total = self.TP + self.TN + self.FP + self.FN
totalAcc = (self.TP + self.TN) / (self.TP + self.TN + self.FP + self.FN)
randomAcc = (((self.TN + self.FP) * (self.TN + self.FN)) + ((self.FN + self.TP) * (self.FP + self.TP))) / (total*total)
try:
self.kappa = (totalAcc - randomAcc) / (1 - randomAcc)
except ZeroDivisionError as error:
self.kappa = float('Nan')
try:
self.gmean = sqrt( ( self.TP /( self.TP + self.FN ) ) * ( self.TN / ( self.TN + self.FP ) ) )
except ZeroDivisionError as error:
self.gmean = float('Nan')
# ****************************************************************************************************
def show(self):
"""
Prints classifier performance stats to standard output.
Parameters: None.
Returns: N/A.
"""
output ='{:<14}'.format("TP:") +"\t" + str(int(self.TP)) + "\n" +\
'{:<14}'.format("TN:") +"\t" + str(int(self.TN)) + "\n" +\
'{:<14}'.format("FP:") +"\t" + str(int(self.FP)) + "\n" +\
'{:<14}'.format("FN:") +"\t" + str(int(self.FN)) + "\n" +\
'{:<14}'.format("Accuracy:") +"\t" + str(self.accuracy * 100) + "\n" +\
'{:<14}'.format("Precision:") +"\t" + str(self.precision * 100) + "\n" +\
'{:<14}'.format("Recall:") +"\t" + str(self.recall * 100) + "\n" +\
'{:<14}'.format("Specificity:")+"\t" + str(self.specificity * 100) + "\n" +\
'{:<14}'.format("NPV:") +"\t" + str(self.negativePredictiveValue * 100) + "\t(Negative Predictive Value)\n" +\
'{:<14}'.format("MCC:") +"\t" + str(self.matthewsCorrelation) + "\t(Matthews Correlation Coefficient)\n" +\
'{:<14}'.format("F-Score:") +"\t" + str(self.fScore) +"\n" +\
'{:<14}'.format("Kappa:") +"\t" + str(self.kappa) +"\n" +\
'{:<14}'.format("G-Mean:" ) +"\t" + str(self.gmean) +"\n" +\
'{:<14}'.format("AUROC:" ) +"\t" + str(self.auroc) +"\n" +\
'{:<14}'.format("AUPRC:" ) +"\t" + str(self.auprc) +"\n"
print (output)
# ****************************************************************************************************
# ******************************
# Getters
# ******************************
def getAccuracy(self):
"""
Accuracy of the classifier where accuracy = (TP + TN) / (TP + FP + FN + TN).
Parameters: None.
Returns: accuracy as a float.
"""
return float(self.accuracy)
def getPrecision(self):
"""
Precision of the classifier where precision = (TP) / (TP + FP).
Parameters: None.
Returns: precision as a float.
"""
return float(self.precision)
def getRecall(self):
"""
Recall of the classifier where recall = (TP) / (TP + FN).
Parameters: None.
Returns: recall as a float.
"""
return float(self.recall)
def getSpecificity(self):
"""
Specificity of the classifier where specificity = (TN) / (FP+TN).
Parameters: None.
Returns: specificity as a float.
"""
return float(self.specificity)
def getMatthewsCorrelation(self):
"""
Matthew's Correlation Coefficient of the classifier where,
matthewsCorrelation = ((TP * TN) - (FP * FN)) / sqrt((TP+FP) * (TP+FN) * (TN+FP) * (TN+FN)).
Parameters: None.
Returns: mcc as a float.
"""
return float(self.matthewsCorrelation)
def getfScore(self):
"""
F-Score of the classifier where fScore = 2 * ((precision * recall) / (precision + recall)).
Parameters: None.
Returns: F-score as a float.
"""
return float(self.fScore)
def getNegativePredictiveValue(self):
"""
Negative predictive value of the classifier where negativePredictiveValue = (TN) / (FN + TN).
Parameters: None.
Returns: npv as a float.
"""
return float(self.negativePredictiveValue)
def getKappa(self):
"""
Cohen's Kappa of the classifier where,
kappa = (totalAccuracy - randomAccuracy) / (1 - randomAccuracy)
where,
totalAccuracy = (TP + TN) / (TP + TN + FP + FN)
and
randomAccuracy = (TN + FP) * (TN + FN) + (FN + TP) * (FP + TP) / (Total*Total).
Parameters: None.
Returns: kappa as a float.
"""
return float(self.kappa)
def getGMean(self):
"""
G-mean of the classifier where gmean = sqrt( ( TP /( TP + FN ) ) * ( TN / ( TN + FP ) ) ).
Parameters: None.
Returns: gmean as a float.
"""
return float(self.gmean)
def getAUROC(self):
"""
Area under the roc curve of the classifier.
Parameters: None.
Returns: auroc as a float.
"""
return float(self.auroc)
def setAUROC(self,auroc):
"""
Sets the Area under the roc curve of the classifier.
Parameters:
auroc - the area under the roc curve calculated externally.
Returns: None.
"""
self.auroc = auroc
def getAUPRC(self):
"""
Area under the precision-recall curve of the classifier.
Parameters: None.
        Returns: auprc as a float.
"""
return float(self.auprc)
def setAUPRC(self,auprc):
"""
Sets the Area under the precision-recall curve of the classifier.
Parameters:
auprc - the area under the precision-recall curve calculated externally.
Returns: None.
"""
self.auprc = auprc
def getTP(self):
"""
True positives (TP) returned by the classifier.
Parameters: None.
Returns: true positives as an integer.
"""
return int(self.TP)
def getTN(self):
"""
True negatives (TN) returned by the classifier.
Parameters: None.
Returns: true negatives as an integer.
"""
return int(self.TN)
def getFP(self):
"""
False positives (FP) returned by the classifier.
Parameters: None.
Returns: false positives as an integer.
"""
return int(self.FP)
def getFN(self):
"""
False negatives (FN) returned by the classifier.
Parameters: None.
Returns: false negatives as an integer.
"""
return int(self.FN)
# ****************************************************************************************************
|
astro4dev/OAD-Data-Science-Toolkit
|
Teaching Materials/Machine Learning/Supervised Learning/Examples/PPC/EvaluationStats.py
|
Python
|
gpl-3.0
| 16,967
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# coala documentation build configuration file, created by
# sphinx-quickstart on Sun Nov 6 21:18:18 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.coverage',
'sphinx.ext.viewcode',
'sphinxarg.ext',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
from datetime import date
YEAR = date.today().year
# General information about the project.
project = 'coala'
copyright = '{0}, The coala Developers'.format(YEAR)
author = 'The coala Developers'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
from coalib.misc.Constants import VERSION
# The short X.Y version.
version = VERSION
# The full version, including alpha/beta/rc tags.
release = VERSION
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {
'github_user': 'coala',
'github_repo': 'coala',
'github_banner': True,
'github_type': 'star',
'logo': '/images/coala_logo.svg',
'logo_name': True,
}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static/']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
html_sidebars = {
'**': [
'about.html',
'searchbox.html',
'navigation.html',
'relations.html',
'donate.html',
]
}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'coaladoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'coala.tex', 'coala Documentation',
'The coala Developers', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'coala', 'coala Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'coala', 'coala Documentation',
author, 'coala', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
|
impmihai/coala
|
docs/conf.py
|
Python
|
agpl-3.0
| 9,703
|
#!/usr/bin/python
# (c) 2017-2022 Huwenbo Shi
import argparse, math, sys, logging, time
# main function
def main():
# get command line input
args = get_command_line()
    # if the shared sample size is 0, output 0 and stop (avoids a division by zero below)
    if args.ns == 0.0:
        print 0.0
        return
# parse cross-trait LDSC log
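    # the intercept is taken from the 4th line after the 'Genetic Covariance' header
    # (in a typical cross-trait LDSC log this is the 'Intercept: <value> (<se>)' line)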
phenocor = None
log_f = open(getattr(args, 'ldsc-log'), 'r')
counter = 0
for line in log_f:
line = line.strip()
if counter > 0: counter += 1
if line.strip() == 'Genetic Covariance':
counter += 1
if counter == 5:
intercept = float(line.split()[1])
phenocor = intercept * math.sqrt(args.n1*args.n2) / args.ns
break
log_f.close()
# print the results
if phenocor != None:
print phenocor
# get command line
def get_command_line():
parser = argparse.ArgumentParser(description='Estimate phenotypic '\
'correlation from cross-trait LDSC intercept')
parser.add_argument('--ldsc-log', dest='ldsc-log', type=str,
help='Cross-trait LDSC log file', required=True)
parser.add_argument('--n1', dest='n1', type=float,
help='Sample size for GWAS 1', required=True)
parser.add_argument('--n2', dest='n2', type=float,
help='Sample size for GWAS 2', required=True)
parser.add_argument('--ns', dest='ns', type=float,
help='Number of shared samples', required=True)
args = parser.parse_args()
return args
if(__name__ == '__main__'):
main()
|
huwenboshi/hess
|
misc/estimate_phenocor.py
|
Python
|
gpl-3.0
| 1,529
|
#!/usr/bin/env python3
# THIS FILE IS PART OF THE CYLC SUITE ENGINE.
# Copyright (C) 2008-2018 NIWA & British Crown (Met Office) & Contributors.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import unittest
from cylc.cylc_subproc import procopen
from unittest.mock import call
from testfixtures import compare
from testfixtures.popen import PIPE, MockPopen
# Method could be a function
# pylint: disable=no-self-use
class TestSubprocessSafe(unittest.TestCase):
"""Unit tests for the parameter procopen utility function"""
def setUp(self):
self.Popen = MockPopen()
def test_sprocess_communicate_with_process(self):
foo = ' foo'
bar = ' bar'
cmd = ["echo", "this is a command" + foo + bar]
p = procopen(cmd, stdoutpipe=True)
stdout, _ = p.communicate()
compare(stdout, b"this is a command foo bar\n")
def test_sprocess_communicate_with_input(self):
command = "a command"
Popen = MockPopen()
Popen.set_command(command)
# only static input used with simulated mockpopen
# codacy mistakenly sees this as a call to popen
process = Popen(command, stdout=PIPE, stderr=PIPE, shell=True) # nosec
err, out = process.communicate('foo')
compare([
# only static input used with simulated mockpopen
# codacy mistakenly sees this as a call to popen
call.Popen(command, shell=True, stderr=-1, stdout=-1), # nosec
call.Popen_instance.communicate('foo'),
], Popen.mock.method_calls)
return err, out
def test_sprocess_safe_read_from_stdout_and_stderr(self):
command = "a command"
Popen = MockPopen()
# only static input used with simulated mockpopen
# codacy mistakenly sees this as a call to popen
Popen.set_command(command, stdout=b'foo', stderr=b'bar')
process = Popen(command, stdout=PIPE, stderr=PIPE, shell=True) # nosec
compare(process.stdout.read(), b'foo')
compare(process.stderr.read(), b'bar')
compare([
call.Popen(command, shell=True, stderr=PIPE, # nosec
stdout=PIPE),
], Popen.mock.method_calls)
def test_sprocess_safe_write_to_stdin(self):
command = "a command"
Popen = MockPopen()
Popen.set_command(command)
# only static input used with simulated mockpopen
# codacy mistakenly sees this as a call to popen
process = Popen(command, stdin=PIPE, shell=True) # nosec
process.stdin.write(command)
process.stdin.close()
compare([
# static input used with simulated mockpopen
# codacy mistakenly sees this as a call to popen
call.Popen(command, shell=True, stdin=PIPE), # nosec
call.Popen_instance.stdin.write(command),
call.Popen_instance.stdin.close(),
], Popen.mock.method_calls)
def test_sprocess_safe_wait_and_return_code(self):
command = "a command"
Popen = MockPopen()
Popen.set_command(command, returncode=3)
process = Popen(command)
compare(process.returncode, None)
compare(process.wait(), 3)
compare(process.returncode, 3)
compare([
call.Popen(command),
call.Popen_instance.wait(),
], Popen.mock.method_calls)
if __name__ == "__main__":
unittest.main()
|
matthewrmshin/cylc
|
lib/cylc/tests/test_cylc_subproc.py
|
Python
|
gpl-3.0
| 4,119
|
# -*- coding: utf-8 -*-
# Copyright (c) 2014 Vladimir Chub
# All rights reserved.
#
# This software is licensed as described in the file LICENSE, which
# you should have received as part of this distribution.
#
# Author: Vladimir Chub
"""
TUVAN TOOLS. Yet another python toolkit. ;]
"""
__author__ = 'Vladimir Chub'
__email__ = 'vartagg@users.noreply.github.com'
__version__ = '0.1.0'
from copy import copy
def dict_slice(dict_, keys, default_value=None, strict=False):
"""
    Returns a part of dictionary dict_ containing the keys
    :param default_value: The value to be assigned to keys that cannot be found in the dictionary dict_
    :param strict: If True and some key from keys cannot be found in dict_, a KeyError will be raised. Default: False
>>> a = {'x': 1, 'y': 2, 'z': 3}
>>> dict_slice(a, ['x', 'y'])
{'y': 2, 'x': 1}
>>> b = {'y': 2, 'z': 3}
>>> dict_slice(b, ['x', 'y'])
{'y': 2, 'x': None}
>>> dict_slice(b, ['x', 'y'], 'donut hole')
{'y': 2, 'x': 'donut hole'}
>>> dict_slice(b, ['x', 'y'], strict=True) # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
KeyError: 'x'
"""
fn = (lambda k: dict_.get(k, default_value)) if not strict else (lambda k: dict_[k])
return {k: fn(k) for k in keys}
def dict_besides(dict_, keys):
"""
Returns a part of dictionary dict_ not containing the keys
>>> a = {'x': 1, 'y': 2, 'z': 3, 'z1': 4}
>>> dict_besides(a, ['x', 'y'])
{'z': 3, 'z1': 4}
>>> dict_besides(dict(), ['x', 'y'])
{}
"""
return {k: dict_.get(k) for k in dict_ if k not in keys}
def are_unique(seq):
"""
Returns True if elements in seq are unique, and False otherwise
>>> are_unique([1, 2, 3])
True
>>> are_unique([1, 2, 3, 2])
False
>>> are_unique([])
True
"""
seen = set()
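# set.add() returns None, so "elem in seen or seen.add(elem)" is truthy only when elem was already seen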
return not any(elem in seen or seen.add(elem) for elem in seq)
def dicts_union(dict1, dict2, strict=False):
"""
Returns the union of two dictionaries
:param strict: If True and keys of dictionaries are intersected, KeyError will be raised. Default: False
>>> dicts_union({'x': 'temp', 'y': 'foo'}, {'z': 'bar'}) == {'y': 'foo', 'x': 'temp', 'z': 'bar'}
True
>>> dicts_union({'x': 'temp', 'y': 'foo'}, {'x': 'bar'}) == {'y': 'foo', 'x': 'bar'}
True
>>> dicts_union({'x': 'temp', 'y': 'foo'}, {'x': 'not temp'}, True) # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
KeyError: Some keys of dicts are intersected
"""
items = dict1.items() + dict2.items()
if strict and not are_unique([item[0] for item in items]):
raise KeyError('Some keys of dicts are intersected')
return dict(items)
def raise_function(exc):
"""
Helps raise custom exceptions inside lambdas
>>> foo = lambda x: x != 13 or raise_function(ValueError('Suddenly'))
>>> foo(13) # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
ValueError: Suddenly
"""
raise exc
def each(callback, items):
"""
>>> bar_list = []
>>> foo_list = [i for i in range(9)]
>>> each(lambda x: bar_list.append(x*3), foo_list)
>>> bar_list
[0, 3, 6, 9, 12, 15, 18, 21, 24]
"""
for item in items:
callback(item)
def chunks(l, n):
"""
>>> a = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
>>> [i for i in chunks(a, 5)]
[[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]]
>>> [i for i in chunks(a, 4)]
[[0, 1, 2, 3], [4, 5, 6, 7], [8, 9]]
"""
for i in xrange(0, len(l), n):
yield l[i:i+n]
def weighted_choice(values_list, weights_list):
"""
Returns the weighted choice from list.
Each element of weights_list corresponds to the element of values_list with the same index
>>> vl = ['chicken', 'egg', 'philosopher_stone']
>>> wl = [1000000, 1000, 1]
>>> vd = {i: 0 for i in vl}
>>> for i in range(10000):
... value = weighted_choice(vl, wl)
... vd[value] += 1
>>> vd['chicken'] > vd['egg'] > vd['philosopher_stone']
True
"""
from random import random
assert len(values_list) == len(weights_list), 'Length of values_list must be equal to length of weights_list'
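# Build a running cumulative sum of the weights, draw a uniform number in
# [0, running_total), and return the first value whose cumulative total exceeds the draw.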
totals = []
running_total = 0
for w in weights_list:
running_total += w
totals.append(running_total)
rnd = random() * running_total
for i, total in enumerate(totals):
if rnd < total:
return values_list[i]
class LazyDict(dict):
"""
A dict class with default value for unfound keys
>>> ldict = LazyDict(0)
>>> ldict
{}
>>> ldict['x'] += 1
>>> ldict
{'x': 1}
>>> ldict['x'] += 1
>>> ldict
{'x': 2}
>>> ldict['y'] += 10
>>> ldict == {'x': 2, 'y': 10}
True
>>> ldict['z'] == 0
True
>>> ldict == {'x': 2, 'y': 10, 'z': 0}
True
>>> ldict = LazyDict([])
>>> ldict
{}
>>> ldict['a'].append(20)
>>> ldict['a'].append(20)
>>> ldict
{'a': [20, 20]}
>>> ldict['b'].append(6)
>>> ldict == {'a': [20, 20], 'b': [6]}
True
"""
def __init__(self, default_value):
super(LazyDict, self).__init__()
self._default_value = default_value
def __getitem__(self, item):
if item not in self:
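# copy() so each missing key gets its own instance of a mutable default (e.g. a fresh list)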
self.__setitem__(item, copy(self._default_value))
return super(LazyDict, self).__getitem__(item)
if __name__ == '__main__':
import doctest
doctest.testmod()
|
vartagg/tuvan_tools
|
tuvan_tools.py
|
Python
|
bsd-3-clause
| 5,479
|
# repo.py
# Copyright (C) 2008, 2009 Michael Trier (mtrier@gmail.com) and contributors
#
# This module is part of GitPython and is released under
# the BSD License: http://www.opensource.org/licenses/bsd-license.php
from git.exc import InvalidGitRepositoryError, NoSuchPathError
from git.cmd import Git
from git.util import Actor
from git.refs import *
from git.index import IndexFile
from git.objects import *
from git.config import GitConfigParser
from git.remote import (
Remote,
digest_process_messages,
finalize_process,
add_progress
)
from git.db import (
GitCmdObjectDB,
GitDB
)
from gitdb.util import (
join,
isfile,
hex_to_bin
)
from fun import (
rev_parse,
is_git_dir,
find_git_dir,
touch
)
import os
import sys
import re
DefaultDBType = GitDB
if sys.version_info[1] < 5: # python 2.4 compatibility
DefaultDBType = GitCmdObjectDB
# END handle python 2.4
__all__ = ('Repo', )
class Repo(object):
"""Represents a git repository and allows you to query references,
gather commit information, generate diffs, create and clone repositories, and query
the log.
The following attributes are worth using:
'working_dir' is the working directory of the git command, which is the working tree
directory if available or the .git directory in case of bare repositories
'working_tree_dir' is the working tree directory, but will raise AssertionError
if we are a bare repository.
'git_dir' is the .git repository directory, which is always set."""
DAEMON_EXPORT_FILE = 'git-daemon-export-ok'
__slots__ = ( "working_dir", "_working_tree_dir", "git_dir", "_bare", "git", "odb" )
# precompiled regex
re_whitespace = re.compile(r'\s+')
re_hexsha_only = re.compile('^[0-9A-Fa-f]{40}$')
re_hexsha_shortened = re.compile('^[0-9A-Fa-f]{4,40}$')
re_author_committer_start = re.compile(r'^(author|committer)')
re_tab_full_line = re.compile(r'^\t(.*)$')
# invariants
# represents the configuration level of a configuration file
config_level = ("system", "global", "repository")
def __init__(self, path=None, odbt = DefaultDBType):
"""Create a new Repo instance
:param path: is the path to either the root git directory or the bare git repo::
repo = Repo("/Users/mtrier/Development/git-python")
repo = Repo("/Users/mtrier/Development/git-python.git")
repo = Repo("~/Development/git-python.git")
repo = Repo("$REPOSITORIES/Development/git-python.git")
:param odbt: Object DataBase type - a type which is constructed by providing
the directory containing the database objects, i.e. .git/objects. It will
be used to access all object data
:raise InvalidGitRepositoryError:
:raise NoSuchPathError:
:return: git.Repo """
epath = os.path.abspath(os.path.expandvars(os.path.expanduser(path or os.getcwd())))
if not os.path.exists(epath):
raise NoSuchPathError(epath)
self.working_dir = None
self._working_tree_dir = None
self.git_dir = None
curpath = epath
# walk up the path to find the .git dir
while curpath:
if is_git_dir(curpath):
self.git_dir = curpath
self._working_tree_dir = os.path.dirname(curpath)
break
gitpath = find_git_dir(join(curpath, '.git'))
if gitpath is not None:
self.git_dir = gitpath
self._working_tree_dir = curpath
break
curpath, dummy = os.path.split(curpath)
if not dummy:
break
# END while curpath
if self.git_dir is None:
raise InvalidGitRepositoryError(epath)
self._bare = False
try:
self._bare = self.config_reader("repository").getboolean('core','bare')
except Exception:
# lets not assume the option exists, although it should
pass
# adjust the wd in case we are actually bare - we didn't know that
# in the first place
if self._bare:
self._working_tree_dir = None
# END working dir handling
self.working_dir = self._working_tree_dir or self.git_dir
self.git = Git(self.working_dir)
# special handling, in special times
args = [join(self.git_dir, 'objects')]
if issubclass(odbt, GitCmdObjectDB):
args.append(self.git)
self.odb = odbt(*args)
def __eq__(self, rhs):
if isinstance(rhs, Repo):
return self.git_dir == rhs.git_dir
return False
def __ne__(self, rhs):
return not self.__eq__(rhs)
def __hash__(self):
return hash(self.git_dir)
def __repr__(self):
return "%s(%r)" % (type(self).__name__, self.git_dir)
# Description property
def _get_description(self):
filename = join(self.git_dir, 'description')
return file(filename).read().rstrip()
def _set_description(self, descr):
filename = join(self.git_dir, 'description')
file(filename, 'w').write(descr+'\n')
description = property(_get_description, _set_description,
doc="the project's description")
del _get_description
del _set_description
@property
def working_tree_dir(self):
""":return: The working tree directory of our git repository
:raise AssertionError: If we are a bare repository"""
if self._working_tree_dir is None:
raise AssertionError( "Repository at %r is bare and does not have a working tree directory" % self.git_dir )
return self._working_tree_dir
@property
def bare(self):
""":return: True if the repository is bare"""
return self._bare
@property
def heads(self):
"""A list of ``Head`` objects representing the branch heads in
this repo
:return: ``git.IterableList(Head, ...)``"""
return Head.list_items(self)
@property
def references(self):
"""A list of Reference objects representing tags, heads and remote references.
:return: IterableList(Reference, ...)"""
return Reference.list_items(self)
# alias for references
refs = references
# alias for heads
branches = heads
@property
def index(self):
""":return: IndexFile representing this repository's index."""
return IndexFile(self)
@property
def head(self):
""":return: HEAD Object pointing to the current head reference"""
return HEAD(self,'HEAD')
@property
def remotes(self):
"""A list of Remote objects allowing to access and manipulate remotes
:return: ``git.IterableList(Remote, ...)``"""
return Remote.list_items(self)
def remote(self, name='origin'):
""":return: Remote with the specified name
:raise ValueError: if no remote with such a name exists"""
return Remote(self, name)
#{ Submodules
@property
def submodules(self):
"""
:return: git.IterableList(Submodule, ...) of direct submodules
available from the current head"""
return Submodule.list_items(self)
def submodule(self, name):
""" :return: Submodule with the given name
:raise ValueError: If no such submodule exists"""
try:
return self.submodules[name]
except IndexError:
raise ValueError("Didn't find submodule named %r" % name)
# END exception handling
def create_submodule(self, *args, **kwargs):
"""Create a new submodule
:note: See the documentation of Submodule.add for a description of the
applicable parameters
:return: created submodules"""
return Submodule.add(self, *args, **kwargs)
def iter_submodules(self, *args, **kwargs):
"""An iterator yielding Submodule instances, see Traversable interface
for a description of args and kwargs
:return: Iterator"""
return RootModule(self).traverse(*args, **kwargs)
def submodule_update(self, *args, **kwargs):
"""Update the submodules, keeping the repository consistent as it will
take the previous state into consideration. For more information, please
see the documentation of RootModule.update"""
return RootModule(self).update(*args, **kwargs)
#}END submodules
@property
def tags(self):
"""A list of ``Tag`` objects that are available in this repo
:return: ``git.IterableList(TagReference, ...)`` """
return TagReference.list_items(self)
def tag(self,path):
""":return: TagReference Object, reference pointing to a Commit or Tag
:param path: path to the tag reference, i.e. 0.1.5 or tags/0.1.5 """
return TagReference(self, path)
def create_head(self, path, commit='HEAD', force=False, logmsg=None ):
"""Create a new head within the repository.
For more documentation, please see the Head.create method.
:return: newly created Head Reference"""
return Head.create(self, path, commit, force, logmsg)
def delete_head(self, *heads, **kwargs):
"""Delete the given heads
:param kwargs: Additional keyword arguments to be passed to git-branch"""
return Head.delete(self, *heads, **kwargs)
def create_tag(self, path, ref='HEAD', message=None, force=False, **kwargs):
"""Create a new tag reference.
For more documentation, please see the TagReference.create method.
:return: TagReference object """
return TagReference.create(self, path, ref, message, force, **kwargs)
def delete_tag(self, *tags):
"""Delete the given tag references"""
return TagReference.delete(self, *tags)
def create_remote(self, name, url, **kwargs):
"""Create a new remote.
For more information, please see the documentation of the Remote.create
methods
:return: Remote reference"""
return Remote.create(self, name, url, **kwargs)
def delete_remote(self, remote):
"""Delete the given remote."""
return Remote.remove(self, remote)
def _get_config_path(self, config_level ):
# we do not support an absolute path of the gitconfig on windows ,
# use the global config instead
if sys.platform == "win32" and config_level == "system":
config_level = "global"
if config_level == "system":
return "/etc/gitconfig"
elif config_level == "global":
return os.path.normpath(os.path.expanduser("~/.gitconfig"))
elif config_level == "repository":
return join(self.git_dir, "config")
raise ValueError( "Invalid configuration level: %r" % config_level )
def config_reader(self, config_level=None):
"""
:return:
GitConfigParser allowing to read the full git configuration, but not to write it
The configuration will include values from the system, user and repository
configuration files.
:param config_level:
For possible values, see config_writer method
If None, all applicable levels will be used. Specify a level in case
you know which exact file you whish to read to prevent reading multiple files for
instance
:note: On windows, system configuration cannot currently be read as the path is
unknown, instead the global path will be used."""
files = None
if config_level is None:
files = [ self._get_config_path(f) for f in self.config_level ]
else:
files = [ self._get_config_path(config_level) ]
return GitConfigParser(files, read_only=True)
def config_writer(self, config_level="repository"):
"""
:return:
GitConfigParser allowing to write values of the specified configuration file level.
Config writers should be retrieved, used to change the configuration, and written
right away, as they will lock the configuration file in question and prevent others
from writing to it.
:param config_level:
One of the following values
system = system wide configuration file
global = user level configuration file
repository = configuration file for this repository only"""
return GitConfigParser(self._get_config_path(config_level), read_only = False)
def commit(self, rev=None):
"""The Commit object for the specified revision
:param rev: revision specifier, see git-rev-parse for viable options.
:return: ``git.Commit``"""
if rev is None:
return self.head.commit
else:
return self.rev_parse(str(rev)+"^0")
def iter_trees(self, *args, **kwargs):
""":return: Iterator yielding Tree objects
:note: Takes all arguments known to iter_commits method"""
return ( c.tree for c in self.iter_commits(*args, **kwargs) )
def tree(self, rev=None):
"""The Tree object for the given treeish revision
Examples::
repo.tree(repo.heads[0])
:param rev: is a revision pointing to a Treeish ( being a commit or tree )
:return: ``git.Tree``
:note:
If you need a non-root level tree, find it by iterating the root tree. Otherwise
it cannot know about its path relative to the repository root and subsequent
operations might have unexpected results."""
if rev is None:
return self.head.commit.tree
else:
return self.rev_parse(str(rev)+"^{tree}")
def iter_commits(self, rev=None, paths='', **kwargs):
"""A list of Commit objects representing the history of a given ref/commit
:param rev:
revision specifier, see git-rev-parse for viable options.
If None, the active branch will be used.
:param paths:
is an optional path or a list of paths to limit the returned commits to
Commits that do not contain that path or the paths will not be returned.
:param kwargs:
Arguments to be passed to git-rev-list - common ones are
max_count and skip
:note: to receive only commits between two named revisions, use the
"revA..revB" revision specifier
:return ``git.Commit[]``"""
if rev is None:
rev = self.head.commit
return Commit.iter_items(self, rev, paths, **kwargs)
def _get_daemon_export(self):
filename = join(self.git_dir, self.DAEMON_EXPORT_FILE)
return os.path.exists(filename)
def _set_daemon_export(self, value):
filename = join(self.git_dir, self.DAEMON_EXPORT_FILE)
fileexists = os.path.exists(filename)
if value and not fileexists:
touch(filename)
elif not value and fileexists:
os.unlink(filename)
daemon_export = property(_get_daemon_export, _set_daemon_export,
doc="If True, git-daemon may export this repository")
del _get_daemon_export
del _set_daemon_export
def _get_alternates(self):
"""The list of alternates for this repo from which objects can be retrieved
:return: list of strings being pathnames of alternates"""
alternates_path = join(self.git_dir, 'objects', 'info', 'alternates')
if os.path.exists(alternates_path):
try:
f = open(alternates_path)
alts = f.read()
finally:
f.close()
return alts.strip().splitlines()
else:
return list()
def _set_alternates(self, alts):
"""Sets the alternates
:param alts:
is the array of string paths representing the alternates at which
git should look for objects, i.e. /home/user/repo/.git/objects
:raise NoSuchPathError:
:note:
The method does not check for the existence of the paths in alts
as the caller is responsible."""
alternates_path = join(self.git_dir, 'objects', 'info', 'alternates')
if not alts:
if isfile(alternates_path):
os.remove(alternates_path)
else:
try:
f = open(alternates_path, 'w')
f.write("\n".join(alts))
finally:
f.close()
# END file handling
# END alts handling
alternates = property(_get_alternates, _set_alternates, doc="Retrieve a list of alternates paths or set a list paths to be used as alternates")
def is_dirty(self, index=True, working_tree=True, untracked_files=False):
"""
:return:
``True`` if the repository is considered dirty. By default it will react
like a git-status without untracked files, hence it is dirty if the
index or the working copy have changes."""
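# e.g. (illustrative) repo.is_dirty(untracked_files=True) also reports untracked files as making the repo dirty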
if self._bare:
# Bare repositories with no associated working directory are
# always considered to be clean.
return False
# start from the one which is fastest to evaluate
default_args = ('--abbrev=40', '--full-index', '--raw')
if index:
# diff index against HEAD
if isfile(self.index.path) and self.head.is_valid() and \
len(self.git.diff('HEAD', '--cached', *default_args)):
return True
# END index handling
if working_tree:
# diff index against working tree
if len(self.git.diff(*default_args)):
return True
# END working tree handling
if untracked_files:
if len(self.untracked_files):
return True
# END untracked files
return False
@property
def untracked_files(self):
"""
:return:
list(str,...)
Files currently untracked as they have not been staged yet. Paths
are relative to the current working directory of the git command.
:note:
ignored files will not appear here, i.e. files mentioned in .gitignore"""
# make sure we get all files, not only untracked directories
proc = self.git.status(porcelain=True,
untracked_files=True,
as_process=True)
# Untracked files prefix in porcelain mode
prefix = "?? "
untracked_files = list()
for line in proc.stdout:
if not line.startswith(prefix):
continue
filename = line[len(prefix):].rstrip('\n')
# Special characters are escaped
if filename[0] == filename[-1] == '"':
filename = filename[1:-1].decode('string_escape')
untracked_files.append(filename)
return untracked_files
@property
def active_branch(self):
"""The name of the currently active branch.
:return: Head to the active branch"""
return self.head.reference
def blame(self, rev, file):
"""The blame information for the given file at the given revision.
:param rev: revision specifier, see git-rev-parse for viable options.
:return:
list: [git.Commit, list: [<line>]]
A list of tuples associating a Commit object with a list of lines that
changed within the given commit. The Commit objects will be given in order
of appearance."""
data = self.git.blame(rev, '--', file, p=True)
commits = dict()
blames = list()
info = None
for line in data.splitlines(False):
parts = self.re_whitespace.split(line, 1)
firstpart = parts[0]
if self.re_hexsha_only.search(firstpart):
# handles
# 634396b2f541a9f2d58b00be1a07f0c358b999b3 1 1 7 - indicates blame-data start
# 634396b2f541a9f2d58b00be1a07f0c358b999b3 2 2 - indicates another line of blame with the same data
digits = parts[-1].split(" ")
if len(digits) == 3:
info = {'id': firstpart}
blames.append([None, []])
elif info['id'] != firstpart:
info = {'id': firstpart}
blames.append([commits.get(firstpart), []])
# END blame data initialization
else:
m = self.re_author_committer_start.search(firstpart)
if m:
# handles:
# author Tom Preston-Werner
# author-mail <tom@mojombo.com>
# author-time 1192271832
# author-tz -0700
# committer Tom Preston-Werner
# committer-mail <tom@mojombo.com>
# committer-time 1192271832
# committer-tz -0700 - IGNORED BY US
role = m.group(0)
if firstpart.endswith('-mail'):
info["%s_email" % role] = parts[-1]
elif firstpart.endswith('-time'):
info["%s_date" % role] = int(parts[-1])
elif role == firstpart:
info[role] = parts[-1]
# END distinguish mail,time,name
else:
# handle
# filename lib/grit.rb
# summary add Blob
# <and rest>
if firstpart.startswith('filename'):
info['filename'] = parts[-1]
elif firstpart.startswith('summary'):
info['summary'] = parts[-1]
elif firstpart == '':
if info:
sha = info['id']
c = commits.get(sha)
if c is None:
c = Commit( self, hex_to_bin(sha),
author=Actor._from_string(info['author'] + ' ' + info['author_email']),
authored_date=info['author_date'],
committer=Actor._from_string(info['committer'] + ' ' + info['committer_email']),
committed_date=info['committer_date'],
message=info['summary'])
commits[sha] = c
# END if commit objects needs initial creation
m = self.re_tab_full_line.search(line)
text, = m.groups()
blames[-1][0] = c
blames[-1][1].append( text )
info = {'id': sha}
# END if we collected commit info
# END distinguish filename,summary,rest
# END distinguish author|committer vs filename,summary,rest
# END distinguish hexsha vs other information
return blames
@classmethod
def init(cls, path=None, mkdir=True, **kwargs):
"""Initialize a git repository at the given path if specified
:param path:
is the full path to the repo (traditionally ends with /<name>.git)
or None in which case the repository will be created in the current
working directory
:param mkdir:
if specified will create the repository directory if it doesn't
already exist. Creates the directory with a mode=0755.
Only effective if a path is explicitly given
:param kwargs:
keyword arguments serving as additional options to the git-init command
:return: ``git.Repo`` (the newly created repo)"""
if mkdir and path and not os.path.exists(path):
os.makedirs(path, 0755)
# git command automatically chdir into the directory
git = Git(path)
output = git.init(**kwargs)
return Repo(path)
@classmethod
def _clone(cls, git, url, path, odb_default_type, progress, **kwargs):
# special handling for windows for path at which the clone should be
# created.
# tilde '~' will be expanded to the HOME no matter where the ~ occurs. Hence
# we at least give a proper error instead of letting git fail
prev_cwd = None
prev_path = None
odbt = kwargs.pop('odbt', odb_default_type)
if os.name == 'nt':
if '~' in path:
raise OSError("Git cannot handle the ~ character in path %r correctly" % path)
# on windows, git will think paths like c: are relative and prepend the
# current working dir ( before it fails ). We temporarily adjust the working
# dir to make this actually work
match = re.match("(\w:[/\\\])(.*)", path)
if match:
prev_cwd = os.getcwd()
prev_path = path
drive, rest_of_path = match.groups()
os.chdir(drive)
path = rest_of_path
kwargs['with_keep_cwd'] = True
# END cwd preparation
# END windows handling
try:
proc = git.clone(url, path, with_extended_output=True, as_process=True, v=True, **add_progress(kwargs, git, progress))
if progress:
digest_process_messages(proc.stderr, progress)
#END handle progress
finalize_process(proc)
finally:
if prev_cwd is not None:
os.chdir(prev_cwd)
path = prev_path
# END reset previous working dir
# END bad windows handling
# our git command could have a different working dir than our actual
# environment, hence we prepend its working dir if required
if not os.path.isabs(path) and git.working_dir:
path = join(git._working_dir, path)
# adjust remotes - there may be operating systems which use backslashes,
# These might be given as initial paths, but when handling the config file
# that contains the remote from which we were cloned, git stops liking it
# as it will escape the backslashes. Hence we undo the escaping just to be
# sure
repo = cls(os.path.abspath(path), odbt = odbt)
if repo.remotes:
repo.remotes[0].config_writer.set_value('url', repo.remotes[0].url.replace("\\\\", "\\").replace("\\", "/"))
# END handle remote repo
return repo
def clone(self, path, progress=None, **kwargs):
"""Create a clone from this repository.
:param path:
is the full path of the new repo (traditionally ends with ./<name>.git).
:param progress: See 'git.remote.Remote.push'.
:param kwargs:
odbt = ObjectDatabase Type, allowing to determine the object database
implementation used by the returned Repo instance
All remaining keyword arguments are given to the git-clone command
:return: ``git.Repo`` (the newly cloned repo)"""
return self._clone(self.git, self.git_dir, path, type(self.odb), progress, **kwargs)
@classmethod
def clone_from(cls, url, to_path, progress=None, **kwargs):
"""Create a clone from the given URL
:param url: valid git url, see http://www.kernel.org/pub/software/scm/git/docs/git-clone.html#URLS
:param to_path: Path to which the repository should be cloned to
:param progress: See 'git.remote.Remote.push'.
:param kwargs: see the ``clone`` method
:return: Repo instance pointing to the cloned directory"""
return cls._clone(Git(os.getcwd()), url, to_path, GitCmdObjectDB, progress, **kwargs)
def archive(self, ostream, treeish=None, prefix=None, **kwargs):
"""Archive the tree at the given revision.
:param ostream: file compatible stream object to which the archive will be written
:param treeish: is the treeish name/id, defaults to active branch
:param prefix: is the optional prefix to prepend to each filename in the archive
:param kwargs:
Additional arguments passed to git-archive
NOTE: Use the 'format' argument to define the kind of format. Use
specialized ostreams to write any format supported by python
:raise GitCommandError: in case something went wrong
:return: self"""
if treeish is None:
treeish = self.head.commit
if prefix and 'prefix' not in kwargs:
kwargs['prefix'] = prefix
kwargs['output_stream'] = ostream
self.git.archive(treeish, **kwargs)
return self
rev_parse = rev_parse
def __repr__(self):
return '<git.Repo "%s">' % self.git_dir
|
dbaxa/GitPython
|
git/repo/base.py
|
Python
|
bsd-3-clause
| 30,180
|
#!/usr/bin/env python
"""
Copyright (c) 2004-Present Pivotal Software, Inc.
This program and the accompanying materials are made available under
the terms of the under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Functional tests for MPP-16428 - Improve plan caching in PL/pgsql.
Tests that require plperl will first install plperl gppkg if
gppkg is not installed.
"""
import os
import sys
import inspect
import traceback
import tinctest
from tinctest.lib import Gpdiff
from mpp.lib.PSQL import PSQL
from mpp.lib.gpdbSystem import GpdbSystem
from tinctest.lib import local_path, run_shell_command
from mpp.gpdb.tests.package.procedural_language import ProceduralLanguage
from mpp.gpdb.tests.package.procedural_language.plpgsql_caching.functional import Plpg
from mpp.lib.gppkg.gppkg import Gppkg
from mpp.models import MPPTestCase
cmd = 'gpssh --version'
res = {'rc':0, 'stderr':'', 'stdout':''}
run_shell_command (cmd, 'check product version', res)
product_version = res['stdout'].split('gpssh version ')[1].split(' build ')[0]
plpg = Plpg()
class PlPgsqlCachingTest(MPPTestCase):
"""
@summary: Test class for MPP-16428 - Improve plan caching in PL/pgsql. Extends MPPTestCase
"""
def __init__(self, methodName):
self.pl = ProceduralLanguage()
self.gpdb = GpdbSystem()
super(PlPgsqlCachingTest, self).__init__(methodName)
@classmethod
def setUpClass(cls):
sql_files = ['negative_cachedplan_cmdutility_drop.sql', 'recursive_functions_droprecreatetable.sql', 'reexec_functioninwhere_droprecreateao.sql']
ans_files = ['negative_cachedplan_cmdutility_drop.ans', 'recursive_functions_droprecreatetable.ans', 'reexec_functioninwhere_droprecreateao.ans']
if product_version.startswith('4.2'):
plpg.pre_process(sql_files, ans_files)
def setUp(self):
self.do_test_fixture('setup')
def tearDown(self):
self.do_test_fixture('teardown')
def do_test_fixture(self, fixture):
"""
@summary: Runs a setup or teardown routine
@param fixture: Set to either 'setup' or 'teardown'. Used to determine sql file suffix.
"""
testcase1 = inspect.stack()[1][3]
testcase = self.id().split(".")[2]
init_file = local_path('init_file')
init_file_list = []
init_file_list.append(init_file)
if fixture == 'setup':
sqlfile = local_path(testcase + "_setup.sql")
outfile = local_path(testcase + "_setup.out")
ansfile = local_path(testcase + "_setup.ans")
elif fixture == 'teardown':
sqlfile = local_path(testcase + "_teardown.sql")
outfile = local_path(testcase + "_teardown.out")
ansfile = local_path(testcase + "_teardown.ans")
else:
raise Exception("do_test_fixture(): Invalid value for fixture. Acceptable values are 'setup' or 'teardown'")
# check if setup sql file exists
if os.path.isfile(sqlfile):
# if exists, run setup sql, and validate result
PSQL.run_sql_file(sql_file = sqlfile, out_file = outfile)
Gpdiff.are_files_equal(outfile, ansfile, match_sub=init_file_list)
else:
pass
def install_plperl(self):
"""
@summary: Installs plperl gppkg and creates plperl language in database if it doesn't exist
"""
gppkg = Gppkg()
gppkg.gppkg_install(product_version, 'plperl')
if self.pl.language_in_db('plperl') == False:
self.pl.create_language_in_db('plperl')
def check_plperl_env(self):
"""
@summary: Checks if environment is suitable for plperl test since it requires gppkg, which is only supported on RHEL and SuSE
"""
if self.pl.gppkg_os.find('rhel') < 0 and self.pl.gppkg_os.find('suse') < 0:
self.skipTest('TEST SKIPPED: Test requires plperl. gppkg is only supported on RHEL and SuSE. Skipping test')
def create_language_plpython(self):
"""
@summary: Creates plpython language in database if it doesn't exist
"""
if self.pl.language_in_db("plpythonu") == False:
tinctest.logger.info("plperl language doesn't exist in database. Creating language...")
self.pl.create_language_in_db("plpythonu")
def do_test(self, timeout=0, sqlfile=None, host=None, port=None, username=None, password=None, flags='-a', ans_version=False):
"""
@summary: Run a test case
@param timeout: Number of seconds to run sql file before timing out
@param sqlfile: The path to sql file (relative to TEST.py directory)
@param host: The GPDB master host name to use to connect to database
@param port: The GPDB port used to make connections to the database
@param username: The database username to use to connect to the database
@param password: The password for the database user used to connect to database
"""
(gpdb_version, build) = self.gpdb.GetGpdbVersion()
if sqlfile is None:
testcase = inspect.stack()[1][3]
filename = testcase.split('test_')[1]
sql_file = local_path(filename +".sql")
out_file = local_path(filename + ".out")
ans_file = local_path(filename + ".ans")
else:
sql_file = local_path(sqlfile)
out_file = local_path(sqlfile.split('.')[0] + '.out')
ans_file = local_path(sqlfile.split('.')[0] + '.ans')
if ans_version:
(gpdb_version, _) = self.gpdb.GetGpdbVersion()
if gpdb_version.startswith('4.3'):
ans_file = ans_file+'.4.3'
init_file = local_path('init_file')
init_file_list = []
init_file_list.append(init_file)
# run psql on file, and check result
PSQL.run_sql_file(sql_file=sql_file, out_file=out_file, timeout=timeout, host=host, port=port, username=username, password=password,flags=flags)
self.assertTrue(Gpdiff.are_files_equal(out_file, ans_file, match_sub=init_file_list))
def test_reexec_functioninselect_droprecreateheap(self):
"""plpgsql caching: Re-execute function in select list. Function drops and recreates heap table."""
self.do_test()
def test_reexec_functioninwhere_droprecreateao(self):
"""plpgsql caching: Re-execute function in where clause list. Function drops and recreates AO/CO compressed table."""
self.do_test()
def test_cursors_droprecreate_tablereferencedbycursor(self):
"""plpgsql caching: Curors - Re-execute function that drops and recreates a heap table referenced by a cursor"""
self.do_test()
def test_cursors_forloop_droprecreatetable(self):
"""plpgsql caching: Cursors - Execute a function that has a for loop which drops and recreates a table at each iteration"""
self.do_test()
def test_temptables_droprecreate_temptable(self):
"""plpgsql caching: Temp Tables - Execute function that drops and recreates temp table."""
self.do_test()
def test_nestedfunc_1level_droprecreate_function(self):
"""plpgsql caching: Nested Functions - Drop-recreate a nested function in between function calls."""
self.do_test()
def test_nestedfunc_3levels_droprecreate_table(self):
"""plpgsql caching: Nested Functions - 3 levels of function calls, drop-recreate a table reference by one of the functions in between function calls."""
self.do_test()
def test_nestedfunc_3levels_redefinefunc(self):
"""plpgsql caching: Nested Functions - 3 levels of function calls, change contents of nested function in between function calls."""
self.do_test()
def test_reexec_function_splitpartition(self):
"""plpgsql caching: Partitioned Tables - Re-execute function after splitting partitioned table referenced in function."""
self.do_test()
def test_recursive_functions_droprecreatetable(self):
"""plpgsql caching: Recursive Functions - Drop and recreate AO table referenced in recursive plpgsql function"""
self.do_test()
def test_views_redefine_view(self):
"""plpgsql caching: Views - Change definition of view referenced in plpgsql function in between function calls."""
result = PSQL.run_sql_command("show optimizer", dbname="postgres", flags='-q -t')
optimizer = result.strip()
if optimizer == "on":
self.do_test()
else:
self.skipTest("MPP-20715: Function involving view gives wrong results with planner")
def test_masteronly_redefine_view(self):
"""plpgsql caching: Master Only - Change definition of view referenced in plpgsql function in between function calls."""
self.do_test()
def test_negative_cachedplan_segmentquery(self):
"""plpgsql caching: Negative Test - Cache is not cleared. Run function query that is only compiled and executed on segments"""
self.do_test()
def test_negative_cachedplan_cmdutility_drop(self):
"""plpgsql caching: Negative Test - Cache is not cleared. Run function that runs a CMD_UTILITY statement - drop table"""
self.do_test()
def test_negative_cachedplan_cmdutility_create(self):
"""plpgsql caching: Negative Test - Cache is not cleared. Run function that runs a CMD_UTILITY statement - create table"""
self.do_test()
def test_negative_plperl_split_between_prepexec(self):
"""plpgsql caching: Negative Test - Non-plpgsql function: plperl. Split partition in between prepare and execute functions"""
self.check_plperl_env()
self.install_plperl()
self.do_test()
def test_negative_nocaching_plperl_drop_between_exec(self):
"""plpgsql caching: Negative Test - Non-plpgsql function: plperl. No caching test. Re-execute function after drop and recreate table."""
self.check_plperl_env()
self.install_plperl()
self.do_test()
def test_negative_nocaching_plperl_drop_recreate(self):
"""plpgsql caching: Negative Test - Non-plpgsql function: plperl. No caching test. Re-execute function after drop and recreate table in function."""
self.check_plperl_env()
self.install_plperl()
self.do_test()
def test_negative_plpython_drop_between_preexec(self):
"""plpgsql caching: Negative Test - Non-plpgsql function: plpython. Drop and recreate table in between prepare and execute functions"""
self.create_language_plpython()
self.do_test()
def test_negative_nocaching_plpython_drop_between_exec(self):
"""plpgsql caching: Negative Test - Non-plpgsql function: plpython. No caching test. Re-execute function after drop and recreate table."""
self.create_language_plpython()
self.do_test()
def test_negative_unsupported_drop_between_prepexec(self):
"""plpgsql caching: Negative Test - Unsupported: Drop and recreate table in between PREPARE and EXECUTE"""
self.do_test(ans_version=True)
|
edespino/gpdb
|
src/test/tinc/tincrepo/mpp/gpdb/tests/package/procedural_language/plpgsql_caching/functional/test_plpgsql.py
|
Python
|
apache-2.0
| 11,769
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import OrderedDict
from typing import Dict, Type
from .base import AccountBudgetProposalServiceTransport
from .grpc import AccountBudgetProposalServiceGrpcTransport
# Compile a registry of transports.
_transport_registry = (
OrderedDict()
) # type: Dict[str, Type[AccountBudgetProposalServiceTransport]]
_transport_registry["grpc"] = AccountBudgetProposalServiceGrpcTransport
__all__ = (
"AccountBudgetProposalServiceTransport",
"AccountBudgetProposalServiceGrpcTransport",
)
|
googleads/google-ads-python
|
google/ads/googleads/v10/services/services/account_budget_proposal_service/transports/__init__.py
|
Python
|
apache-2.0
| 1,109
|
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "sivigik.settings")
try:
from django.core.management import execute_from_command_line
except ImportError:
# The above import may fail for some other reason. Ensure that the
# issue is really that Django is missing to avoid masking other
# exceptions on Python 2.
try:
import django
except ImportError:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
)
raise
execute_from_command_line(sys.argv)
|
Klafyvel/Sivigik
|
manage.py
|
Python
|
agpl-3.0
| 805
|
from __future__ import unicode_literals
import swapper
from django.conf import settings
from django.db import models
from accelerator_abstract.models.accelerator_model import AcceleratorModel
DEFAULT_PANEL_SIZE = 10
class BaseScenario(AcceleratorModel):
name = models.CharField(max_length=40)
judging_round = models.ForeignKey(
swapper.get_model_name(AcceleratorModel.Meta.app_label,
"JudgingRound"), blank=True, null=True,
on_delete=models.CASCADE)
description = models.TextField(max_length=512, blank=True)
judges = models.ManyToManyField(
settings.AUTH_USER_MODEL,
related_name="scenarios",
through="ScenarioJudge")
applications = models.ManyToManyField(
swapper.get_model_name(AcceleratorModel.Meta.app_label, 'Application'),
related_name="scenarios",
through="ScenarioApplication")
# Default False and set True when selected. Only one may be True.
is_active = models.BooleanField(default=False)
panel_size = models.IntegerField(blank=True,
default=DEFAULT_PANEL_SIZE,
null=False)
max_panels_per_judge = models.IntegerField(blank=True, null=True)
min_panels_per_judge = models.IntegerField(blank=True,
default=0,
null=False)
sequence_number = models.PositiveIntegerField(
help_text="Indicate the order for this scenario within the round",
blank=True,
null=True)
class Meta(AcceleratorModel.Meta):
db_table = 'accelerator_scenario'
abstract = True
|
masschallenge/django-accelerator
|
accelerator_abstract/models/base_scenario.py
|
Python
|
mit
| 1,708
|
from django import template
register = template.Library()
@register.filter
def truncate_lines(text):
return '\n'.join(text.split('\n')[:300])
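# Illustrative template usage ("obj.text" is a hypothetical context variable):
# {% load truncate_lines %}
# {{ obj.text|truncate_lines }}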
|
nicksergeant/snipt
|
utils/templatetags/truncate_lines.py
|
Python
|
mit
| 149
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.utils.translation import ugettext_lazy as _
from cms.app_base import CMSApp
from cms.apphook_pool import apphook_pool
class OrderApp(CMSApp):
name = _("View Orders")
urls = ['shop.urls.order']
cache_placeholders = False
apphook_pool.register(OrderApp)
|
rfleschenberg/django-shop
|
shop/cms_app.py
|
Python
|
bsd-3-clause
| 343
|
# Opus/UrbanSim urban simulation software.
# Copyright (C) 2005-2009 University of Washington
# See opus_core/LICENSE
from opus_core.opus_package import OpusPackage
class package(OpusPackage):
name = 'waterdemand'
required_opus_packages = ['opus_core']
|
christianurich/VIBe2UrbanSim
|
3rdparty/opus/src/waterdemand/opus_package_info.py
|
Python
|
gpl-2.0
| 272
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2016 Radim Rehurek <me@radimrehurek.com>
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
"""
Word vector storage and similarity look-ups. Common code independent of the way the vectors are trained (Word2Vec, FastText, WordRank, VarEmbed, etc.)
The word vectors are considered read-only in this class.
Initialize the vectors by training e.g. Word2Vec::
>>> model = Word2Vec(sentences, size=100, window=5, min_count=5, workers=4)
>>> word_vectors = model.wv
Persist the word vectors to disk with::
>>> word_vectors.save(fname)
>>> word_vectors = KeyedVectors.load(fname)
The vectors can also be instantiated from an existing file on disk in the original Google's word2vec C format as a KeyedVectors instance::
>>> from gensim.models.keyedvectors import KeyedVectors
>>> word_vectors = KeyedVectors.load_word2vec_format('/tmp/vectors.txt', binary=False) # C text format
>>> word_vectors = KeyedVectors.load_word2vec_format('/tmp/vectors.bin', binary=True) # C binary format
You can perform various syntactic/semantic NLP word tasks with the vectors. Some of them
are already built-in::
>>> word_vectors.most_similar(positive=['woman', 'king'], negative=['man'])
[('queen', 0.50882536), ...]
>>> word_vectors.most_similar_cosmul(positive=['woman', 'king'], negative=['man'])
[('queen', 0.71382287), ...]
>>> word_vectors.doesnt_match("breakfast cereal dinner lunch".split())
'cereal'
>>> word_vectors.similarity('woman', 'man')
0.73723527
Correlation with human opinion on word similarity::
>>> word_vectors.evaluate_word_pairs(os.path.join(module_path, 'test_data','wordsim353.tsv'))
0.51, 0.62, 0.13
And on analogies::
>>> word_vectors.accuracy(os.path.join(module_path, 'test_data', 'questions-words.txt'))
and so on.
"""
from __future__ import division # py3 "true division"
import logging
try:
from queue import Queue, Empty
except ImportError:
from Queue import Queue, Empty
# If pyemd C extension is available, import it.
# If pyemd is used but isn't installed, an ImportError will be raised in wmdistance
try:
from pyemd import emd
PYEMD_EXT = True
except ImportError:
PYEMD_EXT = False
from numpy import exp, log, dot, zeros, outer, random, dtype, float32 as REAL,\
double, uint32, seterr, array, uint8, vstack, fromstring, sqrt, newaxis,\
ndarray, empty, sum as np_sum, prod, ones, ascontiguousarray
from gensim import utils, matutils # utility fnc for pickling, common scipy operations etc
from gensim.corpora.dictionary import Dictionary
from six import string_types, iteritems
from six.moves import xrange
from scipy import stats
try:
from keras.layers import Embedding
KERAS_INSTALLED = True
except ImportError:
KERAS_INSTALLED = False
logger = logging.getLogger(__name__)
class Vocab(object):
"""
A single vocabulary item, used internally for collecting per-word frequency/sampling info,
and for constructing binary trees (incl. both word leaves and inner nodes).
"""
def __init__(self, **kwargs):
self.count = 0
self.__dict__.update(kwargs)
def __lt__(self, other): # used for sorting in a priority queue
return self.count < other.count
def __str__(self):
vals = ['%s:%r' % (key, self.__dict__[key]) for key in sorted(self.__dict__) if not key.startswith('_')]
return "%s(%s)" % (self.__class__.__name__, ', '.join(vals))
class KeyedVectors(utils.SaveLoad):
"""
Class to contain vectors and vocab for the Word2Vec training class and other w2v methods not directly
involved in training such as most_similar()
"""
def __init__(self):
self.syn0 = []
self.syn0norm = None
self.vocab = {}
self.index2word = []
self.vector_size = None
@property
def wv(self):
return self
def save(self, *args, **kwargs):
# don't bother storing the cached normalized vectors
kwargs['ignore'] = kwargs.get('ignore', ['syn0norm'])
super(KeyedVectors, self).save(*args, **kwargs)
def save_word2vec_format(self, fname, fvocab=None, binary=False, total_vec=None):
"""
Store the input-hidden weight matrix in the same format used by the original
C word2vec-tool, for compatibility.
`fname` is the file used to save the vectors in
`fvocab` is an optional file used to save the vocabulary
`binary` is an optional boolean indicating whether the data is to be saved
in binary word2vec format (default: False)
`total_vec` is an optional parameter to explicitly specify total no. of vectors
(in case word vectors are appended with document vectors afterwards)
"""
if total_vec is None:
total_vec = len(self.vocab)
vector_size = self.syn0.shape[1]
if fvocab is not None:
logger.info("storing vocabulary in %s" % (fvocab))
with utils.smart_open(fvocab, 'wb') as vout:
for word, vocab in sorted(iteritems(self.vocab), key=lambda item: -item[1].count):
vout.write(utils.to_utf8("%s %s\n" % (word, vocab.count)))
logger.info("storing %sx%s projection weights into %s" % (total_vec, vector_size, fname))
assert (len(self.vocab), vector_size) == self.syn0.shape
with utils.smart_open(fname, 'wb') as fout:
fout.write(utils.to_utf8("%s %s\n" % (total_vec, vector_size)))
# store in sorted order: most frequent words at the top
for word, vocab in sorted(iteritems(self.vocab), key=lambda item: -item[1].count):
row = self.syn0[vocab.index]
if binary:
fout.write(utils.to_utf8(word) + b" " + row.tostring())
else:
fout.write(utils.to_utf8("%s %s\n" % (word, ' '.join("%f" % val for val in row))))
@classmethod
def load_word2vec_format(cls, fname, fvocab=None, binary=False, encoding='utf8', unicode_errors='strict',
limit=None, datatype=REAL):
"""
Load the input-hidden weight matrix from the original C word2vec-tool format.
Note that the information stored in the file is incomplete (the binary tree is missing),
so while you can query for word similarity etc., you cannot continue training
with a model loaded this way.
`binary` is a boolean indicating whether the data is in binary word2vec format.
`norm_only` is a boolean indicating whether to only store normalised word2vec vectors in memory.
Word counts are read from `fvocab` filename, if set (this is the file generated
by `-save-vocab` flag of the original C tool).
If you trained the C model using non-utf8 encoding for words, specify that
encoding in `encoding`.
`unicode_errors`, default 'strict', is a string suitable to be passed as the `errors`
argument to the unicode() (Python 2.x) or str() (Python 3.x) function. If your source
file may include word tokens truncated in the middle of a multibyte unicode character
(as is common from the original word2vec.c tool), 'ignore' or 'replace' may help.
`limit` sets a maximum number of word-vectors to read from the file. The default,
None, means read all.
`datatype` (experimental) can coerce dimensions to a non-default float type (such
as np.float16) to save memory. (Such types may result in much slower bulk operations
or incompatibility with optimized routines.)
"""
counts = None
if fvocab is not None:
logger.info("loading word counts from %s", fvocab)
counts = {}
with utils.smart_open(fvocab) as fin:
for line in fin:
word, count = utils.to_unicode(line).strip().split()
counts[word] = int(count)
logger.info("loading projection weights from %s", fname)
with utils.smart_open(fname) as fin:
header = utils.to_unicode(fin.readline(), encoding=encoding)
vocab_size, vector_size = map(int, header.split()) # throws for invalid file format
if limit:
vocab_size = min(vocab_size, limit)
result = cls()
result.vector_size = vector_size
result.syn0 = zeros((vocab_size, vector_size), dtype=datatype)
def add_word(word, weights):
word_id = len(result.vocab)
if word in result.vocab:
logger.warning("duplicate word '%s' in %s, ignoring all but first", word, fname)
return
if counts is None:
# most common scenario: no vocab file given. just make up some bogus counts, in descending order
result.vocab[word] = Vocab(index=word_id, count=vocab_size - word_id)
elif word in counts:
# use count from the vocab file
result.vocab[word] = Vocab(index=word_id, count=counts[word])
else:
# vocab file given, but word is missing -- set count to None (TODO: or raise?)
logger.warning("vocabulary file is incomplete: '%s' is missing", word)
result.vocab[word] = Vocab(index=word_id, count=None)
result.syn0[word_id] = weights
result.index2word.append(word)
if binary:
binary_len = dtype(REAL).itemsize * vector_size
for line_no in xrange(vocab_size):
# mixed text and binary: read text first, then binary
word = []
while True:
ch = fin.read(1)
if ch == b' ':
break
if ch == b'':
raise EOFError("unexpected end of input; is count incorrect or file otherwise damaged?")
if ch != b'\n': # ignore newlines in front of words (some binary files have them)
word.append(ch)
word = utils.to_unicode(b''.join(word), encoding=encoding, errors=unicode_errors)
weights = fromstring(fin.read(binary_len), dtype=REAL)
add_word(word, weights)
else:
for line_no in xrange(vocab_size):
line = fin.readline()
if line == b'':
raise EOFError("unexpected end of input; is count incorrect or file otherwise damaged?")
parts = utils.to_unicode(line.rstrip(), encoding=encoding, errors=unicode_errors).split(" ")
if len(parts) != vector_size + 1:
raise ValueError("invalid vector on line %s (is this really the text format?)" % (line_no))
word, weights = parts[0], list(map(REAL, parts[1:]))
add_word(word, weights)
if result.syn0.shape[0] != len(result.vocab):
logger.info(
"duplicate words detected, shrinking matrix size from %i to %i",
result.syn0.shape[0], len(result.vocab)
)
result.syn0 = ascontiguousarray(result.syn0[: len(result.vocab)])
assert (len(result.vocab), vector_size) == result.syn0.shape
logger.info("loaded %s matrix from %s" % (result.syn0.shape, fname))
return result
def word_vec(self, word, use_norm=False):
"""
Accept a single word as input.
Returns the word's representations in vector space, as a 1D numpy array.
If `use_norm` is True, returns the normalized word vector.
Example::
>>> trained_model['office']
array([ -1.40128313e-02, ...])
"""
if word in self.vocab:
if use_norm:
return self.syn0norm[self.vocab[word].index]
else:
return self.syn0[self.vocab[word].index]
else:
raise KeyError("word '%s' not in vocabulary" % word)
def most_similar(self, positive=[], negative=[], topn=10, restrict_vocab=None, indexer=None):
"""
Find the top-N most similar words. Positive words contribute positively towards the
similarity, negative words negatively.
This method computes cosine similarity between a simple mean of the projection
weight vectors of the given words and the vectors for each word in the model.
The method corresponds to the `word-analogy` and `distance` scripts in the original
word2vec implementation.
If topn is False, most_similar returns the vector of similarity scores.
`restrict_vocab` is an optional integer which limits the range of vectors which
are searched for most-similar values. For example, restrict_vocab=10000 would
only check the first 10000 word vectors in the vocabulary order. (This may be
meaningful if you've sorted the vocabulary by descending frequency.)
Example::
>>> trained_model.most_similar(positive=['woman', 'king'], negative=['man'])
[('queen', 0.50882536), ...]
"""
self.init_sims()
if isinstance(positive, string_types) and not negative:
# allow calls like most_similar('dog'), as a shorthand for most_similar(['dog'])
positive = [positive]
# add weights for each word, if not already present; default to 1.0 for positive and -1.0 for negative words
positive = [
(word, 1.0) if isinstance(word, string_types + (ndarray,)) else word
for word in positive
]
negative = [
(word, -1.0) if isinstance(word, string_types + (ndarray,)) else word
for word in negative
]
# compute the weighted average of all words
all_words, mean = set(), []
for word, weight in positive + negative:
if isinstance(word, ndarray):
mean.append(weight * word)
else:
mean.append(weight * self.word_vec(word, use_norm=True))
if word in self.vocab:
all_words.add(self.vocab[word].index)
if not mean:
raise ValueError("cannot compute similarity with no input")
mean = matutils.unitvec(array(mean).mean(axis=0)).astype(REAL)
if indexer is not None:
return indexer.most_similar(mean, topn)
limited = self.syn0norm if restrict_vocab is None else self.syn0norm[:restrict_vocab]
dists = dot(limited, mean)
if not topn:
return dists
best = matutils.argsort(dists, topn=topn + len(all_words), reverse=True)
# ignore (don't return) words from the input
result = [(self.index2word[sim], float(dists[sim])) for sim in best if sim not in all_words]
return result[:topn]
def wmdistance(self, document1, document2):
"""
Compute the Word Mover's Distance between two documents. When using this
code, please consider citing the following papers:
.. Ofir Pele and Michael Werman, "A linear time histogram metric for improved SIFT matching".
.. Ofir Pele and Michael Werman, "Fast and robust earth mover's distances".
.. Matt Kusner et al. "From Word Embeddings To Document Distances".
Note that if one of the documents has no words that exist in the
Word2Vec vocab, `float('inf')` (i.e. infinity) will be returned.
This method only works if `pyemd` is installed (can be installed via pip, but requires a C compiler).
Example:
>>> # Train word2vec model.
>>> model = Word2Vec(sentences)
>>> # Some sentences to test.
>>> sentence_obama = 'Obama speaks to the media in Illinois'.lower().split()
>>> sentence_president = 'The president greets the press in Chicago'.lower().split()
>>> # Remove their stopwords.
>>> from nltk.corpus import stopwords
>>> stopwords = nltk.corpus.stopwords.words('english')
>>> sentence_obama = [w for w in sentence_obama if w not in stopwords]
>>> sentence_president = [w for w in sentence_president if w not in stopwords]
>>> # Compute WMD.
>>> distance = model.wmdistance(sentence_obama, sentence_president)
"""
if not PYEMD_EXT:
raise ImportError("Please install pyemd Python package to compute WMD.")
# Remove out-of-vocabulary words.
len_pre_oov1 = len(document1)
len_pre_oov2 = len(document2)
document1 = [token for token in document1 if token in self]
document2 = [token for token in document2 if token in self]
diff1 = len_pre_oov1 - len(document1)
diff2 = len_pre_oov2 - len(document2)
if diff1 > 0 or diff2 > 0:
logger.info('Removed %d and %d OOV words from document 1 and 2 (respectively).',
diff1, diff2)
if len(document1) == 0 or len(document2) == 0:
logger.info('At least one of the documents had no words that were '
'in the vocabulary. Aborting (returning inf).')
return float('inf')
dictionary = Dictionary(documents=[document1, document2])
vocab_len = len(dictionary)
if vocab_len == 1:
# Both documents are composed by a single unique token
return 0.0
# Sets for faster look-up.
docset1 = set(document1)
docset2 = set(document2)
# Compute distance matrix.
distance_matrix = zeros((vocab_len, vocab_len), dtype=double)
for i, t1 in dictionary.items():
for j, t2 in dictionary.items():
if t1 not in docset1 or t2 not in docset2:
continue
# Compute Euclidean distance between word vectors.
distance_matrix[i, j] = sqrt(np_sum((self[t1] - self[t2])**2))
if np_sum(distance_matrix) == 0.0:
# `emd` gets stuck if the distance matrix contains only zeros.
logger.info('The distance matrix is all zeros. Aborting (returning inf).')
return float('inf')
def nbow(document):
d = zeros(vocab_len, dtype=double)
nbow = dictionary.doc2bow(document) # Word frequencies.
doc_len = len(document)
for idx, freq in nbow:
d[idx] = freq / float(doc_len) # Normalized word frequencies.
return d
# Compute nBOW representation of documents.
d1 = nbow(document1)
d2 = nbow(document2)
# Compute WMD.
return emd(d1, d2, distance_matrix)
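# Descriptive sketch of the two steps above (illustrative only, using the
# sentences from the docstring example): for document1 = ['obama', 'speaks',
# 'media', 'illinois'] the nBOW vector d1 assigns weight 1/4 = 0.25 to each of
# its four tokens and 0 to tokens that only occur in document2 (and vice versa
# for d2). `emd` then solves the transportation problem between d1 and d2 over
# `distance_matrix`, so the returned WMD is the minimum total cost of moving
# document1's word mass onto document2's words, with per-unit costs given by
# the Euclidean distances between the word vectors.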
def most_similar_cosmul(self, positive=[], negative=[], topn=10):
"""
Find the top-N most similar words, using the multiplicative combination objective
proposed by Omer Levy and Yoav Goldberg in [4]_. Positive words still contribute
positively towards the similarity, negative words negatively, but with less
susceptibility to one large distance dominating the calculation.
In the common analogy-solving case of two positive and one negative example,
this method is equivalent to the "3CosMul" objective (equation (4)) of Levy and Goldberg.
Additional positive or negative examples contribute to the numerator or denominator,
respectively – a potentially sensible but untested extension of the method. (With
a single positive example, rankings will be the same as in the default most_similar.)
Example::
>>> trained_model.most_similar_cosmul(positive=['baghdad', 'england'], negative=['london'])
[(u'iraq', 0.8488819003105164), ...]
.. [4] Omer Levy and Yoav Goldberg. Linguistic Regularities in Sparse and Explicit Word Representations, 2014.
"""
self.init_sims()
if isinstance(positive, string_types) and not negative:
# allow calls like most_similar_cosmul('dog'), as a shorthand for most_similar_cosmul(['dog'])
positive = [positive]
all_words = {self.vocab[word].index for word in positive + negative
if not isinstance(word, ndarray) and word in self.vocab}
positive = [
self.word_vec(word, use_norm=True) if isinstance(word, string_types) else word
for word in positive
]
negative = [
self.word_vec(word, use_norm=True) if isinstance(word, string_types) else word
for word in negative
]
if not positive:
raise ValueError("cannot compute similarity with no input")
# equation (4) of Levy & Goldberg "Linguistic Regularities...",
# with distances shifted to [0,1] per footnote (7)
pos_dists = [((1 + dot(self.syn0norm, term)) / 2) for term in positive]
neg_dists = [((1 + dot(self.syn0norm, term)) / 2) for term in negative]
dists = prod(pos_dists, axis=0) / (prod(neg_dists, axis=0) + 0.000001)
if not topn:
return dists
best = matutils.argsort(dists, topn=topn + len(all_words), reverse=True)
# ignore (don't return) words from the input
result = [(self.index2word[sim], float(dists[sim])) for sim in best if sim not in all_words]
return result[:topn]
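# Descriptive note on the score computed above: with cosine similarities
# shifted into [0, 1], each candidate word d is ranked by
#   prod_b ((1 + cos(d, b)) / 2) / (prod_a ((1 + cos(d, a)) / 2) + 0.000001)
# over positive words b and negative words a, i.e. Levy & Goldberg's 3CosMul
# objective (equation (4)) with a small epsilon guarding against division by zero.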
def similar_by_word(self, word, topn=10, restrict_vocab=None):
"""
Find the top-N most similar words.
If topn is False, similar_by_word returns the vector of similarity scores.
`restrict_vocab` is an optional integer which limits the range of vectors which
are searched for most-similar values. For example, restrict_vocab=10000 would
only check the first 10000 word vectors in the vocabulary order. (This may be
meaningful if you've sorted the vocabulary by descending frequency.)
Example::
>>> trained_model.similar_by_word('graph')
[('user', 0.9999163150787354), ...]
"""
return self.most_similar(positive=[word], topn=topn, restrict_vocab=restrict_vocab)
def similar_by_vector(self, vector, topn=10, restrict_vocab=None):
"""
Find the top-N most similar words by vector.
If topn is False, similar_by_vector returns the vector of similarity scores.
`restrict_vocab` is an optional integer which limits the range of vectors which
are searched for most-similar values. For example, restrict_vocab=10000 would
only check the first 10000 word vectors in the vocabulary order. (This may be
meaningful if you've sorted the vocabulary by descending frequency.)
Example::
>>> trained_model.similar_by_vector([1,2])
[('survey', 0.9942699074745178), ...]
"""
return self.most_similar(positive=[vector], topn=topn, restrict_vocab=restrict_vocab)
def doesnt_match(self, words):
"""
Which word from the given list doesn't go with the others?
Example::
>>> trained_model.doesnt_match("breakfast cereal dinner lunch".split())
'cereal'
"""
self.init_sims()
used_words = [word for word in words if word in self]
if len(used_words) != len(words):
ignored_words = set(words) - set(used_words)
logger.warning("vectors for words %s are not present in the model, ignoring these words", ignored_words)
if not used_words:
raise ValueError("cannot select a word from an empty list")
vectors = vstack([self.word_vec(word, use_norm=True) for word in used_words]).astype(REAL)
mean = matutils.unitvec(vectors.mean(axis=0)).astype(REAL)
dists = dot(vectors, mean)
return sorted(zip(dists, used_words))[0][1]
def __getitem__(self, words):
"""
Accept a single word or a list of words as input.
If a single word: returns the word's representations in vector space, as
a 1D numpy array.
Multiple words: return the words' representations in vector space, as a
2d numpy array: #words x #vector_size. Matrix rows are in the same order
as in input.
Example::
>>> trained_model['office']
array([ -1.40128313e-02, ...])
>>> trained_model[['office', 'products']]
array([ -1.40128313e-02, ...]
[ -1.70425311e-03, ...]
...)
"""
if isinstance(words, string_types):
# allow calls like trained_model['office'], as a shorthand for trained_model[['office']]
return self.word_vec(words)
return vstack([self.word_vec(word) for word in words])
def __contains__(self, word):
return word in self.vocab
def similarity(self, w1, w2):
"""
Compute cosine similarity between two words.
Example::
>>> trained_model.similarity('woman', 'man')
0.73723527
>>> trained_model.similarity('woman', 'woman')
1.0
"""
return dot(matutils.unitvec(self[w1]), matutils.unitvec(self[w2]))
def n_similarity(self, ws1, ws2):
"""
Compute cosine similarity between two sets of words.
Example::
>>> trained_model.n_similarity(['sushi', 'shop'], ['japanese', 'restaurant'])
0.61540466561049689
>>> trained_model.n_similarity(['restaurant', 'japanese'], ['japanese', 'restaurant'])
1.0000000000000004
>>> trained_model.n_similarity(['sushi'], ['restaurant']) == trained_model.similarity('sushi', 'restaurant')
True
"""
if not (len(ws1) and len(ws2)):
raise ZeroDivisionError('At least one of the passed lists is empty.')
v1 = [self[word] for word in ws1]
v2 = [self[word] for word in ws2]
return dot(matutils.unitvec(array(v1).mean(axis=0)),
matutils.unitvec(array(v2).mean(axis=0)))
@staticmethod
def log_accuracy(section):
correct, incorrect = len(section['correct']), len(section['incorrect'])
if correct + incorrect > 0:
logger.info("%s: %.1f%% (%i/%i)" %
(section['section'], 100.0 * correct / (correct + incorrect),
correct, correct + incorrect))
def accuracy(self, questions, restrict_vocab=30000, most_similar=most_similar, case_insensitive=True):
"""
Compute accuracy of the model. `questions` is a filename where lines are
4-tuples of words, split into sections by ": SECTION NAME" lines.
See questions-words.txt in https://storage.googleapis.com/google-code-archive-source/v2/code.google.com/word2vec/source-archive.zip for an example.
The accuracy is reported (=printed to log and returned as a list) for each
section separately, plus there's one aggregate summary at the end.
Use `restrict_vocab` to ignore all questions containing a word not in the first `restrict_vocab`
words (default 30,000). This may be meaningful if you've sorted the vocabulary by descending frequency.
In case `case_insensitive` is True, the first `restrict_vocab` words are taken first, and then
case normalization is performed.
Use `case_insensitive` to convert all words in questions and vocab to their uppercase form before
evaluating the accuracy (default True). Useful in case of case-mismatch between training tokens
and question words. In case of multiple case variants of a single word, the vector for the first
occurrence (also the most frequent if vocabulary is sorted) is taken.
This method corresponds to the `compute-accuracy` script of the original C word2vec.
"""
ok_vocab = [(w, self.vocab[w]) for w in self.index2word[:restrict_vocab]]
ok_vocab = dict((w.upper(), v) for w, v in reversed(ok_vocab)) if case_insensitive else dict(ok_vocab)
sections, section = [], None
for line_no, line in enumerate(utils.smart_open(questions)):
# TODO: use level3 BLAS (=evaluate multiple questions at once), for speed
line = utils.to_unicode(line)
if line.startswith(': '):
# a new section starts => store the old section
if section:
sections.append(section)
self.log_accuracy(section)
section = {'section': line.lstrip(': ').strip(), 'correct': [], 'incorrect': []}
else:
if not section:
raise ValueError("missing section header before line #%i in %s" % (line_no, questions))
try:
if case_insensitive:
a, b, c, expected = [word.upper() for word in line.split()]
else:
a, b, c, expected = [word for word in line.split()]
except ValueError:
logger.info("skipping invalid line #%i in %s" % (line_no, questions))
continue
if a not in ok_vocab or b not in ok_vocab or c not in ok_vocab or expected not in ok_vocab:
logger.debug("skipping line #%i with OOV words: %s" % (line_no, line.strip()))
continue
original_vocab = self.vocab
self.vocab = ok_vocab
ignore = set([a, b, c]) # input words to be ignored
predicted = None
# find the most likely prediction, ignoring OOV words and input words
sims = most_similar(self, positive=[b, c], negative=[a], topn=False, restrict_vocab=restrict_vocab)
self.vocab = original_vocab
for index in matutils.argsort(sims, reverse=True):
predicted = self.index2word[index].upper() if case_insensitive else self.index2word[index]
if predicted in ok_vocab and predicted not in ignore:
if predicted != expected:
logger.debug("%s: expected %s, predicted %s", line.strip(), expected, predicted)
break
if predicted == expected:
section['correct'].append((a, b, c, expected))
else:
section['incorrect'].append((a, b, c, expected))
if section:
# store the last section, too
sections.append(section)
self.log_accuracy(section)
total = {
'section': 'total',
'correct': sum((s['correct'] for s in sections), []),
'incorrect': sum((s['incorrect'] for s in sections), []),
}
self.log_accuracy(total)
sections.append(total)
return sections
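# Illustrative sketch of the `questions` file layout described in the
# docstring (the concrete rows are only an example in the style of
# questions-words.txt, not taken from this file):
#
#   : capital-common-countries
#   Athens Greece Baghdad Iraq
#   Athens Greece Bangkok Thailand
#   : currency
#   Algeria dinar Japan yen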
@staticmethod
def log_evaluate_word_pairs(pearson, spearman, oov, pairs):
logger.info('Pearson correlation coefficient against %s: %.4f', pairs, pearson[0])
logger.info('Spearman rank-order correlation coefficient against %s: %.4f', pairs, spearman[0])
logger.info('Pairs with unknown words ratio: %.1f%%', oov)
def evaluate_word_pairs(self, pairs, delimiter='\t', restrict_vocab=300000, case_insensitive=True,
dummy4unknown=False):
"""
Compute correlation of the model with human similarity judgments. `pairs` is a filename of a dataset where
lines are 3-tuples, each consisting of a word pair and a similarity value, separated by `delimiter`.
An example dataset is included in Gensim (test/test_data/wordsim353.tsv). More datasets can be found at
http://technion.ac.il/~ira.leviant/MultilingualVSMdata.html or https://www.cl.cam.ac.uk/~fh295/simlex.html.
The model is evaluated using Pearson correlation coefficient and Spearman rank-order correlation coefficient
between the similarities from the dataset and the similarities produced by the model itself.
The results are printed to log and returned as a triple (pearson, spearman, ratio of pairs with unknown words).
Use `restrict_vocab` to ignore all word pairs containing a word not in the first `restrict_vocab`
words (default 300,000). This may be meaningful if you've sorted the vocabulary by descending frequency.
If `case_insensitive` is True, the first `restrict_vocab` words are taken, and then case normalization
is performed.
Use `case_insensitive` to convert all words in the pairs and vocab to their uppercase form before
evaluating the model (default True). Useful when you expect case-mismatch between training tokens
and words pairs in the dataset. If there are multiple case variants of a single word, the vector for the first
occurrence (also the most frequent if vocabulary is sorted) is taken.
Use `dummy4unknown=True` to produce zero-valued similarities for pairs with out-of-vocabulary words.
Otherwise (default False), these pairs are skipped entirely.
"""
ok_vocab = [(w, self.vocab[w]) for w in self.index2word[:restrict_vocab]]
ok_vocab = dict((w.upper(), v) for w, v in reversed(ok_vocab)) if case_insensitive else dict(ok_vocab)
similarity_gold = []
similarity_model = []
oov = 0
original_vocab = self.vocab
self.vocab = ok_vocab
for line_no, line in enumerate(utils.smart_open(pairs)):
line = utils.to_unicode(line)
if line.startswith('#'):
# May be a comment
continue
else:
try:
if case_insensitive:
a, b, sim = [word.upper() for word in line.split(delimiter)]
else:
a, b, sim = [word for word in line.split(delimiter)]
sim = float(sim)
except ValueError:
logger.info('skipping invalid line #%d in %s', line_no, pairs)
continue
if a not in ok_vocab or b not in ok_vocab:
oov += 1
if dummy4unknown:
similarity_model.append(0.0)
similarity_gold.append(sim)
continue
else:
logger.debug('skipping line #%d with OOV words: %s', line_no, line.strip())
continue
similarity_gold.append(sim) # Similarity from the dataset
similarity_model.append(self.similarity(a, b)) # Similarity from the model
self.vocab = original_vocab
spearman = stats.spearmanr(similarity_gold, similarity_model)
pearson = stats.pearsonr(similarity_gold, similarity_model)
oov_ratio = float(oov) / (len(similarity_gold) + oov) * 100
logger.debug(
'Pearson correlation coefficient against %s: %f with p-value %f',
pairs, pearson[0], pearson[1]
)
logger.debug(
'Spearman rank-order correlation coefficient against %s: %f with p-value %f',
pairs, spearman[0], spearman[1]
)
logger.debug('Pairs with unknown words: %d' % oov)
self.log_evaluate_word_pairs(pearson, spearman, oov_ratio, pairs)
return pearson, spearman, oov_ratio
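# Illustrative sketch of the `pairs` file layout described in the docstring
# (word pair plus a human similarity judgment, separated by `delimiter`, tab
# by default; the scores shown are only indicative, in the style of
# wordsim353.tsv):
#
#   tiger<TAB>cat<TAB>7.35
#   book<TAB>paper<TAB>7.46
#   # lines starting with '#' are skipped as comments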
def init_sims(self, replace=False):
"""
Precompute L2-normalized vectors.
If `replace` is set, forget the original vectors and only keep the normalized
ones = saves lots of memory!
Note that you **cannot continue training** after doing a replace. The model becomes
effectively read-only = you can call `most_similar`, `similarity` etc., but not `train`.
"""
if getattr(self, 'syn0norm', None) is None or replace:
logger.info("precomputing L2-norms of word weight vectors")
if replace:
for i in xrange(self.syn0.shape[0]):
self.syn0[i, :] /= sqrt((self.syn0[i, :] ** 2).sum(-1))
self.syn0norm = self.syn0
else:
self.syn0norm = (self.syn0 / sqrt((self.syn0 ** 2).sum(-1))[..., newaxis]).astype(REAL)
def get_embedding_layer(self, train_embeddings=False):
"""
Return a Keras 'Embedding' layer with weights set as the Word2Vec model's learned word embeddings
"""
if not KERAS_INSTALLED:
raise ImportError("Please install Keras to use this function")
weights = self.syn0
layer = Embedding(input_dim=weights.shape[0], output_dim=weights.shape[1], weights=[weights], trainable=train_embeddings) # No extra mem usage here as `Embedding` layer doesn't create any new matrix for weights
return layer
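# A minimal usage sketch for the layer returned above (illustrative only;
# assumes Keras is installed and `kv` is a trained KeyedVectors instance):
#
#   >>> from keras.models import Sequential
#   >>> net = Sequential()
#   >>> net.add(kv.get_embedding_layer())
#   >>> # feeding word indices (kv.vocab[word].index) through `net` now yields
#   >>> # the pretrained vectors; pass train_embeddings=True to allow them to
#   >>> # be fine-tuned during training.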
|
ELind77/gensim
|
gensim/models/keyedvectors.py
|
Python
|
lgpl-2.1
| 37,089
|
# -*- coding: utf-8 -*-
from shoop.apps import AppConfig
class PugConfig(AppConfig):
name = "shoop_pugme"
provides = {
"admin_module": [
"shoop_pugme.admin_module:PugAdminModule"
]
}
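# Usage note (an assumption about the surrounding Shoop machinery, not stated
# in this file): the "provides" mapping is read by Shoop's provides registry,
# so listing "shoop_pugme.admin_module:PugAdminModule" under "admin_module" is
# what makes the module show up in the Shoop admin once the app is installed.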
|
akx/shoop-pugme
|
shoop_pugme/apps.py
|
Python
|
mit
| 227
|
from distutils import spawn
import mock
import pytest
import requests
from pyepm import config as c
config = c.get_default_config()
has_solc = spawn.find_executable("solc")
solc = pytest.mark.skipif(not has_solc, reason="solc compiler not found")
COW_ADDRESS = '0xcd2a3d9f938e13cd947ec05abc7fe734df8dd826'
def is_hex(s):
try:
int(s, 16)
return True
except ValueError:
return False
def mock_json_response(status_code=200, error=None, result=None):
m = mock.MagicMock(spec=requests.Response)
m.status_code = status_code
base_json_response = {u'jsonrpc': u'2.0', u'id': u'c7c427a5-b6e9-4dbf-b218-a6f9d4f09246'}
json_response = dict(base_json_response)
if result:
json_response[u'result'] = result
elif error:
json_response[u'error'] = error
if status_code >= 400:
m.reason = 'Error Reason'
m.json.return_value = json_response
return m
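# Illustrative usage sketch (only `mock_json_response` comes from this module;
# the patched target and the code under test are placeholders): the helper can
# stand in for a JSON-RPC reply when a test patches the HTTP layer, e.g.
#
#   with mock.patch('requests.post',
#                   return_value=mock_json_response(result='0x01')) as post:
#       # invoke the pyepm code under test that performs the RPC call here,
#       # then assert on the parsed result and on post.call_args as needed.
#       pass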
|
etherex/pyepm
|
test/helpers.py
|
Python
|
mit
| 931
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
|
joequant/Fudge-Python
|
fudgemsg/tests/func_tests/__init__.py
|
Python
|
apache-2.0
| 782
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Views for managing backups.
"""
import operator
from django.core.urlresolvers import reverse
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon import forms
from horizon import messages
from openstack_dashboard import api
from openstack_dashboard.dashboards.project.containers \
import utils as containers_utils
class CreateBackupForm(forms.SelfHandlingForm):
name = forms.CharField(max_length=255, label=_("Backup Name"))
description = forms.CharField(widget=forms.Textarea(attrs={'rows': 4}),
label=_("Description"),
required=False)
container_name = forms.CharField(
max_length=255,
label=_("Container Name"),
validators=[containers_utils.no_slash_validator],
required=False)
volume_id = forms.CharField(widget=forms.HiddenInput())
def handle(self, request, data):
try:
volume = api.cinder.volume_get(request, data['volume_id'])
force = False
if volume.status == 'in-use':
force = True
backup = api.cinder.volume_backup_create(request,
data['volume_id'],
data['container_name'],
data['name'],
data['description'],
force=force)
message = _('Creating volume backup "%s"') % data['name']
messages.info(request, message)
return backup
except Exception:
redirect = reverse('horizon:project:volumes:index')
exceptions.handle(request,
_('Unable to create volume backup.'),
redirect=redirect)
class RestoreBackupForm(forms.SelfHandlingForm):
volume_id = forms.ThemableChoiceField(label=_('Select Volume'),
required=False)
backup_id = forms.CharField(widget=forms.HiddenInput())
backup_name = forms.CharField(widget=forms.HiddenInput())
def __init__(self, request, *args, **kwargs):
super(RestoreBackupForm, self).__init__(request, *args, **kwargs)
try:
volumes = api.cinder.volume_list(request)
except Exception:
msg = _('Unable to lookup volume or backup information.')
redirect = reverse('horizon:project:backups:index')
exceptions.handle(request, msg, redirect=redirect)
raise exceptions.Http302(redirect)
volumes.sort(key=operator.attrgetter('name', 'created_at'))
choices = [('', _('Create a New Volume'))]
choices.extend((volume.id, volume.name) for volume in volumes)
self.fields['volume_id'].choices = choices
def handle(self, request, data):
backup_id = data['backup_id']
backup_name = data['backup_name'] or None
volume_id = data['volume_id'] or None
try:
restore = api.cinder.volume_backup_restore(request,
backup_id,
volume_id)
# Needed for cases when a new volume is created.
volume_id = restore.volume_id
message = _('Request for restoring backup %(backup_name)s '
'to volume with id: %(volume_id)s '
'has been submitted.')
messages.info(request, message % {'backup_name': backup_name,
'volume_id': volume_id})
return restore
except Exception:
msg = _('Unable to restore backup.')
redirect = reverse('horizon:project:backups:index')
exceptions.handle(request, msg, redirect=redirect)
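# Usage note (a sketch of the surrounding Horizon plumbing, stated as an
# assumption rather than taken from this file): both classes are
# SelfHandlingForms, so the corresponding ModalFormView subclasses call
# form.handle(request, form.cleaned_data) once validation succeeds; handle()
# returns the created backup or restore object on success, while failures go
# through exceptions.handle(), which flashes an error and redirects.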
|
BiznetGIO/horizon
|
openstack_dashboard/dashboards/project/backups/forms.py
|
Python
|
apache-2.0
| 4,530
|