repo_name stringlengths 5 100 | path stringlengths 4 231 | language stringclasses 1 value | license stringclasses 15 values | size int64 6 947k | score float64 0 0.34 | prefix stringlengths 0 8.16k | middle stringlengths 3 512 | suffix stringlengths 0 8.17k |
|---|---|---|---|---|---|---|---|---|
guineawheek/Dozer | dozer/cogs/teams.py | Python | gpl-3.0 | 6,850 | 0.00365 | """Commands for making and seeing robotics team associations."""
import collections
import discord
from discord.ext.commands import BadArgument, guild_only
from ._utils import *
from ..asyncdb.orm import orm
from ..asyncdb import psqlt
# alter table team_numbers alter column team_number type text
class Teams(Cog):
"""Commands for making and seeing robotics team associations."""
@classmethod
def validate(cls, team_type, team_number):
"""Validate team input to be correct, and raise BadArgument if it's not."""
if not team_number.isalnum() or not team_number.isascii():
raise BadArgument("Team numbers must be alphanumeric!")
z = team_type.casefold()
if z not in ("fll", "ftc", 'frc', 'vrc', 'vex', 'vexu'):
raise BadArgument("Unrecognized team type " + team_type[:32])
if z in ("fll", "ftc", 'frc'):
if not team_number.isdigit():
raise BadArgument("FIRST team numbers must be numeric!")
if z == 'vexu':
if len(team_number) > 6:
raise BadArgument("Invalid VexU team number specified!")
if z == 'vex':
z = 'vrc'
if z == 'vrc':
if not (len(team_number) <= 2 and team_number[:-1].isdigit() and team_number[1].isalpha()):
raise BadArgument("Invalid Vex team number specified!")
return z, team_number.upper()
@command()
async def setteam(self, ctx, team_type, team_number):
"""Sets an association with your team in the database."""
team_type, team_number = self.validate(team_type, team_number)
dbcheck = await TeamNumbers.select_one(user_id=ctx.author.id, team_number=team_number, team_type=team_type)
if dbcheck is None:
dbtransaction = TeamNumbers(user_id=ctx.author.id, team_number=team_number, team_type=team_type)
await dbtransaction.insert()
await ctx.send("Team number set! Note that unlike FRC Dozer, this will not affect your nickname "
"when joining other servers.")
else:
raise BadArgument("You are already associated with that team!")
setteam.example_usage = """
`{prefix}setteam type team_number` - Creates an association in the database with a specified team
"""
@command()
async def removeteam(self, ctx, team_type, team_number):
"""Removes an association with a team in the database."""
team_type, team_number = self.validate(team_type, team_number)
results = await TeamNumbers.select_one(user_id=ctx.author.id, team_number=team_number, team_type=team_type)
if results is not None:
await results.delete()
await ctx.send("Removed association with {} team {}".format(team_type, team_number))
if results is None:
await ctx.send("Couldn't find any associations with that team!")
removeteam.example_usage = """
`{prefix}removeteam type team_number` - Removes your associations with a specified team
"""
@command()
@guild_only()
async def teamsfor(self, ctx, user: discord.Member = None):
"""Allows you to see the teams for the mentioned user. If no user is mentioned, your teams are displayed."""
if user is None:
user = ctx.author
teams = await TeamNumbers.select(user_id=user.id)
if not teams:
raise BadArgument("Couldn't find any team associations for that user!")
else:
e = discord.Embed(type='rich')
e.title = 'Teams for {}'.format(user.display_name)
e.description = "Teams: \n"
for i in teams:
e.description = "{} {} Team {} \n".format(e.description, i.team_type.upper(), i.team_number)
await ctx.send(embed=e)
teamsfor.example_usage = """
`{prefix}teamsfor member` - Returns all team associations with the mentioned user. Assumes caller if blank.
"""
@group(invoke_without_command=True)
@guild_only()
async def onteam(self, ctx, team_type, team_number):
"""Allows you to see who has associated themselves with a particular team."""
team_type, team_number = self.validate(team_type, team_number)
users = await TeamNumbers.select(team_number=team_number, team_type=team_type)
if not users:
await ctx.send("Nobody on that team found!")
else:
e = discord.Embed(type='rich')
e.title = 'Users on team {}'.format(team_number)
segments = ["Users: \n"]
for i in users:
user = ctx.guild.get_member(i.user_id)
if user is not None:
line = f"{user.display_name} {user.mention}\n"
if len(segments[-1]) + len(line) >= 1024:
segments.append(line)
else:
segments[-1] += line
#e.description = "{}{} {} \n".format(e.description, user.display_name, user.mention)
e.descriptio | n = segments[0]
for i, seg in enumerate(segments[1:], 1):
e.add_field(name=("more " * i).capitalize() + "users", value=seg)
await ctx.send(embed=e)
onteam.example_usage = """
`{prefix}onteam type team_number` - Returns a list of users associated with a given team type and number
"""
@on | team.command()
@guild_only()
async def top(self, ctx):
"""Show the top 10 teams by number of members in this guild."""
# adapted from the FRC Dozer's equivalent.
query = f"""SELECT team_type, team_number, count(*)
FROM {TeamNumbers.table_name()}
WHERE user_id = ANY($1) --first param: list of user IDs
GROUP BY team_type, team_number
ORDER BY count DESC, team_type, team_number
LIMIT 10"""
async with orm.acquire() as conn:
counts = await conn.fetch(query, [member.id for member in ctx.guild.members])
embed = discord.Embed(title=f'Top teams in {ctx.guild.name}', color=discord.Color.blue())
embed.description = '\n'.join(
f'{ent["team_type"].upper()} team {ent["team_number"]} '
f'({ent["count"]} member{"s" if ent["count"] > 1 else ""})' for ent in counts)
await ctx.send(embed=embed)
top.example_usage = """
`{prefix}onteam top` - List the 10 teams with the most members in this guild
"""
class TeamNumbers(orm.Model):
"""DB object for tracking team associations."""
__tablename__ = 'team_numbers'
__primary_key__ = ("user_id", "team_number", "team_type")
user_id: psqlt.bigint
team_number: psqlt.text
team_type: psqlt.text
def setup(bot):
"""Adds this cog to the main bot"""
bot.add_cog(Teams(bot))
|
BrysonMcI/Skitter | API-Gateway/python/wsgi.py | Python | mit | 105 | 0 | """ simple wsgi | script for the gateway """
from app import APP
if __name__ == "__main__":
APP.run() | |
eevee/cocos2d-mirror | cocos/particle.py | Python | bsd-3-clause | 17,917 | 0.01239 | # ----------------------------------------------------------------------------
# cocos2d
# Copyright (c) 2008-2011 Daniel Moisset, Ricardo Quesada, Rayentray Tappa,
# Lucio Torre
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of cocos2d nor the names of its
# contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# | LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (I | NCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------------
'''Particle system engine'''
__docformat__ = 'restructuredtext'
import random
import pyglet
from pyglet.gl import *
import math
import copy
import numpy
import ctypes
from cocosnode import CocosNode
from euclid import Point2
# for dev and diagnostic, None means real automatic, True / False means
# return this value inconditionally
forced_point_sprites = None
def point_sprites_available():
"""returns a bool telling if point sprites are available
For development and diagonostic cocos.particle.forced_point_sprites could
be set to force the desired return value
"""
if forced_point_sprites is not None:
return forced_point_sprites
have_point_sprites = True
try:
glEnable(GL_POINT_SPRITE)
glDisable(GL_POINT_SPRITE)
except:
have_point_sprites = False
return have_point_sprites
class ExceptionNoEmptyParticle(Exception):
"""particle system have no room for another particle"""
pass
rand = lambda: random.random() * 2 - 1
# PointerToNumpy by Gary Herron
# from pyglet's user list
def PointerToNumpy(a, ptype=ctypes.c_float):
a = numpy.ascontiguousarray(a) # Probably a NO-OP, but perhaps not
return a.ctypes.data_as(ctypes.POINTER(ptype)) # Ugly and undocumented!
class Color( object ):
def __init__( self, r,g,b,a ):
self.r = r
self.g = g
self.b = b
self.a = a
def to_array(self):
return self.r, self.g, self.b, self.a
class ParticleSystem( CocosNode ):
"""
Base class for many flawors of cocos particle systems
The most easy way to customize is subclass and redefine some class members;
see particle_systems by example.
If you want to use a custom texture remember it should hold only one image,
so don't use texture = pyglet.resource.image(...) (it would produce an atlas,
ie multiple images in a texture); using texture = pyglet.image.load(...) is fine
"""
# type of particle
POSITION_FREE, POSITION_GROUPED = range(2)
#: is the particle system active ?
active = True
#: duration in seconds of the system. -1 is infinity
duration = 0
#: time elapsed since the start of the system (in seconds)
elapsed = 0
#: Gravity of the particles
gravity = Point2(0.0, 0.0)
#: position is from "superclass" CocosNode
#: Position variance
pos_var = Point2(0.0, 0.0)
#: The angle (direction) of the particles measured in degrees
angle = 0.0
#: Angle variance measured in degrees;
angle_var = 0.0
#: The speed the particles will have.
speed = 0.0
#: The speed variance
speed_var = 0.0
#: Tangential acceleration
tangential_accel = 0.0
#: Tangential acceleration variance
tangential_accel_var = 0.0
#: Radial acceleration
radial_accel = 0.0
#: Radial acceleration variance
radial_accel_var = 0.0
#: Size of the particles
size = 0.0
#: Size variance
size_var = 0.0
#: How many seconds will the particle live
life = 0
#: Life variance
life_var = 0
#: Start color of the particles
start_color = Color(0.0,0.0,0.0,0.0)
#: Start color variance
start_color_var = Color(0.0,0.0,0.0,0.0)
#: End color of the particles
end_color = Color(0.0,0.0,0.0,0.0)
#: End color variance
end_color_var = Color(0.0,0.0,0.0,0.0)
#: Maximum particles
total_particles = 0
#:texture for the particles
pic = pyglet.image.load('fire.png', file=pyglet.resource.file('fire.png'))
texture = pic.get_texture()
#:blend additive
blend_additive = False
#:color modulate
color_modulate = True
# position type
position_type = POSITION_GROUPED
def __init__(self, fallback=None):
"""
fallback can be None, True, False; default is None
False: use point sprites, faster, not always availabel
True: use quads, slower but always available)
None: autodetect, use the faster available
"""
super(ParticleSystem,self).__init__()
# particles
# position x 2
self.particle_pos = numpy.zeros( (self.total_particles, 2), numpy.float32 )
# direction x 2
self.particle_dir = numpy.zeros( (self.total_particles, 2), numpy.float32 )
# rad accel x 1
self.particle_rad = numpy.zeros( (self.total_particles, 1), numpy.float32 )
# tan accel x 1
self.particle_tan = numpy.zeros( (self.total_particles, 1), numpy.float32 )
# gravity x 2
self.particle_grav = numpy.zeros( (self.total_particles, 2), numpy.float32 )
# colors x 4
self.particle_color = numpy.zeros( (self.total_particles, 4), numpy.float32 )
# delta colors x 4
self.particle_delta_color = numpy.zeros( (self.total_particles, 4), numpy.float32 )
# life x 1
self.particle_life = numpy.zeros( (self.total_particles, 1), numpy.float32 )
self.particle_life.fill(-1.0)
# size x 1
self.particle_size = numpy.zeros( (self.total_particles, 1), numpy.float32 )
# start position
self.start_pos = numpy.zeros( (self.total_particles, 2), numpy.float32 )
#: How many particles can be emitted per second
self.emit_counter = 0
#: Count of particles
self.particle_count = 0
#: auto remove when particle finishes
self.auto_remove_on_finish = False
#: rendering mode; True is quads, False is point_sprites, None is auto fallback
if fallback is None:
fallback = not point_sprites_available()
self.fallback = fallback
if fallback:
self._fallback_init()
self.draw = self.draw_fallback
self.schedule( self.step )
def on_enter( self ):
super( ParticleSystem, self).on_enter()
#self.add_particle()
def draw( self ):
glPushMatrix()
self.transform()
# color preserve - at least nvidia 6150SE needs that
glPushAttrib(GL_CURRENT_BIT)
glPointSize( self.size )
glEnable(GL_TEXTURE_2D)
glBindTexture(GL_TEXTURE_2D, self.texture.id )
glEnable(GL_POINT_SPRITE)
glTexEnvi( GL_POINT_SPRITE, GL_COORD_REPLACE, GL_TRUE )
glEnableClientState(GL_VERTEX_ARRAY)
vertex_ptr = PointerToNumpy( s |
newvem/pytz | pytz/zoneinfo/Africa/Lubumbashi.py | Python | mit | 385 | 0.01039 | '''tzi | nfo timezone information for Africa/Lubumbashi.'''
from pytz.tzinfo import StaticTzInfo
from pytz.tzinfo import memorized_timedelta as timedelta
class Lubumbashi(StaticTzInfo):
'''Africa/Lubumbashi timezone definition. See datetime.tzinfo for details'''
zone = 'Africa/Lubumbashi'
_utcoffset = timedelta(seconds=7200)
_tzname = 'CAT'
Lubum | bashi = Lubumbashi()
|
robehickman/simple-http-file-sync | shttpfs3/storage/versioned_storage.py | Python | mit | 21,979 | 0.011102 | import json, hashlib, os, os.path, shutil
from collections import defaultdict
from datetime import datetime
from typing import List, Dict, Any, cast
from typing_extensions import TypedDict
import shttpfs3.common as sfs
from shttpfs3.storage.server_db import get_server_db_instance_for_thread
import pprint
#+++++++++++++++++++++++++++++++++
class indexObject(TypedDict):
type: str
#+++++++++++++++++++++++++++++++++
class indexObjectTree(indexObject):
files: List[Dict[str, str]]
dirs: List[Dict[str, str]]
#+++++++++++++++++++++++++++++++++
class indexObjectCommit(indexObject):
parent: str
utc_date_time: int
commit_by: str
commit_message: str
tree_root: Any
changes: Any
#+++++++++++++++++++++++++++++++++
#+++++++++++++++++++++++++++++++++
class versioned_storage:
def __init__(self, base_path: str):
self.base_path = base_path
sfs.make_dirs_if_dont_exist(sfs.cpjoin(base_path, 'index') + '/')
sfs.make_dirs_if_dont_exist(sfs.cpjoin(base_path, 'files') + '/')
#===============================================================================
def write_index_object(self, object_type: str, contents: Dict[str, Any]) -> str:
new_object: indexObject = {'type' : object_type}
new_object.update(contents) #type: ignore
serialised = json.dumps(new_object)
object_hash = hashlib.sha256(bytes(serialised, encoding='utf8')).hexdigest()
target_base = sfs.cpjoin(self.base_path, 'index',object_hash[:2])
# Does an object with this hash already exist?
if os.path.isfile(sfs.cpjoin(target_base, object_hash[2:])):
return object_hash
# log items which do not exist for garbage collection
sdb = get_server_db_instance_for_thread(self.base_path)
sdb.gc_log_item(object_type, object_hash)
#----
sfs.make_dirs_if_dont_exist(target_base)
# TODO make write and move
sfs.file_put_contents(sfs.cpjoin(target_base, object_hash[2:]), bytes(serialised, encoding='utf8'))
return object_hash
#===============================================================================
def read_index_object(self, object_hash: str, expected_object_type: str) -> indexObject:
# Hashes must only contain hex digits
if not set(object_hash) <= set('0123456789abcdef'):
raise IOError('Invalid object hash')
# ==============
index_object: indexObject = json.loads(sfs.file_get_contents(sfs.cpjoin(self.base_path, 'index', object_hash[:2], object_hash[2:])))
if index_object['type'] != expected_object_type: raise IOError('Type of object does not match expected type')
return index_object
#===============================================================================
def read_tree_index_object(self, object_hash) -> indexObjectTree:
return cast(indexObjectTree, self.read_index_object(object_hash, 'tree'))
#===============================================================================
def read_commit_index_object(self, object_hash) -> indexObjectCommit:
return cast(indexObjectCommit, self.read_index_object(object_hash, 'commit'))
#===============================================================================
def build_dir_tree(self, files):
""" Convert a flat file dict into the tree format used for storage """
def helper(split_files):
this_dir = {'files' : {}, 'dirs' : {}}
dirs = defaultdict(list)
for fle in split_files:
index = fle[0]; fileinfo = fle[1]
if len(index) == 1:
fileinfo['path'] = index[0] # store only the file name instead of the whole path
this_dir['files'][fileinfo['path']] = fileinfo
elif len(index) > 1:
dirs[index[0]].append((index[1:], fileinfo))
for name,info in dirs.items():
this_dir['dirs'][name] = helper(info)
return this_dir
return helper([(name.split('/')[1:], file_info) for name, file_info in files.items()])
#===============================================================================
def flatten_dir_tree(self, tree):
""" Convert a file tree back into a flat dict """
result = {}
def helper(tree, leading_path = ''):
dirs = tree['dirs']; files = tree['files']
for name, file_info in files.items():
file_info['path'] = leading_path + '/' + name
result[file_info['path']] = file_info
for name, contents in dirs.items():
helper(contents, leading_path +'/'+ name)
helper(tree); return result
#===============================================================================
def print_dir_tree(self, tree, indent = ''):
dirs = tree['dirs']; files = tree['files']
for name in list(files.keys()): print(indent + name)
for name, contents in dirs.items():
print(indent + name + '/')
self.print_dir_tree(contents, indent + '---')
#===============================================================================
def read_dir_tree(self, file_hash):
""" Recursively read the directory structure beginning at hash """
json_d = self.read_tree_index_object(file_hash)
node = {'files' : json_d['files'], 'dirs' : {}}
for name, hsh in json_d['dirs'].items(): node['dirs'][name] = self.read_dir_tree(hsh)
return node
#===============================================================================
def write_dir_tree(self, tree):
""" Recur through dir tree data structure and write it as a set of objects """
dirs = tree['dirs']; files = tree['files']
child_dirs = {name : self.write_dir_tree(contents) for name, contents in dirs.items()}
return self.write_index_object('tree', {'files' : files, 'dirs': child_dirs})
#===============================================================================
def have_active_commit(self) -> bool:
""" Checks if there is an active commit owned by the specified user """
sdb = get_server_db_instance_for_thread(self.base_path)
return sdb.have_active_commit()
#===============================================================================
def get_head(self) -> str:
""" Gets the hash associated with the current head commit """
contents = sfs.file_or_default(sfs.cpjoin(self.base_path, 'head'), b'root')
return contents.decode('utf8')
#===============================================================================
# NOTE Everything below here must not be called concurrently, either from
# threads in a single process or from multiple processes
#===============================================================================
def begin(self) | -> None:
if self.have_active_commit(): raise Exception()
# Possable optimisations
# Don't store GC log in DB
# Don't flush GC log
# Don't store active files in DB, don't update active files dynamically, but do it in one go during commit
| active_files = {}
head = self.get_head()
if head != 'root':
commit = self.read_commit_index_object(head)
active_files = self.flatten_dir_tree(self.read_dir_tree(commit['tree_root']))
sdb = get_server_db_instance_for_thread(self.base_path)
sdb.begin_commit(active_files)
#===============================================================================
def fs_put_from_file(self, source_file: str, file_info) -> None:
if not self.have_active_commit(): raise Exception()
file_info['hash'] = file_hash = sfs.hash_file(source_file)
sdb = get_server_db_instance_for_thread(self.base_path)
target_base = sfs.cpjoin(self.base_path, 'files',file_hash[:2])
target = sfs.cpjoin(target_base, file_hash[2:])
if not os.path.isfile(target):
# log items which don't already exist so that we do not have to read the obje |
0x90/libnl | libnl/linux_private/netlink.py | Python | lgpl-2.1 | 10,685 | 0.001029 | """netlink.h.
https://github.com/thom311/libnl/blob/libnl3_2_25/include/linux/netlink.h
This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation version 2.1
of the License.
"""
from libnl.misc import (bytearray_ptr, c_int, c_uint, c_uint16, c_uint32, c_ushort, SIZEOF_INT, SIZEOF_U16, SIZEOF_U32,
SIZEOF_UINT, SIZEOF_USHORT, Struct)
NETLINK_ROUTE = 0 # Routing/device hook.
NETLINK_GENERIC = 16
NLM_F_REQUEST = 1 # It is request message.
NLM_F_MULTI = 2 # Multipart message, termina | ted by NLMSG_DONE
NLM_F_ACK = 4 # Reply with ack, with zero or error code
NLM_F_ECHO = 8 # Echo this request
NLM_F_DUMP_INTR = 16 # Dump was inconsistent due to sequence change
# Modifiers to GET request.
NLM_F_ROOT = 0x100 # Specify tree root.
NLM_F_MATCH = 0x200 # Return all matching.
NLM_F_ATOMIC = 0x400 # Atomic GET.
NLM_F_DUMP = NLM_F_ROOT | NLM_F_MATCH
# Modifiers to NEW request.
NLM_F_REPLACE = 0x10 | 0 # Override existing.
NLM_F_EXCL = 0x200 # Do not touch, if it exists.
NLM_F_CREATE = 0x400 # Create, if it does not exist.
NLM_F_APPEND = 0x800 # Add to end of list.
class sockaddr_nl(Struct):
"""Netlink sockaddr class (C struct equivalent).
https://github.com/thom311/libnl/blob/libnl3_2_25/include/linux/netlink.h#L31
Instance variables:
nl_family -- AF_NETLINK (c_uint).
nl_pad -- zero (c_ushort).
nl_pid -- port ID (c_uint32).
nl_groups -- multicast groups mask (c_uint32).
"""
_REPR = '<{0}.{1} nl_family={2[nl_family]} nl_pad={2[nl_pad]} nl_pid={2[nl_pid]} nl_groups={2[nl_groups]}>'
SIGNATURE = (SIZEOF_UINT, SIZEOF_USHORT, SIZEOF_U32, SIZEOF_U32)
SIZEOF = sum(SIGNATURE)
def __init__(self, nl_family=0, nl_pad=0, nl_pid=0, nl_groups=0):
"""Constructor."""
super(sockaddr_nl, self).__init__()
self.nl_family = nl_family
self.nl_pad = nl_pad
self.nl_pid = nl_pid
self.nl_groups = nl_groups
def __iter__(self):
"""Yield pid and groups."""
yield self.nl_pid
yield self.nl_groups
@property
def nl_family(self):
"""AF_NETLINK."""
return c_uint.from_buffer(self.bytearray[self._get_slicers(0)]).value
@nl_family.setter
def nl_family(self, value):
"""Family setter."""
self.bytearray[self._get_slicers(0)] = bytearray(c_uint(value or 0))
@property
def nl_pad(self):
"""Zero."""
return c_ushort.from_buffer(self.bytearray[self._get_slicers(1)]).value
@nl_pad.setter
def nl_pad(self, value):
"""Pad setter."""
self.bytearray[self._get_slicers(1)] = bytearray(c_ushort(value or 0))
@property
def nl_pid(self):
"""Port ID."""
return c_uint32.from_buffer(self.bytearray[self._get_slicers(2)]).value
@nl_pid.setter
def nl_pid(self, value):
"""Port ID setter."""
self.bytearray[self._get_slicers(2)] = bytearray(c_uint32(value or 0))
@property
def nl_groups(self):
"""Port ID."""
return c_uint32.from_buffer(self.bytearray[self._get_slicers(3)]).value
@nl_groups.setter
def nl_groups(self, value):
"""Group setter."""
self.bytearray[self._get_slicers(3)] = bytearray(c_uint32(value or 0))
class nlmsghdr(Struct):
"""Netlink message header (holds actual payload of Netlink message).
https://github.com/thom311/libnl/blob/libnl3_2_25/include/linux/netlink.h#L38
<------- NLMSG_ALIGN(hlen) ------> <---- NLMSG_ALIGN(len) --->
+----------------------------+- - -+- - - - - - - - - - -+- - -+
| Header | Pad | Payload | Pad |
| struct nlmsghdr | | | |
+----------------------------+- - -+- - - - - - - - - - -+- - -+
<-------------- nlmsghdr->nlmsg_len ------------------->
Instance variables:
nlmsg_len -- length of message including header (c_uint32).
nlmsg_type -- message content (c_uint16).
nlmsg_flags -- additional flags (c_uint16).
nlmsg_seq -- sequence number (c_uint32).
nlmsg_pid -- sending process port ID (c_uint32).
payload -- payload and padding at the end (bytearay).
"""
_REPR = ('<{0}.{1} nlmsg_len={2[nlmsg_len]} nlmsg_type={2[nlmsg_type]} nlmsg_flags={2[nlmsg_flags]} '
'nlmsg_seq={2[nlmsg_seq]} nlmsg_pid={2[nlmsg_pid]} payload={2[payload]}>')
SIGNATURE = (SIZEOF_U32, SIZEOF_U16, SIZEOF_U16, SIZEOF_U32, SIZEOF_U32)
SIZEOF = sum(SIGNATURE)
def __init__(self, ba=None, nlmsg_len=None, nlmsg_type=None, nlmsg_flags=None, nlmsg_seq=None, nlmsg_pid=None):
"""Constructor."""
super(nlmsghdr, self).__init__(ba)
if nlmsg_len is not None:
self.nlmsg_len = nlmsg_len
if nlmsg_type is not None:
self.nlmsg_type = nlmsg_type
if nlmsg_flags is not None:
self.nlmsg_flags = nlmsg_flags
if nlmsg_seq is not None:
self.nlmsg_seq = nlmsg_seq
if nlmsg_pid is not None:
self.nlmsg_pid = nlmsg_pid
@property
def nlmsg_len(self):
"""Length of message including header."""
return c_uint32.from_buffer(self.bytearray[self._get_slicers(0)]).value
@nlmsg_len.setter
def nlmsg_len(self, value):
"""Length setter."""
self.bytearray[self._get_slicers(0)] = bytearray(c_uint32(value or 0))
@property
def nlmsg_type(self):
"""Message content."""
return c_uint16.from_buffer(self.bytearray[self._get_slicers(1)]).value
@nlmsg_type.setter
def nlmsg_type(self, value):
"""Message content setter."""
self.bytearray[self._get_slicers(1)] = bytearray(c_uint16(value or 0))
@property
def nlmsg_flags(self):
"""Additional flags."""
return c_uint16.from_buffer(self.bytearray[self._get_slicers(2)]).value
@nlmsg_flags.setter
def nlmsg_flags(self, value):
"""Message flags setter."""
self.bytearray[self._get_slicers(2)] = bytearray(c_uint16(value or 0))
@property
def nlmsg_seq(self):
"""Sequence number."""
return c_uint32.from_buffer(self.bytearray[self._get_slicers(3)]).value
@nlmsg_seq.setter
def nlmsg_seq(self, value):
"""Sequence setter."""
self.bytearray[self._get_slicers(3)] = bytearray(c_uint32(value or 0))
@property
def nlmsg_pid(self):
"""Sending process port ID."""
return c_uint32.from_buffer(self.bytearray[self._get_slicers(4)]).value
@nlmsg_pid.setter
def nlmsg_pid(self, value):
"""Port ID setter."""
self.bytearray[self._get_slicers(4)] = bytearray(c_uint32(value or 0))
@property
def payload(self):
"""Payload and padding at the end (bytearray_ptr)."""
return bytearray_ptr(self.bytearray, self._get_slicers(4).stop)
NLMSG_ALIGNTO = c_uint(4).value
NLMSG_ALIGN = lambda len_: (len_ + NLMSG_ALIGNTO - 1) & ~(NLMSG_ALIGNTO - 1)
NLMSG_HDRLEN = NLMSG_ALIGN(nlmsghdr.SIZEOF)
NLMSG_LENGTH = lambda len_: len_ + NLMSG_ALIGN(NLMSG_HDRLEN)
NLMSG_SPACE = lambda len_: NLMSG_ALIGN(NLMSG_LENGTH(len_))
NLMSG_NOOP = 0x1 # Nothing.
NLMSG_ERROR = 0x2 # Error.
NLMSG_DONE = 0x3 # End of a dump.
NLMSG_OVERRUN = 0x4 # Data lost.
NLMSG_MIN_TYPE = 0x10 # < 0x10: reserved control messages.
class nlmsgerr(Struct):
"""https://github.com/thom311/libnl/blob/libnl3_2_25/include/linux/netlink.h#L95.
Instance variables:
error -- c_int.
msg -- nlmsghdr class instance.
"""
_REPR = '<{0}.{1} error={2[error]} msg={2[msg]}>'
SIGNATURE = (SIZEOF_INT, nlmsghdr.SIZEOF)
SIZEOF = sum(SIGNATURE)
@property
def error(self):
"""Error integer value."""
return c_int.from_buffer(self.bytearray[self._get_slicers(0)]).value
@property
def msg(self):
"""Payload and padding at the end (nlmsghdr class instance)."""
return nlmsghdr(self.bytearray[self._get_slicers(1).start:])
NETLINK_ADD_MEMBERSHIP = 1
NETLINK_DROP_MEMBERSHIP = 2
NETLINK_PKTINFO = 3
NETLINK_BROADCAST_ERROR = 4 |
zookeepr/zookeepr | zkpylons/tests/functional/test_event_type.py | Python | gpl-2.0 | 274 | 0.010949 | from | .crud_helper import CrudHelper
class TestEventType(CrudHelper):
def test_permissions(self, app, db_session):
CrudHelper.test_permissions(self, app, db_session, dont_get_pages='view')
def test_view(self): |
pass # No view page, block crud helper
|
MicrosoftGenomics/FaST-LMM | fastlmm/util/standardizer/__init__.py | Python | apache-2.0 | 2,365 | 0.014799 | from .Beta import *
from .Unit import *
#import warnings
#warnings.warn("This __init__.py is deprecated. Pysnptools includes newer version", DeprecationWarning)
def factory(s):
s = s.capitalize()
if s == "Unit" or s=="Unit()":
return Unit()
if s=="Beta":
return Beta()
if s.startswith("Beta("):
standardizer = eval(s)
return standardizer
def standardize_with_lambda(snps, lambdax, blocksize = None):
if blocksize==None:
return lambdax(snps)
idx_start = 0
idx_stop = blocksize
while idx_start<snps.shape[1]:
#print idx_start
lambdax(snps[:,idx_start:idx_stop])
idx_start = idx_stop
idx_stop += blocksize
if idx_stop>snps.shape[1]:
idx_stop = snps.shape[1]
return snps
def standardize_unit_python(snps, returnStats=False):
'''
standardize snps to zero-mean and unit variance
'''
N = snps.shape[0]
S = snps.shape[1]
imissX = np.isnan(sn | ps)
snp_sum = np.nansum(snps,axis=0)
n_obs_sum = (~imissX).sum(0)
snp_mean = (snp_sum*1.0)/n_obs_sum
snps -= snp_mean
snp_std = np.sqrt(np.nansum(snps**2, axis=0)/n_obs_sum)
# avoid div by 0 when standardizing
if snp_std.__contains__(0.0):
logging.warn("A least one snps has only one value, that is, its standard | deviation is zero")
snp_std[snp_std == 0.0] = 1.0
snps /= snp_std
snps[imissX] = 0
if returnStats:
return snps,snp_mean,snp_std
return snps
def standardize_beta_python(snps, betaA, betaB):
'''
standardize snps with Beta prior
'''
N = snps.shape[0]
S = snps.shape[1]
imissX = np.isnan(snps)
snp_sum = np.nansum(snps,axis=0)
n_obs_sum = (~imissX).sum(0)
snp_mean = (snp_sum*1.0)/n_obs_sum
snps -= snp_mean
snp_std = np.sqrt(np.nansum(snps**2, axis=0)/n_obs_sum)
if snp_std.__contains__(0.0):
logging.warn("A least one snps has only one value, that is, its standard deviation is zero")
maf = snp_mean/2.0
maf[maf>0.5]=1.0- maf[maf>0.5]
# avoid div by 0 when standardizing
import scipy.stats as st
maf_beta = st.beta.pdf(maf, betaA, betaB)
snps*=maf_beta
snps[imissX] = 0.0
return snps
|
marcsans/cnn-physics-perception | phy/lib/python2.7/site-packages/theano/tensor/nnet/conv3d2d.py | Python | mit | 12,138 | 0.000494 | import theano
from theano.gradient import DisconnectedType
from theano.gof import Op, Apply, TopoOptimizer
from theano import tensor
import theano.sandbox.cuda as cuda
from theano.tensor.opt import copy_stack_trace
def get_diagonal_subtensor_view(x, i0, i1):
"""
Helper function for DiagonalSubtensor and IncDiagonalSubtensor.
Notes
-----
It returns a partial view of x, not a partial copy.
"""
# We have to cast i0 and i0 to int because python 2.4 (and maybe later)
# do not support indexing with 0-dim, 'int*' ndarrays.
i0 = int(i0)
i1 = int(i1)
if x.shape[i0] < x.shape[i1]:
raise NotImplementedError('is this allowed?')
idx = [slice(None)] * x.ndim
idx[i0] = slice(x.shape[i1] - 1, None, None)
xview = x.__getitem__(tuple(idx))
strides = list(xview.strides)
strides[i1] -= strides[i0]
xview.strides = strides
return xview
class DiagonalSubtensor(Op):
    """
    Return an n-d diagonal subtensor.

    Parameters
    ----------
    x
        n-d tensor
    i0
        Axis index in x
    i1
        Axis index in x

    Notes
    -----
    Work on the GPU. The output is a view of the input when ``inplace``
    is True, a copy otherwise.

    Extended summary
    ----------------
    ``x`` is some n-dimensional tensor, but this Op only deals with a
    matrix-shaped slice, using axes i0 and i1. Without loss of
    generality, suppose that ``i0`` picks out our ``row`` dimension,
    and i1 the ``column`` dimension.

    So the relevant part of ``x`` is some matrix ``u``. Suppose it has 6 rows
    and 4 columns::

        [ 0 0 0 0 ]
        [ 0 0 0 0 ]
        [ 0 0 0 0 ]
        [ 0 0 0 0 ]
        [ 0 0 0 0 ]
        [ 0 0 0 0 ]

    The view returned by this function is also a matrix. It's a thick,
    diagonal ``stripe`` across u that discards the lower left triangle
    and the upper right triangle:

        [ x 0 0 0 ]
        [ x x 0 0 ]
        [ x x x 0 ]
        [ 0 x x x ]
        [ 0 0 x x ]
        [ 0 0 0 x ]

    In this case the return value would be this view of shape 3x4. The
    returned view has the same number of dimensions as the input
    ``x``, and the only difference is that the shape along dimension
    ``i0`` has been reduced by ``shape[i1] - 1`` because of the
    triangles that got chopped out.

    The NotImplementedError is meant to catch the case where shape[i0]
    is too small for the stripe to reach across the matrix, in which
    case it's not clear what this function should do. Maybe always
    raise an error. I'd look back to the call site in the Conv3D to
    see what's necessary at that point.
    """

    __props__ = ("inplace",)

    def __str__(self):
        if self.inplace:
            return "%s{inplace}" % self.__class__.__name__
        return "%s" % self.__class__.__name__

    def __init__(self, inplace=False):
        self.inplace = inplace
        if inplace:
            # Output 0 is a view of input 0.
            self.view_map = {0: [0]}

    def make_node(self, x, i0, i1):
        _i0 = tensor.as_tensor_variable(i0)
        _i1 = tensor.as_tensor_variable(i1)
        return Apply(self, [x, _i0, _i1], [x.type()])

    def perform(self, node, inputs, output_storage):
        xview = get_diagonal_subtensor_view(*inputs)
        if self.inplace:
            output_storage[0][0] = xview
        else:
            output_storage[0][0] = xview.copy()

    def grad(self, inputs, g_outputs):
        # Scatter the output gradient back into a zero tensor shaped like x.
        z = tensor.zeros_like(inputs[0])
        gx = inc_diagonal_subtensor(z, inputs[1], inputs[2], g_outputs[0])
        # The axis indices i0/i1 are integer parameters, not differentiable.
        return [gx, DisconnectedType()(), DisconnectedType()()]

    def connection_pattern(self, node):
        rval = [[True], [False], [False]]
        return rval


diagonal_subtensor = DiagonalSubtensor(False)
class IncDiagonalSubtensor(Op):
    """
    The gradient of DiagonalSubtensor: add ``amt`` into the diagonal
    stripe of ``x`` selected by axes ``i0`` and ``i1``.
    """

    __props__ = ("inplace",)

    def __str__(self):
        if self.inplace:
            return "%s{inplace}" % self.__class__.__name__
        return "%s" % self.__class__.__name__

    def __init__(self, inplace=False):
        self.inplace = inplace
        if inplace:
            # Input 0 is overwritten by the output.
            self.destroy_map = {0: [0]}

    def make_node(self, x, i0, i1, amt):
        _i0 = tensor.as_tensor_variable(i0)
        _i1 = tensor.as_tensor_variable(i1)
        return Apply(self, [x, _i0, _i1, amt], [x.type()])

    def perform(self, node, inputs, output_storage):
        x, i0, i1, amt = inputs
        if not self.inplace:
            x = x.copy()
        # Incrementing through the strided view updates the stripe of x.
        xview = get_diagonal_subtensor_view(x, i0, i1)
        xview += amt
        output_storage[0][0] = x

    def grad(self, inputs, g_outputs):
        x, i0, i1, amt = inputs
        gy = g_outputs[0]
        # d/dx passes gy straight through; d/damt extracts the same stripe.
        return [gy, DisconnectedType()(), DisconnectedType()(),
                diagonal_subtensor(gy, i0, i1)]

    def connection_pattern(self, node):
        rval = [[True], [False], [False], [True]]
        return rval


inc_diagonal_subtensor = IncDiagonalSubtensor(False)
def conv3d(signals, filters,
signals_shape=None, filters_shape=None,
border_mode='valid'):
"""
Convolve spatio-temporal filters with a movie.
It flips the filters.
Parameters
----------
signals
Timeseries of images whose pixels have color channels.
Shape: [Ns, Ts, C, Hs, Ws].
filters
Spatio-temporal filters.
Shape: [Nf, Tf, C, Hf, Wf].
signals_shape
None or a tuple/list with the shape of signals.
filters_shape
None or a tuple/list with the shape of filters.
border_mode
The only one tested is 'valid'.
Notes
-----
Another way to define signals: (batch, time, in channel, row, column)
Another way to define filters: (out channel,time,in channel, row, column)
For the GPU, you can use this implementation or
:func:`conv3d_fft <theano.sandbox.cuda.fftconv.conv3d_fft>`.
See Also
--------
Someone made a script that shows how to swap the axes between
both 3d convolution implementations in Theano. See the last
`attachment <https://groups.google.com/d/msg/theano-users/1S9_bZgHxVw/0cQR9a4riFUJ>`_
"""
if isinstance(border_mode, str):
border_mode = (border_mode, border_mode, border_mode)
if signals_shape is None:
_signals_shape_5d = signals.shape
else:
_signals_shape_5d = signals_shape
if filters_shape is None:
_filters_shape_5d = filters.shape
else:
_filters_shape_5d = filters_shape
_signals_shape_4d = (
_signals_shape_5d[0] * _signals_shape_5d[1],
_signals_shape_5d[2],
_signals_shape_5d[3],
_signals_shape_5d[4],
)
_filters_shape_4d = (
_filters_shape_5d[0] * _filters_shape_5d[1],
_filters_shape_5d[2],
_filters_shape_5d[3],
_filters_shape_5d[4],
)
if border_mode[1] != border_mode[2]:
raise NotImplementedError('height and width bordermodes must match')
conv2d_signal_shape = _signals_shape_4d
conv2d_filter_shape = _filters_shape_4d
if signals_shape is None:
conv2d_signal_shape = None
if filters_shape is None:
conv2d_filter_shape = None
out_4d = tensor.nnet.conv2d(
signals.reshape(_signals_shape_4d),
filters.reshape(_filters_shape_4d),
input_shape=conv2d_signal_shape,
filter_shape=conv2d_filter_shape,
border_mode=border_mode[1]) # ignoring border_mode[2]
# reshape the output to restore its original size
# shape = Ns, Ts, Nf, Tf, W-Wf+1, H-Hf+1
if border_mode[1] == 'valid':
out_tmp = out_4d.reshape((
_signals_shape_5d[0], # Ns
_signals_shape_5d[1], # Ts
_filters_shape_5d[0], # Nf
_filters_shape_5d[1], # Tf
_signals_shape_5d[3] - _filters_shape_5d[3] + 1,
_signals_shape_5d[4] - _filters_shape_5d[4] + 1,
))
elif border_mode[1] == 'full':
out_tmp = out_4d.reshape((
_signals_shape_5d[0], # Ns
_signals_shape_5d[1], # Ts
_filters_shape_5d[0], # Nf
_filters_shape_5d[1], # Tf
_signals_shape_5d[3] + _filters_shape_5d[3] - 1,
_signals_shape_5d[4] + _filters_shape |
amwelch/a10sdk-python | a10sdk/core/slb/slb_template_tcp.py | Python | apache-2.0 | 3,902 | 0.005894 | from a10sdk.common.A10BaseClass import A10BaseClass
class Tcp(A10BaseClass):
    """L4 TCP switch (Fast TCP) template configuration.

    Class tcp supports CRUD Operations and inherits from
    `common/A10BaseClass`. This class is the `"PARENT"` class for this
    module.

    Parameters (all optional unless noted; "flag" fields default to 0):
        :param name: Fast TCP template name (required, 1-63 chars,
            default "default").
        :param initial_window_size: initial TCP window size (1-65535).
        :param alive_if_active: keep connection alive if active traffic.
        :param qos: QOS level (1-63).
        :param reset_fwd: send reset to server if error happens.
        :param reset_rev: send reset to client if error happens.
        :param half_open_idle_timeout: TCP half-open idle timeout in
            seconds (1-60, default off).
        :param half_close_idle_timeout: TCP half-close idle timeout in
            seconds (60-120, default off).
        :param idle_timeout: idle timeout in seconds (1-2097151,
            default 120).
        :param force_delete_timeout: maximum session lifetime in seconds
            (1-31); mutually exclusive with force_delete_timeout_100ms.
        :param force_delete_timeout_100ms: maximum session lifetime in
            100 ms units (1-31); mutually exclusive with
            force_delete_timeout.
        :param lan_fast_ack: enable fast TCP ack on LAN.
        :param insert_client_ip: insert client IP into TCP option.
        :param uuid: uuid of the object (1-64 chars, not modifiable).
        :param DeviceProxy: device proxy for REST operations and session
            handling; refer to `common/device_proxy.py`.

    URL for this object::

        `https://<Hostname|Ip address>//axapi/v3/slb/template/tcp/{name}`
    """

    def __init__(self, **kwargs):
        self.ERROR_MSG = ""
        # Attributes the A10 API requires for this object type.
        self.required = [ "name"]
        self.b_key = "tcp"
        self.a10_url="/axapi/v3/slb/template/tcp/{name}"
        self.DeviceProxy = ""
        self.initial_window_size = ""
        self.alive_if_active = ""
        self.qos = ""
        self.name = ""
        self.reset_fwd = ""
        self.half_open_idle_timeout = ""
        self.idle_timeout = ""
        self.force_delete_timeout = ""
        self.lan_fast_ack = ""
        self.insert_client_ip = ""
        self.reset_rev = ""
        self.force_delete_timeout_100ms = ""
        self.half_close_idle_timeout = ""
        self.uuid = ""

        # Any keyword argument overrides the corresponding default above.
        for keys, value in kwargs.items():
            setattr(self,keys, value)
|
thiagof/treeio | treeio/identities/south_migrations/0003_related_accessentity.py | Python | mit | 12,940 | 0.00711 | # encoding: utf-8
# Copyright 2011 Tree.io Limited
# This file is part of Treeio.
# License www.tree.io/license
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import DataMigration
from django.db import models
class Migration(DataMigration):
def forwards(self, orm):
"Migrate Contacts to have relate to an AccessEntity instead of User/Group"
for contact in orm['identities.Contact'].objects.all():
if not contact.related_user and contact.related_group:
contact.related_user = contact.related_group.accessentity_ | ptr
contact.save()
def backwards(self, orm):
raise RuntimeError("Cannot reverse this migration.")
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.mode | ls.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'core.accessentity': {
'Meta': {'object_name': 'AccessEntity'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
'core.comment': {
'Meta': {'object_name': 'Comment'},
'author': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.User']", 'null': 'True', 'blank': 'True'}),
'body': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'dislikes': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'comments_disliked'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['core.User']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'likes': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'comments_liked'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['core.User']"})
},
'core.group': {
'Meta': {'ordering': "['name']", 'object_name': 'Group'},
'accessentity_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['core.AccessEntity']", 'unique': 'True'}),
'details': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'child_set'", 'null': 'True', 'to': "orm['core.Group']"})
},
'core.object': {
'Meta': {'object_name': 'Object'},
'comments': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'comments'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['core.Comment']"}),
'creator': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'objects_created'", 'null': 'True', 'to': "orm['core.User']"}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'dislikes': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'objects_disliked'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['core.User']"}),
'full_access': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'objects_full_access'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['core.AccessEntity']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'likes': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'objects_liked'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['core.User']"}),
'links': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'links_rel_+'", 'null': 'True', 'to': "orm['core.Object']"}),
'nuvius_resource': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'object_name': ('django.db.models.fields.CharField', [], {'max_length': '512', 'null': 'True', 'blank': 'True'}),
'object_type': ('django.db.models.fields.CharField', [], {'max_length': '512', 'null': 'True', 'blank': 'True'}),
'read_access': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'objects_read_access'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['core.AccessEntity']"}),
'subscribers': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'subscriptions'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['core.User']"}),
'tags': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['core.Tag']", ' |
grahesh/Stock-Market-Event-Analysis | Examples/Event Analysis/Half-Yearly End/Half_Year_End_Analysis.py | Python | bsd-3-clause | 4,522 | 0.017249 | # -*- coding: utf-8 -*-
"""
Created on Thu Jan 03 10:16:39 2013
@author: Grahesh
"""
import pandas
from qstkutil import DataAccess as da
import numpy as np
import math
import copy
import qstkutil.qsdateutil as du
import datetime as dt
import qstkutil.DataAccess as da
import qstkutil.tsutil as tsu
import qstkstudy.EventProfiler as ep
"""
Accepts a list of symbols along with start and end date
Returns the Event Matrix which is a pandas Datamatrix
Event matrix has the following structure :
|IBM |GOOG|XOM |MSFT| GS | JP |
(d1)|nan |nan | 1 |nan |nan | 1 |
(d2)|nan | 1 |nan |nan |nan |nan |
(d3)| 1 |nan | 1 |nan | 1 |nan |
(d4)|nan | 1 |nan | 1 |nan |nan |
...................................
...................................
Also, d1 = start date
nan = no information about any event.
1 = status bit(positively confirms the event occurence)
"""
# Get the data from the data store
storename = "NSEData" # get data from our daily prices source
# Available field names: open, close, high, low, close, actual_close, volume
closefield = "close"
volumefield = "volume"
window = 10
def getHalfYearEndDates(timestamps):
    """Return the last timestamp before each half-year boundary.

    Walking a chronologically sorted series, this collects the timestamp
    immediately preceding the first April entry and the first October
    entry of each year (i.e. the financial half-year ends).

    :param timestamps: chronologically sorted sequence of datetimes.
    :returns: list of timestamps preceding each half-year start.
    """
    newTS = []
    if not timestamps:
        # No data -> no half-year boundaries (the original indexed
        # timestamps[0] and crashed here).
        return newTS
    tempYear = timestamps[0].year
    flag = 1  # 1 while this year's April boundary has not been taken yet
    for x in range(0, len(timestamps) - 1):
        if timestamps[x].year == tempYear:
            if timestamps[x].month == 4 and flag == 1:
                # Guard x > 0: the original appended timestamps[x - 1],
                # which wraps to the *last* element when x == 0.
                if x > 0:
                    newTS.append(timestamps[x - 1])
                flag = 0
            if timestamps[x].month == 10:
                if x > 0:
                    newTS.append(timestamps[x - 1])
                tempYear = timestamps[x].year + 1
                flag = 1
    return newTS
def findEvents(symbols, startday,endday, marketSymbol,verbose=False):
# Reading the Data for the list of Symbols.
timeofday=dt.timedelta(hours=16)
timestamps = du.getNSEdays(startday,endday,timeofday)
endOfHalfYear=getHalfYearEndDates(timestamps)
dataobj = da.DataAccess | ('NSEData')
if verbose:
print __name__ + " reading data"
# Reading the Data
close = dataobj.get_data(timestamps, symbols, closefield)
# Com | pleting the Data - Removing the NaN values from the Matrix
close = (close.fillna(method='ffill')).fillna(method='backfill')
# Calculating Daily Returns for the Market
tsu.returnize0(close.values)
# Calculating the Returns of the Stock Relative to the Market
# So if a Stock went up 5% and the Market rised 3%. The the return relative to market is 2%
mktneutDM = close - close[marketSymbol]
np_eventmat = copy.deepcopy(mktneutDM)
for sym in symbols:
for time in timestamps:
np_eventmat[sym][time]=np.NAN
if verbose:
print __name__ + " finding events"
# Generating the Event Matrix
# Event described is : Analyzing half year events for given stocks.
for symbol in symbols:
for i in endOfHalfYear:
np_eventmat[symbol][i] = 1.0 #overwriting by the bit, marking the event
return np_eventmat
#################################################
################ MAIN CODE ######################
#################################################
symbols = np.loadtxt('NSE500port.csv',dtype='S13',comments='#', skiprows=1)
# You might get a message about some files being missing, don't worry about it.
#symbols =['SPY','BFRE','ATCS','RSERF','GDNEF','LAST','ATTUF','JBFCF','CYVA','SPF','XPO','EHECF','TEMO','AOLS','CSNT','REMI','GLRP','AIFLY','BEE','DJRT','CHSTF','AICAF']
#symbols=['NSE','3MINDIA.NS','AARTIIND.NS','ABAN.NS','ABB.NS','ABGSHIP.NS','ABIRLANUV.NS','ACC.NS','ADANIENT.NS','ADANIPORT.NS','ADANIPOWE.NS','ADVANTA.NS','ALLCARGO.NS','AIAENG.NS','AIL.NS','AZKOINDIA.NS']
startday = dt.datetime(2011,1,1)
endday = dt.datetime(2012,1,1)
eventMatrix = findEvents(symbols,startday,endday,marketSymbol='NSE500',verbose=True)
eventMatrix.to_csv('eventmatrix.csv', sep=',')
eventProfiler = ep.EventProfiler(eventMatrix,startday,endday,lookback_days=20,lookforward_days=20,verbose=True)
eventProfiler.study(filename="HalfYearEventStudy.jpg",plotErrorBars=True,plotMarketNeutral=True,plotEvents=False,marketSymbol='NSE500')
|
davebrent/consyn | consyn/cli/__init__.py | Python | gpl-3.0 | 2,350 | 0 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (C) 2014, David Poulter
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from __future__ import unicode_literals
import os
import click
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from .. | models import Base
from ..settings import get_settings
__all__ = ["configurator", "main"]
settings = get_settings(__name__)
COMMANDS = [
"add",
"ls",
" | rm",
"cluster",
"mosaic",
"show",
"config"
]
class Config(object):
    """Shared CLI state handed from the root command to subcommands."""

    def __init__(self):
        # Everything starts unset; ``main`` populates these before any
        # subcommand executes.
        self.debug = False
        self.verbose = False
        self.database = None
        self.session = None
configurator = click.make_pass_decorator(Config, ensure=True)
@click.group()
@click.option("--debug", default=False, is_flag=True,
              help="Enables debug mode.")
@click.option("--verbose", default=False, is_flag=True,
              help="Enables verbose mode.")
@click.option("--database", default=settings.get("database"),
              help="Path to database.")
@configurator
def main(config, debug, verbose, database=settings.get("database")):
    """A concatenative synthesis command line tool."""
    # Bare filesystem paths are promoted to SQLAlchemy sqlite URLs.
    if "://" not in database:
        database = "sqlite:///{}".format(os.path.abspath(database))
    engine = create_engine(database)
    Base.metadata.create_all(engine)
    Session = sessionmaker(bind=engine)
    config.debug = debug
    config.verbose = verbose
    config.database = database
    config.session = Session()
    # Register every subcommand module that imports cleanly; commands
    # with unmet dependencies are skipped silently.
    for cmd in COMMANDS:
        try:
            module = __import__("consyn.cli." + cmd.encode("ascii", "replace"),
                                None, None, 1)
            main.add_command(getattr(module, "command"))
        except ImportError:
            continue
|
tatools/demo-python | demo_python_at/commons/printer.py | Python | apache-2.0 | 527 | 0 | from abc import ABC, abstractmethod
from demo_python_at.commons.message import | Message
class Printer(ABC):
    """Abstract base class for all printers.

    Concrete subclasses decide where a message is written.
    """

    @abstractmethod
    def print(self, message: Message):
        """Output the given message; implemented by concrete printers."""
        pass
class StdoutPrinter(Printer):
    """Printer that writes messages to standard output."""
    def print(self, message: Message):
        """
        Print the given message to stdout.

        :param message: Message object whose ``data()`` payload is printed
        """
        print(message.data())
|
fanwenl/kindle-image | encryption/sha.py | Python | apache-2.0 | 119 | 0.008403 | import hashlib
import hashlib

# Path of the file to digest (the original script hard-coded this
# placeholder value).
FILE_PATH = 'file_path'


def sha256_of_file(path):
    """Return the hex SHA-256 digest of the file at ``path``."""
    sh = hashlib.sha256()
    # Read in fixed-size chunks so large files never need to fit in memory;
    # the with-statement closes the handle even if reading fails.
    with open(path, 'rb') as f:
        for chunk in iter(lambda: f.read(65536), b''):
            sh.update(chunk)
    return sh.hexdigest()


if __name__ == '__main__':
    print(sha256_of_file(FILE_PATH))
philanthropy-u/edx-platform | lms/djangoapps/grades/api/v1/tests/mixins.py | Python | agpl-3.0 | 4,459 | 0.00157 | """
Mixins classes being used by all test classes within this folder
"""
from datetime import datetime
from pytz import UTC
from lms.djangoapps.courseware.tests.factories import GlobalStaffFactory
from openedx.core.djangoapps.content.course_overviews.tests.factories import CourseOverviewFactory
from student.tests.factories import CourseEnrollmentFactory, UserFactory
from xmodule.modulestore.tests.factories import CourseFactory, ItemFactory
from xmodule.modulestore.tests.django_utils import SharedModuleStoreTestCase, TEST_DATA_SPLIT_MODULESTORE
class GradeViewTestMixin(SharedModuleStoreTestCase):
"""
Mixin class for grades related view tests
The following tests assume that the grading policy is the edX default one:
{
"GRADER": [
{
"drop_count": 2,
"min_count": 12,
"short_label": "HW",
"type": "Homework",
"weight": 0.15
},
{
"drop_count": 2,
"min_count": 12,
"type": "Lab",
"weight": 0.15
},
{
"drop_count": 0,
"min_count": 1,
"short_label": "Midterm",
"type": "Midterm Exam",
"weight": 0.3
},
{
"drop_count": 0,
"min_count": 1,
"short_label": "Final",
"type": "Final Exam",
"weight": 0.4
}
],
"GRADE_CUTOFFS": {
"Pass": 0.5
}
}
"""
MODULESTORE = TEST_DATA_SPLIT_MODULESTORE
@classmethod
def setUpClass(cls):
super(GradeViewTestMixin, cls).setUpClass()
cls.course = cls._create_test_course_with_default_grading_policy(
display_name='test course', run="Testing_course"
)
cls.empty_course = cls._create_test_course_with_default_grading_policy(
display_name='empty test course', run="Empty_testing_course"
)
cls.course_key = cls.course.id
def _create_user_enrollments(self, *users):
date = datetime(2013, 1, 22, tzinfo=UTC)
for user in users:
CourseEnrollmentFactory(
course_id=self.course.id,
user=user,
created=date,
)
def setUp(self):
super(GradeViewTestMixin, self).setUp()
self.password = 'test'
self.global_staff = GlobalStaffFactory.create()
self.student = UserFactory(password=self.password, username='student') |
self.other_student = UserFactory(password=self.password, username='other_student')
self._create_user_enrollments(self.st | udent, self.other_student)
@classmethod
def _create_test_course_with_default_grading_policy(cls, display_name, run):
"""
Utility method to create a course with a default grading policy
"""
course = CourseFactory.create(display_name=display_name, run=run)
_ = CourseOverviewFactory.create(id=course.id)
chapter = ItemFactory.create(
category='chapter',
parent_location=course.location,
display_name="Chapter 1",
)
# create a problem for each type and minimum count needed by the grading policy
# A section is not considered if the student answers less than "min_count" problems
for grading_type, min_count in (("Homework", 12), ("Lab", 12), ("Midterm Exam", 1), ("Final Exam", 1)):
for num in xrange(min_count):
section = ItemFactory.create(
category='sequential',
parent_location=chapter.location,
due=datetime(2017, 12, 18, 11, 30, 00),
display_name='Sequential {} {}'.format(grading_type, num),
format=grading_type,
graded=True,
)
vertical = ItemFactory.create(
category='vertical',
parent_location=section.location,
display_name='Vertical {} {}'.format(grading_type, num),
)
ItemFactory.create(
category='problem',
parent_location=vertical.location,
display_name='Problem {} {}'.format(grading_type, num),
)
return course
|
runozo/leaflet-adv-demo | run.py | Python | mit | 89 | 0 | from demo im | port app
if __name__ == '__main__':
app.run(host='0.0.0.0', debug=True)
| |
chubbymaggie/simuvex | simuvex/s_pcap.py | Python | bsd-2-clause | 2,173 | 0.005983 | import dpkt
import socket
import logging
l = logging.getLogger("simuvex.s_pcap")
class PCAP(object):
    """Replays TCP payloads captured in a pcap file.

    Packets whose destination matches ``(ip, port)`` are treated as
    outbound traffic; every other non-empty TCP payload is inbound data
    that can be consumed incrementally through :meth:`recv`.
    """

    def __init__(self, path, ip_port_tup, init=True):
        self.path = path
        self.packet_num = 0   # index of the next inbound packet to read
        self.pos = 0          # read offset within the current packet
        self.in_streams = []  # [(payload_len, payload), ...]
        self.out_streams = []
        self.ip = ip_port_tup[0]
        self.port = ip_port_tup[1]
        if init:
            self.initialize(self.path)

    def initialize(self, path):
        """Parse the pcap file, splitting TCP payloads into in/out streams."""
        with open(path, 'rb') as f:
            pcap = dpkt.pcap.Reader(f)
            for _, buf in pcap:
                ip = dpkt.ethernet.Ethernet(buf).ip
                tcp = ip.data
                myip = socket.inet_ntoa(ip.dst)
                # NOTE: the original compared with ``is``/``is not``, which
                # tests object identity; for the freshly-built address string
                # it essentially never matched, leaving out_streams empty.
                # ``==``/``!=`` compare values, which is what is meant here.
                if myip == self.ip and tcp.dport == self.port and \
                        len(tcp.data) != 0:
                    self.out_streams.append((len(tcp.data), tcp.data))
                elif len(tcp.data) != 0:
                    self.in_streams.append((len(tcp.data), tcp.data))

    def recv(self, length):
        """Return up to ``length`` bytes from the current inbound packet.

        Reads never cross a packet boundary; once a packet is exhausted
        the cursor advances to the next one.

        :returns: tuple ``(data, number_of_bytes)``.
        """
        temp = 0
        initial_packet = self.packet_num
        plength, pdata = self.in_streams[self.packet_num]
        length = min(length, plength)
        if self.pos == 0:
            if plength > length:
                # Partial read from the start of the packet.
                temp = length
            else:
                # Whole packet consumed; move on to the next one.
                self.packet_num += 1
            packet_data = pdata[self.pos:length]
            self.pos += temp
        else:
            if (self.pos + length) >= plength:
                # Clamp to the remainder of the current packet.
                rest = plength - self.pos
                length = rest
                self.packet_num += 1
            packet_data = pdata[self.pos:plength]
            if self.packet_num != initial_packet:
                self.pos = 0
        return packet_data, length

    def copy(self):
        """Return a copy sharing the parsed streams but with its own state."""
        new_pcap = PCAP(self.path, (self.ip, self.port), init=False)
        new_pcap.packet_num = self.packet_num
        new_pcap.pos = self.pos
        new_pcap.in_streams = self.in_streams
        new_pcap.out_streams = self.out_streams
        return new_pcap
|
Charlotte-Morgan/inasafe | safe/impact_function/postprocessors.py | Python | gpl-3.0 | 13,430 | 0 | # coding=utf-8
"""Postprocessors."""
# noinspection PyUnresolvedReferences
from qgis.core import QgsFeatureRequest
from safe.definitions.minimum_needs import minimum_needs_parameter
from safe.gis.vector.tools import (
create_field_from_definition, SizeCalculator)
from safe.processors import (
field_input_type,
keyword_input_type,
keyword_value_expected,
dynamic_field_input_type,
needs_profile_input_type,
layer_crs_input_value,
constant_input_type,
geometry_property_input_type,
layer_property_input_type,
size_calculator_input_value
)
from safe.utilities.i18n import tr
from safe.utilities.profiling import profile
from functools import reduce
__copyright__ = "Copyright 2016, The InaSAFE Project"
__license__ = "GPL version 3"
__email__ = "info@inasafe.org"
__revision__ = '$Format:%H$'
def evaluate_formula(formula, variables):
    """Very simple formula evaluator. Beware the security.

    Substitutes each variable name appearing in ``formula`` with its
    value and evaluates the resulting Python expression.

    :param formula: A simple formula, e.g. ``'a + b'``.
    :type formula: str

    :param variables: A collection of variable (key and value).
    :type variables: dict

    :returns: The result of the formula execution, or the null value
        itself if any variable is null.
    :rtype: float, int
    """
    expression = formula
    for name, value in variables.items():
        is_null = value is None or (
            hasattr(value, 'isNull') and value.isNull())
        if is_null:
            # A single null input makes the whole formula null.
            return value
        expression = expression.replace(name, str(value))
    # NOTE: eval of a substituted string -- only trusted formulas and
    # values must ever reach this function.
    return eval(expression)
@profile
def run_single_post_processor(layer, post_processor):
"""Run single post processor.
If the layer has the output field, it will pass the post
processor calculation.
:param layer: The vector layer to use for post processing.
:type layer: QgsVectorLayer
:param post_processor: A post processor definition.
:type post_processor: dict
:returns: Tuple with True if success, else False with an error message.
:rtype: (bool, str)
"""
if not layer.editBuffer():
# Turn on the editing mode.
if not layer.startEditing():
msg = tr('The impact layer could not start the editing mode.')
return False, msg
# Calculate based on formula
# Iterate all possible output and create the correct field.
for output_key, output_value in list(post_processor['output'].items()):
# Get output attribute name
key = output_value['value']['key']
output_field_name = output_value['value']['field_name']
layer.keywords['inasafe_fields'][key] = output_field_name
# If there is already the output field, don't proceed
if layer.fields().lookupField(output_field_name) > -1:
msg = tr(
'The field name %s already exists.'
% output_field_name)
layer.rollBack()
return False, msg
# Add output attribute name to the layer
field = create_field_from_definition(output_value['value'])
result = layer.addAttribute(field)
if not result:
msg = tr(
'Error while creating the field %s.'
% output_field_name)
layer.rollBack()
return False, msg
# Get the index of output attribute
output_field_index = layer.fields().lookupField(output_field_name)
if layer.fields().lookupField(output_field_name) == -1:
msg = tr(
'The field name %s has not been created.'
% output_field_name)
layer.rollBack()
return False, msg
# Get the input field's indexes for input
input_indexes = {}
input_properties = {}
# Default parameters
default_parameters = {}
msg = None
# Iterate over every inputs.
for key, values in list(post_processor['input'].items()):
values = values if isinstance(values, list) else [values]
for value in values:
is_constant_input = (
value['type'] == constant_input_type)
is_field_input = (
value['type'] == field_input_type or
value['type'] == dynamic_field_input_type)
is_geometry_input = (
value['type'] == geometry_property_input_type)
is_keyword_input = (
value['type'] == keyword_input_type)
is_needs_input = (
value['type'] == needs_profile_input_type)
is_layer_property_input = (
value['type'] == layer_property_input_type)
if value['type'] == keyword_value_expected:
break
if is_constant_input:
default_parameters[key] = value['value']
break
elif is_field_input:
if value['type'] == dynamic_field_input_type:
key_template = value['value']['key']
field_param = value['field_param']
field_key = key_template % field_param
else:
field_key = value['value']['key']
inasafe_fields = layer.keywords['inasafe_fields']
name_field = inasafe_fields.get(field_key)
if not name_field:
msg = tr(
'%s has not been found in inasafe fields.'
% value['value']['key'])
continue
index = layer.fields().lookupField(name_field)
if index == -1:
fields = layer.fields().toList()
msg = tr(
'The field name %s has not been found in %s'
% (
name_field,
[f.name() for f in fields]
))
continue
input_indexes[key] = index
break
# For geometry, create new field that contain the value
elif is_geometry_input:
input_properties[key] = geometry_property_input_type['key']
break
# for keyword
elif is_keyword_input:
# See http://stackoverflow.com/questions/14692690/
# access-python-nested-dictionary-items-via-a-list-of-keys
value = reduce(
lambda d, k: d[k], value['value'], layer.keywords)
default_parameters[key] = value
break
# for needs profile
elif is_needs_input:
need_parameter = minimum_needs_parameter(
parameter_name=value['value'])
value = need_parameter.value
default_parameters[key] = value
break
# for layer property
elif is_layer_property_input:
| if value['value'] == layer_crs_input_value:
default_parameters[key] = layer.crs()
if value['value'] == size_c | alculator_input_value:
exposure = layer.keywords.get('exposure')
if not exposure:
keywords = layer.keywords.get('exposure_keywords')
exposure = keywords.get('exposure')
default_parameters[key] = SizeCalculator(
layer.crs(), layer.geometryType(), exposure)
break
else:
# executed when we can't find all the inputs
layer.rollBack()
return False, msg
# Create iterator for feature
request = QgsFeatureRequest().setSubsetOfAttributes(
list(input_indexes.values()))
iterator = layer.getFeatures(request)
inputs = input_indexes.copy()
inputs.update(input_properties)
# Iterat |
backbohne/docx-xslt | docxxslt/package.py | Python | mit | 1,498 | 0.000668 | from zipfile import ZipFile
class Package(object):
"""ZipFile wrapper to append/update/remove files from zip"""
def __init__(self, filename=None):
self.filename = filename
self.content = {}
def read(self, filename=None):
filename = filename or self.filename
with ZipFile(filename, 'r') as zip:
for filename in zip.namelist():
self.content[filename] = zip.read(filename)
def write(self, filename=None):
filename = filename or self.filename
with ZipFile(filename, 'w') as zip:
for filename, content in self.content.items():
zip.writestr(filename, content)
def get(self, filename):
if self.has(filename):
return self.content[filename]
else:
raise IndexError("%s does not exists" % filename)
def has(self, filename):
return filename in self.content
def update(self, filename, content):
if self.has(filename):
self.content[filename] = content
else:
raise IndexError("%s does not exists" % filename)
def append(self, filename, content):
if self.has(filename):
raise IndexError("%s does already exis | ts" % filename)
else:
self.content[filena | me] = content
def remove(self, filename):
if self.has(filename):
del self.content[filename]
else:
raise IndexError("%s does not exists" % filename)
|
ppizarror/korektor | bin/easyprocess/about.py | Python | gpl-2.0 | 83 | 0 | #!/usr/bin/e | nv python
# -*- coding: utf-8 -*-
"""
About
"""
__version__ = '0.2 | .2'
|
eltoncarr/tubular | tubular/scripts/merge_branch.py | Python | agpl-3.0 | 3,106 | 0.003542 | #! /usr/bin/env python3
"""
Command-line script to merge a branch.
"""
from __future__ import absolute_import
import io
from os import path
import sys
import logging
import yaml
import click
import click_log
# Add top-level module path to sys.path before importing tubular code.
sys.path.append(path.dirname(path.dirname(path.abspath(__file__))))
from tubular.git_repo import LocalGitAPI # pylint: disable=wrong-import-position
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
LOG = logging.getLogger(__name__)
@click.command()
@click.option(
u'--org',
help=u'Org from the GitHub repository URL of https://github.com/<org>/<repo>',
default=u'edx'
)
@click.option(
u'--repo',
help=u'Repo name from the GitHub repository URL of https://github.com/<org>/<repo>'
)
@click.option(
u'--source_branch',
help=u'Source branch to be merged into the target branch in the PR.',
requ | ired=True
)
@click.option(
u'--target_branch',
help=u'Target branch into which the source branch will be merged in the PR.',
required=True
)
@click.option(
u'--fast_forward_only',
help=u'Either perform a fast-forward merge -or- fail if not possible.',
default=False,
is_flag | =True
)
@click.option(
u'--output_file',
help=u'File in which to write the script\'s YAML output',
default=u'target/merge_branch_sha.yml'
)
@click.option(
u'--reference-repo',
help=u'Path to a reference repo to use to speed up cloning',
)
@click_log.simple_verbosity_option(default=u'INFO')
@click_log.init()
def merge_branch(org,
repo,
source_branch,
target_branch,
fast_forward_only,
output_file,
reference_repo):
u"""
Merges the source branch into the target branch without creating a pull request for the merge.
Clones the repo in order to perform the proper git commands locally.
Args:
org (str):
repo (str):
source_branch (str):
target_branch (str):
fast_forward_only (bool): If True, the branch merge will be performed as a fast-forward merge.
If the merge cannot be performed as a fast-forward merge, the merge will fail.
"""
github_url = u'git@github.com:{}/{}.git'.format(org, repo)
with LocalGitAPI.clone(github_url, target_branch, reference_repo).cleanup() as local_repo:
merge_sha = local_repo.merge_branch(source_branch, target_branch, fast_forward_only)
local_repo.push_branch(target_branch)
with io.open(output_file, u'w') as stream:
yaml.safe_dump(
{
u'org_name': org,
u'repo_name': repo,
u'source_branch_name': source_branch,
u'target_branch_name': target_branch,
u'fast_forward_only': fast_forward_only,
u'sha': merge_sha
},
stream,
default_flow_style=False,
explicit_start=True
)
if __name__ == u"__main__":
merge_branch() # pylint: disable=no-value-for-parameter
|
numenta/nupic.research | src/nupic/research/frameworks/vernon/interfaces/__init__.py | Python | agpl-3.0 | 1,076 | 0 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2020, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this s | oftware code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warr | anty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
from .distributed_aggregation import *
from .experiment import *
from .step_based_logging import *
|
robotology-dependencies/urdfdom | urdf_parser_py/src/urdf_parser_py/xml_reflection/core.py | Python | bsd-3-clause | 16,161 | 0.03756 | from urdf_parser_py.xml_reflection.basics import *
import sys
import copy
# @todo Get rid of "import *"
# @todo Make this work with decorators
# Is this reflection or serialization? I think it's serialization...
# Rename?
# Do parent operations after, to allow child to 'override' parameters?
# Need to make sure that duplicate entires do not get into the 'unset*' lists
def reflect(cls, *args, **kwargs):
""" Simple wrapper to add XML reflection to an xml_reflection.Object class """
cls.XML_REFL = Reflection(*args, **kwargs)
# Rename 'write_xml' to 'write_xml' to have paired 'load/dump', and make 'pre_dump' and 'post_load'?
# When dumping to yaml, include tag name?
# How to incorporate line number and all that jazz?
def on_error(message):
""" What to do on an error. This can be changed to raise an exception. """
sys.stderr.write(message)
skip_default = True
#defaultIfMatching = True # Not implemeneted yet
# Registering Types
value_types = {}
value_type_prefix = ''
def start_namespace(namespace):
"""
Basic mechanism to prevent conflicts for string types for URDF and SDF
@note Does not handle nesting!
"""
global value_type_prefix
value_type_prefix = namespace + '.'
def end_namespace():
global value_type_prefix
value_type_prefix = ''
def add_type(key, value):
if isinstance(key, str):
key = value_type_prefix + key
assert key not in value_types
value_types[key] = value
def get_type(cur_type):
""" Can wrap value types if needed """
if value_type_prefix and isinstance(cur_type, str):
# See if it exists in current 'namespace'
curKey = value_type_prefix + cur_type
value_type = value_types.get(curKey)
else:
value_type = None
if value_type is None:
# Try again, in 'global' scope
value_type = value_types.get(cur_type)
if value_type is None:
value_type = make_type(cur_type)
add_type(cur_type, value_type)
return value_type
def make_type(cur_type):
if isinstance(cur_type, ValueType):
return cur_type
elif isinstance(cur_type, str):
if cur_type.startswith('vector'):
extra = cur_type[6:]
if extra:
count = float(extra)
else:
count = None
return VectorType(count)
else:
raise Exception("Invalid value type: {}".format(cur_type))
elif cur_type == list:
return ListType()
elif issubclass(cur_type, Object):
return ObjectType(cur_type)
elif cur_type in [str, float]:
return BasicType(cur_type)
else:
raise Exception("Invalid type: {}".format(cur_type))
class ValueType(object):
""" Primitive value type """
def from_xml(self, node):
return self.from_string(node.text)
def write_xml(self, node, value):
""" If type has 'write_xml', this function should expect to have it's own XML already created
i.e., In Axis.to_sdf(self, node), 'node' would be the 'axis' element.
@todo Add function that makes an XML node completely independently?"""
node.text = self.to_string(value)
def equals(self, a, b):
return a == b
class BasicType(ValueType):
def __init__(self, cur_type):
self.type = cur_type
def to_string(self, value):
return str(value)
def from_string(self, value):
return self.type(value)
class ListType(ValueType):
def to_string(self, values):
return ' '.join(values)
def from_string(self, text):
return text.split()
def equals(self, aValues, bValues):
return len(aValues) == len(bValues) and all(a == b for (a, b) in zip(aValues, bValues))
class VectorType(ListType):
def __init__(self, count = None):
self.count = count
def check(self, values):
if self.count is not None:
assert len(values) == self.count, "Invalid vector length"
def to_string(self, values):
self.check(values)
raw = list(map(str, values))
return ListType.to_string(self, raw)
def from_string(self, text):
raw = ListType.from_string(self, text)
self.check(raw)
return list(map(float, raw))
class RawType(ValueType):
""" Simple, raw XML value. Need to bugfix putting this back into a document """
def from_xml(self, node):
return node
def write_xml(self, node, value):
# @todo rying to insert an element at root level seems to screw up pretty printing
children = xml_children(value)
list(map(node.append, children))
# Copy attributes
for (attrib_key, attrib_value) in value.attrib.iteritems():
node.set(attrib_key, attrib_value)
class SimpleElementType(ValueType):
"""
Extractor that retrieves data from an element, given a
specified attribute, casted to value_type.
"""
def __init__(self, attribute, value_type):
self.attribute = attribute
self.value_type = get_type(value_type)
def from_xml(self, node):
text = node.get(self.attribute)
return self.value_type.from_string(text)
def write_xml(self, node, value):
text = self.value_type.to_string(value)
node.set(self.attribute, text)
class ObjectType(ValueType):
def __init__(self, cur_type):
self.type = cur_type
def from_xml(self, node):
obj = self.type()
obj.read_xml(node)
return obj
def write_xml(self, node, obj):
obj.write_xml(node)
class FactoryType(ValueType):
def __init__(self, name, typeMap):
self.name = name
self.typeMap = typeMap
self.nameMap = {}
for (key, value) in typeMap.items():
# Reverse lookup
self.nameMap[value] = key
def from_xml(self, node):
cur_type = self.typeMap.get(node.tag)
if cur_type is None:
raise Exception("Invalid {} tag: {}".format(self.name, node.tag))
value_type = get_type(cur_type)
return value_type.from_xml(node)
def get_name(self, obj):
cur_type = type(obj)
name = self.nameMap.get(cur_type)
if name is None:
raise Exception("Invalid {} type: {}".format(self.name, cur_type))
return name
def write_xml(self, node, obj):
obj.write_xml(node)
class DuckTypedFactory(ValueType):
def __init__(self, name, typeOrder):
self.name = name
assert len(typeOrder) > 0
self.type_order = typeOrder
def from_xml(self, node):
error_set = []
for value_type in self.type_order:
try:
return value_type.from_xml(node)
except Exception, e:
error_set.append((value_type, e))
# Should have returned, we encountered errors
out = "Could not perform duck-typed parsing."
for (value_type, e) in error_set:
out += "\nValue Type: {}\nException: {}\n".format(value_type, e)
raise Exception(out)
def write_xml(self, node, obj):
obj.write_xml(node)
class Param(object):
""" Mirro | ring Gazebo's SDF api
@param xml_var: | Xml name
@todo If the value_type is an object with a tag defined in it's reflection, allow it to act as the default tag name?
@param var: Python class variable name. By default it's the same as the XML name
"""
def __init__(self, xml_var, value_type, required = True, default = None, var = None):
self.xml_var = xml_var
if var is None:
self.var = xml_var
else:
self.var = var
self.type = None
self.value_type = get_type(value_type)
self.default = default
if required:
assert default is None, "Default does not make sense for a required field"
self.required = required
self.is_aggregate = False
def set_default(self):
if self.required:
raise Exception("Required {} not set in XML: {}".format(self.type, self.xml_var))
elif not skip_default:
setattr(obj, self.var, self.default)
class Attribute(Param):
def __init__(self, xml_var, value_type, required = True, default = None, var = None):
Param.__init__(self, xml_var, value_type, required, default, var)
self.type = 'attribute'
def set_from_string(self, obj, value):
""" Node is the parent node in this case """
# Duplicate attributes cannot occur at this point
setattr(obj, self.var, self.value_type.from_string(value))
def add_to_xml(self, obj, node):
value = getattr(obj, self.var)
# Do not set with default value if value is None
if value is None:
if self.required:
raise Exception("Required attribute not set in object: {}".format(self.var))
elif not skip_default:
value = self.default
# Allow value type to handle None?
if value is not None:
node.set(self.xml_var, self.value_type.to_string(value))
# Add option if this requires a header? Like <joints> <joint/> .... </joints> ??? Not really... This would be a specific list type, not really aggr |
Juniper/tempest | tempest/tests/lib/common/test_cred_client.py | Python | apache-2.0 | 3,309 | 0 | # Copyright 2016 Hewlett Packard Enterprise Development LP
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from tempest.lib.common import cred_client
from tempest.tests import base
class TestCredClientV2(base.TestCase):
def setUp(self):
super(TestCredClientV2, self).setUp()
self.identity_client = mock.MagicMock()
self.projects_client = mock.MagicMock()
self.users_client = mock.MagicMock()
self.roles_client = mock.MagicMock()
self.creds_client = cred_client.V2CredsClient(self.identity_client,
self.projects_client,
self.users_client,
self.roles_client)
def test_create_project(self):
self.projects_client.create_tenant.return_value = {
'tenant': 'a_tenant'
}
res = self.creds_client.create_project('fake_name', 'desc')
self.assertEqual('a_tenant', res)
self.projects_client.create_tenant.assert_called_once_with(
name='fake_name', description='desc')
def test_delete_project(self):
self.creds_client.delete_project('fake_id')
self.projects_client.delete_tenant.assert_called_once_with(
'fake_id')
class TestCredClientV3(base.TestCase):
def setUp(self):
super(TestCredClientV3, self).setUp()
self.identity_client = mock.MagicMock()
self.projects_client = mock.MagicMock()
self.users_client = mock.MagicMock()
self.roles_client = mock.MagicMock()
self.domains_client = mock.MagicMock()
self.domains_client.list_domains.return_value = {
'domains': [{'id': 'fake_domain_id'}]
}
self.creds_client = cred_client.V3CredsClient(self.identity_client,
self.projects_client,
self.users_client,
self.roles_client,
self.domains_client,
'fake_domain')
def test_create_project(self):
self.projects_client.create_project.return_value = {
'project': 'a_tenant'
}
res = self.creds_client.create_project('fake_name', 'desc')
self.assertEqual('a_tenant', res)
self.projects_client.create_project.assert_called_once_with(
name='fake_name', description | ='desc', domain_id='fake_domain_id')
def test_delete_project(self):
self.creds_client.delete_project('fake_id')
self.projects_client.delete_project.assert | _called_once_with(
'fake_id')
|
florentchandelier/zipline | zipline/finance/trading.py | Python | apache-2.0 | 9,694 | 0.000309 | #
# Copyright 2016 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from functools import partial
import logbook
import pandas as pd
from pandas.tslib import normalize_date
from six import string_types
from sqlalchemy import create_engine
from zipline.assets import AssetDBWriter, AssetFinder
from zipline.assets.continuous_futures import CHAIN_PREDICATES
from zipline.data.loader import load_market_data
from zipline.utils.calendars import get_calendar
from zipline.utils.memoize import remember_last
log = logbook.Logger('Trading')
DEFAULT_CAPITAL_BASE = 1e5
class TradingEnvironment(object):
"""
The financial simulations in zipline depend on information
about the benchmark index and the risk free rates of return.
The benchmark index defines the benchmark returns used in
the calculation of performance metrics such as alpha/beta. Many
components, including risk, performance, transforms, and
batch_transforms, need access to a calendar of trading days and
market hours. The TradingEnvironment maintains two time keeping
facilities:
- a DatetimeIndex of trading days for calendar calculations
- a timezone name, which should be local to the exchange
hosting the benchmark index. All dates are normalized to UTC
for serialization and storage, and the timezone is used to
ensure proper rollover through daylight savings and so on.
User code will not normally need to use TradingEnvironment
directly. If you are extending zipline's core financial
components and need to use the environment, you must import the module and
build a new TradingEnvironment object, then pass that TradingEnvironment as
the 'env' arg to your TradingAlgorithm.
Parameters
----------
load : callable, optional
The function that returns benchmark returns and treasury curves.
The treasury curves are expected to be a DataFrame with an index of
dates and columns of the curve names, e.g. '10year', '1month', etc.
bm_symbol : str, optional
The benchmark symbol
exchange_tz : tz-coercable, optional
The timezone of the exchange.
trading_calendar : TradingCalendar, optional
The trading calendar to work with in this environment.
asset_db_path : str or sa.engine.Engine, optional
The path to the assets db or sqlalchemy Engine object to use to
construct an AssetFinder.
"""
# Token used as a substitute for pickling objects that contain a
# reference to a TradingEnvironment
PERSISTENT_TOKEN = "<TradingEnvironment>"
def __init__(
self,
load=None,
bm_symbol='SPY',
exchange_tz="US/Eastern",
trading_calendar=None,
asset_db_path=':memory:',
future_chain_predicates=CHAIN_PREDICATES,
local_benchmark=None,
environ=None,
):
self.bm_symbol = bm_symbol
self.local_benchmark = local_benchmark
self.environ = environ
if not load:
load = partial(load_market_data, local_benchmark=self.local_benchmark, environ=self.environ)
self.trading_calendar = trading_calendar
if not self.trading_calendar:
self.trading_calendar = get_calendar("NYSE")
self.benchmark_returns, self.treasury_curves = load(
self.trading_calendar.day,
self.trading_calendar.schedule.index,
self.bm_symbol,
)
self.exchange_tz = exchange_tz
if isinstance(asset_db_path, string_types):
asset_db_path = 'sqlite:///' + asset_db_path
self.engine = engine = create_engine(asset_db_path)
else:
self.engine = engine = asset_db_path
if engine is not None:
AssetDBWriter(engine).init_db()
self.asset_finder = AssetFinder(
engine,
future_chain_predicates=future_chain_predicates)
else:
self.asset_finder = None
def update_local_bench(self, local_benchmark):
load = partial(load_market_data, loca | l_benchmark=local_benchmark, environ=self.environ)
self.benchmark_returns, self.treasury_curves = load(
self.trading_calendar.day,
self.trading_calendar.schedule.index,
self.bm_symbol,
)
def write_data(self, **kwargs):
"""Write data into the | asset_db.
Parameters
----------
**kwargs
Forwarded to AssetDBWriter.write
"""
AssetDBWriter(self.engine).write(**kwargs)
class SimulationParameters(object):
def __init__(self, start_session, end_session,
trading_calendar,
capital_base=DEFAULT_CAPITAL_BASE,
emission_rate='daily',
data_frequency='daily',
arena='backtest'):
assert type(start_session) == pd.Timestamp
assert type(end_session) == pd.Timestamp
assert trading_calendar is not None, \
"Must pass in trading calendar!"
assert start_session <= end_session, \
"Period start falls after period end."
assert start_session <= trading_calendar.last_trading_session, \
"Period start falls after the last known trading day."
assert end_session >= trading_calendar.first_trading_session, \
"Period end falls before the first known trading day."
# chop off any minutes or hours on the given start and end dates,
# as we only support session labels here (and we represent session
# labels as midnight UTC).
self._start_session = normalize_date(start_session)
self._end_session = normalize_date(end_session)
self._capital_base = capital_base
self._emission_rate = emission_rate
self._data_frequency = data_frequency
# copied to algorithm's environment for runtime access
self._arena = arena
self._trading_calendar = trading_calendar
if not trading_calendar.is_session(self._start_session):
# if the start date is not a valid session in this calendar,
# push it forward to the first valid session
self._start_session = trading_calendar.minute_to_session_label(
self._start_session
)
if not trading_calendar.is_session(self._end_session):
# if the end date is not a valid session in this calendar,
# pull it backward to the last valid session before the given
# end date.
self._end_session = trading_calendar.minute_to_session_label(
self._end_session, direction="previous"
)
self._first_open = trading_calendar.open_and_close_for_session(
self._start_session
)[0]
self._last_close = trading_calendar.open_and_close_for_session(
self._end_session
)[1]
@property
def capital_base(self):
return self._capital_base
@property
def emission_rate(self):
return self._emission_rate
@property
def data_frequency(self):
return self._data_frequency
@data_frequency.setter
def data_frequency(self, val):
self._data_frequency = val
@property
def arena(self):
return self._arena
@arena.setter
def arena(self, val):
self._arena = val
@property
def start_session(self):
return self._start_session
@property
def end_session(self):
return self._end_session
@property
def first_open(self):
return self._first_open
@property
de |
amdowell/python-brasil-2016 | samples/falcon/run.py | Python | mit | 517 | 0 | # encoding: utf-8
import falcon
from tornado import (httpserver, ioloop, wsgi)
from resources.hello import Hello
PORT = 5000
def r | outes():
# V1
routes = falcon.API()
routes.add_route('/api/v1/hello', Hello())
return routes
def execute(routes):
# Servidor WSGI
container = wsgi.WSGIContainer(routes)
http_server = httpserver.HTTPServer(container)
http_server.listen(PORT)
ioloop.IOLoop.instance().start()
if __name__ == '__main__': # pragma: no cover
| execute(routes())
|
PanDAWMS/panda-bigmon-core | core/iDDS/DAGvisualization.py | Python | apache-2.0 | 2,510 | 0.001195 | from core.views import initRequest
from django.shortcuts import render_to_response
from django.utils.cache import patch_response_headers
import urllib.request
from urllib.error import HTTPError, URLError
from core.settings.config import IDDS_HOST
import json
SELECTION_CRITERIA = '/monitor_request_relation'
def query_idds_server(request_id):
response = []
url = f"{IDDS_HOST}{SELECTION_CRITERIA}/{request_id}/null"
try:
response = urllib.request.urlopen(url).read()
except (HTTPError, URLError) as e:
print('Error: {}'.format(e.reason))
stats = json.loads(response)
return stats
def fill_nodes_edges(current_node):
nodes, edges = [], []
nodes.append(current_node['work']['workload_id'])
last_edge = {'start': None, 'finish': current_node['work']['workload_id']}
if 'next_works' in current_node:
for work in current_node.get('next_works'):
nodes_sub, edges_sub, last_edge_sub = fill_nodes_edges(work)
last_edge_sub['start'] = current_node['work']['workload_id']
nodes.extend(nodes_sub)
edges.extend(edges_sub)
edges.append(last_edge_sub)
return nodes, edges, last_edge
def daggraph(request):
initRequest(request)
requestid = int(request.session['requestParams']['requestid'])
stats = query_idds_server(requestid)
nodes_dag_vis = []
edges_dag_vis = []
if len(stats) > 0:
relation_map = stats[0]['relation_map']
if len(relation_map) > 0:
relation_map = relation_map[0]
nodes, edges, last_edge = fill_nodes_edges(relation_map)
for node in nodes:
nodes_dag_vis.append(
| {'group': 'nodes',
'data': {'id': str(node),
}
}
| )
for edge in edges:
edges_dag_vis.append({
'group': 'edges',
'data': {
'id': str(edge['start']) + '_to_' + str(edge['finish']),
'target': str(edge['finish']),
'source': str(edge['start'])
}
})
DAG = []
DAG.extend(nodes_dag_vis)
DAG.extend(edges_dag_vis)
data = {
'DAG': DAG
}
response = render_to_response('DAGgraph.html', data, content_type='text/html')
patch_response_headers(response, cache_timeout=request.session['max_age_minutes'] * 60)
return response
|
hailongqiu/new-deepin-media-player | src/widget/notebook.py | Python | gpl-3.0 | 8,517 | 0.004043 | #! /usr/bin/ python
# -*- coding: utf-8 -*-
# Copyright (C) 2013 Deepin, Inc.
# 2013 Hailong Qiu
#
# Author: Hailong Qiu <356752238@qq.com>
# Maintainer: Hailong Qiu <356752238@qq.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import gtk
from gtk import gdk
import gobject
from draw import draw_text
from color import alpha_color_hex_to_cairo
class NoteBook(gtk.Container):
def __init__(self):
gtk.Container.__init__(self)
self.__init_values()
def __init_values(self):
self.add_events(gtk.gdk.ALL_EVENTS_MASK)
self.title_child1 = gtk.Button("本地列表")
self.title_child1.set_parent(self)
self.title_child2 = gtk.Button("网络列表")
self.title_child2.set_parent(self)
self.title_w = 120
self.title_h = 30
self.save_title_h = self.title_h
#
self.title_child1.connect("clicked", self.__title_child1_clicked)
self.title_child2.connect("clicked", self.__title_child2_clicked)
self.title_child1.connect("expose-event", self.__title_child1_expose_event)
self.title_child2.connect("expose-event", self.__title_child2_expose_event)
#
self.layout_show_check = True
self.layout1 = None
self.layout2 = None
self.children = []
# 添加子控件.
self.children.append(self.title_child1)
self.children.append(self.title_child2)
def __title_child1_clicked(self, widget):
if self.layout2 and self.layout1:
self.layout_show_check = True
def __title_child2_clicked(self, widget):
if self.layout1 and self.layout2:
self.layout_show_check = False
def __title_child1_expose_event(self, widget, event):
self.__title_expose_event(widget, event, self.layout_show_check)
return True
def __title_child2_expose_event(self, widget, event):
self.__title_expose_event(widget, event, not self.layout_show_check)
return True
    def __title_expose_event(self, widget, event, show_check):
        """Custom-draw one tab button: flat background plus centred label.

        `show_check` is True when this tab is the currently selected one;
        the selected tab gets a lighter background and white text.
        """
        cr = widget.window.cairo_create()
        rect = widget.allocation
        # draw background (selected tab is slightly lighter).
        if show_check:
            bg_color = "#272727"
        else:
            bg_color = "#1b1b1b"
        cr.set_source_rgba(*alpha_color_hex_to_cairo((bg_color,1.0)))
        cr.rectangle(rect.x, rect.y, rect.width + 1, rect.height)
        cr.fill()
        # draw title name (white when selected, grey otherwise).
        text = widget.get_label()
        import pango
        if show_check:
            text_color = "#FFFFFF"
        else:
            text_color = "#A9A9A9"
        draw_text(cr,
                  text,
                  rect.x, rect.y, rect.width, rect.height,
                  text_color=text_color,
                  text_size=9,
                  alignment=pango.ALIGN_CENTER)
def add_layout1(self, layout1):
self.layout1 = layout1
self.layout1.set_parent(self)
def add_layout2(self, layout2):
self.layout2 = layout2
self.layout2.set_parent(self)
def do_realize(self):
self.set_realized(True)
self.__init_window()
self.__init_children()
self.queue_resize()
def __init_window(self):
self.window = gdk.Window(
self.get_parent_window(),
window_type=gdk.WINDOW_CHILD,
x=self.allocation.x,
y=self.allocation.y,
width=self.allocation.width,
height=self.allocation.height,
colormap=self.get_colormap(),
wclass=gdk.INPUT_OUTPUT,
visual=self.get_visual(),
event_mask=(self.get_events()
| gtk.gdk.VISIBILITY_NOTIFY
| gdk.EXPOSURE_MASK
| gdk.SCROLL_MASK
| gdk.POINTER_MOTION_MASK
| gdk.ENTER_NOTIFY_MASK
| gdk.LEAVE_NOTIFY_MASK
| gdk.BUTTON_PRESS_MASK
| gdk.BUTTON_RELEASE_MASK
| gdk.KEY_PRESS_MASK
| gdk.KEY_RELEASE_MASK
))
self.window.set_user_data(self)
self.style.set_background(self.window, gtk.STATE_NORMAL)
def __init_children(self):
if self.title_child1:
self.title_child1.set_parent_window(self.window)
if self.title_child2:
self.title_child2.set_parent_window(self.window)
self.layout1.set_parent_window(self.window)
self.layout2.set_parent_window(self.window)
def do_unrealize(self):
pass
def do_map(self):
gtk.Container.do_map(self)
self.set_flags(gtk.MAPPED)
#
self.window.show()
def do_umap(self):
gtk.Container.do_umap(self)
self.window.hide()
def do_expose_event(self, e):
#
gtk.Container.do_expose_event(self, e)
return False
def do_size_request(self, req):
self.title_child1.size_request()
self.title_child2.size_request()
self.layout1.size_request()
self.layout2.size_request()
def do_size_allocate(self, allocation):
gtk.Container.do_size_allocate(self, allocation)
self.allocation = allocation
#
title_w_padding = self.allocation.width/len(self.children)
allocation = gdk.Rectangle()
allocation.x = 0
allocation.y = 0
allocation.width = title_w_padding
allocation.height = self.title_h
self.title_child1.size_allocate(allocation)
allocation.x = 0 + allocation.width
self.title_child2.size_allocate(allocation)
#
if self.layout_s | how_check:
layout2_x = -self.allocation.width
else:
layout2_x = 0
allocation.x = layout2_x
allocation.y = 0 + self.title_h #self.layout2.allocation.y
allocation.width = self.allocation.width
allocation.height = self.allocation.height - se | lf.title_h
self.layout2.size_allocate(allocation)
if not self.layout_show_check:
layout1_x = - self.allocation.width
else:
layout1_x = 0
allocation.x = layout1_x
allocation.y = 0 + self.title_h #self.layout1.allocation.y
self.layout1.size_allocate(allocation)
#
if self.get_realized():
self.window.move_resize(
self.allocation.x,
self.allocation.y,
self.allocation.width,
self.allocation.height)
def do_show(self):
gtk.Container.do_show(self)
def do_forall(self, include_internals, callback, data):
callback(self.title_child1, data)
callback(self.title_child2, data)
callback(self.layout1, data)
callback(self.layout2, data)
def do_remove(self, widget):
pass
    def hide_title(self):
        """Collapse the title bar, remembering its height for show_title()."""
        self.save_title_h = self.title_h
        self.title_h = 0
    def show_title(self):
        """Restore the title bar to the height saved by hide_title()."""
        self.title_h = self.save_title_h
gobject.type_register(NoteBook)
if __name__ == "__main__":
from treeview_base import TreeViewBase
win = gtk.Window(gtk.WINDOW_TOPLEVEL)
scroll_win = gtk.ScrolledWindow()
treeview_base = TreeViewBase()
scroll_win.add_with_viewport(treeview_base)
note_book = NoteBook()
note_book.add_layout1(scroll_win)
note_book.add_layout2(gtk.Button("测试一下"))
win.add(note_book)
#
node1 = treeview_base.nodes.add("优酷视频")
dianshiju = node1.nodes.add("电视剧")
node1.nodes.add("电影")
node1.nodes.add("综艺")
node1.nodes.add("音乐")
node1.nodes.add("动漫")
# 电视剧?
xinshangying = dianshiju.nodes.add("新上映")
d |
rohitranjan1991/home-assistant | homeassistant/components/sensor/device_trigger.py | Python | mit | 7,736 | 0.001163 | """Provides device triggers for sensors."""
import voluptuous as vol
from homeassistant.components.device_automation import DEVICE_TRIGGER_BASE_SCHEMA
from homeassistant.components.device_automation.exceptions import (
InvalidDeviceAutomationConfig,
)
from homeassistant.components.homeassistant.triggers import (
numeric_state as numeric_state_trigger,
)
from homeassistant.const import (
CONF_ABOVE,
CONF_BELOW,
CONF_ENTITY_ID,
CONF_FOR,
CONF_TYPE,
)
from homeassistant.exceptions import HomeAssistantError
from homeassistant.helpers import config_validation as cv
from homeassistant.helpers.entity import get_device_class, get_unit_of_measurement
from homeassistant.helpers.entity_registry import async_entries_for_device
from . import DOMAIN, SensorDeviceClass
# mypy: allow-untyped-defs, no-check-untyped-defs
DEVICE_CLASS_NONE = "none"
CONF_APPARENT_POWER = "apparent_power"
CONF_BATTERY_LEVEL = "battery_level"
CONF_CO = "carbon_monoxide"
CONF_CO2 = "carbon_dioxide"
CONF_CURRENT = "current"
CONF_ENERGY = "energy"
CONF_FREQUENCY = "frequency"
CONF_GAS = "gas"
CONF_HUMIDITY = "humidity"
CONF_ILLUMINANCE = "illuminance"
CONF_NITROGEN_DIOXIDE = "nitrogen_dioxide"
CONF_NITROGEN_MONOXIDE = "nitrogen_monoxide"
CONF_NITROUS_OXIDE = "nitrous_oxide"
CONF_OZONE = "ozone"
CONF_PM1 = "pm1"
CONF_PM10 = "pm10"
CONF_PM25 = "pm25"
CONF_POWER = "power"
CONF_POWER_FACTOR = "power_factor"
CONF_PRESSURE = "pressure"
CONF_REACTIVE_POWER = "reactive_power"
CONF_SIGNAL_STRENGTH = "signal_strength"
CONF_SULPHUR_DIOXIDE = "sulphur_dioxide"
CONF_TEMPERATURE = "temperature"
CONF_VOLATILE_ORGANIC_COMPOUNDS = "volatile_organic_compounds"
CONF_VOLTAGE = "voltage"
CONF_VALUE = "value"
ENTITY_TRIGGERS = {
SensorDeviceClass.APPARENT_POWER: [{CONF_TYPE: CONF_APPARENT_POWER}],
SensorDeviceClass.BATTERY: [{CONF_TYPE: CONF_BATTERY_LEVEL}],
SensorDeviceClass.CO: [{CONF_TYPE: CONF_CO}],
SensorDeviceClass.CO2: [{CONF_TYPE: CONF_CO2}],
SensorDeviceClass.CURRENT: [{CONF_TYPE: CONF_CURRENT}],
SensorDeviceClass.ENERGY: [{CONF_TYPE: CONF_ENERGY}],
SensorDeviceClass.FREQUENCY: [{CONF_TYPE: CONF_FREQUENCY}],
SensorDeviceClass.GAS: [{CONF_TYPE: CONF_GAS}],
SensorDeviceClass.HUMIDITY: [{CONF_TYPE: CONF_HUMIDITY}],
SensorDeviceClass.ILLUMINANCE: [{CONF_TYPE: CONF_ILLUMINANCE}],
SensorDeviceClass.NITROGEN_DIOXIDE: [{CONF_TYPE: CONF_NITROGEN_DIOXIDE}],
SensorDeviceClass.NITROGEN_MONOXIDE: [{CONF_TYPE: CONF_NITROGEN_MONOXIDE}],
SensorDeviceClass.NITROUS_OXIDE: [{CONF_TYPE: CONF_NITROUS_OXIDE}],
SensorDeviceClass.OZONE: [{CONF_TYPE: CONF_OZONE}],
SensorDeviceClass.PM1: [{CONF_TYPE: CONF_PM1}],
SensorDeviceClass.PM10: [{CONF_TYPE: CONF_PM10}],
SensorDeviceClass.PM25: [{CONF_TYPE: CONF_PM25}],
SensorDeviceClass.POWER: [{CONF_TYPE: CONF_POWER}],
SensorDeviceClass.POWER_FACTOR: [{CONF_TYPE: CONF_POWER_FACTOR}],
SensorDeviceClass.PRESSURE: [{CONF_TYPE: CONF_PRESSURE}],
SensorDeviceClass.REACTIVE_POWER: [{CONF_TYPE: CONF_REACTIVE_POWER}],
SensorDeviceClass.SIGNAL_STRENGTH: [{CONF_TYPE: CONF_SIGNAL_STRENGTH}],
SensorDeviceClass.SULPHUR_DIOXIDE: [{CONF_TYPE: CONF_SULPHUR_DIOXIDE}],
SensorDeviceClass.TEMPERATURE: [{CONF_TYPE: CONF_TEMPERATURE}],
SensorDeviceClass.VOLATILE_ORGANIC_COMPOUNDS: [
{CONF_TYPE: CONF_VOLATILE_ORGANIC_COMPOUNDS}
] | ,
SensorDeviceClass.VOLTAGE: [{CONF_TYPE: CONF_VOLTAGE}],
DEVICE_CLASS_NONE: [{CONF_TYPE: CONF_VALUE}],
}
TRIGGER_SCHEMA = vol.All(
DEVICE_TRIGGER_BASE_SCHEMA.extend(
{
vol.Required(CONF_ENTITY_ID): cv.entity_id,
vol.Required(CONF | _TYPE): vol.In(
[
CONF_APPARENT_POWER,
CONF_BATTERY_LEVEL,
CONF_CO,
CONF_CO2,
CONF_CURRENT,
CONF_ENERGY,
CONF_FREQUENCY,
CONF_GAS,
CONF_HUMIDITY,
CONF_ILLUMINANCE,
CONF_NITROGEN_DIOXIDE,
CONF_NITROGEN_MONOXIDE,
CONF_NITROUS_OXIDE,
CONF_OZONE,
CONF_PM1,
CONF_PM10,
CONF_PM25,
CONF_POWER,
CONF_POWER_FACTOR,
CONF_PRESSURE,
CONF_REACTIVE_POWER,
CONF_SIGNAL_STRENGTH,
CONF_SULPHUR_DIOXIDE,
CONF_TEMPERATURE,
CONF_VOLATILE_ORGANIC_COMPOUNDS,
CONF_VOLTAGE,
CONF_VALUE,
]
),
vol.Optional(CONF_BELOW): vol.Any(vol.Coerce(float)),
vol.Optional(CONF_ABOVE): vol.Any(vol.Coerce(float)),
vol.Optional(CONF_FOR): cv.positive_time_period_dict,
}
),
cv.has_at_least_one_key(CONF_BELOW, CONF_ABOVE),
)
async def async_attach_trigger(hass, config, action, automation_info):
"""Listen for state changes based on configuration."""
numeric_state_config = {
numeric_state_trigger.CONF_PLATFORM: "numeric_state",
numeric_state_trigger.CONF_ENTITY_ID: config[CONF_ENTITY_ID],
}
if CONF_ABOVE in config:
numeric_state_config[numeric_state_trigger.CONF_ABOVE] = config[CONF_ABOVE]
if CONF_BELOW in config:
numeric_state_config[numeric_state_trigger.CONF_BELOW] = config[CONF_BELOW]
if CONF_FOR in config:
numeric_state_config[CONF_FOR] = config[CONF_FOR]
numeric_state_config = await numeric_state_trigger.async_validate_trigger_config(
hass, numeric_state_config
)
return await numeric_state_trigger.async_attach_trigger(
hass, numeric_state_config, action, automation_info, platform_type="device"
)
async def async_get_triggers(hass, device_id):
"""List device triggers."""
triggers = []
entity_registry = await hass.helpers.entity_registry.async_get_registry()
entries = [
entry
for entry in async_entries_for_device(entity_registry, device_id)
if entry.domain == DOMAIN
]
for entry in entries:
device_class = get_device_class(hass, entry.entity_id) or DEVICE_CLASS_NONE
unit_of_measurement = get_unit_of_measurement(hass, entry.entity_id)
if not unit_of_measurement:
continue
templates = ENTITY_TRIGGERS.get(
device_class, ENTITY_TRIGGERS[DEVICE_CLASS_NONE]
)
triggers.extend(
{
**automation,
"platform": "device",
"device_id": device_id,
"entity_id": entry.entity_id,
"domain": DOMAIN,
}
for automation in templates
)
return triggers
async def async_get_trigger_capabilities(hass, config):
"""List trigger capabilities."""
try:
unit_of_measurement = get_unit_of_measurement(hass, config[CONF_ENTITY_ID])
except HomeAssistantError:
unit_of_measurement = None
if not unit_of_measurement:
raise InvalidDeviceAutomationConfig(
f"No unit of measurement found for trigger entity {config[CONF_ENTITY_ID]}"
)
return {
"extra_fields": vol.Schema(
{
vol.Optional(
CONF_ABOVE, description={"suffix": unit_of_measurement}
): vol.Coerce(float),
vol.Optional(
CONF_BELOW, description={"suffix": unit_of_measurement}
): vol.Coerce(float),
vol.Optional(CONF_FOR): cv.positive_time_period_dict,
}
)
}
|
moment-of-peace/EventForecast | rnn_text.py | Python | lgpl-3.0 | 9,696 | 0.007735 | import os
import pickle
import sys
import getopt
import numpy as np
from keras.models import Sequential
from keras.layers import Flatten, Dropout, Dense, Bidirectional
from keras.layers.convolutional import Conv1D
from keras.layers.convolutional import MaxPooling1D
from keras.layers.embeddings import Embedding
from keras.layers.recurrent import LSTM
import keras.optimizers as opt
#import gensim
global LEN # the input length
global DIM # dimension of word vector
global BATCH
# index and embed raw text
def gen_embed_model(modelFile):
    """Load a word2vec-style text embedding model.

    The file's first line is "<vocab_size> <dimension>"; every following
    line is "<word> <v1> <v2> ...".  (Garbled characters in the original
    source of this function were reconstructed from context.)

    Returns:
        vocab: dict mapping word -> row index.  Indices start at 1; row 0
            of `vec` is left all-zero so it can serve as padding.
        vec:   float64 array of shape (vocab_size + 1, dimension).
    """
    vocab = {}
    with open(modelFile, 'r') as f:
        length, dim = f.readline().split(' ')
        vec = np.zeros((int(length) + 1, int(dim)), dtype=np.float64)
        for i, line in enumerate(f, start=1):
            sep = line.find(' ')
            word = line[:sep]
            vector = []
            for tok in line[sep + 1:].split(' '):
                try:
                    vector.append(float(tok))
                except ValueError:
                    # Skip tokens that are not parseable floats (was a
                    # broad `except Exception` in the original).
                    print('float' + tok)
            vocab[word] = i
            vec[i] = np.array(vector, dtype=np.float64)
    return vocab, vec
# extract data from one line of text, require strip(' ') first
def extract_data(line, model, weights=None):
    """Convert one tab-separated sample line into (input, target) arrays.

    `line` is "<attr1>\t...\t<attrN>\t<text>": the leading fields are the
    event attributes fed to compute_result(), the final field is the raw
    text.  `model` maps word -> vocabulary index; when `weights` (an
    embedding matrix) is given, words are mapped to vectors instead of
    indices.

    Returns (data, result) as float64 numpy arrays; `data` is padded or
    truncated to the global LEN by padding().
    """
    content = line.split('\t')
    result = compute_result(content[:-1])
    source = content[-1]
    data = []
    for word in source.split(' '):
        try:
            if weights is None:
                data.append(model[word])           # word -> vocabulary index
            else:
                data.append(weights[model[word]])  # word -> embedding vector
        except KeyError:
            # Out-of-vocabulary word: skip it.  (BUG FIX: was a bare
            # `except:` that also swallowed unrelated errors such as
            # KeyboardInterrupt.)
            pass
    # make every input have same length
    data = padding(data, weights is not None)
    return np.array(data, dtype=np.float64), np.array(result, dtype=np.float64)
def compute_result(attrs):
    """Score one event from its attribute fields.

    `attrs` order: isroot, quadclass, glodstein, mentions, sources,
    articles, tone.  The score is the mean of the mentions and articles
    counts divided by 100, rounded to two decimal places.
    """
    mentions = float(attrs[3])
    articles = float(attrs[5])
    return round((mentions + articles) / 200, 2)
# Pad or truncate one sample to exactly LEN timesteps.
def padding(data, useVec):
    """Return `data` adjusted to length LEN (mutates `data` when padding).

    data: list of word indices (useVec=False) or embedding vectors
        (useVec=True).
    useVec: selects the padding element — a zero vector shaped like
        data[0] (assumes data is non-empty in that case — TODO confirm
        callers guarantee this), or the scalar 0.
    """
    global LEN
    global DIM
    length = len(data)
    if length < LEN:
        if useVec:
            zero = np.zeros(data[0].shape) # append zero vectors
        else:
            zero = 0 # append zeros
        for i in range(length,LEN):
            data.append(zero)
    elif length > LEN:
        data = data[:LEN]
    return data
# extract input data and results from a file
def build_dataset(fileName, vocab, weights=None):
    """Read one shard file and return (trainData, trainResult) lists.

    Each line is one sample; see extract_data() for the line format.
    Reading stops at end of file or at the first blank line.  When
    `weights` is given samples are embedding vectors, otherwise word
    indices.
    """
    trainData, trainResult = [], []
    with open(fileName, 'r') as src:
        line = src.readline().strip('\n')
        while line != '':
            # extract data and result from each line
            data, result = extract_data(line.strip(' '), vocab, weights=weights)
            trainData.append(data)
            trainResult.append(result)
            line = src.readline().strip('\n')
    return trainData, trainResult
# a generator used to fit the rnn model
def train_data_generator(dataPath, limit, vocab):
    """Endlessly yield (inputs, targets) batches built from `limit` shards.

    Shards are files named '<dataPath><i>' for i in 0..total-1; the shard
    index wraps around so the generator never terminates (as expected by
    keras fit_generator).  Inputs are int32 word-index sequences, targets
    are float64 scores.
    """
    total = 2528  # number of data shards on disk
    index = 0
    while True:
        inputs, targets = build_dataset('%s%d'%(dataPath, index), vocab)
        for i in range(1, limit):
            index += 1
            if index == total:
                index = 0  # wrap around to the first shard
            newInputs, newTargets = build_dataset('%s%d'%(dataPath, index), vocab)
            inputs.extend(newInputs)
            targets.extend(newTargets)
            if index%50 == 0:
                print(index)  # progress trace
        yield (np.array(inputs, dtype=np.int32), np.array(targets, dtype=np.float64))
        index += 1
        if index == total:
            index = 0
def train_data_generator2(dataPath, weights):
total = 2528
index = 0
while True:
inputs = np.load('%s%d%s'%(dataPath, index, '_x.npy'))
result = np.load('%s%d%s'%(dataPath, index, '_y.npy'))
data = np.zeros([BATCH,LEN,DIM],dtype=np.float64)
for i in range(len(inputs)):
for j in range(len(inputs[i])):
data[i][j] = weights[inputs[i][j]]
if index%50 == 0:
print(index)
yield data, result
index += 1
if index == total:
index = 0
# train rnn model. dataPath example: news_50/news_stem_
def model_rnn(vocab, weights, dataPath, batchn, epoch, repeat):
global LEN
global DIM
global BATCH
testx, testy = build_dataset('%s%d'%(dataPath, 2528), vocab, weights=weights)
testx = np.array(testx, dtype=np.float64)
testy = np.array(testy, dtype=np.float64)
# build and fit model
model = Sequential()
#model.add(Embedding(weights.shape[0],weights.shape[1], input_length=LEN, mask_zero=True,weights=[weights]))
model.add(Bidirectional(LSTM(50, activation='relu', return_sequences=True), input_shape=(LEN, DIM)))
model.add(Bidirectional(LSTM(50, activation='relu')))
model.add(Dropout(0.5))
model.add(Dense(1))
sgd = opt.SGD(lr=0.1, decay=1e-2, momentum=0.9)
model.compile(loss='mean_squared_error', optimizer='adam')
print(model.summary())
#model.fit_generator(train_data_generator2('news_50_bin/news_stem_'), 500, epochs=10, verbose=2, validation_data=None)
index = 0
while index < epoch:
data, result = build_dataset('%s%d'%(dataPath, index%2528), vocab, weights=weights)
for i in range(1, batchn):
index += 1
newData, newResult = build_dataset('%s%d'%(dataPath, index), vocab, weights=weights)
data.extend(newData)
result.extend(newResult)
model.fit(np.array(data, dtype=np.float64), np.array(result, dtype=np.float64), epochs=repeat, batch_size=BATCH, verbose=0, validation_data=(testx,testy))
model.save('hotnews_r_%d_%d.h5'%(BATCH, index))
predict = model.predict(testx)
error = 0
for i in range(testy.shape[0]):
error += abs(testy[i] - predict[i][0])/testy[i]
#print(testy[i], predict[i][0])
print('batch %d of %d, epoch %d, absolute error: %f'%(index%2528+1, 2528, int(index/2528)+1, error/testy.shape[0]))
index += 1
return model
# train cnn model
def model_cnn(vocab, weights, dataPath, batchn, epoch):
    """Train a 1-D CNN on the pre-sharded dataset and return the model.

    vocab:    word -> index mapping (see gen_embed_model).
    weights:  embedding matrix indexed by the vocab values.
    dataPath: shard filename prefix; shards are '<dataPath><i>', with
              shard 2528 held out as the validation set.
    batchn:   number of shards merged into one training round.
    epoch:    training stops once the running shard index exceeds this.

    The model is also checkpointed to disk after every round.
    """
    global LEN
    global DIM
    global BATCH
    # Shard 2528 is the held-out validation set.
    testx, testy = build_dataset('%s%d' % (dataPath, 2528), vocab, weights=weights)
    testx = np.array(testx, dtype=np.float64)
    # BUG FIX: this previously read `np.array(testx, ...)`, so validation
    # compared the model against its own inputs instead of the labels
    # (the sibling model_rnn() converts testy correctly).
    testy = np.array(testy, dtype=np.float64)
    model = Sequential()
    model.add(Conv1D(input_shape=(LEN, DIM), filters=32, kernel_size=30,
                     padding='same', activation='relu'))
    model.add(MaxPooling1D(pool_size=2))
    model.add(Flatten())
    model.add(Dense(250, activation='softmax'))
    model.add(Dense(1, activation='softmax'))
    model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
    print(model.summary())
    index = 0
    while True:
        # Merge `batchn` consecutive shards into one training round.
        data, result = build_dataset('%s%d' % (dataPath, index % 2528), vocab, weights)
        for i in range(1, batchn):
            index += 1
            newData, newResult = build_dataset('%s%d' % (dataPath, index), vocab, weights)
            data.extend(newData)
            result.extend(newResult)
        model.fit(np.array(data, dtype=np.float64), np.array(result, dtype=np.float64),
                  epochs=10, batch_size=BATCH, verbose=2, validation_data=(testx, testy))
        # Checkpoint after every round.
        model.save('hotnews_c_%d_%d.h5' % (BATCH, index))
        predict = model.predict(testx)
        for i in range(testy.shape[0]):
            print(testy[i], predict[i])
        index += 1
        if index > epoch:
            return model
def main():
global LEN
global BATCH
glo |
andymckay/addons-server | src/olympia/users/management/commands/activate_user.py | Python | bsd-3-clause | 1,391 | 0 | from optparse import make_option
from django.core.management.base import BaseCommand, CommandError
from olympia.access.models import Group, GroupUser
from olympia.users.models import UserProfile
class Command(BaseCommand):
    """Activate a registered user, and optionally set it as admin.

    (Garbled characters in the original source of handle() were
    reconstructed from context.)
    """
    args = 'email'
    help = 'Activate a registered user by its email.'
    option_list = BaseCommand.option_list + (
        make_option('--set-admin',
                    action='store_true',
                    dest='set_admin',
                    default=False,
                    help='Give superuser/admin rights to the user.'),)

    def handle(self, *args, **options):
        """Activate the user identified by the single `email` argument.

        Raises CommandError on wrong usage or when no profile matches.
        """
        if len(args) != 1:
            raise CommandError('Usage: activate_user [--set-admin] email')
        email = args[0]
        set_admin = options['set_admin']
        try:
            profile = UserProfile.objects.get(email=email)
        except UserProfile.DoesNotExist:
            raise CommandError('User with email %s not found' % email)
        # An empty confirmation code marks the account as activated.
        profile.update(confirmationcode='')
        admin_msg = ""
        if set_admin:
            admin_msg = "admin "
            # Membership of the 'Admins' group grants admin rights.
            GroupUser.objects.create(user=profile,
                                     group=Group.objects.get(name='Admins'))
        self.stdout.write("Done, you can now login with your %suser" %
                          admin_msg)
|
mkhuthir/catkin_ws | src/chessbot/devel/lib/python2.7/dist-packages/nasa_r2_common_msgs/msg/_JointStatusArray.py | Python | gpl-3.0 | 10,820 | 0.014325 | # This Python file uses the following encoding: utf-8
"""autogenerated by genpy from nasa_r2_common_msgs/JointStatusArray.msg. Do not edit."""
import sys
python3 = True if sys.hexversion > 0x03000000 else False
import genpy
import struct
import nasa_r2_common_msgs.msg
import std_msgs.msg
class JointStatusArray(genpy.Message):
_md5sum = "db132c4fff9528f41c0236d435100eda"
_type = "nasa_r2_common_msgs/JointStatusArray"
_has_header = True #flag to mark the presence of a Header object
_full_text = """Header header
JointStatus[] status
================================================================================
MSG: std_msgs/Header
# Standard metadata for higher-level stamped data types.
# This is generally used to communicate timestamped data
# in a particular coordinate frame.
#
# sequence ID: consecutively increasing ID
uint32 seq
#Two-integer timestamp that is expressed as:
# * stamp.sec: seconds (stamp_secs) since epoch (in Python the variable is called 'secs')
# * stamp.nsec: nanoseconds since stamp_secs (in Python the variable is called 'nsecs')
# time-handling sugar is provided by the client library
time stamp
#Frame this data is associated with
# 0: no frame
# 1: global frame
string frame_id
================================================================================
MSG: nasa_r2_common_msgs/JointStatus
string publisher
string joint
uint32 registerValue
bool coeffsLoaded
bool bridgeEnabled
bool motorEnabled
bool brakeReleased
bool motorPowerDetected
bool embeddedMotCom
bool jointFaulted
"""
__slots__ = ['header','status']
_slot_types = ['std_msgs/Header','nasa_r2_common_msgs/JointStatus[]']
def __init__(self, *args, **kwds):
"""
Constructor. Any message fields that are implicitly/explicitly
set to None will be assigned a default value. The recommend
use is keyword arguments as this is more robust to future message
changes. You cannot mix in-order arguments and keyword arguments.
The available fields are:
header,status
:param args: complete set of field values, in .msg order
:param kwds: use keyword arguments corresponding to message field names
to set specific fields.
"""
if args or kwds:
super(JointStatusArray, self).__init__(*args, **kwds)
#message fields cannot be None, assign default values for those that are
if self.header is None:
self.header = std_msgs.msg.Header()
if self.status is None:
self.status = []
else:
self.header = std_msgs.msg.Header()
self.status = []
def _get_types(self):
"""
internal API method
"""
return self._slot_types
def serialize(self, buff):
"""
serialize message into buffer
:param buff: buffer, ``StringIO``
"""
try:
_x = self
buff.write(_struct_3I.pack(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs))
_x = self.header.frame_id
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
if python3:
buff.write(struct.pack('<I%sB'%length, length, *_x))
else:
buff.write(struct.pack('<I%ss'%length, length, _x))
length = len(self.status)
buff.write(_struct_I.pack(length))
for val1 in self.status:
_x = val1.publisher
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
if python3:
buff.write(struct.pack('<I%sB'%length, length, *_x))
else:
buff.write(struct.pack('<I%ss'%length, length, _x))
_x = val1.joint
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
if python3:
buff.write(struct.pack('<I%sB'%length, length, *_x))
else:
buff.write(struct.pack('<I%ss'%length, length, _x))
_x = val1
buff.write(_struct_I7B.pack(_x.registerValue, _x.coeffsLoaded, _x.bridgeEnabled, _x.motorEnabled, _x.brakeReleased, _x.motorPowerDetected, _x.embeddedMotCom, _x.jointFaulted))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(_x))))
e | xcept TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(_x))))
def deserialize(self, str):
"""
unpack s | erialized message in str into this message instance
:param str: byte array of serialized message, ``str``
"""
try:
if self.header is None:
self.header = std_msgs.msg.Header()
if self.status is None:
self.status = None
end = 0
_x = self
start = end
end += 12
(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs,) = _struct_3I.unpack(str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.header.frame_id = str[start:end].decode('utf-8')
else:
self.header.frame_id = str[start:end]
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
self.status = []
for i in range(0, length):
val1 = nasa_r2_common_msgs.msg.JointStatus()
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
val1.publisher = str[start:end].decode('utf-8')
else:
val1.publisher = str[start:end]
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
val1.joint = str[start:end].decode('utf-8')
else:
val1.joint = str[start:end]
_x = val1
start = end
end += 11
(_x.registerValue, _x.coeffsLoaded, _x.bridgeEnabled, _x.motorEnabled, _x.brakeReleased, _x.motorPowerDetected, _x.embeddedMotCom, _x.jointFaulted,) = _struct_I7B.unpack(str[start:end])
val1.coeffsLoaded = bool(val1.coeffsLoaded)
val1.bridgeEnabled = bool(val1.bridgeEnabled)
val1.motorEnabled = bool(val1.motorEnabled)
val1.brakeReleased = bool(val1.brakeReleased)
val1.motorPowerDetected = bool(val1.motorPowerDetected)
val1.embeddedMotCom = bool(val1.embeddedMotCom)
val1.jointFaulted = bool(val1.jointFaulted)
self.status.append(val1)
return self
except struct.error as e:
raise genpy.DeserializationError(e) #most likely buffer underfill
def serialize_numpy(self, buff, numpy):
"""
serialize message with numpy array types into buffer
:param buff: buffer, ``StringIO``
:param numpy: numpy python module
"""
try:
_x = self
buff.write(_struct_3I.pack(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs))
_x = self.header.frame_id
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
if python3:
buff.write(struct.pack('<I%sB'%length, length, *_x))
else:
buff.write(struct.pack('<I%ss'%length, length, _x))
length = len(self.status)
buff.write(_struct_I.pack(length))
for val1 in self.status:
_x = val1.publisher
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
if python3:
buff.write(struct.pack('<I%sB'%length, length, *_x))
else:
buff.write(struct.pack('<I%ss'%length, length, _x))
_x = val1.joint
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
if python3:
buff.write(struct.pack('<I%sB'%length, length, *_x))
else:
buff.write(struct.pack('<I%ss'%length, length, _x))
_x = val1
buff.write(_struct_I7B.pack(_x.registerValue, _x.coeffsLoaded, _x.bridgeEnabled, _x.motorEnabled, _x.brakeReleased, _x.motorPowerDetected, _x.embeddedMotCom, _x.jointFaulted))
except struct.err |
henryhallam/peregrine | peregrine/analysis/acquisition.py | Python | gpl-3.0 | 8,541 | 0.011357 | #!/usr/bin/env python
# Copyright (C) 2012 Swift Navigation Inc.
#
# This source is subject to the license found in the file 'LICENSE' which must
# be be distributed together with this source. All other rights reserved.
#
# THIS CODE AND INFORMATION IS PROVIDED "AS IS" WITHOUT WARRANTY OF ANY KIND,
# EITHER EXPRESSED OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND/OR FITNESS FOR A PARTICULAR PURPOSE.
"""Functions for analysing and plotting acquisition results."""
import numpy as np
import matplotlib.pyplot as plt
from operator import attrgetter
from peregrine.acquisition import AcquisitionResult, DEFAULT_THRESHOLD
__all__ = ['acq_table', 'snr_bars', 'peak_plot', 'acq_plot_3d']
def acq_table(acq_results, show_all=False):
  """
  Print a table of acquisition results.

  Parameters
  ----------
  acq_results : [:class:`peregrine.acquisition.AcquisitionResult`]
    List of :class:`peregrine.acquisition.AcquisitionResult` objects.
  show_all : bool, optional
    If `True` then even satellites which have not been acquired will be shown
    in the table.
  """
  for ar in acq_results:
    if ar.status == 'A':
      # Acquired satellite: when non-acquired rows are also printed,
      # prefix with '*' so acquired ones stand out.
      if show_all:
        print '*',
      print ar
    elif show_all:
      # Not acquired: printed only in show_all mode; pad for alignment.
      print ' ',
      print ar
def snr_bars(acq_results,
threshold=DEFAULT_THRESHOLD, ax=None, show_missing=True):
"""
Display the acquisition Signal to Noise Ratios as a bar chart.
This function is useful for visualising the output of
:meth:`peregrine.acquisition.Acquisition.acquisition` or saved acquisition
results files loaded with :func:`peregrine.acquisition.load_acq_results`.
Parameters
----------
acq_results : [:class:`peregrine.acquisition.AcquisitionResult`]
List of :class:`peregrine.acquisition.AcquisitionResult` objects to plot
bars for. If the `status` field of the
:class:`peregrine.acquisition.AcquisitionResult` object is ``'A'``, i.e.
the satellite has been acquired, then the bar will be highlighted.
theshold : {float, `None`}, optional
If not `None` then an acquisition theshold of this value will be indicated
on the plot. Defaults to the value of
:attr:`peregrine.acquisition.DEFAULT_THRESHOLD`.
ax : :class:`matplotlib.axes.Axes`, optional
If `ax` is not `None` then the bar chart will be plotted on the supplied
:class:`matplotlib.axes.Axes` object rather than as a new figure.
show_missing : bool, optional
If `True` then the bar chart will show empty spaces for all PRNs not
included in `acq_results`, otherwise only the PRNs in `acq_results` will be
plotted.
Returns
-------
out : :class:`matplotlib.axes.Axes`
The `Axes` object that the bar chart was drawn to.
"""
if ax is None:
fig = plt.figure()
fig.set_size_inches(10, 4, forward=True)
ax = fig.add_subplot(111)
if show_missing:
prns = [r.prn for r in acq_results]
missing = [prn for prn in range(31) if not prn in prns]
acq_results = acq_results[:] + \
[AcquisitionResult(prn, 0, 0, 0, 0, '-') for prn in missing]
acq_results.sort(key=attrgetter('prn'))
for n, result in enumerate(acq_results):
if (result.status == 'A'):
colour = '#FFAAAA'
else:
colour = '0.8'
ax.bar(n-0.5, result.snr, color=colour, width=1)
ax.set_xticks(range(len(acq_results)))
ax.set_xticklabels(['%02d' % (r.prn+1) for r in acq_results])
ax.set_title('Acquisition results')
ax.set_ylabel('Acquisition metric')
if threshold is not None:
ax.plot([-0.5, len(acq_results)-0.5], [threshold, threshold] | ,
linestyle='dashed', color='black')
ax.text(0.01, 0.97, 'threshold = %.1f' % threshold,
horizontalalignment='left',
verticalalignment='top',
transform = ax.transAxes)
yticks = ax.get_yticks()
dist = np.abs(yticks - threshold).min()
if dist >= 0.25*(yticks[1] - yticks[0]):
ax.set_yticks(np.append(yticks, | threshold))
ax.set_xbound(-0.5, len(acq_results)-0.5)
ax.set_xlabel('PRN')
return ax
def peak_plot(powers, freqs, samples_per_code, code_length=1023.0):
"""
Visualise the peak in a table of acquisition correlation powers.
Display, in various ways, the peak in a 2D array of acquisition correlation
powers against code phase and Doppler shift.
This is useful for visualising the output of
:meth:`peregrine.acquisition.Acquisition.acquire`.
Parameters
----------
powers : :class:`numpy.ndarray`, shape(len(`freqs`), `samples_per_code`)
2D array containing correlation powers at different frequencies and code
phases. Code phase axis is in samples from zero to `samples_per_code`.
freqs : iterable
List of frequencies mapping the results frequency index to a value in Hz.
samples_per_code : float
The number of samples corresponding to one code length.
code_length : int, optional
The number of chips in the chipping code. Defaults to the GPS C/A code
value of 1023.
"""
samples_per_chip = samples_per_code / code_length
fig = plt.figure()
fig.set_size_inches(10, 10, forward=True)
ax1 = fig.add_subplot(221)
ax2 = fig.add_subplot(222)
ax3 = fig.add_subplot(223)
ax4 = fig.add_subplot(224)
peak = np.unravel_index(powers.argmax(), powers.shape)
powers_detail = powers[peak[0]-5:peak[0]+5, peak[1]-50:peak[1]+50]
code_phases = np.arange(samples_per_code) / samples_per_chip
ax1.plot(code_phases, powers[peak[0],:], color='black')
ax1.set_title("Code phase cross-section")
ax1.set_xlabel("Code phase (chips)")
ax1.set_ylabel("Correlation magnitude")
ax1.set_xbound(0, code_length)
ax1.set_xticks([0, code_phases[peak[1]], code_length])
ax1.set_xticklabels(['0', code_phases[peak[1]], '%.0f' % code_length])
ax2.plot(freqs, powers[:,peak[1]], color='black')
ax2.set_title("Carrier frequency cross-section")
ax2.set_xlabel("Doppler shift (Hz)")
ax2.set_ylabel("Correlation magnitude")
ax2.set_xbound(freqs[0], freqs[-1])
ax2.set_xticks([freqs[0], freqs[peak[0]], freqs[-1]])
ax3.plot(code_phases[peak[1]-50:peak[1]+50],
powers[peak[0],peak[1]-50:peak[1]+50], color='black')
ax3.set_title("Code phase cross-section detail")
ax3.set_xlabel("Code phase (chips)")
ax3.set_ylabel("Correlation magnitude")
ax3.set_xbound(code_phases[peak[1]-50], code_phases[peak[1]+50])
ax4.imshow(powers_detail, aspect='auto', cmap=plt.cm.RdYlBu_r,
extent=(code_phases[peak[1]-50],
code_phases[peak[1]+50],
freqs[peak[0]-5],
freqs[peak[0]+5]),
interpolation='bilinear')
ax4.set_title("Peak detail")
ax4.set_xlabel("Code phase (chips)")
ax4.set_ylabel("Doppler shift (Hz)")
fig.tight_layout()
def acq_plot_3d(powers, freqs, samples_per_code, code_length=1023.0):
    """Render acquisition correlation powers as a 3D surface.

    Useful for visualising the output of
    :meth:`peregrine.acquisition.Acquisition.acquire`.

    Parameters
    ----------
    powers : :class:`numpy.ndarray`, shape(len(`freqs`), `samples_per_code`)
        2D array of correlation powers; the code phase axis is in samples
        from zero to `samples_per_code`.
    freqs : iterable
        Frequencies (Hz) corresponding to the rows of `powers`.
    samples_per_code : float
        Number of samples covering one full code.
    code_length : int, optional
        Number of chips in the chipping code (defaults to the GPS C/A
        value of 1023).

    """
    # Importing Axes3D registers the '3d' projection with matplotlib.
    from mpl_toolkits.mplot3d import Axes3D  # pylint: disable=unused-import
    samples_per_chip = samples_per_code / code_length
    code_phases = np.arange(samples_per_code) / samples_per_chip
    mesh_x, mesh_y = np.meshgrid(code_phases, freqs)
    fig = plt.figure()
    axes = fig.add_subplot(111, projection='3d')
    axes.plot_surface(mesh_x[:], mesh_y[:], powers[:],
                      cmap=plt.cm.RdYlBu_r, linewidth=0)
    axes.set_title("Acquisition")
    axes.set_xlabel("Code phase (chips)")
    axes.set_xbound(0, code_length)
    axes.set_ylabel("Doppler shift (Hz)")
    axes.set_ybound(freqs[0], freqs[-1])
    axes.set_zlabel("Correlation magnitude")
    fig.tight_layout()
def main():
|
ibex-team/ibex-lib | doc/conf.py | Python | lgpl-3.0 | 7,943 | 0.008057 | # -*- coding: utf-8 -*-
#
# IBEX documentation build configuration file, created by
# sphinx-quickstart on Mon Apr 21 11:59:19 2014.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
sys.path.append(os.path.abspath('.'))
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.doctest', 'sphinx.ext.todo', 'sphinx.ext.coverage', #'sphinx.ext.pngmath',
'sphinx.ext.mathjax', 'sphinx.ext.ifconfig', 'hidden_code_block']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'IBEX'
copyright = u'2007-2020, IMT Atlantique'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '2'
# The full version, including alpha/beta/rc tags.
release = '2.8'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
primary_domain = 'cpp'
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'IBEXdoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'IBEX.tex', u'IBEX Documentation',
u'Gilles Chabert', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'ibex', u'IBEX Documentation',
[u'Gilles Chabert'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'IBEX', u'IBEX Documentation',
u'Gilles Chabert', 'IBEX', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
|
Tinuqin/dynamic-graph-metrics | binaryreader.py | Python | mit | 470 | 0.012766 | #!/usr/bin/e | nv python
# encoding: utf-8
# Written by Aapo Kyrola
# http://stackoverflow.com/questions/28349805/extracting-plain-text-output-from-binary-file
#
# Dumps the 32-bit floats packed back-to-back in a binary file, printing the
# total byte count followed by one "<index> <value>" line per float.
import struct
import sys


def read_floats(data):
    """Return the list of native-endian 32-bit floats packed in *data*.

    Trailing bytes that do not form a complete 4-byte float are ignored
    (the original code relied on integer division for the same effect).
    """
    usable = len(data) - len(data) % 4
    return [value for (value,) in struct.iter_unpack("f", data[:usable])]


def main(argv):
    """Read the file named in ``argv[1]`` and print its floats to stdout."""
    # 'rb' + context manager: no text decoding, no leaked file handle.
    with open(argv[1], "rb") as handle:
        data = handle.read()
    print("%d bytes" % len(data))
    for index, value in enumerate(read_floats(data)):
        print("%d %f" % (index, value))


if __name__ == "__main__":
    main(sys.argv)
|
Marwe/particlemap | sample.py | Python | mit | 20 | 0 | __autho | r__ = 'tibi' | |
LLNL/spack | var/spack/repos/builtin/packages/minigan/package.py | Python | lgpl-2.1 | 1,105 | 0.00362 | # Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *


class Minigan(Package):
    """miniGAN is a generative adversarial network code developed as part of the
    Exascale Computing Project's (ECP) ExaLearn project at
    Sandia National Laboratories."""

    homepage = "https://github.com/SandiaMLMiniApps/miniGAN"
    url = "https://github.com/SandiaMLMiniApps/miniGAN/archive/1.0.0.tar.gz"

    version('1.0.0', sha256='ef6d5def9c7040af520acc64b7a8b6c8ec4b7901721b11b0cb25a583ea0c8ae3')

    # Pure-Python (PyTorch + Horovod) application: everything but setuptools
    # is needed at run time as well as at build time.
    depends_on('python', type=('build', 'run'))
    depends_on('py-setuptools', type='build')
    depends_on('py-torch', type=('build', 'run'))
    depends_on('py-numpy', type=('build', 'run'))
    depends_on('py-horovod@master', type=('build', 'run'))
    depends_on('py-torchvision', type=('build', 'run'))
    depends_on('py-matplotlib@3.0.0', type=('build', 'run'))

    def install(self, spec, prefix):
        # The project ships no build system; the source tree itself is the
        # installation payload.
        install_tree('.', prefix)
|
TheBoegl/letsencrypt | letsencrypt/plugins/common_test.py | Python | apache-2.0 | 7,982 | 0.000125 | """Tests for letsencrypt.plugins.common."""
import unittest
import mock
import OpenSSL
from acme import challenges
from acme import jose
from letsencrypt import achallenges
from letsencrypt.tests import acme_util
from letsencrypt.tests import test_util
class NamespaceFunctionsTest(unittest.TestCase):
    """Tests for letsencrypt.plugins.common.*_namespace functions."""

    def test_option_namespace(self):
        from letsencrypt.plugins.common import option_namespace
        self.assertEqual(option_namespace("foo"), "foo-")

    def test_dest_namespace(self):
        from letsencrypt.plugins.common import dest_namespace
        self.assertEqual(dest_namespace("foo"), "foo_")

    def test_dest_namespace_with_dashes(self):
        from letsencrypt.plugins.common import dest_namespace
        self.assertEqual(dest_namespace("foo-bar"), "foo_bar_")
class PluginTest(unittest.TestCase):
    """Test for letsencrypt.plugins.common.Plugin."""

    def setUp(self):
        from letsencrypt.plugins.common import Plugin

        class StubPlugin(Plugin):  # pylint: disable=missing-docstring
            @classmethod
            def add_parser_arguments(cls, add):
                add("foo-bar", dest="different_to_foo_bar", x=1, y=None)

        self.plugin_cls = StubPlugin
        self.config = mock.MagicMock()
        self.plugin = StubPlugin(config=self.config, name="mock")

    def test_init(self):
        self.assertEqual(self.plugin.name, "mock")
        self.assertEqual(self.plugin.config, self.config)

    def test_option_namespace(self):
        self.assertEqual(self.plugin.option_namespace, "mock-")

    def test_option_name(self):
        self.assertEqual(self.plugin.option_name("foo_bar"), "mock-foo_bar")

    def test_dest_namespace(self):
        self.assertEqual(self.plugin.dest_namespace, "mock_")

    def test_dest(self):
        # Both dash and underscore spellings normalize to the same dest.
        for raw in ("foo-bar", "foo_bar"):
            self.assertEqual(self.plugin.dest(raw), "mock_foo_bar")

    def test_conf(self):
        self.assertEqual(self.plugin.conf("foo-bar"), self.config.mock_foo_bar)

    def test_inject_parser_options(self):
        parser = mock.MagicMock()
        self.plugin_cls.inject_parser_options(parser, "mock")
        # note that inject_parser_options doesn't check if dest has
        # correct prefix
        parser.add_argument.assert_called_once_with(
            "--mock-foo-bar", dest="different_to_foo_bar", x=1, y=None)
class AddrTest(unittest.TestCase):
    """Tests for letsencrypt.client.plugins.common.Addr."""

    def setUp(self):
        from letsencrypt.plugins.common import Addr
        (self.addr1, self.addr2, self.addr3,
         self.addr4, self.addr5, self.addr6) = [
             Addr.fromstring(text) for text in (
                 "192.168.1.1", "192.168.1.1:*", "192.168.1.1:80",
                 "[fe00::1]", "[fe00::1]:*", "[fe00::1]:80")]

    def test_fromstring(self):
        # (parsed address, expected host part, expected port part)
        for addr, host, port in ((self.addr1, "192.168.1.1", ""),
                                 (self.addr2, "192.168.1.1", "*"),
                                 (self.addr3, "192.168.1.1", "80"),
                                 (self.addr4, "[fe00::1]", ""),
                                 (self.addr5, "[fe00::1]", "*"),
                                 (self.addr6, "[fe00::1]", "80")):
            self.assertEqual(addr.get_addr(), host)
            self.assertEqual(addr.get_port(), port)

    def test_str(self):
        # Round-trip: str() reproduces the original spelling.
        for addr, text in ((self.addr1, "192.168.1.1"),
                           (self.addr2, "192.168.1.1:*"),
                           (self.addr3, "192.168.1.1:80"),
                           (self.addr4, "[fe00::1]"),
                           (self.addr5, "[fe00::1]:*"),
                           (self.addr6, "[fe00::1]:80")):
            self.assertEqual(str(addr), text)

    def test_get_addr_obj(self):
        for addr, port, text in ((self.addr1, "443", "192.168.1.1:443"),
                                 (self.addr2, "", "192.168.1.1"),
                                 (self.addr1, "*", "192.168.1.1:*"),
                                 (self.addr4, "443", "[fe00::1]:443"),
                                 (self.addr5, "", "[fe00::1]"),
                                 (self.addr4, "*", "[fe00::1]:*")):
            self.assertEqual(str(addr.get_addr_obj(port)), text)

    def test_eq(self):
        self.assertEqual(self.addr1, self.addr2.get_addr_obj(""))
        self.assertNotEqual(self.addr1, self.addr2)
        self.assertFalse(self.addr1 == 3333)

        self.assertEqual(self.addr4, self.addr4.get_addr_obj(""))
        self.assertNotEqual(self.addr4, self.addr5)
        self.assertFalse(self.addr4 == 3333)

    def test_set_inclusion(self):
        from letsencrypt.plugins.common import Addr
        # Equal addresses must hash identically so sets compare equal.
        self.assertEqual(
            set([self.addr1, self.addr2]),
            set([Addr.fromstring("192.168.1.1"),
                 Addr.fromstring("192.168.1.1:*")]))
        self.assertEqual(
            set([self.addr4, self.addr5]),
            set([Addr.fromstring("[fe00::1]"),
                 Addr.fromstring("[fe00::1]:*")]))
class TLSSNI01Test(unittest.TestCase):
    """Tests for letsencrypt.plugins.common.TLSSNI01."""

    auth_key = jose.JWKRSA.load(test_util.load_vector("rsa512_key.pem"))
    # Two annotated tls-sni-01 challenges for distinct domains, both signed
    # with the same account key.
    achalls = [
        achallenges.KeyAuthorizationAnnotatedChallenge(
            challb=acme_util.chall_to_challb(
                challenges.TLSSNI01(token=b'token1'), "pending"),
            domain="encryption-example.demo", account_key=auth_key),
        achallenges.KeyAuthorizationAnnotatedChallenge(
            challb=acme_util.chall_to_challb(
                challenges.TLSSNI01(token=b'token2'), "pending"),
            domain="letsencrypt.demo", account_key=auth_key),
    ]

    def setUp(self):
        from letsencrypt.plugins.common import TLSSNI01
        self.sni = TLSSNI01(configurator=mock.MagicMock())

    def test_add_chall(self):
        self.sni.add_chall(self.achalls[0], 0)
        self.assertEqual(1, len(self.sni.achalls))
        self.assertEqual([0], self.sni.indices)

    def test_setup_challenge_cert(self):
        # This is a helper function that can be used for handling
        # open context managers more elegantly. It avoids dealing with
        # __enter__ and __exit__ calls.
        # http://www.voidspace.org.uk/python/mock/helpers.html#mock.mock_open
        mock_open, mock_safe_open = mock.mock_open(), mock.mock_open()

        response = challenges.TLSSNI01Response()
        achall = mock.MagicMock()
        key = test_util.load_pyopenssl_private_key("rsa512_key.pem")
        achall.response_and_validation.return_value = (
            response, (test_util.load_cert("cert.pem"), key))

        with mock.patch("letsencrypt.plugins.common.open",
                        mock_open, create=True):
            with mock.patch("letsencrypt.plugins.common.le_util.safe_open",
                            mock_safe_open):
                # pylint: disable=protected-access
                self.assertEqual(response, self.sni._setup_challenge_cert(
                    achall, "randomS1"))

        # The certificate is written normally; the private key must go
        # through safe_open with restrictive (0400) permissions.
        # pylint: disable=no-member
        mock_open.assert_called_once_with(self.sni.get_cert_path(achall), "wb")
        mock_open.return_value.write.assert_called_once_with(
            test_util.load_vector("cert.pem"))
        mock_safe_open.assert_called_once_with(
            self.sni.get_key_path(achall), "wb", chmod=0o400)
        mock_safe_open.return_value.write.assert_called_once_with(
            OpenSSL.crypto.dump_privatekey(OpenSSL.crypto.FILETYPE_PEM, key))
if __name__ == "__main__":
unittest.main() # pragma: no cover
|
palashahuja/pgmpy | pgmpy/tests/test_inference/test_Mplp.py | Python | mit | 4,901 | 0.006937 | import unittest
import numpy as np
from pgmpy.models import MarkovModel
from pgmpy.inference.mplp import Mplp
from pgmpy.factors import Factor
from pgmpy.readwrite import UAIReader
class TestMplp(unittest.TestCase):
    """Shared fixture: a 4x4 grid Markov model (UAI format) wrapped in Mplp."""

    def setUp(self):
        reader_file = UAIReader('pgmpy/tests/test_readwrite/testdata/grid4x4_with_triplets.uai')
        self.markov_model = reader_file.get_model()
        # Mplp operates in log-space, so convert every factor's potentials.
        for factor in self.markov_model.factors:
            factor.values = np.log(factor.values)
        self.mplp = Mplp(self.markov_model)
class TightenTripletOff(TestMplp):
    # Query when tighten triplet is OFF
    def test_query_tighten_triplet_off(self):
        """MAP assignment without triplet tightening matches Sontag's code."""
        query_result = self.mplp.map_query(tighten_triplet=False)
        # Results from the Sontag code for a mplp run without tightening is:
        expected_result = {
            'P': -0.06353, 'N': 0.71691, 'O': 0.43431, 'L': -0.69140,
            'M': -0.89940, 'J': 0.49731, 'K': 0.91856, 'H': 0.96819,
            'I': 0.68913, 'F': 0.41164, 'G': 0.73371, 'D': -0.57461,
            'E': 0.75254, 'B': 0.06297, 'C': -0.11423, 'A': 0.60557}
        # Variables map alphabetically onto var_0 .. var_15.
        for index, letter in enumerate('ABCDEFGHIJKLMNOP'):
            self.assertAlmostEqual(expected_result[letter],
                                   query_result['var_%d' % index], places=4)
        # The final Integrality gap after solving for the present case
        int_gap = self.mplp.get_integrality_gap()
        self.assertAlmostEqual(64.59, int_gap, places=1)
class TightenTripletOn(TestMplp):
    # Query when tighten triplet is ON
    def test_query_tighten_triplet_on(self):
        """MAP assignment with triplet tightening matches Sontag's code."""
        query_result = self.mplp.map_query(tighten_triplet=True)
        # Results from the Sontag code for a mplp run with tightening is:
        expected_result = {
            'P': 0.06353, 'C': 0.11422, 'B': -0.06300, 'A': 0.60557,
            'G': -0.73374, 'F': 0.41164, 'E': 0.75254, 'D': -0.57461,
            'K': -0.91856, 'J': 0.49731, 'I': 0.68913, 'H': 0.96819,
            'O': 0.43431, 'N': 0.71691, 'M': -0.89940, 'L': 0.69139}
        # Variables map alphabetically onto var_0 .. var_15.
        for index, letter in enumerate('ABCDEFGHIJKLMNOP'):
            self.assertAlmostEqual(expected_result[letter],
                                   query_result['var_%d' % index], places=4)
        # The final Integrality gap after solving for the present case
        int_gap = self.mplp.get_integrality_gap()
        # Since the ties are broken arbitrary, we have 2 possible solutions howsoever trivial in difference
        self.assertIn(round(int_gap, 2), (7.98, 8.07))
|
amaozhao/algorithms | algorithms/union-find/count_islands.py | Python | mit | 2,094 | 0.000478 | """
A 2d grid map of m rows and n columns is initially filled with water.
We may perform an addLand operation which turns the water at position
(row, col) into a land. Given a list of positions to operate,
count the number of islands after each addLand operation.
An island is surrounded by water and is formed by connecting adjacent
lands horizontally or vertically.
You may assume all four edges of the grid are all surrounded by water.
Given m = 3, n = 3, positions = [[0,0], [0,1], [1,2], [2,1]].
Initially, the 2d grid grid is filled with water.
(Assume 0 represents water and 1 represents land).
0 0 0
0 0 0
0 0 0
Operation #1: addLand(0, 0) turns the water at grid[0][0] into a land.
1 0 0
0 0 0   Number of islands = 1
0 0 0
Operation #2: addLand(0, 1) turns the water at grid[0][1] into a land.
1 1 0
0 0 0 Number of islands = 1
0 0 0
Operation #3: addLand(1, 2) turns the water at grid[1][2] into a land.
1 1 0
0 0 1 Number of islands = 2
0 0 0
Operation #4: addLand(2, 1) turns the water at grid[2][1] into a land.
1 1 0
0 0 1   Number of islands = 3
0 1 0
"""
class Solution(object):
    """Incremental island counting via union-find (see module docstring)."""

    def num_islands2(self, m, n, positions):
        """Return the island count after each addLand operation.

        m, n give the grid dimensions (kept for API compatibility; positions
        are assumed in range, as in the original).  Adding land on a cell
        that is already land leaves the count unchanged -- the previous code
        re-inserted the cell, resetting its component and inflating the count.
        """
        ans = []
        islands = Union()
        for p in map(tuple, positions):
            if p in islands.id:
                # Duplicate addLand: nothing changes.
                ans.append(islands.count)
                continue
            islands.add(p)
            for dp in (0, 1), (0, -1), (1, 0), (-1, 0):
                q = (p[0] + dp[0], p[1] + dp[1])
                if q in islands.id:
                    islands.unite(p, q)
            ans.append(islands.count)
        return ans


class Union(object):
    """Union-find over 2-D cells with path halving and union by size."""

    def __init__(self):
        self.id = {}    # cell -> parent cell
        self.sz = {}    # root -> size of its component
        self.count = 0  # number of disjoint components

    def add(self, p):
        """Insert *p* as a new singleton component."""
        self.id[p] = p
        self.sz[p] = 1
        self.count += 1

    def root(self, i):
        """Return the representative of *i*'s component (path halving)."""
        while i != self.id[i]:
            self.id[i] = self.id[self.id[i]]
            i = self.id[i]
        return i

    def unite(self, p, q):
        """Merge the components of *p* and *q*, smaller tree under larger."""
        i, j = self.root(p), self.root(q)
        if i == j:
            return
        if self.sz[i] > self.sz[j]:
            i, j = j, i
        self.id[i] = j
        self.sz[j] += self.sz[i]
        self.count -= 1
|
bespike/litecoin | contrib/testgen/gen_key_io_test_vectors.py | Python | mit | 9,388 | 0.004261 | #!/usr/bin/env python3
# Copyright (c) 2012-2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''
Generate valid and invalid base58 address and private key test vectors.
Usage:
PYTHONPATH=../../test/functional/test_framework ./gen_key_io_test_vectors.py valid 50 > ../../src/test/data/key_io_valid.json
PYTHONPATH=../../test/functional/test_framework ./gen_key_io_test_vectors.py invalid 50 > ../../src/test/data/key_io_invalid.json
'''
# 2012 Wladimir J. van der Laan
# Released under MIT License
import os
from itertools import islice
from base58 import b58encode_chk, b58decode_chk, b58chars
import random
from binascii import b2a_hex
from segwit_addr import bech32_encode, decode, convertbits, CHARSET
# key types
PUBKEY_ADDRESS = 48
SCRIPT_ADDRESS = 5
SCRIPT_ADDRESS2 = 50
PUBKEY_ADDRESS_TEST = 111
SCRIPT_ADDRESS_TEST = 196
SCRIPT_ADDRESS_TEST2 = 58
PUBKEY_ADDRESS_REGTEST = 111
SCRIPT_ADDRESS_REGTEST = 196
PRIVKEY = 176
PRIVKEY_TEST = 239
PRIVKEY_REGTEST = 239
# script
OP_0 = 0x00
OP_1 = 0x51
OP_2 = 0x52
OP_16 = 0x60
OP_DUP = 0x76
OP_EQUAL = 0x87
OP_EQUALVERIFY = 0x88
OP_HASH160 = 0xa9
OP_CHECKSIG = 0xac
pubkey_prefix = (OP_DUP, OP_HASH160, 20)
pubkey_suffix = (OP_EQUALVERIFY, OP_CHECKSIG)
script_prefix = (OP_HASH160, 20)
script_suffix = (OP_EQUAL,)
p2wpkh_prefix = (OP_0, 20)
p2wsh_prefix = (OP_0, 32)
metadata_keys = ['isPrivkey', 'chain', 'isCompressed', 'tryCaseFlip']
# templates for valid sequences
templates = [
# prefix, payload_size, suffix, metadata, output_prefix, output_suffix
# None = N/A
((PUBKEY_ADDRESS,), 20, (), (False, 'main', None, None), pubkey_prefix, pubkey_suffix),
((SCRIPT_ADDRESS,), 20, (), (False, 'main', None, None), script_prefix, script_suffix),
((SCRIPT_ADDRESS2,), 20, (), (False, 'main', None, None), script_prefix, script_suffix),
((PUBKEY_ADDRESS_TEST,), 20, (), (False, 'test', None, None), pubkey_prefix, pubkey_suffix),
((SCRIPT_ADDRESS_TEST,), 20, (), (False, 'test', None, None), script_prefix, script_suffix),
((SCRIPT_ADDRESS_TEST2,), 20, (), (False, 'test', None, None), script_prefix, script_suffix),
((PUBKEY_ADDRESS_REGTEST,), 20, (), (False, 'regtest', None, None), pubkey_prefix, pubkey_suffix),
((SCRIPT_ADDRESS_REGTEST,), 20, (), (False, 'regtest', None, None), script_prefix, script_suffix),
((PRIVKEY,), 32, (), (True, 'main', False, None), (), ()),
((PRIVKEY,), 32, (1,), (True, 'main', True, None), (), ()),
((PRIVKEY_TEST,), 32, (), (True, 'test', False, None), (), ()),
((PRIVKEY_TEST,), 32, (1,), (True, 'test', True, None), (), ()),
((PRIVKEY_REGTEST,), 32, (), (True, 'regtest', False, None), (), ()),
((PRIVKEY_REGTEST,), 32, (1,), (True, 'regtest', True, None), (), ())
]
# templates for valid bech32 sequences
bech32_templates = [
# hrp, version, witprog_size, metadata, output_prefix
('ltc', 0, 20, (False, 'main', None, True), p2wpkh_prefix),
('ltc', 0, 32, (False, 'main', None, True), p2wsh_prefix),
('ltc', 1, 2, (False, 'main', None, True), (OP_1, 2)),
('tltc', 0, 20, (False, 'test', None, True), p2wpkh_prefix),
('tltc', 0, 32, (False, 'test', None, True), p2wsh_prefix),
('tltc', 2, 16, (False, 'test', None, True), (OP_2, 16)),
('rltc', 0, 20, (False, 'regtest', None, True), p2wpkh_prefix),
('rltc', 0, 32, (False, 'regtest', None, True), p2wsh_prefix),
('rltc', 16, 40, (False, 'regtest', None, True), (OP_16, 40))
]
# templates for invalid bech32 sequences
bech32_ng_templates = [
# hrp, version, witprog_size, invalid_bech32, invalid_checksum, invalid_char
('tc', 0, 20, False, False, False),
('tltc', 17, 32, False, False, False),
('rltc', 3, 1, False, False, False),
('ltc', 15, 41, False, False, False),
('tltc', 0, 16, False, False, False),
('rltc', 0, 32, True, False, False),
('ltc', 0, 16, True, False, False),
('tltc', 0, 32, False, True, False),
('rltc', 0, 20, False, False, True)
]
def is_valid(v):
    '''Check vector v for validity as base58check; fall back to bech32.'''
    # Any character outside the base58 alphabet rules out base58check.
    if not set(v) <= set(b58chars):
        return is_valid_bech32(v)
    decoded = b58decode_chk(v)
    if decoded is None:
        return is_valid_bech32(v)
    for version_bytes, payload_len, suffix_bytes, *_ in templates:
        prefix = bytearray(version_bytes)
        suffix = bytearray(suffix_bytes)
        if (decoded.startswith(prefix) and decoded.endswith(suffix)
                and len(decoded) == len(prefix) + payload_len + len(suffix)):
            return True
    return is_valid_bech32(v)
def is_valid_bech32(v):
    '''Check vector v for bech32 validity under any known HRP.'''
    return any(decode(hrp, v) != (None, None)
               for hrp in ('ltc', 'tltc', 'rltc'))
def gen_valid_base58_vector(template):
    '''Generate one valid base58check string and its expected script bytes.'''
    version_bytes = bytearray(template[0])
    payload = bytearray(os.urandom(template[1]))
    trailer = bytearray(template[2])
    script_prefix = bytearray(template[4])
    script_suffix = bytearray(template[5])
    encoded = b58encode_chk(version_bytes + payload + trailer)
    return encoded, script_prefix + payload + script_suffix
def gen_valid_bech32_vector(template):
    '''Generate one valid bech32 string and its expected script bytes.'''
    hrp, witver = template[0], template[1]
    witness_program = bytearray(os.urandom(template[2]))
    script_prefix = bytearray(template[4])
    data_part = [witver] + convertbits(witness_program, 8, 5)
    return bech32_encode(hrp, data_part), script_prefix + witness_program
def gen_valid_vectors():
    '''Endlessly yield (encoded, payload_hex, metadata) valid test vectors.'''
    sources = [(gen_valid_base58_vector, templates),
               (gen_valid_bech32_vector, bech32_templates)]
    while True:
        for make_vector, template_list in sources:
            for template in template_list:
                encoded, payload = make_vector(template)
                assert is_valid(encoded)
                metadata = dict(
                    (key, value)
                    for key, value in zip(metadata_keys, template[3])
                    if value is not None)
                payload_hex = b2a_hex(payload)
                if isinstance(payload_hex, bytes):
                    payload_hex = payload_hex.decode('utf8')
                yield (encoded, payload_hex, metadata)
def gen_invalid_base58_vector(template):
    '''Generate possibly invalid vector'''
    # kinds of invalid vectors:
    # invalid prefix
    # invalid payload length
    # invalid (randomized) suffix (add random data)
    # corrupt checksum
    # Each corruption is applied independently with probability 0.2.
    corrupt_prefix = randbool(0.2)
    randomize_payload_size = randbool(0.2)
    corrupt_suffix = randbool(0.2)
    if corrupt_prefix:
        prefix = os.urandom(1)
    else:
        prefix = bytearray(template[0])
    if randomize_payload_size:
        # NOTE(review): max(..., 50) makes the random payload at least 50
        # bytes, which is always longer than the valid sizes (20/32) in
        # `templates` -- presumably deliberate so the length check fails;
        # confirm against upstream (min() would allow short payloads too).
        payload = os.urandom(max(int(random.expovariate(0.5)), 50))
    else:
        payload = os.urandom(template[1])
    if corrupt_suffix:
        suffix = os.urandom(len(template[2]))
    else:
        suffix = bytearray(template[2])
    # The checksum is computed over the (possibly corrupted) parts, so the
    # string is still well-formed base58check unless line corruption hits.
    val = b58encode_chk(prefix + payload + suffix)
    if random.randint(0,10)<1: # line corruption
        if randbool(): # add random character to end
            val += random.choice(b58chars)
        else: # replace random character in the middle
            n = random.randint(0, len(val))
            val = val[0:n] + random.choice(b58chars) + val[n+1:]
    return val
def gen_invalid_bech32_vector(template):
'''Generate possibly invalid bech32 vector'''
no_data = randbool(0.1)
to_upper = randbool(0.1)
hrp = template[0]
witver = template[1]
witprog = bytearray(os.u | random(template[2]))
if no_data:
rv = bech32_encode(hrp, [])
else:
data = [witver] + convertbits(witprog, 8, 5)
if template[3] and not no_data:
if template[2] % 5 in {2, 4}:
data[-1] |= 1
else:
data.append(0)
rv = bech32_encode(hrp, data)
if template[4]:
i = len(rv) - random.randrange(1, 7)
rv = rv[:i] + random.choice( | CHARSET.replace(rv[i], '')) + rv[i + 1:]
if |
gustavla/self-supervision | selfsup/caffe.py | Python | bsd-3-clause | 7,096 | 0.002959 | from .util import DummyDict
from .util import tprint
import deepdish as dd
import numpy as np
# CAFFE WEIGHTS: O x I x H x W
# TFLOW WEIGHTS: H x W x I x O
def to_caffe(tfW, name=None, shape=None, color_layer='', conv_fc_transitionals=None, info=DummyDict()):
    """Convert a TensorFlow weight array (H x W x I x O) to caffe layout
    (O x I x H x W); see the layout note at the top of this module.

    ``shape`` is accepted but unused (kept for interface compatibility).
    ``info`` collects human-readable notes about what was done, keyed by
    layer name (``DummyDict`` discards them by default — TODO confirm).
    """
    assert conv_fc_transitionals is None or name is not None
    if tfW.ndim == 4:
        # 4-D: convolutional weights.
        if (name == 'conv1_1' or name == 'conv1' or name == color_layer) and tfW.shape[2] == 3:
            # First conv layer with 3 input channels: reverse the channel
            # axis (presumably RGB<->BGR conversion for caffe — confirm).
            tfW = tfW[:, :, ::-1]
            info[name] = 'flipped'
        cfW = tfW.transpose(3, 2, 0, 1)
        return cfW
    else:
        # 2-D: fully-connected weights.
        if conv_fc_transitionals is not None and name in conv_fc_transitionals:
            # Layer transitions from conv to fc: reshape through the caffe
            # conv shape so the flattening order matches caffe's.
            cf_shape = conv_fc_transitionals[name]
            tf_shape = (cf_shape[2], cf_shape[3], cf_shape[1], cf_shape[0])
            cfW = tfW.reshape(tf_shape).transpose(3, 2, 0, 1).reshape(cf_shape[0], -1)
            info[name] = 'fc->c transitioned with caffe shape {}'.format(cf_shape)
            return cfW
        else:
            # Plain fc layer: a transpose suffices.
            return tfW.T
def from_caffe(cfW, name=None, color_layer='', conv_fc_transitionals=None, info=DummyDict()):
    """Convert a caffe weight array (O x I x H x W) to TensorFlow layout
    (H x W x I x O) — the inverse of ``to_caffe``.

    ``info`` collects human-readable notes about what was done, keyed by
    layer name (``DummyDict`` discards them by default — TODO confirm).

    Fixes over the original: removed a stray ``|`` corruption before the
    4-D return and a duplicated assert.
    """
    assert conv_fc_transitionals is None or name is not None
    if cfW.ndim == 4:
        # 4-D: convolutional weights.
        tfW = cfW.transpose(2, 3, 1, 0)
        if (name == 'conv1_1' or name == 'conv1' or name == color_layer) and tfW.shape[2] == 3:
            # First conv layer with 3 input channels: reverse the channel
            # axis (presumably BGR<->RGB conversion — mirrors to_caffe).
            tfW = tfW[:, :, ::-1]
            info[name] = 'flipped'
        return tfW
    else:
        # 2-D: fully-connected weights.
        if conv_fc_transitionals is not None and name in conv_fc_transitionals:
            # Layer transitions from conv to fc: reshape through the caffe
            # conv shape so the flattening order matches TensorFlow's.
            cf_shape = conv_fc_transitionals[name]
            tfW = cfW.reshape(cf_shape).transpose(2, 3, 1, 0).reshape(-1, cf_shape[0])
            info[name] = 'c->fc transitioned with caffe shape {}'.format(cf_shape)
            return tfW
        else:
            # Plain fc layer: a transpose suffices.
            return cfW.T
def load_caffemodel(path, session, prefix='', ignore=set(),
                    conv_fc_transitionals=None, renamed_layers=DummyDict(),
                    color_layer='', verbose=False, pre_adjust_batch_norm=False):
    """Load weights from a caffemodel-style HDF5 dump into TF variables.

    The file's ``/data`` group is keyed by layer name; each layer holds '0'
    (weights) and optionally '1' (biases), with optional ``batch_<layer>``
    entries carrying batch-norm statistics.

    :param session: TensorFlow session used to run the assignment ops.
    :param prefix: prepended to every layer name when looking up variables.
    :param ignore: layer names to skip (read-only; safe as a shared default).
    :param conv_fc_transitionals: caffe shapes for conv->fc transition
        layers, passed through to ``from_caffe``.
    :param renamed_layers: optional caffe-name -> TF-name translation.
    :param color_layer: extra layer whose channels are flipped (see
        ``from_caffe``).
    :param pre_adjust_batch_norm: if True, fold the stored batch-norm
        statistics directly into the weights and biases.
    :return: list of human-readable descriptions of each assignment made.
    """
    import tensorflow as tf

    def find_weights(name, which='weights'):
        # Locate the trainable variable named "<name>/<which>"; None if absent.
        for tw in tf.trainable_variables():
            if tw.name.split(':')[0] == name + '/' + which:
                return tw
        return None

    data = dd.io.load(path, '/data')
    assigns = []
    loaded = []
    info = {}
    for key in data:
        local_key = prefix + renamed_layers.get(key, key)
        if key not in ignore:
            bn_name = 'batch_' + key
            if '0' in data[key]:
                weights = find_weights(local_key, 'weights')
                if weights is not None:
                    W = from_caffe(data[key]['0'], name=key, info=info,
                                   conv_fc_transitionals=conv_fc_transitionals,
                                   color_layer=color_layer)
                    # Bug fix: the original compared W.ndim (an int) against
                    # the variable's shape *list*, which is always unequal and
                    # forced a reshape on every load. Compare actual shapes.
                    if list(W.shape) != weights.get_shape().as_list():
                        W = W.reshape(weights.get_shape().as_list())
                    init_str = ''
                    if pre_adjust_batch_norm and bn_name in data:
                        # Fold 1/sigma of the batch norm into the weights.
                        bn_data = data[bn_name]
                        sigma = np.sqrt(1e-5 + bn_data['1'] / bn_data['2'])
                        W /= sigma
                        init_str += ' batch-adjusted'
                    assigns.append(weights.assign(W))
                    loaded.append('{}:0 -> {}:weights{} {}'.format(key, local_key, init_str, info.get(key, '')))
            if '1' in data[key]:
                biases = find_weights(local_key, 'biases')
                if biases is not None:
                    bias = data[key]['1']
                    init_str = ''
                    if pre_adjust_batch_norm and bn_name in data:
                        # Fold the batch-norm mean/variance into the bias.
                        bn_data = data[bn_name]
                        sigma = np.sqrt(1e-5 + bn_data['1'] / bn_data['2'])
                        mu = bn_data['0'] / bn_data['2']
                        bias = (bias - mu) / sigma
                        init_str += ' batch-adjusted'
                    assigns.append(biases.assign(bias))
                    loaded.append('{}:1 -> {}:biases{}'.format(key, local_key, init_str))
    session.run(assigns)
    if verbose:
        tprint('Loaded model from', path)
        for l in loaded:
            tprint('-', l)
    return loaded
def save_caffemodel(path, session, layers, prefix='',
                    conv_fc_transitionals=None, color_layer='', verbose=False,
                    save_batch_norm=False, lax_naming=False):
    """Save TF variables for the given layers to a caffemodel-style HDF5 file.

    ``layers`` is a list of caffe layer names, or (caffe_name, tf_name)
    tuples when the TF variable scope differs from the caffe name.
    ``lax_naming`` matches variables by name *suffix* instead of exact name.
    Returns a list of human-readable descriptions of what was saved.
    """
    import tensorflow as tf

    def find_weights(name, which='weights'):
        # Locate the trainable variable "<name>/<which>"; None if absent.
        for tw in tf.trainable_variables():
            if lax_naming:
                ok = tw.name.split(':')[0].endswith(name + '/' + which)
            else:
                ok = tw.name.split(':')[0] == name + '/' + which
            if ok:
                return tw
        return None

    def find_batch_norm(name, which='mean'):
        # Locate a batch-norm statistic variable "<name>/bn_<which>".
        for tw in tf.all_variables():
            #if name + '_moments' in tw.name and tw.name.endswith(which + '/batch_norm:0'):
            if tw.name.endswith(name + '/bn_' + which + ':0'):
                return tw
        return None

    data = {}
    saved = []
    info = {}
    for lay in layers:
        if isinstance(lay, tuple):
            # (caffe name, tf name) pair
            lay, p_lay = lay
        else:
            p_lay = lay
        weights = find_weights(prefix + p_lay, 'weights')
        d = {}
        if weights is not None:
            # '0' holds the weights, converted to caffe layout.
            tfW = session.run(weights)
            cfW = to_caffe(tfW, name=lay,
                           conv_fc_transitionals=conv_fc_transitionals,
                           info=info, color_layer=color_layer)
            d['0'] = cfW
            saved.append('{}:weights -> {}:0 {}'.format(prefix + p_lay, lay, info.get(lay, '')))
        biases = find_weights(prefix + p_lay, 'biases')
        if biases is not None:
            # '1' holds the biases, unchanged.
            b = session.run(biases)
            d['1'] = b
            saved.append('{}:biases -> {}:1'.format(prefix + p_lay, lay))
        if d:
            data[lay] = d
        if save_batch_norm:
            # Store batch-norm statistics as a separate 'batch_<lay>' entry
            # ('2' is the caffe scale factor, fixed to 1.0 here).
            mean = find_batch_norm(lay, which='mean')
            variance = find_batch_norm(lay, which='var')
            if mean is not None and variance is not None:
                d = {}
                d['0'] = np.squeeze(session.run(mean))
                d['1'] = np.squeeze(session.run(variance))
                d['2'] = np.array([1.0], dtype=np.float32)
                data['batch_' + lay] = d
                saved.append('batch_norm({}) saved'.format(lay))
    dd.io.save(path, dict(data=data), compression=None)
    if verbose:
        tprint('Saved model to', path)
        for l in saved:
            tprint('-', l)
    return saved
|
rtucker-mozilla/mozilla_inventory | mozdns/models.py | Python | bsd-3-clause | 8,260 | 0 | from django.core.exceptions import ObjectDoesNotExist, ValidationError
from django.db import models
import mozdns
from mozdns.domain.models import Domain
from mozdns.view.models import View
from mozdns.mixins import ObjectUrlMixin, DisplayMixin
from mozdns.validation import validate_first_label, validate_name
from mozdns.validation import validate_ttl
class LabelDomainMixin(models.Model):
    """
    This class provides common functionality that many DNS record
    classes share. This includes a foreign key to the ``domain`` table
    and a ``label`` CharField.

    If you plan on using the ``unique_together`` constraint on a Model
    that inherits from ``LabelDomainMixin``, you must include ``domain`` and
    ``label`` explicitly if you need them to.

    All common records have a ``fqdn`` field. This field is updated
    every time the object is saved::

        fqdn = name + domain.name

        or if name == ''

        fqdn = domain.name

    This field makes searching for records much easier. Instead of
    looking at ``obj.label`` together with ``obj.domain.name``, you can
    just search the ``obj.fqdn`` field.

    "the total number of octets that represent a name (i.e., the sum of
    all label octets and label lengths) is limited to 255" - RFC 4471
    """
    # Parent zone this record lives in.
    domain = models.ForeignKey(Domain, null=False, help_text="FQDN of the "
                               "domain after the short hostname. "
                               "(Ex: <i>Vlan</i>.<i>DC</i>.mozilla.com)")

    # "The length of any one label is limited to between 1 and 63 octets."
    # -- RFC 1035 / RFC 2181
    label = models.CharField(max_length=63, blank=True, null=True,
                             validators=[validate_first_label],
                             help_text="Short name of the fqdn")

    # Denormalized "<label>.<domain.name>" (or just the domain name when the
    # label is empty); indexed to make record lookups cheap.
    fqdn = models.CharField(max_length=255, blank=True, null=True,
                            validators=[validate_name], db_index=True)

    class Meta:
        abstract = True
class ViewMixin(models.Model):
    """Mixin that attaches DNS ``views`` to a record and validates that the
    record's zone can actually serve records within those views.
    """

    def validate_views(instance, views):
        # Field validator: delegate straight to clean_views, which already
        # takes the full list. (The original looped and called
        # clean_views(views) once *per view* with the whole list, repeating
        # identical work len(views) times.)
        instance.clean_views(views)

    views = models.ManyToManyField(
        View, blank=True, validators=[validate_views]
    )

    class Meta:
        abstract = True

    def clean_views(self, views):
        """Validate every view in *views* against this record's zone(s).

        ``views`` is the full list about to be saved; subclasses should
        override this according to their specific needs.
        """
        for view in views:
            if hasattr(self, 'domain'):
                self.check_no_ns_soa_condition(self.domain, view=view)
            if hasattr(self, 'reverse_domain'):
                self.check_no_ns_soa_condition(self.reverse_domain, view=view)

    def check_no_ns_soa_condition(self, domain, view=None):
        """Raise ValidationError if *domain*'s zone lacks the NS records
        required to support other record types (optionally within *view*).
        """
        if not domain.soa:
            return
        root_domain = domain.soa.root_domain
        if not root_domain:
            # Nothing to check. (The original dereferenced root_domain in
            # its elif branch and crashed with AttributeError when a view
            # was supplied and root_domain was None.)
            return
        if not root_domain.nameserver_set.exists():
            fail = True
        elif view and not root_domain.nameserver_set.filter(views=view).exists():
            fail = True
        else:
            fail = False
        if fail:
            raise ValidationError(
                "The zone you are trying to assign this record into does "
                "not have an NS record, thus cannnot support other "
                "records.")
class MozdnsRecord(ViewMixin, DisplayMixin, ObjectUrlMixin):
ttl = models.PositiveIntegerField(default=3600, blank=True, null=True,
validators=[validate_ttl],
help_text="Time to Live of this record")
description = models.CharField(max_length=1000, blank=True, null=True,
help_text="A description of this record.")
# fqdn = label + domain.name <--- see set_fqdn
def __str__(self):
self.set_fqdn()
return self.bind_render_record()
def __repr__(self):
return "<{0} '{1}'>".format(self.rdtype, str(self))
class Meta:
abstract = True
@classmethod
def get_api_fields(cls):
"""
The purpose of this is to help the API decide which fields to exp | ose
to the user when they are creating and updateing an Object. This
function should be implemented in inheriting models and overriden to
provide additional fields. Tastypie ignores any relational fields on
the model. See the ModelResou | rce definitions for view and domain
fields.
"""
return ['fqdn', 'ttl', 'description', 'views']
def clean(self):
# The Nameserver and subclasses of BaseAddressRecord do not call this
# function
self.set_fqdn()
self.check_TLD_condition()
self.check_no_ns_soa_condition(self.domain)
self.check_for_delegation()
if self.rdtype != 'CNAME':
self.check_for_cname()
def delete(self, *args, **kwargs):
if self.domain.soa:
self.domain.soa.schedule_rebuild()
from mozdns.utils import prune_tree
call_prune_tree = kwargs.pop('call_prune_tree', True)
objs_domain = self.domain
super(MozdnsRecord, self).delete(*args, **kwargs)
if call_prune_tree:
prune_tree(objs_domain)
def save(self, *args, **kwargs):
self.full_clean()
if self.pk:
# We need to get the domain from the db. If it's not our current
# domain, call prune_tree on the domain in the db later.
db_domain = self.__class__.objects.get(pk=self.pk).domain
if self.domain == db_domain:
db_domain = None
else:
db_domain = None
no_build = kwargs.pop("no_build", False)
super(MozdnsRecord, self).save(*args, **kwargs)
if no_build:
pass
else:
# Mark the soa
if self.domain.soa:
self.domain.soa.schedule_rebuild()
if db_domain:
from mozdns.utils import prune_tree
prune_tree(db_domain)
def set_fqdn(self):
try:
if self.label == '':
self.fqdn = self.domain.name
else:
self.fqdn = "{0}.{1}".format(self.label,
self.domain.name)
except ObjectDoesNotExist:
return
def check_for_cname(self):
"""
"If a CNAME RR is preent at a node, no other data should be
present; this ensures that the data for a canonical name and its
aliases cannot be different."
-- `RFC 1034 <http://tools.ietf.org/html/rfc1034>`_
Call this function in models that can't overlap with an existing
CNAME.
"""
CNAME = mozdns.cname.models.CNAME
if hasattr(self, 'label'):
if CNAME.objects.filter(domain=self.domain,
label=self.label).exists():
raise ValidationError("A CNAME with this name already exists.")
else:
if CNAME.objects.filter(label='', domain=self.domain).exists():
raise ValidationError("A CNAME with this name already exists.")
def check_for_delegation(self):
"""
If an object's domain is delegated it should not be able to
be changed. Delegated domains cannot have objects created in
them.
"""
try:
if not self.domain.delegated:
return
except ObjectDoesNotExist:
return
if not self.pk: # We don't exist yet.
raise ValidationError("No objects can be created in the {0}"
"domain. It is delegated."
.format(self.domain.name))
def check_TLD_condition(self):
domain = Domain.objects.filter(name=self.fqdn)
if not domain:
return
if self.label == '' and domain[0] == self.domain:
return # This is allowed
else:
raise ValidationError("You cannot create an record that po |
tmm1/pygments.rb | vendor/pygments-main/pygments/lexers/matlab.py | Python | mit | 31,893 | 0.00138 | # -*- coding: utf-8 -*-
"""
pygments.lexers.matlab
~~~~~~~~~~~~~~~~~~~~~~
Lexers for Matlab and related languages.
:copyright: Copyright 2006-2020 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import Lexer, RegexLe | xer, bygroups, default, words, \
do_insertions
from pygments.tok | en import Text, Comment, Operator, Keyword, Name, String, \
Number, Punctuation, Generic, Whitespace
from pygments.lexers import _scilab_builtins
__all__ = ['MatlabLexer', 'MatlabSessionLexer', 'OctaveLexer', 'ScilabLexer']
class MatlabLexer(RegexLexer):
    """
    For Matlab source code.

    .. versionadded:: 0.10
    """
    name = 'Matlab'
    aliases = ['matlab']
    filenames = ['*.m']
    mimetypes = ['text/matlab']

    #
    # These lists are generated automatically.
    # Run the following in bash shell:
    #
    # for f in elfun specfun elmat; do
    #     echo -n "$f = "
    #     matlab -nojvm -r "help $f;exit;" | perl -ne \
    #     'push(@c,$1) if /^ (\w+)\s+-/; END {print q{["}.join(q{","},@c).qq{"]\n};}'
    # done
    #
    # elfun: Elementary math functions
    # specfun: Special Math functions
    # elmat: Elementary matrices and matrix manipulation
    #
    # taken from Matlab version 9.4 (R2018a)
    #
    elfun = ("sin", "sind", "sinh", "asin", "asind", "asinh", "cos", "cosd", "cosh",
             "acos", "acosd", "acosh", "tan", "tand", "tanh", "atan", "atand", "atan2",
             "atan2d", "atanh", "sec", "secd", "sech", "asec", "asecd", "asech", "csc", "cscd",
             "csch", "acsc", "acscd", "acsch", "cot", "cotd", "coth", "acot", "acotd",
             "acoth", "hypot", "deg2rad", "rad2deg", "exp", "expm1", "log", "log1p", "log10", "log2", "pow2",
             "realpow", "reallog", "realsqrt", "sqrt", "nthroot", "nextpow2", "abs",
             "angle", "complex", "conj", "imag", "real", "unwrap", "isreal", "cplxpair",
             "fix", "floor", "ceil", "round", "mod", "rem", "sign")
    specfun = ("airy", "besselj", "bessely", "besselh", "besseli", "besselk", "beta",
               "betainc", "betaincinv", "betaln", "ellipj", "ellipke", "erf", "erfc", "erfcx",
               "erfinv", "erfcinv", "expint", "gamma", "gammainc", "gammaincinv", "gammaln", "psi", "legendre",
               "cross", "dot", "factor", "isprime", "primes", "gcd", "lcm", "rat",
               "rats", "perms", "nchoosek", "factorial", "cart2sph", "cart2pol",
               "pol2cart", "sph2cart", "hsv2rgb", "rgb2hsv")
    elmat = ("zeros", "ones", "eye", "repmat", "repelem", "linspace", "logspace",
             "freqspace", "meshgrid", "accumarray", "size", "length", "ndims", "numel",
             "disp", "isempty", "isequal", "isequaln", "cat", "reshape",
             "diag", "blkdiag", "tril", "triu", "fliplr", "flipud", "flip", "rot90",
             "find", "end", "sub2ind", "ind2sub", "bsxfun", "ndgrid", "permute",
             "ipermute", "shiftdim", "circshift", "squeeze", "isscalar", "isvector",
             "isrow", "iscolumn", "ismatrix", "eps", "realmax", "realmin", "intmax", "intmin", "flintmax", "pi", "i", "inf", "nan", "isnan",
             "isinf", "isfinite", "j", "true", "false", "compan", "gallery", "hadamard", "hankel",
             "hilb", "invhilb", "magic", "pascal", "rosser", "toeplitz", "vander",
             "wilkinson")

    _operators = r'-|==|~=|<=|>=|<|>|&&|&|~|\|\|?|\.\*|\*|\+|\.\^|\.\\|\./|/|\\'

    tokens = {
        'root': [
            # line starting with '!' is sent as a system command. not sure what
            # label to use...
            (r'^!.*', String.Other),
            (r'%\{\s*\n', Comment.Multiline, 'blockcomment'),
            (r'%.*$', Comment),
            (r'^\s*function\b', Keyword, 'deffunc'),

            # from 'iskeyword' on version 9.4 (R2018a):
            # Check that there is no preceding dot, as keywords are valid field
            # names.
            (words(('break', 'case', 'catch', 'classdef', 'continue', 'else',
                    'elseif', 'end', 'for', 'function',
                    'global', 'if', 'otherwise', 'parfor',
                    'persistent', 'return', 'spmd', 'switch',
                    'try', 'while'),
                   prefix=r'(?<!\.)', suffix=r'\b'),
             Keyword),

            ("(" + "|".join(elfun + specfun + elmat) + r')\b', Name.Builtin),

            # line continuation with following comment:
            (r'(\.\.\.)(.*)$', bygroups(Keyword, Comment)),

            # command form:
            # "How MATLAB Recognizes Command Syntax" specifies that an operator
            # is recognized if it is either surrounded by spaces or by no
            # spaces on both sides; only the former case matters for us.  (This
            # allows distinguishing `cd ./foo` from `cd ./ foo`.)
            (r'(?:^|(?<=;))(\s*)(\w+)(\s+)(?!=|\(|(?:%s)\s+)' % _operators,
             bygroups(Text, Name, Text), 'commandargs'),

            # operators:
            (_operators, Operator),

            # numbers (must come before punctuation to handle `.5`; cannot use
            # `\b` due to e.g. `5. + .5`).
            (r'(?<!\w)((\d+\.\d*)|(\d*\.\d+))([eEf][+-]?\d+)?(?!\w)', Number.Float),
            (r'\b\d+[eEf][+-]?[0-9]+\b', Number.Float),
            (r'\b\d+\b', Number.Integer),

            # punctuation:
            (r'\[|\]|\(|\)|\{|\}|:|@|\.|,', Punctuation),
            (r'=|:|;', Punctuation),

            # quote can be transpose, instead of string:
            # (not great, but handles common cases...)
            (r'(?<=[\w)\].])\'+', Operator),
            (r'"(""|[^"])*"', String),
            (r'(?<![\w)\].])\'', String, 'string'),
            (r'[a-zA-Z_]\w*', Name),
            (r'.', Text),
        ],
        'blockcomment': [
            (r'^\s*%\}', Comment.Multiline, '#pop'),
            (r'^.*\n', Comment.Multiline),
            (r'.', Comment.Multiline),
        ],
        'deffunc': [
            (r'(\s*)(?:(.+)(\s*)(=)(\s*))?(.+)(\()(.*)(\))(\s*)',
             bygroups(Whitespace, Text, Whitespace, Punctuation,
                      Whitespace, Name.Function, Punctuation, Text,
                      Punctuation, Whitespace), '#pop'),
            # function with no args
            (r'(\s*)([a-zA-Z_]\w*)', bygroups(Text, Name.Function), '#pop'),
        ],
        'string': [
            (r"[^']*'", String, '#pop'),
        ],
        'commandargs': [
            # If an equal sign or other operator is encountered, this
            # isn't a command. It might be a variable assignment or
            # comparison operation with multiple spaces before the
            # equal sign or operator
            (r"=", Punctuation, '#pop'),
            (_operators, Operator, '#pop'),
            (r"[ \t]+", Text),
            ("'[^']*'", String),
            (r"[^';\s]+", String),
            (";", Punctuation, '#pop'),
            default('#pop'),
        ]
    }

    def analyse_text(text):
        # function declaration.
        # Bug fix: the generator predicate previously tested `text` instead of
        # `line`, making the comment filter a constant — the "first
        # non-comment line" was always simply the first line.
        first_non_comment = next((line for line in text.splitlines()
                                  if not re.match(r'^\s*%', line)), '').strip()
        if (first_non_comment.startswith('function')
                and '{' not in first_non_comment):
            return 1.
        # comment
        elif re.search(r'^\s*%', text, re.M):
            return 0.2
        # system cmd
        elif re.search(r'^!\w+', text, re.M):
            return 0.2
line_re = re.compile('.*?\n')
class MatlabSessionLexer(Lexer):
"""
For Matlab sessions. Modeled after PythonConsoleLexer.
Contributed by Ken Schutte <kschutte@csail.mit.edu>.
.. versionadded:: 0.10
"""
name = 'Matlab session'
aliases = ['matlabsession']
def get_tokens_unprocessed(self, text):
mlexer = MatlabLexer(**self.options)
curcode = ''
insertions = []
continuation = False
for match in line_re.finditer(text):
line = match.group()
if line.startswith('>> '):
insertions.append((len(curcode),
[(0, Generic.Prompt, line[:3])]) |
mdrasmus/summon | examples/18_summon.py | Python | gpl-2.0 | 2,904 | 0.017218 | #!/usr/bin/env python-i
# draws SUMMON logo
#
import math
import summon
from summon.core import *
from summon import shapes, colors
def interleave(a, b):
    """Merge two sequences by alternating consecutive *pairs* of elements.

    interleave([1, 2, 3, 4], [5, 6, 7, 8]) -> [1, 2, 5, 6, 3, 4, 7, 8]

    Fixed to use range() instead of the Python-2-only xrange(), so the
    helper works under both Python 2 and 3.
    """
    c = []
    for i in range(0, len(a), 2):
        c.extend(a[i:i+2] + b[i:i+2])
    return c
def curve(x, y, start, end, radius, width):
    """Filled ring segment: a triangle strip spanning two concentric arcs."""
    outer = shapes.arc_path(x, y, start, end, radius, 30)
    inner = shapes.arc_path(x, y, start, end, radius - width, 30)
    return triangle_strip(*interleave(outer, inner))
def draw_u(top, bottom, w, t):
    """Draw a 'U' glyph: two vertical bars joined by a bottom half-ring."""
    left_bar = shapes.box(-w, top, -w + t, bottom + w)
    right_bar = shapes.box(w, top, w - t, bottom + w)
    bottom_cap = curve(0, bottom + w, -math.pi, 0.0, w, t)
    return group(left_bar, right_bar, bottom_cap)
def draw_m(top, bottom, w, t):
    """Draw an 'M' glyph: two upside-down 'U' shapes placed side by side."""
    def flipped_u():
        # Each half is built independently so the scene graph never shares
        # a node between two parents.
        return rotate(180, draw_u(top, bottom, w, t))

    return group(
        translate(0, -2 * w + t, flipped_u()),
        translate(2 * w - t, -2 * w + t, flipped_u()))
def draw_summon():
    """Assemble the 'SUMMON' word mark from box and arc primitives.

    Letters are laid out left to right in glyph-local coordinates; the
    outer translate centers the whole word on the origin.
    """
    t = 150 # thickness
    w = 200 # width
    s = 50  # spacing
    top = w
    bottom = -3*w+t
    return translate(-7*w+t-2.5*s, -(top + bottom) / 2.0,
        # S
        curve(0, 0, 0, 1.5*math.pi, w, t),
        curve(0, -2*w+t, -math.pi, .5*math.pi, w, t),
        # U
        translate(2*w+s, 0,
            draw_u(top, bottom, w, t)),
        # M
        translate(4*w+2*s, 0,
            draw_m(top, bottom, w, t)),
        # M
        translate(8*w-t+3*s, 0,
            draw_m(top, bottom, w, t)),
        # 0
        translate(12*w-2*t+4*s, 0,
            curve(0, 0, 0.0, math.pi, w, t),
            shapes.box(-w,top-w, -w+t, bottom+w),
            shapes.box(w,top-w, w-t, bottom+w),
            curve(0, bottom+w, -math.pi, 0.0, w, t)),
        # N
        translate(14*w-2*t+5*s, 0,
            translate(0, -2*w+t,
                rotate(180,
                    draw_u(top, bottom, w, t))))
        )
def blur(x, col):
    """Background glow: a vertical color gradient plus white side fades.

    ``x`` is the horizontal extent at which the white fade becomes fully
    transparent; ``col`` is the base color of the gradient.
    """
    return group(
        # color fade
        quads(col, -2000, 0, 2000, 0,
              color(0, 0, 0, 0), 2000, 300, -2000, 300),
        # white fades
        quads(color(1, 1, 1, 1), -2000, 0, -2000, 600,
              color(1, 1, 1, 0), -x, 600, -x, 0),
        quads(color(1, 1, 1, 1), 2000, 0, 2000, 600,
              color(1, 1, 1, 0), x, 600, x, 0))
def draw_summon_logo():
    """Full logo: mirrored background glows, the word mark, and a tag line."""
    return group(
        # glow above and (rotated) below the baseline
        blur(1200, color(0, .2, .5, .8)),
        rotate(180, blur(0, color(0, 0, .5, .5))),
        color(0, 0, 0),
        draw_summon(),
        color(0, 0, 0),
        # tag line, clipped so it scales with the view
        text_clip("visualization prototyping and scripting",
                  -1600, -450, 1600, -900, 0, 20,
                  "top", "center"))
# draw logo: open a window on a white background and show the logo centered.
win = summon.Window("18_summon", size=(800,400))
win.set_bgcolor(1, 1, 1)
win.add_group(draw_summon_logo())
win.home()
jhumphry/gazetteer_etl | gazetteer/fields.py | Python | gpl-2.0 | 3,587 | 0 | # gazetteer.fields
# Copyright 2016, James Humphry
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
#
'''Descriptions of the fields that can make up gazetteer data files, along
with information on their SQL equivalents'''
import abc
class GazetteerField(metaclass=abc.ABCMeta):
    '''Abstract base class for one field/column of a gazetteer data table.

    Subclasses set ``sql_type_name`` (and may override ``generate_sql``)
    to describe their SQL representation.'''

    sql_type_name = 'NONE'

    def __init__(self, field_name, sql_name='', nullable=True):
        self.field_name = field_name
        if sql_name == '':
            # Derive a SQL identifier from the human-readable field name.
            self.sql_name = field_name.lower().replace(' ', '_')
        else:
            self.sql_name = sql_name
        self.nullable = nullable

    def generate_sql(self):
        '''Return the column definition for use in a CREATE TABLE statement.'''
        null_clause = '' if self.nullable else ' NOT NULL'
        return '{} {}{}'.format(self.sql_name, self.sql_type_name, null_clause)
class BigIntField(GazetteerField):
    '''A gazetteer field corresponding to the SQL type BIGINT
    (64-bit signed integer).'''
    sql_type_name = 'BIGINT'
class IntegerField(GazetteerField):
    '''A gazetteer field corresponding to the SQL type INTEGER
    (32-bit signed integer).'''
    sql_type_name = 'INTEGER'
class SmallIntField(GazetteerField):
    '''A gazetteer field corresponding to the SQL type SMALLINT
    (16-bit signed integer).'''
    sql_type_name = 'SMALLINT'
class DoubleField(GazetteerField):
    '''A gazetteer field corresponding to the SQL type DOUBLE PRECISION
    (64-bit IEEE float).'''
    sql_type_name = 'DOUBLE PRECISION'
class TextField(GazetteerField):
    '''A gazetteer field corresponding to the SQL type TEXT
    (unbounded-length string).'''
    sql_type_name = 'TEXT'
class FixedTextField(GazetteerField):
    '''A gazetteer field corresponding to the SQL type CHARACTER VARYING()
    with a defined width.

    The SQL type carries a per-instance width, so ``generate_sql`` is
    overridden instead of relying on the class-level ``sql_type_name``.
    '''

    def __init__(self, field_name, width, sql_name='', nullable=True):
        super().__init__(field_name, sql_name, nullable)
        self.width = width

    def generate_sql(self):
        # The original duplicated the whole string build across both
        # branches of the nullable check; only the suffix differs.
        sql = '{} CHARACTER VARYING({})'.format(self.sql_name, self.width)
        if not self.nullable:
            sql += ' NOT NULL'
        return sql
class DateField(GazetteerField):
    '''A gazetteer field corresponding to the SQL type DATE
    (calendar date without a time component).'''
    sql_type_name = 'DATE'
class TimeStampField(GazetteerField):
    '''A gazetteer field corresponding to the SQL type TIMESTAMP
    (date plus time of day).'''
    sql_type_name = 'TIMESTAMP'
class FlagField(GazetteerField):
    '''This is intended for gazetteer single character fields that are
    sometimes used as a form of Boolean or basic enumeration type. It may be
    more efficient to switch these to the "char" type (with the quotations)
    which is an internal PostgreSQL type which has a fixed width and only
    takes up one byte.'''
    sql_type_name = 'CHARACTER VARYING(1)'
|
gemrb/gemrb | gemrb/GUIScripts/GUISONGS.py | Python | gpl-2.0 | 2,309 | 0.026851 | # GemRB - Infinity Engine Emulator
# Copyright (C) 2003 The GemRB Project
#
# This program is free software; you can redistribute it and/or
# modify | it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details. |
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
#
#instead of credits, you can listen the songs of the game :)
import GemRB
import GameCheck
# Module-level state shared between OnLoad and the button callbacks below.
MovieWindow = 0        # main GUIMOVIE window handle
TextAreaControl = 0    # song-list text area
MoviesTable = 0        # 2DA table with song names/resources
MoviesTableName = ""   # per-game table name (set in OnLoad)
CreditsRef = ""        # per-game credits movie resref (set in OnLoad)
ColOffset = 0          # per-game column/row offset into the table
def OnLoad():
	"""Build the song-listing window and wire up its buttons.

	NOTE(review): ColorWhitish and TA_COLOR_OPTIONS are presumably injected
	into the script namespace by the engine's GUIDefines — confirm.
	"""
	global MovieWindow, TextAreaControl, MoviesTable, MoviesTableName, ColOffset, CreditsRef

	MovieWindow = GemRB.LoadWindow(0, "GUIMOVIE")
	TextAreaControl = MovieWindow.GetControl(0)
	PlayButton = MovieWindow.GetControl(2)
	CreditsButton = MovieWindow.GetControl(3)
	DoneButton = MovieWindow.GetControl(4)

	# Per-game song table and credits movie.
	if GameCheck.IsBG1():
		MoviesTableName, ColOffset, CreditsRef = "MUSIC", 1, "credits"
	elif GameCheck.IsBG2():
		MoviesTableName, ColOffset, CreditsRef = "SONGLIST", 0, "endcrdit"

	MoviesTable = GemRB.LoadTable(MoviesTableName)
	TextAreaControl.SetColor(ColorWhitish, TA_COLOR_OPTIONS)
	song_names = [MoviesTable.GetValue(i, 0) for i in range(ColOffset, MoviesTable.GetRowCount())]
	TextAreaControl.SetOptions(song_names, "MovieIndex", 0)

	PlayButton.SetText(17318)
	CreditsButton.SetText(15591)
	DoneButton.SetText(11973)
	PlayButton.OnPress(PlayPress)
	CreditsButton.OnPress(CreditsPress)
	DoneButton.OnPress(DonePress)
	MovieWindow.Focus()
	return
def PlayPress():
	"""Play the song currently selected in the list."""
	row = GemRB.GetVar("MovieIndex") + ColOffset
	resref = MoviesTable.GetValue(row, 1 - ColOffset)
	GemRB.LoadMusicPL(resref, 1)
	return
def CreditsPress():
	"""Roll the game's credits movie."""
	GemRB.PlayMovie(CreditsRef, 1)
	return
if MovieWindow:
MovieWindow.Close ()
if GameCheck.HasTOB():
GemRB.SetNextScript ("Start2")
else:
GemRB.SetNextScript ("Start")
return
|
IndiciumSRL/wirecurly | wirecurly/configuration/node.py | Python | mpl-2.0 | 363 | 0.055096 | import logging
log = logging.getLogger(__name__)
__all__ = ['Node']
class Node(object):
	'''
	A single <node> entry of an ACL: a permission type plus a CIDR address.
	'''
	def __init__(self, perm, add):
		super(Node, self).__init__()
		self.attrs = dict(type=perm, cidr=add)

	def todict(self):
		'''
		Return a plain dict so the node can be converted/serialized.
		'''
		return dict(tag='node', attrs=self.attrs)
|
anhstudios/swganh | data/scripts/templates/object/tangible/component/armor/shared_base_armor_segment_enhancement.py | Python | mit | 504 | 0.043651 | ### | # NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
	"""Factory for the armor-segment-enhancement component template.

	``kernel`` is supplied by the engine's template loader; it is unused
	here but is part of the required factory signature.
	"""
	result = Tangible()

	result.template = "object/tangible/component/armor/shared_base_armor_segment_enhancement.iff"
	result.attribute_template_id = -1
	result.stfName("craft_clothing_ingredients_n","armor_segment_enhancement")		
	
	#### BEGIN MODIFICATIONS ####
	#### END MODIFICATIONS ####
	
	return result
memoria-exorior/memoria | memoria/wsgi.py | Python | apache-2.0 | 97 | 0 | from memoria.app import create | _app
# WSGI entry point: build the application once at import time so servers
# (e.g. gunicorn) can reference it as ``memoria.wsgi:app``.
app = create_app()

if __name__ == "__main__":
    # Convenience launcher for local development only.
    app.run()
svohara/svo_util | src/svo_util/data_io.py | Python | unlicense | 12,678 | 0.013409 | '''
Created on Dec 10, 2012
@author: Stephen O'Hara
Utility functions for interacting with certain types
of data files.
'''
import os
import numpy as np
import svo_util
from StringIO import StringIO
import tokenize
find = np.nonzero #this is the matlab command
which = np.nonzero #this is the R command
def smart_split(linein, sep=","):
    '''
    Works much like the built-in split method of strings, but this version
    honors quoted strings: if the separator appears inside a quoted string,
    it is ignored so the quoted text stays a single token.
    '''
    pending = []   # tokens accumulated for the field currently being built
    fields = []    # completed field values
    last_col = 0   # end column of the previous token (to restore whitespace)
    for _, text, (_, start_col), (_, end_col), _ in \
            tokenize.generate_tokens(StringIO(linein).readline):
        gap = start_col - last_col
        last_col = end_col
        if not text.strip():
            continue  # pure-whitespace / structural token
        if ''.join(text.split(sep)) == '':
            # Token consists solely of separators: close the current field.
            if pending:
                fields.append(''.join(pending).strip())
                pending = []
            continue
        if gap > 0:
            # Re-insert the whitespace the tokenizer skipped over.
            text = ' ' * gap + text
        if text[0] == text[-1] == '"' or text[0] == text[-1] == '\'':
            text = text[1:-1]
        pending.append(text)
    # End of line: flush whatever remains (no trailing separator needed).
    if pending:
        fields.append(''.join(pending).strip())
    return fields
class CSV_data:
'''
Class to encapsulate reading data from simple csv files
where the first row is the column headers and fields are
separated with commas. Rows are data samples, columns are field values.
'''
def __init__(self, datadir, csvfile, label_cols=[0], separator=",", missing_val_sentinel=-9999):
'''
constructor
@param datadir: The directory where the csv file is located
@param csvfile: The filename of the data file
@param label_cols: A list of the zero-indexed column numbers
that should be treated as labels. These columns are assumed
to have a discrete set of values, like a set of string labels,
a s | et of integers. For regression data sets where | the "labels"
are continuous-valued numbers, set label_cols = None, and all
the csv fields will be loaded into the data matrix.
'''
self.datadir = datadir
self.csvfile = csvfile
self.label_cols = label_cols
self.label_dict = {} # key is label_col #, value is a list of the values of each sample
self.label_names = [] #list of the field names of the label columns
self.fields = [] #list of the field names for the data columns (non-labels)
self.separator = separator
self.skipped_rows = [] #row skipped if a data field was empty
self.missing_val_sentinel = missing_val_sentinel
self._load_data()
def _parse_row(self, linedat):
linedat = linedat.strip()
tmpfields = [ fld.strip() for fld in linedat.split(self.separator)]
#are any field values empty? then badrow if missing_val_sentinel is none
if self.missing_val_sentinel is None:
if '' in tmpfields: return None
#handle label columns
for col in self.label_cols:
fieldval = tmpfields[col]
self.label_dict[col].append( fieldval )
#non-label columns are converted to floats, appended into an ordered list
#replace any non-numeric entries in tmpfields with the sentinel number
data_cols = sorted( set(range(len(tmpfields)))-set(self.label_cols) )
data_fields = [ tmpfields[i] for i in data_cols]
row_dat = [ svo_util.parse_number(s, fail=self.missing_val_sentinel) for s in data_fields ]
return row_dat
def _load_data(self):
'''
internal function that reads the csv text data and converts
the values as appropriate to the internal data representation
'''
infile = os.path.join(self.datadir,self.csvfile)
with open(infile,"r") as f:
lines = f.readlines()
first_row = self._load_header(lines)
raw_dat = lines[(first_row+1):] #drop the header row and any leading blank rows
data_list = []
for i,linedat in enumerate(raw_dat):
row_num = i + first_row + 1 #original row from csv file including header/initial blank lines
row_dat = self._parse_row(linedat)
if row_dat is None:
self.skipped_rows.append(row_num)
print "Warning: row %d of input skipped with missing values."%row_num
#print "Line data: %s"%str(linedat)
else:
data_list.append(row_dat)
self.data = np.array(data_list)
print "Loaded data matrix of size: %d by %d"%self.data.shape
print "The header row was line %d"%(first_row+1) #one-based for user
print "The label columns are: %s"%str(self.label_names)
def _load_header(self, lines):
    '''
    Locate and parse the header row: the field names live on the
    first non-empty line of the csv file.

    Side effects: creates an empty list in self.label_dict for every
    label column, records the label column names in self.label_names,
    and leaves self.fields holding only the data (non-label) column
    names, in order.  Returns the zero-based index of the header line.
    '''
    header_idx = 0
    while not lines[header_idx].strip():
        header_idx += 1

    raw_names = lines[header_idx].split(self.separator)
    self.fields = [name.strip() for name in raw_names]

    # Prepare an accumulator per label column and remember its name.
    for col in self.label_cols:
        self.label_dict[col] = []
        self.label_names.append(self.fields[col])

    # Strip label names out so self.fields describes data columns only.
    for name in self.label_names:
        self.fields.remove(name)

    return header_idx
class C45_data:
'''
Class to encapsulate functions to read data from C4.5 formatted
data mining files. Characterized by text files with .names and .data
extensions.
'''
def __init__(self, datadir, namefile, datafile, missing_val_sentinel=-9999):
    '''
    Constructor
    @param datadir: The directory where the data files are located
    @param namefile: The filename that has the .names information. You may
    specify as xyz.names or just xyz (with .names extension inferred)
    @param datafile: The filename that has the .data information. You may
    specify as xyz.data or just xyz (with .data extension inferred)
    @param missing_val_sentinel: Set to a numeric value that can uniquely identify
    fields in the data matrix where the values were missing. Matrix can be
    transformed via functions like 'replace_missing_values_with_col_means()'. Set
    to None to have an error thrown if any missing values are encountered.
    @note: Do NOT include the path in the namefile or datafile parameters. These
    are assumed to be files in the datadir specified.
    '''
    self.classes = []      # class names, parsed from line 1 of the .names file
    self.fields = []       # field (column) names
    self.fieldtypes = []   # per-field type descriptors (filled by the loaders)
    self.data = []         # data rows (filled by _loadData)
    self.labels = []       # per-row labels (filled by _loadData)
    self.datadir = datadir
    # Append the conventional extension when the caller passed a bare stem.
    self.namefile = namefile if namefile[-5:] == "names" else "%s.names"%namefile
    self.datafile = datafile if datafile[-4:] == "data" else "%s.data"%datafile
    self.missing_val_sentinel = missing_val_sentinel
    self._loadNames()
    self._loadData()
def _loadNames(self):
nfile = os.path.join(self.datadir, self.namefile)
assert os.path.exists(nfile)
self.classes = []
self.fields = []
self.fieldtypes = []
with open(nfile,"r") as f:
lines = f.readlines()
#first line is supposed to be the class names
tmp = lines[0].split(",")
self.classes = [c.strip() for c in tmp]
print "There are %d classes defined by data set: %s"%(len(self.classes), str(self.classes))
|
Som-Energia/heman | heman/config.py | Python | gpl-3.0 | 2,611 | 0.000383 | from __future__ import absolute_import
import logging
import os
from flask import Flask
from flask_pymongo import PyMongo
from raven.contrib.flask import Sentry
from heman.api import HemanAPI
api = HemanAPI(prefix='/api')
"""API object
"""
sentry = Sentry(logging=True, level=logging.ERROR)
"""Sentry object
"""
mongo = PyMongo()
"""Access to database
In other parts of the application you can do::
from heman.config import mongo
mongo.db.collection.find({"foo": "bar"})
"""
def create_app(**config):
    """Application Factory

    You can create a new He-Man application with::

        from heman.config import create_app

        app = create_app()  # app can be used as WSGI application
        app.run()           # Or you can run as a simple web server

    Keyword arguments are applied as Flask configuration overrides after
    the defaults are set, e.g. ``create_app(LOG_LEVEL='INFO')``.
    (Previously the ``config`` kwargs were accepted but silently ignored.)
    """
    app = Flask(
        __name__, static_folder=None
    )
    if 'MONGO_URI' in os.environ:
        app.config['MONGO_URI'] = os.environ['MONGO_URI']
    app.config['LOG_LEVEL'] = 'DEBUG'
    # NOTE(review): hard-coded SECRET_KEY — should come from the environment
    # or instance config; the value appears truncated in this snapshot.
    app.config['SECRET_KEY'] = '2205552d13b5431bb537732bbb051f1214414f5ab34d47'
    # Bug fix: honour caller-supplied overrides *before* wiring subsystems,
    # since configure_logging reads LOG_LEVEL from app.config.
    app.config.update(config)

    configure_logging(app)
    configure_sentry(app)
    configure_api(app)
    configure_mongodb(app)
    configure_login(app)

    return app
def configure_api(app):
    """Register every API endpoint (CCH, InfoEnergia, catch-all) on *app*."""
    from heman.api.cch import resources as cch_resources
    from heman.api.infoenergia import resources as infoenergia_resources
    from heman.api import ApiCatchall

    # Register both resource groups with one nested loop.
    for resource_group in (cch_resources, infoenergia_resources):
        for resource in resource_group:
            api.add_resource(*resource)

    api.add_resource(ApiCatchall, '/<path:path>')
    api.init_app(app)
def configure_sentry(app):
    """Attach the module-level Raven/Sentry client to *app*.

    See `Raven <http://raven.readthedocs.org/en/latest/integrations/flask.html>`_.
    """
    sentry.init_app(app)
def configure_mongodb(app):
    """Bind the module-level PyMongo client to *app*.

    See `Flask-PyMongo <https://flask-pymongo.readthedocs.org/>`_.
    """
    mongo.init_app(app)
def configure_logging(app):
    """Initialise root logging at the level named by ``app.config['LOG_LEVEL']``."""
    level_name = app.config['LOG_LEVEL']
    logging.basicConfig(level=getattr(logging, level_name))
def configure_login(app):
    """Configure login authentification

    Uses `Flask-Login <https://flask-login.readthedocs.org>`_
    """
    from heman.auth import login_manager
    from flask_login import logout_user

    login_manager.init_app(app)

    @app.teardown_request
    def force_logout(*args, **kwargs):
        # Logs the user out at the end of *every* request, so a login never
        # survives across requests. NOTE(review): confirm this per-request
        # re-authentication is intended.
        logout_user()
|
rahulunair/nova | nova/conf/keystone.py | Python | apache-2.0 | 1,257 | 0 | #
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from keystoneauth1 import loading as | ks_loading
from oslo_config import cfg
from nova.conf import utils as confutils
DEFAULT_SERVICE_TYPE = 'identity'
keystone_group = cfg.OptGroup(
'keystone',
title='Keystone Options',
help='Configuration options for the identity service')
def register_opts(conf):
    """Register the [keystone] option group and its ksa adapter options on *conf*."""
    conf.register_group(keystone_group)
    confutils.register_ksa_opts(
        conf,
        keystone_group.name,
        DEFAULT_SERVICE_TYPE,
        include_auth=False,
    )
def list_opts():
    """Return ``{group: [opts]}`` for the sample-config generator."""
    session_opts = ks_loading.get_session_conf_options()
    adapter_opts = confutils.get_ksa_adapter_opts(DEFAULT_SERVICE_TYPE)
    return {keystone_group: session_opts + adapter_opts}
|
uppsaladatavetare/foobar-api | src/wallet/migrations/0007_auto_20170218_2034.py | Python | mit | 880 | 0.002273 | # -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-02-18 20:34
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
    """Auto-generated: add WalletTransaction.internal_reference (self-FK)
    and make WalletTransaction.date_created nullable with a now() default."""

    dependencies = [
        ('wallet', '0006_auto_20170130_1430'),
    ]

    operations = [
        migrations.AddField(
            model_name='wallettransaction',
            name='internal_reference',
            # Optional self-referential link between wallet transactions.
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='wallet.WalletTransaction'),
        ),
        migrations.AlterField(
            model_name='wallettransaction',
            name='date_created',
            field=models.DateTimeField(blank=True, default=django.utils.timezone.now, editable=False, null=True, verbose_name='date created'),
        ),
    ]
|
seLain/mattermost_bot | tests/behavior_tests/run_bot.py | Python | mit | 701 | 0.00428 | #!/usr/bin/env python
from mattermost_bot.bot import Bot, PluginsManager
from mattermost_bot.mattermost import MattermostClient
from mattermost_bot.dispatcher import MessageDispatcher
import bot_settings
class LocalBot(Bot):
    """Bot wired to the behaviour-test server credentials in ``bot_settings``.

    Rebuilds the client/plugins/dispatcher stack itself instead of calling
    ``Bot.__init__`` (note: deliberately no super().__init__() call).
    """
    def __init__(self):
        self._client = MattermostClient(
            bot_settings.BOT_URL, bot_settings.BOT_TEAM,
            bot_settings.BOT_LOGIN, bot_settings.BOT_PASSWORD,
            bot_settings.SSL_VERIFY
        )
        self._plugins = PluginsManager()
        self._plugins.init_plugins()
        self._dispatcher = MessageDispatcher(self._client, self._plugins)
def main():
    """Instantiate the behaviour-test bot and run its event loop."""
    LocalBot().run()


if __name__ == '__main__':
    main()
|
speccy88/SSD1306_Edison | example1_image.py | Python | mit | 351 | 0.008547 | import SSD1306
import Image, ImageFilter  # PIL (Python 2-era import style)

# Initialise the 128x64 SSD1306 OLED over the Edison interface.
disp = SSD1306.SSD1306_128_64()
disp.begin()
disp.clear()
disp.display()

# Load the picture, scale it to the panel resolution, sharpen edges,
# and reduce to 1-bit for the monochrome display.
image = Image.open('edison.png')
image = image.resize((128,64))
#image = image.filter(ImageFilter.SHARPEN)
image = image.filter(ImageFilter.EDGE_ENHANCE_MORE)
image = image.convert('1')

# Push the frame to the panel, then invert it.
disp.image(image)
disp.display()
disp.invert()
|
harshk360/yantra_shiksha | hw8/cifar/enhanced/cifar10_eval.py | Python | gpl-3.0 | 5,485 | 0.006746 | # Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Evaluation for CIFAR-10.
Accuracy:
cifar10_train.py achieves 83.0% accuracy after 100K steps (256 epochs
of data) as judged by cifar10_eval.py.
Speed:
On a single Tesla K40, cifar10_train.py processes a single batch of 128 images
in 0.25-0.35 sec (i.e. 350 - 600 images /sec). The model reaches ~86%
accuracy after 100K steps in 8 hours of training time.
Usage:
Please see the tutorial and website for how to download the CIFAR-10
data set, compile the program and train the model.
http://tensorflow.org/tutorials/deep_cnn/
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from datetime import datetime
import math
import time
import numpy as np
import tensorflow as tf
from tensorflow.models.image.cifar10 import cifar10
FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_string('eval_dir', '/tmp/cifar10_eval',
"""Directory where to write event logs.""")
tf.app.flags.DEFINE_string('eval_data', 'test',
"""Either 'test' or 'train_eval'.""")
tf.app.flags.DEFINE_string('checkpoint_dir', '/tmp/cifar10_train',
"""Directory where to read model checkpoints.""")
tf.app.flags.DEFINE_integer('eval_interval_secs', 60 * 5,
"""How often to run the eval.""")
tf.app.flags.DEFINE_integer('num_examples', 10000,
"""Number of examples to run.""")
tf.app.flags.DEFINE_boolean('run_once', False,
"""Whether to run eval only once.""")
def eval_once(saver, summary_writer, top_k_op, summary_op):
  """Run Eval once.

  Restores the latest checkpoint, runs ``top_k_op`` over
  FLAGS.num_examples inputs, prints precision@1 and logs it as a
  TensorBoard summary.  Returns early if no checkpoint exists.

  Args:
    saver: Saver.
    summary_writer: Summary writer.
    top_k_op: Top K op.
    summary_op: Summary op.
  """
  with tf.Session() as sess:
    ckpt = tf.train.get_checkpoint_state(FLAGS.checkpoint_dir)
    if ckpt and ckpt.model_checkpoint_path:
      # Restores from checkpoint
      saver.restore(sess, ckpt.model_checkpoint_path)
      # Assuming model_checkpoint_path looks something like:
      #   /my-favorite-path/cifar10_train/model.ckpt-0,
      # extract global_step from it.
      global_step = ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1]
    else:
      print('No checkpoint file found')
      return

    # Start the queue runners (TF1 input pipeline threads).
    coord = tf.train.Coordinator()
    try:
      threads = []
      for qr in tf.get_collection(tf.GraphKeys.QUEUE_RUNNERS):
        threads.extend(qr.create_threads(sess, coord=coord, daemon=True,
                                         start=True))

      num_iter = int(math.ceil(FLAGS.num_examples / FLAGS.batch_size))
      true_count = 0  # Counts the number of correct predictions.
      total_sample_count = num_iter * FLAGS.batch_size
      step = 0
      while step < num_iter and not coord.should_stop():
        predictions = sess.run([top_k_op])
        true_count += np.sum(predictions)
        step += 1

      # Compute precision @ 1.
      precision = true_count / total_sample_count
      print('%s: precision @ 1 = %.3f' % (datetime.now(), precision))

      summary = tf.Summary()
      summary.ParseFromString(sess.run(summary_op))
      summary.value.add(tag='Precision @ 1', simple_value=precision)
      summary_writer.add_summary(summary, global_step)
    except Exception as e:  # pylint: disable=broad-except
      # Propagate the error through the coordinator so threads stop cleanly.
      coord.request_stop(e)

    coord.request_stop()
    coord.join(threads, stop_grace_period_secs=10)
def evaluate():
  """Eval CIFAR-10 for a number of steps.

  Builds the inference graph, restores moving-average weights, and calls
  eval_once() repeatedly every FLAGS.eval_interval_secs (or once when
  FLAGS.run_once is set).
  """
  with tf.Graph().as_default() as g:
    # Get images and labels for CIFAR-10.
    eval_data = FLAGS.eval_data == 'test'
    images, labels = cifar10.inputs(eval_data=eval_data)

    # Build a Graph that computes the logits predictions from the
    # inference model.
    logits = cifar10.inference(images)

    # Calculate predictions (correct iff true label is the top-1 logit).
    top_k_op = tf.nn.in_top_k(logits, labels, 1)

    # Restore the moving average version of the learned variables for eval.
    variable_averages = tf.train.ExponentialMovingAverage(
        cifar10.MOVING_AVERAGE_DECAY)
    variables_to_restore = variable_averages.variables_to_restore()
    saver = tf.train.Saver(variables_to_restore)

    # Build the summary operation based on the TF collection of Summaries.
    summary_op = tf.merge_all_summaries()
    summary_writer = tf.train.SummaryWriter(FLAGS.eval_dir, g)

    while True:
      eval_once(saver, summary_writer, top_k_op, summary_op)
      if FLAGS.run_once:
        break
      time.sleep(FLAGS.eval_interval_secs)
def main(argv=None):  # pylint: disable=unused-argument
  """Download the dataset if needed, reset the eval directory, and evaluate."""
  cifar10.maybe_download_and_extract()
  eval_dir = FLAGS.eval_dir
  if tf.gfile.Exists(eval_dir):
    tf.gfile.DeleteRecursively(eval_dir)
  tf.gfile.MakeDirs(eval_dir)
  evaluate()


if __name__ == '__main__':
  tf.app.run()
ElementsProject/elements | test/functional/rpc_txoutproof.py | Python | mit | 6,163 | 0.003894 | #!/usr/bin/env python3
# Copyright (c) 2014-2019 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test gettxoutproof and verifytxoutproof RPCs."""
from test_framework.messages import CMerkleBlock, FromHex, ToHex
from test_framework.t | est_framework import BitcoinTestFramework
from test_framework.util import assert_equal, assert_raises_rpc_error
from test_framework.wallet import MiniWallet
class MerkleBlockTest(BitcoinTestFramework):
    """Functional test for the gettxoutproof / verifytxoutproof RPCs."""

    def set_test_params(self):
        # Two nodes from a clean chain; node 1 additionally runs -txindex
        # so it can build proofs for fully-spent transactions.
        self.num_nodes = 2
        self.setup_clean_chain = True
        self.extra_args = [
            [],
            ["-txindex"],
        ]

    def run_test(self):
        """Exercise proof creation/verification across spends, blocks and error paths."""
        miniwallet = MiniWallet(self.nodes[0])
        # Add enough mature utxos to the wallet, so that all txs spend confirmed coins
        miniwallet.generate(5)
        self.nodes[0].generate(100)
        self.sync_all()

        chain_height = self.nodes[1].getblockcount()
        assert_equal(chain_height, 105)

        txid1 = miniwallet.send_self_transfer(from_node=self.nodes[0])['txid']
        txid2 = miniwallet.send_self_transfer(from_node=self.nodes[0])['txid']
        # This will raise an exception because the transaction is not yet in a block
        assert_raises_rpc_error(-5, "Transaction not yet in block", self.nodes[0].gettxoutproof, [txid1])

        self.nodes[0].generate(1)
        blockhash = self.nodes[0].getblockhash(chain_height + 1)
        self.sync_all()

        txlist = []
        blocktxn = self.nodes[0].getblock(blockhash, True)["tx"]
        txlist.append(blocktxn[1])
        txlist.append(blocktxn[2])

        # Proofs round-trip for one tx, two txs, and two txs with explicit block.
        assert_equal(self.nodes[0].verifytxoutproof(self.nodes[0].gettxoutproof([txid1])), [txid1])
        assert_equal(self.nodes[0].verifytxoutproof(self.nodes[0].gettxoutproof([txid1, txid2])), txlist)
        assert_equal(self.nodes[0].verifytxoutproof(self.nodes[0].gettxoutproof([txid1, txid2], blockhash)), txlist)

        txin_spent = miniwallet.get_utxo()  # Get the change from txid2
        tx3 = miniwallet.send_self_transfer(from_node=self.nodes[0], utxo_to_spend=txin_spent)
        txid3 = tx3['txid']
        self.nodes[0].generate(1)
        self.sync_all()

        txid_spent = txin_spent["txid"]
        txid_unspent = txid1  # Input was change from txid2, so txid1 should be unspent

        # Invalid txids
        assert_raises_rpc_error(-8, "txid must be of length 64 (not 32, for '00000000000000000000000000000000')", self.nodes[0].gettxoutproof, ["00000000000000000000000000000000"], blockhash)
        assert_raises_rpc_error(-8, "txid must be hexadecimal string (not 'ZZZ0000000000000000000000000000000000000000000000000000000000000')", self.nodes[0].gettxoutproof, ["ZZZ0000000000000000000000000000000000000000000000000000000000000"], blockhash)
        # Invalid blockhashes
        assert_raises_rpc_error(-8, "blockhash must be of length 64 (not 32, for '00000000000000000000000000000000')", self.nodes[0].gettxoutproof, [txid_spent], "00000000000000000000000000000000")
        assert_raises_rpc_error(-8, "blockhash must be hexadecimal string (not 'ZZZ0000000000000000000000000000000000000000000000000000000000000')", self.nodes[0].gettxoutproof, [txid_spent], "ZZZ0000000000000000000000000000000000000000000000000000000000000")
        # We can't find the block from a fully-spent tx
        assert_raises_rpc_error(-5, "Transaction not yet in block", self.nodes[0].gettxoutproof, [txid_spent])
        # We can get the proof if we specify the block
        assert_equal(self.nodes[0].verifytxoutproof(self.nodes[0].gettxoutproof([txid_spent], blockhash)), [txid_spent])
        # We can't get the proof if we specify a non-existent block
        assert_raises_rpc_error(-5, "Block not found", self.nodes[0].gettxoutproof, [txid_spent], "0000000000000000000000000000000000000000000000000000000000000000")
        # We can get the proof if the transaction is unspent
        assert_equal(self.nodes[0].verifytxoutproof(self.nodes[0].gettxoutproof([txid_unspent])), [txid_unspent])
        # We can get the proof if we provide a list of transactions and one of them is unspent. The ordering of the list should not matter.
        assert_equal(sorted(self.nodes[0].verifytxoutproof(self.nodes[0].gettxoutproof([txid1, txid2]))), sorted(txlist))
        assert_equal(sorted(self.nodes[0].verifytxoutproof(self.nodes[0].gettxoutproof([txid2, txid1]))), sorted(txlist))
        # We can always get a proof if we have a -txindex
        assert_equal(self.nodes[0].verifytxoutproof(self.nodes[1].gettxoutproof([txid_spent])), [txid_spent])
        # We can't get a proof if we specify transactions from different blocks
        assert_raises_rpc_error(-5, "Not all transactions found in specified or retrieved block", self.nodes[0].gettxoutproof, [txid1, txid3])
        # Test empty list
        assert_raises_rpc_error(-5, "Transaction not yet in block", self.nodes[0].gettxoutproof, [])
        # Test duplicate txid
        assert_raises_rpc_error(-8, 'Invalid parameter, duplicated txid', self.nodes[0].gettxoutproof, [txid1, txid1])

        # Now we'll try tweaking a proof.
        proof = self.nodes[1].gettxoutproof([txid1, txid2])
        assert txid1 in self.nodes[0].verifytxoutproof(proof)
        assert txid2 in self.nodes[1].verifytxoutproof(proof)

        tweaked_proof = FromHex(CMerkleBlock(), proof)

        # Make sure that our serialization/deserialization is working
        assert txid1 in self.nodes[0].verifytxoutproof(ToHex(tweaked_proof))

        # Check to see if we can go up the merkle tree and pass this off as a
        # single-transaction block
        tweaked_proof.txn.nTransactions = 1
        tweaked_proof.txn.vHash = [tweaked_proof.header.hashMerkleRoot]
        tweaked_proof.txn.vBits = [True] + [False]*7

        for n in self.nodes:
            assert not n.verifytxoutproof(ToHex(tweaked_proof))

        # TODO: try more variants, eg transactions at different depths, and
        # verify that the proofs are invalid
# Run the functional test when invoked directly.
if __name__ == '__main__':
    MerkleBlockTest().main()
|
kavigupta/61a-analysis | src/ocr.py | Python | gpl-3.0 | 1,475 | 0.002712 | """
Runs OCR on a given file.
"""
from os import system, listdir
from PIL import Image
from pytesseract import image_to_string
import ed | itdistance
from constants import DATA_DIR
def classify(image, people_class, max_classify_distance=1, min_nonclassify_distance=3):
    """
    OCR the given image file and match the recognised text against the
    keys of *people_class* (a dictionary of name -> classification).

    Returns the mapped value when exactly one key is within
    ``max_classify_distance`` edits of the OCR output; returns None when
    the match is ambiguous (two candidates) or when any key falls in the
    uncertainty band (max_classify_distance, min_nonclassify_distance].
    """
    text = image_to_string(Image.open(image)).lower()
    match = None
    for name in people_class:
        distance = editdistance.eval(name, text)
        if distance <= max_classify_distance:
            if match is not None:
                return None  # two plausible candidates: ambiguous
            match = people_class[name]
        elif distance <= min_nonclassify_distance:
            return None  # too close to call
    return match
def setup_ocr(raw_data, progress):
    """
    Grabs names from a pdf to an image

    Unzips *raw_data* into DATA_DIR/extract, then for page 3 of every PDF
    in the extracted folder renders two small PNG crops (the "left" and
    "right" name boxes) into DATA_DIR/ocr via pdftoppm.

    :param raw_data: path to the zip archive of PDFs
    :param progress: callable taking a count, returning an object with
        an ``update(index)`` method (a progress bar)
    """
    # NOTE(review): paths are interpolated straight into shell commands via
    # os.system — unsafe if any filename contains shell metacharacters.
    system("unzip {} -d {}/extract".format(raw_data, DATA_DIR))
    base = DATA_DIR + "/extract/"
    # Assumes the archive contains exactly one top-level folder — TODO confirm.
    mainfolder = base + listdir(base)[0]
    files = sorted(listdir(mainfolder))
    p_bar = progress(len(files))
    for index, path in enumerate(files):
        p_bar.update(index)
        fullpath = mainfolder + "/" + path
        system("mkdir {}/ocr".format(DATA_DIR))
        # pdftoppm: page 3 only (-f 3 -l 3), crop at x=170, 900x100 px;
        # the %s placeholders are (y offset, "left"/"right" tag).
        basic_format = r"pdftoppm -png -f 3 -l 3 -x 170 -y %s -W 900 -H 100 {} > {}/ocr/%s{}.png" \
            .format(fullpath, DATA_DIR, index)
        system(basic_format % (1030, "left"))
        system(basic_format % (1115, "right"))
|
weechat/weechat.org | weechat/context_processors.py | Python | gpl-3.0 | 1,229 | 0 | #
# Copyright (C) 2003-2022 Sébastien Helleu <flashcode@flashtux.org>
#
# This file is part of WeeChat.org.
#
# WeeChat.org is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# WeeChat.org is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with WeeChat.org. If not, see <https://www.gnu.org/licenses/>.
#
"""Context processors."""
from django.conf import settings
def theme(request):
    """Context processor: expose the active theme and the remaining themes.

    The theme comes from the ``theme`` GET parameter, then the ``theme``
    cookie, then the first entry of ``settings.THEMES``; unknown values
    fall back to the default.
    """
    requested = request.GET.get(
        'theme',
        request.COOKIES.get('theme', settings.THEMES[0]),
    )
    if requested not in settings.THEMES:
        requested = settings.THEMES[0]
    remaining = [name for name in settings.THEMES if name != requested]
    return {
        'theme': requested,
        'other_themes': remaining,
    }
|
deryckh/dcuolfg | dcuolfg/missions/models.py | Python | lgpl-3.0 | 2,609 | 0 | # Copyright 2012 Deryck Hodge. This software is licensed under the
# GNU Lesser General Public License version 3 (see the file LICENSE).
"""
Models for missions in DCUO LFG.
"""
from django.db import models
from django.utils.translation import ugettext_lazy as _
from dcuolfg.missions.managers import MissionManager
class Location(models.Model):
    """Mission Location object."""
    name = models.CharField(max_length=100)
    slug = models.SlugField()
    # Optional banner image, stored under a date-based upload path.
    image = models.ImageField(
        _('image'), upload_to='img/locations/%Y/%m/%d', blank=True)

    class Meta:
        """Meta options for Location objects."""
        db_table = 'mission_location'

    def __unicode__(self):
        """unicode representation for Location (Python 2 models file)."""
        return self.name
class Mission(models.Model):
    """Mission the base class for grouping together in DCUO LFG."""
    # NOTE(review): 'Areana PVP' looks like a typo for 'Arena PVP'
    # (display label only; left unchanged here).
    MISSION_TYPE_CHOICES = (
        (0, 'Areana PVP'),
        (1, 'Alert'),
        (2, 'Legends PVP'),
        (3, 'Raid'),
        (4, 'Duo'),
        (5, 'Bounty'),
        (6, 'Character leveling'),
        (7, 'Event'),
    )
    MISSION_MODE_CHOICES = (
        (0, 'Normal'),
        (1, 'T1'),
        (2, 'T2'),
    )
    MISSION_NUM_PLAYER_CHOICES = (
        (2, '2'),
        (4, '4'),
        (5, '5'),
        (8, '8'),
    )
    mission_type = models.IntegerField(
        _('mission type'), choices=MISSION_TYPE_CHOICES, default=1)
    mode = models.IntegerField(
        _('mode'), blank=True, null=True, choices=MISSION_MODE_CHOICES)
    name = models.CharField(_('name'), max_length=100)
    slug = models.SlugField()
    nick_name = models.CharField(_('nick name'), max_length=25)
    location = models.ForeignKey(
        Location, verbose_name=_('location'), related_name='missions')
    num_players = models.IntegerField(
        _('number of players'), choices=MISSION_NUM_PLAYER_CHOICES, default=4)
    about = models.TextField(_('about'), blank=True)
    featured = models.BooleanField(_('featured'), default=False)
    objects = MissionManager()

    class Meta():
        """Meta options for Mission."""
        db_table = 'mission'

    def __unicode__(self):
        """unicode representation of a Mission.

        'Location: Name', collapsed to just the name when the mission
        and its location share the same name.
        """
        name = self.name
        location_name = self.location.name
        if name != location_name:
            full_name = '%s: %s' % (self.location.name, self.name)
        else:
            full_name = name
        return full_name

    @models.permalink
    def get_absolute_url(self):
        """Returns the URL for a Mission."""
        return ('mission_index', (), {'slug': self.slug})
|
PhonologicalCorpusTools/PolyglotDB | polyglotdb/acoustics/pitch/base.py | Python | mit | 10,009 | 0.003397 | import math
from datetime import datetime
from conch import analyze_segments
from conch.analysis.segments import SegmentMapping
from .helper import generate_pitch_function
from ..segments import generate_utterance_segments
from ...exceptions import SpeakerAttributeError
from ..classes import Track, TimePoint
from ..utils import PADDING
def analyze_utterance_pitch(corpus_context, utterance, source='praat', min_pitch=50, max_pitch=500,
                            **kwargs):
    """Compute a pitch (F0) track for one utterance and return it.

    Looks up the utterance's sound file via a Cypher query, runs the
    selected pitch analyzer ('praat' or 'reaper') over the segment, and
    collects voiced frames (F0 > 0) into a Track.  Also registers the
    'pitch' acoustic property on the hierarchy if it is missing.

    :param utterance: an utterance id string or an object with an ``id``
    :param kwargs: may carry ``padding`` (seconds added around the segment)
    """
    if isinstance(utterance, str):
        utterance_id = utterance
    else:
        utterance_id = utterance.id

    padding = kwargs.pop('padding', None)
    if padding is None:
        padding = PADDING
    utt_type = corpus_context.hierarchy.highest

    statement = '''MATCH (s:Speaker:{corpus_name})-[r:speaks_in]->(d:Discourse:{corpus_name}),
    (u:{utt_type}:{corpus_name})-[:spoken_by]->(s),
    (u)-[:spoken_in]->(d)
    WHERE u.id = $utterance_id
    RETURN u, d, r.channel as channel'''.format(corpus_name=corpus_context.cypher_safe_name,
                                               utt_type=utt_type)
    results = corpus_context.execute_cypher(statement, utterance_id=utterance_id)
    segment_mapping = SegmentMapping()
    for r in results:
        channel = r['channel']
        file_path = r['d']['vowel_file_path']
        u = r['u']
        segment_mapping.add_file_segment(file_path, u['begin'], u['end'], channel, padding=padding)
    path = None
    if source == 'praat':
        path = corpus_context.config.praat_path
    elif source == 'reaper':
        path = corpus_context.config.reaper_path
    pitch_function = generate_pitch_function(source, min_pitch, max_pitch, path=path)
    track = Track()
    for seg in segment_mapping:
        output = pitch_function(seg)
        for k, v in output.items():
            if v['F0'] is None or v['F0'] <= 0:
                continue  # drop unvoiced/invalid frames
            p = TimePoint(k)
            p.add_value('F0', v['F0'])
            track.add(p)
    if 'pitch' not in corpus_context.hierarchy.acoustics:
        corpus_context.hierarchy.add_acoustic_properties(corpus_context, 'pitch', [('F0', float)])
        corpus_context.encode_hierarchy()
    return track
def update_utterance_pitch_track(corpus_context, utterance, new_track):
    """Replace the stored pitch track of one utterance with *new_track*.

    Deletes the utterance's existing 'pitch' points from InfluxDB, then
    writes the new (time, F0) points tagged with speaker/discourse/channel
    and labelled with the phone active at each time point.  Also stamps
    ``pitch_last_edited`` on the utterance node.  Returns that timestamp.
    """
    from ...corpus.audio import s_to_ms, s_to_nano
    if isinstance(utterance, str):
        utterance_id = utterance
    else:
        utterance_id = utterance.id
    today = datetime.utcnow()
    utt_type = corpus_context.hierarchy.highest
    phone_type = corpus_context.hierarchy.lowest
    time_stamp = today.timestamp()
    statement = '''MATCH (s:Speaker:{corpus_name})-[r:speaks_in]->(d:Discourse:{corpus_name}),
    (u:{utt_type}:{corpus_name})-[:spoken_by]->(s),
    (u)-[:spoken_in]->(d),
    (p:{phone_type}:{corpus_name})-[:contained_by*]->(u)
    WHERE u.id = $utterance_id
    SET u.pitch_last_edited = $date
    RETURN u, d, r.channel as channel, s, collect(p) as p'''.format(
        corpus_name=corpus_context.cypher_safe_name,
        utt_type=utt_type, phone_type=phone_type)
    results = corpus_context.execute_cypher(statement, utterance_id=utterance_id, date=time_stamp)
    # NOTE(review): the loop bindings below are used *after* the loop, so the
    # last returned row wins; presumably the query yields exactly one row.
    for r in results:
        channel = r['channel']
        discourse = r['d']['name']
        speaker = r['s']['name']
        u = r['u']
        phones = r['p']
    client = corpus_context.acoustic_client()
    # Wipe the existing pitch points inside the utterance's time span.
    query = '''DELETE from "pitch"
    where "discourse" = '{}'
    and "speaker" = '{}'
    and "time" >= {}
    and "time" <= {};'''.format(discourse, speaker, s_to_nano(u['begin']), s_to_nano(u['end']))
    result = client.query(query)
    data = []
    for data_point in new_track:
        # (no-op assignment kept from the original source)
        speaker, discourse, channel = speaker, discourse, channel
        time_point, value = data_point['time'], data_point['F0']
        t_dict = {'speaker': speaker, 'discourse': discourse, 'channel': channel}
        # Find the label of the phone whose span covers this time point.
        label = None
        for i, p in enumerate(sorted(phones, key=lambda x: x['begin'])):
            if p['begin'] > time_point:
                break
            label = p['label']
            if i == len(phones) - 1:
                break
        else:
            # for/else: only reached when there are no phones at all
            label = None
        if label is None:
            continue
        fields = {'phone': label, 'utterance_id': u['id']}
        try:
            if value is None:
                continue
            value = float(value)
        except TypeError:
            continue
        if value <= 0:
            continue  # skip unvoiced/invalid frames
        fields['F0'] = value
        d = {'measurement': 'pitch',
             'tags': t_dict,
             'time': s_to_ms(time_point),
             'fields': fields
             }
        data.append(d)
    client.write_points(data, batch_size=1000, time_precision='ms')
    if 'pitch' not in corpus_context.hierarchy.acoustics:
        corpus_context.hierarchy.acoustics.add('pitch')
        corpus_context.encode_hierarchy()
    return time_stamp
def analyze_pitch(corpus_context,
source='praat',
algorithm='base',
call_back=None,
absolute_min_pitch=50,
absolute_max_pitch=500,
adjusted_octaves=1,
stop_check=None, multiprocessing=True):
"""
Parameters
----------
corpus_context : :class:`~polyglotdb.corpus.audio.AudioContext`
source : str
Program to use for analyzing pitch, either ``praat`` or ``reaper``
algorithm : str
Algorithm to use, ``base``, ``gendered``, or ``speaker_adjusted``
absolute_min_pitch : int
Absolute pitch floor
absolute_max_pitch : int
Absolute pitch ceiling
adjusted_octaves : int
How many octaves around the speaker's mean pitch to set the speaker adjusted pitch floor and ceiling
stop_check : callable
Function to check whether processing should stop early
call_back : callable
Function to report progress
multiprocessing : bool
Flag whether to use multiprocessing or threading
Returns
-------
"""
if not 'utterance' in corpus_context.hierarchy:
raise (Exception('Must encode utterances before pitch can be analyzed'))
segment_mapping = generate_utterance_segments(corpus_context, padding=PADDING).grouped_mapping('speaker')
num_speakers = len(segment_mapping)
path = None
if source == 'praat':
path = corpus_context.config.praat_path
# kwargs = {'si | lence_threshold': 0.03,
# 'voicing_threshold': 0.45, 'octave_cost': 0.01, 'octave_jump_cost': 0.35,
# 'voiced_unvoiced_cost': 0.14}
elif source == 'reaper':
path = corpus_context.config.reaper_path
# kwargs = None
pitch_function = generate_pitch_function(source, absolute_min_pitch, absolute_max_pitch,
path=path)
if 'pitch' not in corpus_c | ontext.hierarchy.acoustics:
corpus_context.hierarchy.add_acoustic_properties(corpus_context, 'pitch', [('F0', float)])
corpus_context.encode_hierarchy()
if algorithm == 'speaker_adjusted':
speaker_data = {}
if call_back is not None:
call_back('Getting original speaker means and SDs...')
for i, ((k,), v) in enumerate(segment_mapping.items()):
if call_back is not None:
call_back('Analyzing speaker {} ({} of {})'.format(k, i, num_speakers))
output = analyze_segments(v, pitch_function, stop_check=stop_check, multiprocessing=multiprocessing)
sum_pitch = 0
n = 0
for seg, track in output.items():
for t, v in track.items():
v = v['F0']
if v is not None and v > 0: # only voiced frames
n += 1
sum_pitch += v
mean_pitch = sum_pitch / n
speaker_data[k] = int(mean_pitch / math.pow(2, adjusted_octaves)), \
|
florentmercier/BtoVLE | src/main.py | Python | gpl-2.0 | 1,659 | 0.024714 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# sans titre.py
#
# Copyright 2013 Florent Mercier <contact@florentmercier.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
#
import sys
import os
import distutils.dir_util
from Namespace import *
from Model import *
from Controller import *
from Environment import *
from Function import *
from Init import *
from Output import *
from TimeAdvance import *
from Inter | nalTransition import *
from ExternalTransition import *
from Observation import *
from Event import *
from strings import *
def main():
    """Command-line entry point: generate a VLE package from a model file.

    Usage: main.py <model-file> <second-arg>  (Python 2 source).
    Copies the bundled package template into ~/.vle/pkgs/<name>, then
    writes the generated sources via Namespace.
    """
    # MAN
    if (len(sys.argv)<3):
        print BTOVLE_MAN
        sys.exit(0);
    # Opening project:
    cwd = os.path.split(sys.argv[0])[0]
    projectName = os.path.basename(sys.argv[1])
    path = os.path.join(os.path.expanduser("~/.vle/pkgs"),projectName)
    # Seed the target package directory from the shipped template.
    distutils.dir_util.copy_tree(os.path.join(cwd,"..","template"),path)
    os.chdir(path)
    Namespace(sys.argv[1], sys.argv[2]).write()

if __name__ == '__main__':
    main()
a13m/ansible | v2/ansible/executor/task_executor.py | Python | gpl-3.0 | 16,471 | 0.004432 | # (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible import constants as C
from ansible.errors import AnsibleError, AnsibleParserError
from ansible.executor.connection_info import ConnectionInformation
from ansible.playbook.conditional import Conditional
from ansible.playbook.task import Task
from ansible.plugins import lookup_loader, connection_loader, action_loader
from ansible.utils.listify import listify_lookup_plugin_terms
from ansible.utils.debug import debug
__all__ = ['TaskExecutor']
import json
import time
class TaskExecutor:
'''
This is the main worker class for the executor pipeline, which
handles loading an action plugin to actually dispatch the task to
a given host. This class roughly corresponds to the old Runner()
class.
'''
    def __init__(self, host, task, job_vars, connection_info, loader, module_loader):
        # Capture the per-task execution state; no work happens here --
        # the actual dispatch is done later by run()/_execute().
        self._host = host
        self._task = task
        self._job_vars = job_vars
        self._connection_info = connection_info
        self._loader = loader
        self._module_loader = module_loader
def run(self):
'''
The main executor entrypoint, where we determine if the specified
task requires looping and either runs the task with
'''
debug("in run()")
try:
# lookup plugins need to know if this task is executing from
# a role, so that it can properly find files/templates/etc.
roledir = None
if self._task._role:
roledir = self._task._role._role_path
self._job_vars['roledir'] = roledir
items = self._get_loop_items()
if items is not None:
if len(items) > 0:
item_results = self._run_loop(items)
res = dict(results=item_results)
else:
res = dict(changed=False, skipped=True, skipped_reason='No items in the list', results=[])
else:
debug("calling self._execute()")
res = self._execute()
debug("_execute() done")
# make sure changed is set in the result, if it's not present
if 'changed' not in res:
res['changed'] = False
debug("dumping result to json")
result = json.dumps(res)
debug("done dumping result, returning")
return result
except AnsibleError, e:
return dict(failed=True, msg=str(e))
def _get_loop_items(self):
'''
Loads a lookup plugin to handle the with_* portion of a task (if specified),
and returns the items result.
'''
items = None
if self._task.loop and self._task.loop in lookup_loader:
loop_terms = listify_lookup_plugin_terms(terms=self._task.loop_args, variables=self._job_vars, loader=self._loader)
items = lookup_loader.get(self._task.loop, loader=self._loader).run(terms=loop_terms, variables=self._job_vars)
return items
def _run_loop(self, items):
'''
Runs the task with the loop items specified and collates the result
into an array named 'results' which is inserted into the final result
along with the item for which the loop ran.
'''
results = []
# make copies of the job vars and task so we can add the item to
# the variables and re-validate the task with the item variable
task_vars = self._job_vars.copy()
items = self._squash_items(items, task_vars)
for item in items:
task_vars['item'] = item
try:
tmp_task = self._task.copy()
except AnsibleParserError, e:
results.append(dict(failed=True, msg=str(e)))
continue
# now we swap the internal task with the copy, execute,
# and swap them back so we can do the next iteration cleanly
(self._task, tmp_task) = (tmp_task, self._task)
res = self._execute(variables=task_vars)
(self._task, tmp_task) = (tmp_task, self._task)
# now update the result with the item info, and append the result
# to the list of results
res['item'] = item
results.append(res)
# FIXME: we should be sending back a callback result for each item in the loop here
print(res)
return results
def _squash_items(self, items, variables):
'''
Squash items down to a comma-separated list for certain modules which support it
(typically package management modules).
'''
if len(items) > 0 and self._task.action in ('apt', 'yum', 'pkgng', 'zypper'):
final_items = []
for item in items:
variables['item'] = item
if self._task.evaluate_conditional(variables):
final_items.append(item)
return [",".join(final_items)]
else:
return items
def _execute(self, variables=None):
'''
The primary workhorse of the executor system, this runs the task
on the specified host (which may be the delegated_to host) and handles
the retry/until and block rescue/always execution
'''
if variables is None:
variables = self._job_vars
# fields set from the play/task may be based on variables, so we have to
# do the same kind of post validation step on it here before we use it
self._connection_info.post_validate(variables=variables, loader=self._loader)
# get the connection and the handler for this execution
| self._connection = self._get_connection(variables)
self._handler = self._get_action_handler(connection=self._connection)
# Evaluate the conditional (if any) for this task, which we do before running
# the final task post-validation. We do this before the post validation due to
# the fact that the conditional may specify that the task be skipped due to a
# | variable not being present which would otherwise cause validation to fail
if not self._task.evaluate_conditional(variables):
debug("when evaulation failed, skipping this task")
return dict(changed=False, skipped=True, skip_reason='Conditional check failed')
# Now we do final validation on the task, which sets all fields to their final values
self._task.post_validate(variables)
# if this task is a TaskInclude, we just return now with a success code so the
# main thread can expand the task list for the given host
if self._task.action == 'include':
include_variables = self._task.args.copy()
include_file = include_variables.get('_raw_params')
del include_variables['_raw_params']
return dict(changed=True, include=include_file, include_variables=include_variables)
# And filter out any fields which were set to default(omit), and got the omit token value
omit_token = variables.get('omit')
if omit_token is not None:
self._task.args = dict(filter(lambda x: x[1] != omit_token, self._task.args.iteritems()))
# Read some values from the task, so that we can modify t |
kjemmett/TARGet | bindings/python/dionysus/viewer/diagram.py | Python | gpl-3.0 | 5,443 | 0.009186 | from PyQt4 import QtGui, QtCore
from math import fabs
class DiagramPoint(QtGui.QGraphicsEllipseItem):
    """A selectable point of a persistence diagram, drawn as a filled disc.

    The underlying diagram point is kept in ``self.p`` so the viewer can
    report which point was clicked.
    """

    # (fill, border) color pairs, cycled per-diagram via color()
    _colors = [(QtCore.Qt.red, QtGui.QColor(225, 0, 0)),
               (QtCore.Qt.blue, QtGui.QColor(0, 0, 225)),
               (QtCore.Qt.green, QtGui.QColor(0, 225, 0)),
              ]

    def __init__(self, x, y, p, infty=False, color=0):
        # NOTE(review): the original deliberately(?) invokes the
        # grandparent initializer; kept verbatim to preserve behavior.
        super(QtGui.QGraphicsEllipseItem, self).__init__()
        fill, border = self.color(color)
        self.setBrush(QtGui.QBrush(fill))
        self.setPen(QtGui.QPen(border))
        # points at infinity are drawn twice as large
        self.radius = .075 * 2 if infty else .075
        self.x = x
        self.y = y
        self.scale(1)
        self.p = p

    def scale(self, delta):
        """Grow/shrink the drawn disc by *delta*, keeping it centered."""
        self.radius *= delta
        r = self.radius
        self.setRect(self.x - r, self.y - r, 2 * r, 2 * r)

    def color(self, i):
        """Return the (fill, border) pair for diagram index *i*."""
        return self._colors[i % len(self._colors)]
class DiagramViewer(QtGui.QGraphicsView):
    """Interactive view of one or more persistence diagrams.

    Left-click a point to select it (stored in ``self.selection``) and close
    the window; right-drag pans; the mouse wheel zooms.
    """
    def __init__(self, dgm, noise):
        super(QtGui.QGraphicsView, self).__init__()

        self.selection = None
        self._pan = False

        self.setRenderHint(QtGui.QPainter.Antialiasing)

        self.scene = QtGui.QGraphicsScene(self)
        self.setScene(self.scene)

        if not isinstance(dgm, list):
            # Assume it's just a single diagram
            dgms = [dgm]
        else:
            dgms = dgm

        inf = float('inf')

        # bounding box over all diagrams; infinities are filtered out
        # before taking the max so they can be clamped below
        xs = [p[0] for d in dgms for p in d]
        ys = [p[1] for d in dgms for p in d]
        minx = min(0, min(xs) if xs else 0)
        miny = min(0, min(ys) if ys else 0)
        xs = [x for x in xs if x != inf]
        ys = [y for y in ys if y != inf]
        maxx = max(0, max(xs) if xs else 0)
        maxy = max(0, max(ys) if ys else 0)

        self.draw_axes(minx,miny,maxx,maxy)

        for i, dgm in enumerate(dgms):
            for p in dgm:
                x,y = p[0],p[1]
                # points within `noise` of the diagonal are not shown
                if fabs(y - x) < noise:
                    continue
                if fabs(x) == inf or fabs(y) == inf:
                    # clamp points at infinity just outside the bounding box
                    if x == inf: x = maxx + 2
                    if y == inf: y = maxy + 2
                    if x == -inf: x = minx - 2
                    if y == -inf: y = miny - 2
                # NOTE(review): the " | " below looks like an extraction
                # artifact; presumably this line is indented one level
                # deeper, inside the `if` above -- confirm upstream.
                | item = DiagramPoint(x,y,p, infty = True, color = i)
                else:
                    item = DiagramPoint(x,y,p, color = i)
                self.scene.addItem(item)

        # Flip y-axis
        self.scale(1, -1)

        # Set the correct view
        rect = self.scene.itemsBoundingRect()
        self.fitInView(rect, QtCore.Qt.KeepAspectRatio)

    def mousePressEvent(self, event):
        # right button starts panning; any other button tries to select
        # the diagram point under the cursor
        if event.button() == QtCore.Qt.RightButton:
            self._pan = True
            # NOTE(review): the " | " below looks like an extraction
            # artifact; the intended statement is presumably
            # self._panStartX = event.x() -- confirm upstream.
            self._ | panStartX = event.x()
            self._panStartY = event.y()
            self.setCursor(QtCore.Qt.ClosedHandCursor)
            event.accept()
        else:
            p = self.mapToScene(event.pos())
            item = self.scene.itemAt(p)
            if isinstance(item, DiagramPoint):
                self.selection = item.p
                self.close()

    def mouseReleaseEvent(self, event):
        # right-button release ends panning
        if event.button() == QtCore.Qt.RightButton:
            self._pan = False
            self.setCursor(QtCore.Qt.ArrowCursor)
            event.accept()
            return
        event.ignore()

    def mouseMoveEvent(self, event):
        # while panning, scroll by the mouse delta since the last event
        if self._pan:
            self.horizontalScrollBar().setValue(self.horizontalScrollBar().value() - (event.x() - self._panStartX))
            self.verticalScrollBar().setValue(self.verticalScrollBar().value() - (event.y() - self._panStartY))
            self._panStartX = event.x()
            self._panStartY = event.y()
            event.accept()
            return
        event.ignore()

    def wheelEvent(self, event):
        # zoom the view, but rescale the point markers inversely so they
        # keep a constant on-screen size
        delta = 1 + float(event.delta())/100
        if delta < 0:
            event.ignore()
            return
        self.scale(delta, delta)
        for item in self.scene.items():
            if isinstance(item, DiagramPoint):
                item.scale(1/delta)
        event.accept()

    def draw_axes(self, minx, miny, maxx, maxy):
        # Draw axes and diagonal
        if maxx > 0:
            self.scene.addItem(QtGui.QGraphicsLineItem(0,0, maxx, 0))
        if minx < 0:
            self.scene.addItem(QtGui.QGraphicsLineItem(minx,0, 0, 0))
        if maxy > 0:
            self.scene.addItem(QtGui.QGraphicsLineItem(0,0, 0, maxy))
        if miny < 0:
            self.scene.addItem(QtGui.QGraphicsLineItem(0,miny, 0, 0))
        self.scene.addItem(QtGui.QGraphicsLineItem(0,0, min(maxx, maxy), min(maxx, maxy)))
        self.scene.addItem(QtGui.QGraphicsLineItem(max(minx,miny), max(minx,miny), 0,0))

        # Dashed, gray integer lattice
        pen = QtGui.QPen(QtCore.Qt.DashLine)
        pen.setColor(QtCore.Qt.gray)
        # xrange: this module is Python 2 (consistent with its PyQt4 use)
        for i in xrange(min(0, int(minx)) + 1, max(0,int(maxx)) + 1):
            line = QtGui.QGraphicsLineItem(i,0, i, maxy)
            line.setPen(pen)
            self.scene.addItem(line)
        for i in xrange(min(0, int(miny)) + 1, max(0, int(maxy)) + 1):
            line = QtGui.QGraphicsLineItem(0,i, maxx, i)
            line.setPen(pen)
            self.scene.addItem(line)
def show_diagram(dgm, noise, app):
    """Display the diagram(s) in a blocking viewer window.

    Runs the Qt event loop until the window is closed and returns the point
    the user clicked, or None if nothing was selected.
    """
    viewer = DiagramViewer(dgm, noise)
    viewer.show()
    viewer.raise_()
    app.exec_()
    return viewer.selection
|
unique-horn/ppap | ppap/layers/HyperNetwork_max.py | Python | gpl-3.0 | 4,797 | 0.003961 | """
This is an extension of the HyperNetwork, where the layer specific input z's
will be conditioned on the input. We first find the max of the input across
the feature channel, then we project this reduced input to the dimension of
z's, which is 4 here.
"""
import keras.backend as K
from keras.engine.topology import Layer
from keras.utils.np_utils import conv_output_length
from keras import initializations
import numpy as np
class HyperNetwork_max(Layer):
    """
    Keras conv layer whose filter weights are produced at call time by a
    HyperNetwork-style generator conditioned on the layer input (the input
    is max-reduced over the feature channel and projected to the
    embedding z inside HyperNetwork_gen).
    """
    def __init__(self,
                 weight_shape,
                 hidden_dim,
                 strides=(1, 1),
                 border_mode="same",
                 nb_filters=1,
                 dim_ordering="th",
                 **kwargs):
        """
        :param weight_shape: spatial shape (rows, cols) of each generated filter
        :param hidden_dim: width of the generator's hidden layer
        :param strides: convolution strides
        :param border_mode: convolution border mode ("same"/"valid")
        :param nb_filters: number of convolution filters to generate
        :param dim_ordering: "th" = channels-first ordering (assumed in build)
        """
        self.strides = strides
        self.dim_ordering = dim_ordering
        self.border_mode = border_mode
        self.weight_shape = weight_shape
        self.nb_filters = nb_filters
        self.hidden_dim = hidden_dim
        super().__init__(**kwargs)

    def build(self, input_dim):
        # input_dim is (batch, channels, rows, cols) under "th" ordering
        self.input_channels = input_dim[1]
        rows, cols = input_dim[2], input_dim[3]
        self.ppn_gen = HyperNetwork_gen(output_shape=self.weight_shape,
                                        rows=rows, cols=cols,
                                        hidden_dim=self.hidden_dim,
                                        num_filters=self.nb_filters,
                                        input_channels=self.input_channels)
        self.gen_weights = self.ppn_gen.weights # Weight of the generator
        self.gen_bias = self.ppn_gen.biases
        self.b = K.zeros((self.nb_filters))
        # the generator's parameters (plus the conv bias) are trained, not
        # the generated filters themselves
        self.trainable_weights = self.gen_weights + self.gen_bias + [self.b]
        self.built = True

    def call(self, x, mask=None):
        # filters are regenerated from the current input on every call
        self.W = self.ppn_gen.setup_output(x) # PPN generator output, used as filter
        # self.non_trainable_weights = [self.W]
        output = K.conv2d(x, self.W, border_mode=self.border_mode,
                          strides=self.strides)
        output += K.reshape(self.b, (1, self.nb_filters, 1, 1))
        return output

    # NOTE(review): the " | " in the signature below looks like an
    # extraction artifact; the intended line is presumably
    # "def get_output_shape_for(self, input_shape):" -- confirm upstream.
    def get_output_shape_for( | self, input_shape):
        rows = input_shape[2]
        cols = input_shape[3]
        rows = conv_output_length(rows, self.weight_shape[0],
                                  self.border_mode, self.strides[0])
        cols = conv_output_length(cols, self.weight_shape[1],
                                  self.border_mode, self.strides[1])
        return (input_shape[0], self.nb_filters, rows, cols)
|
class HyperNetwork_gen(object):
    """
    Feed-forward filter generator used by HyperNetwork_max.

    setup_output() conditions the generated filters on the layer input:
    the input is max-reduced over the feature channel, flattened and
    projected down to a 4-dimensional embedding z, which is then expanded
    through a hidden layer into the filter weights.
    """
    def __init__(self,
                 input_channels,
                 rows, cols,
                 output_shape,
                 num_filters,
                 hidden_dim,
                 init="glorot_uniform"):
        """
        Parameters
        ----------
        input_channels : int
            Number of feature channels of the layer input
        rows, cols : int
            Spatial size of the layer input
        output_shape : list_like
            Spatial size (x, y) of each generated filter
        num_filters : int
            Number of filters to generate
        hidden_dim : int
            Width of the generator's hidden layer
        init : str
            Keras initializer to use for weights
        """
        self.input_rows = rows
        self.input_cols = cols
        self.input_channels = input_channels
        self.num_filters = num_filters
        self.output_shape = output_shape
        self.hidden_dim = hidden_dim
        self.init = initializations.get(init)
        self.bias_init = initializations.get("zero")
        self.setup_weights()
        # total number of generated (not trained) filter parameters
        self.num_param = np.prod(self.output_shape) * self.num_filters * \
                         self.input_channels

    def setup_weights(self):
        """
        Setup weights for the generator
        """
        # Layers with input and output
        self.w_proj_to_z = self.init((self.input_cols * self.input_rows, 4))
        # self.b_proj_to_z = self.bias_init((4))
        w1 = self.init((4, self.hidden_dim * self.input_channels))
        b1 = self.bias_init((self.hidden_dim * self.input_channels))
        w2 = self.init((self.hidden_dim, np.prod(self.output_shape) *
                        self.num_filters)) # (hid X 3*3*33)
        b2 = self.bias_init((np.prod(self.output_shape) *
                             self.num_filters))
        # self.z = self.init((1, 4))
        self.weights = [w1, w2, self.w_proj_to_z]
        self.biases = [b1, b2]

    def setup_output(self, x):
        """
        Setup output tensor
        """
        # condition z on the input: max over the channel axis, flatten,
        # then project to the 4-dim embedding
        x_max = K.max(x, axis=1)
        x_max = K.flatten(x_max)
        z = K.dot(x_max, self.w_proj_to_z) #+ self.b_proj_to_z
        hidden = K.dot(z, self.weights[0]) + self.biases[0]
        hidden = K.reshape(hidden, shape=(self.input_channels,
                                          self.hidden_dim))
        output = K.dot(hidden, self.weights[1]) + self.biases[1]
        self.output = K.reshape(output, (self.num_filters, self.input_channels,
                                         *self.output_shape))
        # NOTE(review): the trailing " |" below appears to be an extraction
        # artifact -- confirm against the upstream file.
        return self.output |
DPRL/CROHME_2014 | src/symbol_classifier.py | Python | gpl-3.0 | 3,883 | 0.011589 | ## DPRL CROHME 2014
## Copyright (c) 2013-2014 Lei Hu, Kenny Davila, Francisco Alvaro, Richard Zanibbi
##
## This file is part of DPRL CROHME 2014.
| ##
## DPRL CROHME 2014 is free software:
## you can redistribute it and/or modify it under the terms of the GNU
## General Public License as published by the Free Software Foundation,
## either version 3 of the License, or (at your option) any later version.
##
## DPRL CROHME | 2014 is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with DPRL CROHME 2014.
## If not, see <http://www.gnu.org/licenses/>.
##
## Contact:
## - Lei Hu: lei.hu@rit.edu
## - Kenny Davila: kxd7282@rit.edu
## - Francisco Alvaro: falvaro@dsic.upv.es
## - Richard Zanibbi: rlaz@cs.rit.edu
import cPickle
import numpy as np
from mathSymbol import *
class SymbolClassifier:
    """Loads a pickled symbol classifier (plus its class mapping and feature
    scaler) and classifies hand-drawn trace groups into symbol labels."""

    def __init__(self, training_filename, mapping_filename):
        """
        :param training_filename: pickle file holding the trained classifier
        :param mapping_filename: pickle file holding, in order: the class
            dict, the class list and the feature scaler
        """
        # load classifier parameters; 'with' guarantees the files are
        # closed even if unpickling raises
        with open(training_filename, 'rb') as training_file:
            self.classifier = cPickle.load(training_file)

        # load classifier mapping and scaler (pickled sequentially)
        with open(mapping_filename, 'rb') as mapping_file:
            self.classes_dict = cPickle.load(mapping_file)
            self.classes_list = cPickle.load(mapping_file)
            self.scaler = cPickle.load(mapping_file)

        # NOTE: debug output kept for behavioral compatibility
        print(len(self.classes_list))

        # by default, features are scaled before classification
        self.apply_scaler = True

    def createSymbol(self, trace_group):
        """Build a normalized MathSymbol from (trace_id, points) pairs,
        applying the standard trace pre-processing pipeline."""
        traces = []
        for trace_id, points_f in trace_group:
            object_trace = TraceInfo(trace_id, points_f)

            # general trace pre-processing:
            # 1) remove duplicated points
            object_trace.removeDuplicatedPoints()
            # 2) interpolate missing points
            object_trace.addMissingPoints()
            # 3) smooth the trace
            object_trace.applySmoothing()

            # smoothing should not reintroduce duplicates, but if it does,
            # clean them up again
            if object_trace.hasDuplicatedPoints():
                object_trace.removeDuplicatedPoints()

            traces.append(object_trace)

        # the label is unknown until classification
        new_symbol = MathSymbol(0, traces, '{Unknown}')
        new_symbol.normalize()

        return new_symbol

    def classify(self, trace_group):
        """Return the list of per-class confidences for a trace group."""
        symbol = self.createSymbol(trace_group)
        features = symbol.getFeatures()
        matrix_features = np.matrix([features])

        if self.apply_scaler:
            matrix_features = self.scaler.transform(matrix_features)

        predicted = self.classifier.predict_proba(matrix_features)
        # flatten the (1, n_classes) probability matrix to a plain list
        return np.array(predicted).reshape(-1).tolist()

    def mostProbableLabel(self, confidences):
        """Return (label, confidence) of the highest-confidence class.

        The first index wins ties, matching the original behavior.
        (range replaces the Python-2-only xrange; equivalent here.)
        """
        most_probable = 0
        for i in range(1, len(confidences)):
            if confidences[i] > confidences[most_probable]:
                most_probable = i
        return self.classes_list[most_probable], confidences[most_probable]

    def topNLabels(self, confidences, n_top):
        """Return the n_top (label, confidence) pairs, best first."""
        scored = [(confidences[i], self.classes_list[i])
                  for i in range(len(self.classes_list))]
        scored.sort(reverse=True)
        return [(label, conf) for conf, label in scored[:n_top]]
|
kartotherian/meddo | get-external-data.py | Python | mit | 9,549 | 0.010786 | #!/usr/bin/env python3
# This script is designed to load quasi-static data into a PostGIS database for
# rendering maps. It differs from the usual scripts to do this in that it is
# designed to take its configuration from a file rather than be a series of
# shell commands, and consideration has been given to doing the loading the
# right way.
#
# Some implicit assumptions are
# - Time spent querying (rendering) the data is more valuable than the one-time
# cost of loading it
# - The script will not be running multiple times in parallel. This is not
# normally likely because the script is likely to be called daily or less,
# not minutely.
# - Usage patterns will be similar to typical map rendering
import yaml
import os
import re
import argparse
import shutil
# modules for getting data
import zipfile
import requests
import io
# modules for converting and postgres loading
import subprocess
import psycopg2
import logging
def database_setup(conn, temp_schema, schema, metadata_table):
    """Ensure the metadata bookkeeping table exists in the target schema.

    The table maps each external table name to the Last-Modified value of
    the data it was loaded from. (temp_schema is unused here; it is kept
    for signature compatibility with the rest of the loader.)
    """
    ddl = ('''CREATE TABLE IF NOT EXISTS "{schema}"."{metadata_table}" (name text primary key, last_modified text);'''
           .format(schema=schema, metadata_table=metadata_table))
    with conn.cursor() as cur:
        cur.execute(ddl)
    conn.commit()
class Table:
    """One externally-loaded table: manages its staging copy in the
    temporary schema, its indexes, and the metadata row that tracks the
    freshness of the loaded data."""

    def __init__(self, name, conn, temp_schema, schema, metadata_table):
        self._name = name
        self._conn = conn
        self._temp_schema = temp_schema
        self._dst_schema = schema
        self._metadata_table = metadata_table

    # Clean up the temporary schema in preparation for loading
    def clean_temp(self):
        """Drop any leftover staging table from a previous (aborted) run."""
        with self._conn.cursor() as cur:
            cur.execute('''DROP TABLE IF EXISTS "{temp_schema}"."{name}"'''.format(name=self._name, temp_schema=self._temp_schema))
            self._conn.commit()

    # get the last modified date from the metadata table
    def last_modified(self):
        """Return the stored Last-Modified value for this table, or None if
        the table has never been loaded."""
        with self._conn.cursor() as cur:
            cur.execute('''SELECT last_modified FROM "{schema}"."{metadata_table}" WHERE name = %s'''.format(schema=self._dst_schema, metadata_table=self._metadata_table), [self._name])
            results = cur.fetchone()
            if results is not None:
                return results[0]

    def index(self):
        """Finalize the staged table: drop ogr's surrogate key column,
        cluster by geohash for render locality, and build the spatial
        index."""
        with self._conn.cursor() as cur:
            # ogr creates a ogc_fid column we don't need
            cur.execute('''ALTER TABLE "{temp_schema}"."{name}" DROP COLUMN ogc_fid;'''.format(name=self._name, temp_schema=self._temp_schema))

            # sorting static tables helps performance and reduces size from the column drop above
            # see osm2pgsql for why this particular geohash invocation
            cur.execute('''
              CREATE INDEX "{name}_geohash"
                ON "{temp_schema}"."{name}"
                (ST_GeoHash(ST_Transform(ST_Envelope(way),4326),10) COLLATE "C");
              CLUSTER "{temp_schema}"."{name}" USING "{name}_geohash";
              DROP INDEX "{temp_schema}"."{name}_geohash";
              CREATE INDEX ON "{temp_schema}"."{name}" USING GIST (way) WITH (fillfactor=100);
              ANALYZE "{temp_schema}"."{name}";
            '''.format(name=self._name, temp_schema=self._temp_schema))
            self._conn.commit()

    def replace(self, new_last_modified):
        """Swap the staged table into the destination schema inside one
        transaction and record its new Last-Modified value in the metadata
        table."""
        with self._conn.cursor() as cur:
            cur.execute('''BEGIN;''')
            cur.execute('''
                DROP TABLE IF EXISTS "{schema}"."{name}";
                ALTER TABLE "{temp_schema}"."{name}" SET SCHEMA "{schema}";
                '''.format(name=self._name, temp_schema=self._temp_schema, schema=self._dst_schema))

            # We checked if the metadata table had this table way up above
            cur.execute('''SELECT 1 FROM "{schema}"."{metadata_table}" WHERE name = %s'''.format(schema=self._dst_schema, metadata_table=self._metadata_table), [self._name])
            if cur.rowcount == 0:
                cur.execute('''INSERT INTO "{schema}"."{metadata_table}" (name, last_modified) VALUES (%s, %s)'''.format(schema=self._dst_schema, metadata_table=self._metadata_table),
                            [self._name, new_last_modified])
            else:
                cur.execute('''UPDATE "{schema}"."{metadata_table}" SET last_modified = %s WHERE name = %s'''.format(schema=self._dst_schema, metadata_table=self._metadata_table),
                            [new_last_modified, self._name])
        self._conn.commit()
def main():
# parse options
parser = argparse.ArgumentParser(description="Load external data into a database")
parser.add_argument("-f", "--force", action="store_true", help="Download new data, even if not required")
parser.add_argument("-c", "--config", action="store", default="external-data.yml", help="Name of configuraton file (default external-data.yml)")
parser.add_argument("-D", "--data", action="store", help="Override data download directory")
parser.add_argument("-d", "--database", action="store", help="Override database name to connect to")
parser.add_argument("-H", "--host", action="store", help="Override database server host or socket directory")
parser.add_argument("-p", "--port", action="store", help="Override database server port")
parser.add_argument("-U", "--username", action="store", help="Override database user name")
parser.add_argument("-v", "--verbose", action="store_true", help="Be more verbose. Overrides -q")
parser.add_argument("-q", "--quiet", action="store_true", help="Only report serious problems")
opts = parser.parse_args()
if opts.verbose:
logging.basicConfig(level=logging.DEBUG)
elif opts.quiet:
logging.basicConfig(level=logging.WARNING)
else:
logging.basicConfig(level=logging.INFO)
with open(opts.config) as config_file:
config = yaml.safe_load(config_file)
data_dir = opts.data or config["settings"]["data_dir"]
os.makedirs(data_dir, exist_ok=True)
# If the DB options are unspecified in both on the command line and in the
# config file, libpq will pick what to use with the None
database = opts.database or config["settings"].get("database")
host = opts.host or config["settings"].get("host")
port = opts.port or config["settings"].get("port")
user = opts.username or config["settings"].get("username")
with requests.Session() as s, \
psycopg2.connect(database=database,
host=host,
port=port,
user=user) as conn:
s.headers.update({'User-Agent': 'get-external-data.py/meddo'})
# DB setup
database_setup(conn, config["settings"]["temp_schema"], config["settings"]["schema"], config["settings"]["metadata_table"])
for name, source in config["sources"].items():
logging.info("Checking table {}".format(name))
# Don't attempt to handle strange names
# Even if there was code to escape them properly here, you don't want
# in a style with all the quoting headaches
if not re.match('''^[a-zA-Z0-9_]+$''', name):
raise RuntimeError("Only ASCII alphanumeric table are names supported")
workingdir = os.path.join(data_dir, name)
# Clean up anything left over from an aborted run
shutil.rmtree(workingdir, ignore_errors=True)
os.makedirs(workingdir, exist_ok=True)
this_table = Table(n | ame, conn, config["settings"]["temp_schema"], config["settings"]["schema"], config["settings"]["metadata_table"])
this_table.clean_temp()
if not opts.force:
headers = {'If-Modified-Since': this_table.last_modified()}
else:
headers = {}
download = s.get(source["url"], headers=headers)
download.raise_for_status()
if (download.status_code == 200):
if " | Last-Modified" in download.headers:
new_last_modified = download.headers["Last-Modified"]
else:
new_last_modified = None
if "archive" in source and source["archive"]["format"] == "zip":
zip = zipfile.ZipFile(io.BytesIO(download.content))
for member in source["archive"]["files"]:
zip.extract(member, workingdir)
ogrpg = "PG:dbname={}".format(database)
if port is not None:
ogrpg = ogrpg + " port={}".format(port)
if user is not None:
ogrpg = ogrpg + " user={}".format(user)
if host is not None:
ogrpg = ogrpg + " host={}".format(host)
ogrcommand = ["ogr2ogr",
|
yuanbaowen521/tadbit | _pytadbit/utils/three_dim_stats.py | Python | gpl-3.0 | 19,120 | 0.002929 | """
30 Oct 2013
"""
from pytadbit.eqv_rms_drms import rmsdRMSD_wrapper
from pytadbit.consistency import consistency_wrapper
from itertools import combinations
import numpy as np
from math import pi, sqrt, cos, sin, acos
def generate_sphere_points(n=100):
    """
    Distribute *n* points quasi-uniformly on the unit sphere using the
    Golden Section Spiral algorithm.

    :param n: number of points to generate

    :returns: list of (x, y, z) tuples on the sphere of radius 1 centered
       at the origin
    """
    golden_angle = pi * (3 - sqrt(5))
    step = 2 / float(n)
    points = []
    for k in range(int(n)):
        # height advances linearly from -1 to 1, sampled at slab centers
        y = k * step - 1 + (step / 2)
        ring_radius = sqrt(1 - y * y)
        theta = k * golden_angle
        points.append((cos(theta) * ring_radius, y, sin(theta) * ring_radius))
    return points
def get_center_of_mass(x, y, z, zeros):
    """
    Compute the center of mass of the particles flagged as valid.

    :param x: list of x coordinates
    :param y: list of y coordinates
    :param z: list of z coordinates
    :param zeros: per-particle mask; particles whose entry is falsy are
       excluded from the computation

    :returns: (xm, ym, zm) coordinates of the center of mass

    Note: raises ZeroDivisionError if every particle is masked out, as the
    original implementation did.
    """
    xm = ym = zm = 0.
    subsize = 0
    # range replaces the Python-2-only xrange; equivalent for iteration
    # and valid on both Python 2 and 3
    for i in range(len(x)):
        if not zeros[i]:
            continue
        subsize += 1
        xm += x[i]
        ym += y[i]
        zm += z[i]
    xm /= subsize
    ym /= subsize
    zm /= subsize
    return xm, ym, zm
def mass_center(x, y, z, zeros):
    """
    Translate the coordinates in place so the center of mass (computed over
    the particles flagged valid in *zeros*) sits at the origin.

    :param x: list of x coordinates (modified in place)
    :param y: list of y coordinates (modified in place)
    :param z: list of z coordinates (modified in place)
    :param zeros: per-particle mask passed through to get_center_of_mass
    """
    xm, ym, zm = get_center_of_mass(x, y, z, zeros)
    # range replaces the Python-2-only xrange; equivalent here and valid
    # on both Python 2 and 3
    for i in range(len(x)):
        x[i] -= xm
        y[i] -= ym
        z[i] -= zm
# def generate_circle_points(x, y, z, a, b, c, u, v, w, n):
# """
# Returns list of 3d coordinates of points on a circle using the
# Rodrigues rotation formula.
#
# see *Murray, G. (2013). Rotation About an Arbitrary Axis in 3 Dimensions*
# for details
#
# :param x: x coordinate of a point somewhere on the circle
# :param y: y coordinate of a point somewhere on the circle
# :param z: z coordinate of a point somewhere on the circle
# :param a: x coordinate of the center
# :param b: y coordinate of the center
# :param c: z coordinate of the center
# :param u: 1st element of a vector in the same plane as the circle
# :param v: 2nd element of a vector in the same plane as the circle
# :param w: 3rd element of a vector in the same plane as the circle
# :param n: number of points in the circle
#
# TODO: try simplification for a=b=c=0 (and do the translation in the main
# function)
# """
# points = []
# offset = 2 * pi / float(n)
# u_2 = u**2
# v_2 = v**2
# w_2 = w**2
# dst = u_2 + v_2 + w_2
# sqrtdst = sqrt(dst)
# uxvywz = - u*x - v*y - w*z
# b_v = b*v
# c_w = c*w
# a_u = a*u
# one = (a * (v_2 + w_2) - u*(b_v + c_w + uxvywz))
# two = (b * (u_2 + w_2) - v*(a_u + c_w + uxvywz))
# tre = (c * (u_2 + v_2) - w*(a_u + b_v + uxvywz))
# onep = sqrtdst * (-c*v + b*w - w*y + v*z)
# twop = sqrtdst * ( c*u - a*w + w*x - u*z)
# trep = sqrtdst * (-b*u + a*v - v*x + u*y)
# for k in range(int(n)):
# ang = k * offset
# cosang = cos(ang)
# dcosang = cosang * dst
# sinang = sin(ang)
# points.append([(one * (1 - cosang) + x * dcosang + onep * sinang) / dst,
# (two * (1 - cosang) + y * dcosang + twop * sinang) / dst,
# (tre * (1 - cosang) + z * dcosang + trep * sinang) / dst]
# )
# return points
def rotate_among_y_axis(x, y, z, angle):
    """
    Rotate a set of particles around the y axis.

    :param x: list of x coordinates
    :param y: list of y coordinates
    :param z: list of z coordinates
    :param angle: rotation angle in radians

    :returns: three new lists (x, y, z) holding the rotated coordinates;
       the inputs are not modified
    """
    cos_a = cos(angle)
    sin_a = sin(angle)
    rot_x = []
    rot_y = []
    rot_z = []
    for xi, yi, zi in zip(x, y, z):
        rot_x.append(xi * cos_a + zi * sin_a)
        rot_y.append(yi)  # rotation about y leaves y unchanged
        rot_z.append(xi * -sin_a + zi * cos_a)
    return rot_x, rot_y, rot_z
def find_angle_rotation_improve_x(x, y, z, center_of_mass):
    """
    Find the rotation angle (around the y axis) needed to face the longest
    edge of the molecule.

    :param x: list of x coordinates
    :param y: list of y coordinates
    :param z: list of z coordinates
    :param center_of_mass: (x, y, z) coordinates of the center of mass

    :returns: rotation angle in radians
    """
    # locate the particle farthest from the center of mass
    farthest = max(zip(x, y, z),
                   key=lambda point: square_distance(point, center_of_mass))
    xdst = farthest[0]
    dist = distance(farthest, center_of_mass)
    return acos((-xdst**2 - (dist + sqrt(dist**2 - xdst**2))) /
                (2 * dist**2) + 1)
def generate_circle_points(x, y, z, u, v, w, n):
    """
    Return *n* evenly spaced 3D points on the circle traced by rotating the
    point (x, y, z) around the axis (u, v, w) through the origin, using the
    Rodrigues rotation formula.

    see *Murray, G. (2013). Rotation About an Arbitrary Axis in 3 Dimensions*
    for details

    :param x: x coordinate of a point somewhere on the circle
    :param y: y coordinate of a point somewhere on the circle
    :param z: z coordinate of a point somewhere on the circle
    :param u: 1st component of the rotation axis
    :param v: 2nd component of the rotation axis
    :param w: 3rd component of the rotation axis
    :param n: number of points in the circle

    :returns: list of [x, y, z] points
    """
    step = 2 * pi / float(n)
    # squared norm of the axis and its square root, reused every iteration
    norm2 = u**2 + v**2 + w**2
    norm = sqrt(norm2)
    # dot product of the axis with the start point, negated
    dot = - u*x - v*y - w*z
    # angle-independent pieces of the Rodrigues formula, per coordinate
    cst_x = (-u * dot)
    cst_y = (-v * dot)
    cst_z = (-w * dot)
    crs_x = norm * (- w*y + v*z)
    crs_y = norm * (+ w*x - u*z)
    crs_z = norm * (- v*x + u*y)
    points = []
    for k in range(int(n)):
        ang = k * step
        cos_a = cos(ang)
        sin_a = sin(ang)
        dcos = cos_a * norm2
        points.append([(cst_x * (1 - cos_a) + x * dcos + crs_x * sin_a) / norm2,
                       (cst_y * (1 - cos_a) + y * dcos + crs_y * sin_a) / norm2,
                       (cst_z * (1 - cos_a) + z * dcos + crs_z * sin_a) / norm2]
                      )
    return points
def square_distance(part1, part2):
    """
    Squared Euclidean distance between two particles.

    :param part1: sequence of (x, y, z) coordinates
    :param part2: sequence of (x, y, z) coordinates

    :returns: the square of the distance between the two points
    """
    dx = part1[0] - part2[0]
    dy = part1[1] - part2[1]
    dz = part1[2] - part2[2]
    return dx**2 + dy**2 + dz**2
def fast_square_distance(x1, y1, z1, x2, y2, z2):
    """
    Squared Euclidean distance between two points given as six scalars
    (avoids building coordinate tuples on hot code paths).

    :returns: the square of the distance between (x1, y1, z1) and
       (x2, y2, z2)
    """
    dx = x1 - x2
    dy = y1 - y2
    dz = z1 - z2
    return dx**2 + dy**2 + dz**2
def distance(part1, part2):
    """
    Euclidean distance between two particles.

    :param part1: coordinate in list format (x, y, z)
    :param part2: coordinate in list format (x, y, z)

    :returns: distance between the two points
    """
    dx = part1[0] - part2[0]
    dy = part1[1] - part2[1]
    dz = part1[2] - part2[2]
    return sqrt(dx**2 + dy**2 + dz**2)
def angle_between_3_points(point1, point2, point3):
"""
Calculates the angle between 3 particles
Given three particles A, B and C, the angle g (angle ACB, shown below):
::
A
/|
/i|
c/ |
/ |
/ |
B )g |b
\ |
\ |
a\ |
\h|
\|
C
is given by the theorem of Al-Kashi:
.. math::
b^2 = a^2 + c^2 - 2ac\cos(g)
:param point1: list of 3 coordinate for x, y and z
:param point2: list of 3 coordinate for x, y and z
:param point3: list of |
tystr/mongodb-replication-status | mongodb_replication_status.py | Python | mit | 6,156 | 0.004224 | #!/usr/bin/env python
# mongodb_replicaset_status.py
# Author: Tyler Stroud <ststroud@gmail.com>
# Date: 2012-11-06
"""
This script monitors replication status of a replicaset
"""
from daemon import runner
import logging
from pymongo import Connection
from pymongo.errors import AutoReconnect
from time import sleep
import smtplib
from email.mime.text import MIMEText
import sys
from argparse import ArgumentParser
from ConfigParser import RawConfigParser, NoOptionError
class MongoDBReplicationStatus(object):
    """Daemon app that polls a MongoDB replica set and alerts on members
    whose replication lag exceeds a configurable threshold.

    The instance doubles as the "app" object expected by
    ``daemon.runner.DaemonRunner`` -- hence the ``stdin_path``,
    ``stdout_path``, ``stderr_path`` and ``pidfile_*`` attributes and the
    blocking ``run`` method.
    """

    # Hostname of the member most recently seen as primary; tried first on
    # the next poll to avoid probing the whole set every time.
    last_primary = None

    def __init__(self, host, poll_interval=5, lag_threshold=30,
                 max_connect_retries=5, log_level=logging.INFO,
                 pidfile='/tmp/mongodb_replication_status.pid',
                 logfile='/var/log/mongodb_replication_status.log'):
        """
        :param host: list of replica-set member hostnames to monitor
        :param poll_interval: seconds to sleep between polls
        :param lag_threshold: maximum tolerated replication lag, in seconds
        :param max_connect_retries: connection attempts per host before the
            host is declared down
        :param log_level: logging level name (or numeric level)
        :param pidfile: pid file used by the daemon runner
        :param logfile: file receiving log output as well as stdout/stderr
        """
        self.poll_interval = poll_interval
        self.lag_threshold = lag_threshold
        self.max_connect_retries = max_connect_retries
        self.stdin_path = '/dev/null'
        self.stdout_path = logfile
        self.stderr_path = logfile
        self.pidfile_path = pidfile
        self.pidfile_timeout = 5
        self.hostnames = host
        self.logger = logging.getLogger(__name__)
        self.logger.setLevel(logging.getLevelName(log_level))
        self.logger_handler = logging.FileHandler(logfile)
        # BUG FIX: the format string was garbled ('% | (message)s') and would
        # fail at the first log call; restore the intended layout.
        self.logger_handler.setFormatter(
            logging.Formatter('[%(asctime)s] %(message)s'))
        self.logger.addHandler(self.logger_handler)

    def set_notifier(self, notifier):
        """Attach the Notifier used to e-mail alerts (required before run)."""
        # BUG FIX: the assertion message was garbled by extraction; restore
        # the intended implicit string concatenation.
        assert isinstance(notifier, Notifier), ('"notifier" must be an '
                                                'instance of "Notifier"')
        self.notifier = notifier

    def get_members(self):
        """Return the ``members`` list from ``replSetGetStatus``.

        The previously seen primary is tried first; otherwise every other
        configured host is probed.  Blocks (retrying every 5 seconds)
        while the replica set has no reachable primary.
        """
        if self.last_primary is not None:
            connection = self.get_connection(self.last_primary)
            if connection is not None and connection.is_primary:
                return connection['admin'].command('replSetGetStatus')['members']
        for hostname in [h for h in self.hostnames if h != self.last_primary]:
            connection = self.get_connection(hostname)
            if not isinstance(connection, Connection):
                continue  # failed to connect to this hostname; try the next one
            if connection.is_primary:
                self.last_primary = hostname
                return connection['admin'].command('replSetGetStatus')['members']
        # There is no primary, so wait 5 seconds and try again
        sleep(5)
        return self.get_members()

    def get_primary_optime(self, members):
        """Return the optime (seconds) of the primary member, or None if
        no member reports itself as PRIMARY."""
        for member in members:
            if 'PRIMARY' == member['stateStr']:
                return member['optime'].time

    def get_connection(self, hostname):
        """Attempt to create a mongodb Connection to the given hostname.

        Retries up to ``max_connect_retries`` times, 5 seconds apart.  On
        final failure the error is logged, an alert e-mail is sent via the
        attached notifier, and None is returned.
        """
        retries = self.max_connect_retries
        while retries > 0:
            try:
                return Connection(hostname)
            except AutoReconnect:
                self.logger.warning(
                    'WARNING: Failed to connect to hostname "%s". Trying again in 5 seconds. (%s tries left).'
                    % (hostname, retries))
                retries -= 1
                sleep(5)
        errmsg = 'ERROR: All %s attempts to connect to hostname "%s" failed. Host may be down.'\
                 % (self.max_connect_retries, hostname)
        self.logger.error(errmsg)
        # NOTE: requires set_notifier() to have been called beforehand.
        self.notifier.send_to_all(errmsg, '[ALERT] Host %s may be down' % hostname)

    def run(self):
        """Poll forever, e-mailing a combined warning whenever any member
        lags more than ``lag_threshold`` seconds behind the primary."""
        while True:
            members = self.get_members()
            message = ''
            for member in members:
                lag = self.get_primary_optime(members) - member['optime'].time
                if lag > self.lag_threshold:
                    message += 'WARNING: Member "%s" is %s seconds behind the primary\n' % (member['name'], lag)
                    self.logger.warning(message)
                self.logger.debug('DEBUG: Member "%s" is %s seconds behind the primary' % (member['name'], lag))
            # BUG FIX: was "message is not ''", an identity comparison with a
            # string literal; use truthiness instead.
            if message:
                self.notifier.send_to_all(message)
            sleep(self.poll_interval)
class Notifier(object):
    """Sends plain-text alert e-mails to a fixed list of recipients."""

    def __init__(self, from_email, recipient_emails, smtp_host='localhost'):
        """
        :param from_email: envelope sender address
        :param recipient_emails: recipient address(es) for every alert
        :param smtp_host: SMTP relay that delivers the message
        """
        self.smtp_host = smtp_host
        self.recipient_emails = recipient_emails
        self.from_email = from_email

    def send_to_all(self, message, subject='[ALERT] Replication Status Warning'):
        """Wrap *message* in a MIME text part and deliver it via SMTP."""
        mail = MIMEText(message)
        mail['Subject'] = subject
        mailer = smtplib.SMTP(self.smtp_host)
        return mailer.sendmail(self.from_email, self.recipient_emails, str(mail))
if __name__ == '__main__':
    # Command line: -c/--config <ini file> plus a daemon action.
    parser = ArgumentParser()
    parser.add_argument('-c', '--config', help='The path to the configuration file', dest='FILE', required=True)
    parser.add_argument('action', choices=('start', 'stop', 'restart'))
    args = parser.parse_args()
    # All runtime settings live in the [main] section of the ini file.
    config_parser = RawConfigParser()
    config_file = open(args.FILE)
    try:
        config_parser.readfp(config_file)
    finally:
        config_file.close()
    status = MongoDBReplicationStatus(
        config_parser.get('main', 'host').split(','),  # comma-separated member hostnames
        config_parser.getint('main', 'poll_interval'),
        config_parser.getint('main', 'lag_threshold'),
        config_parser.getint('main', 'max_connect_retries'),
        config_parser.get('main', 'log_level'),
        config_parser.get('main', 'pidfile'),
        config_parser.get('main', 'logfile'),
    )
    notifier = Notifier(config_parser.get('main', 'from_email'),
                        config_parser.get('main', 'recipients'),
                        config_parser.get('main', 'smtp_host'))
    status.set_notifier(notifier)
    # DaemonRunner parses sys.argv itself and expects [prog, action] only.
    sys.argv = sys.argv[0], args.action  # overwrite sys.argv to be what daemon_runner expects
    daemon_runner = runner.DaemonRunner(status)
    # Keep the log file handle open across the daemonisation fork.
    daemon_runner.daemon_context.files_preserve = [status.logger_handler.stream]
    daemon_runner.do_action()
|
kouroshparsa/iis_bridge | iis_bridge/config.py | Python | gpl-2.0 | 2,094 | 0.004298 | """
This module is used for common operations
as well as global variables
:copyright: (c) 2014 by Kourosh Parsa.
"""
import os
import subprocess
import platform
# Paths to IIS tooling and content roots, derived from the system drive.
IIS_HOME = "%s\\Windows\\System32\\inetsrv" % os.getenv("SYSTEMDRIVE")
APP_CMD = "%s\\appcmd.exe" % IIS_HOME
DOCROOT = "%s\\inetpub\\wwwroot" % os.getenv("SYSTEMDRIVE")
LOG_DIR = "%s\\inetpub\\logs\\LogFiles" % os.getenv("SYSTEMDRIVE")
# Valid application-pool pipeline modes and identity types (appcmd values).
PIPELINE_MODES = ["Integrated", "Classic"]
IDENTITIES = ["LocalService", "LocalSystem", "NetworkService",
              "ApplicationPoolIdentity", "Custom"]
NET_DIR = "%s\\Windows\\Microsoft.NET" % os.getenv("SYSTEMDRIVE")
if platform.machine().endswith("64") and\
        platform.architecture()[0].startswith("32"):
    # 64 windows with 32 bit python - disable wow64: go through the
    # 'sysnative' alias to reach the native 64-bit system binaries.
    DISM = "%s\\Windows\sysnative\\Dism.exe" % os.getenv("SYSTEMDRIVE")
    CONFIG_DIR = "%s\\Windows\\sysnative\\inetsrv\\config" % os.getenv("SYSTEMDRIVE")
    WMIC = "%s\\Windows\\sysnative\\wbem\\wmic.exe" % os.getenv('SYSTEMDRIVE')
    SERVER_MGR_CMD = "%s\\windows\\sysnative\\ServerManagerCmd.exe" % os.getenv('SYSTEMDRIVE')
else:
    DISM = "%s\\Windows\System32\\Dism.exe" % os.getenv("SYSTEMDRIVE")
    CONFIG_DIR = "%s\\Windows\\System32\\inetsrv\\config" % os.getenv("SYSTEMDRIVE")
    WMIC = "%s\\Windows\\system32\\wbem\\wmic.exe" % os.getenv('SYSTEMDRIVE')
    SERVER_MGR_CMD = "%s\\windows\\system32\\ServerManagerCmd.exe" % os.getenv('SYSTEMDRIVE')
# Dism may be absent on older Windows versions.
if not os.path.exists(DISM):
    DISM = None
def run(cmd, errMsg=None):
    """Execute a shell command and return its stdout as a string.

    :param cmd: string - command to run
    :param errMsg: string (default=None) - error message to raise instead
        of the raw command output upon failure
    :returns: the command's stdout, decoded to text
    :raises Exception: if the process exits non-zero or its output
        contains the "[Error]" marker
    """
    proc = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE)
    output, err = proc.communicate()
    # BUG FIX: this line was garbled ("outpu | t.decode"); also prefer
    # isinstance() over comparing type objects directly.
    if isinstance(output, bytes):
        output = output.decode(encoding='UTF-8')
    if "[Error]" in output or proc.returncode != 0:
        if errMsg:
            raise Exception(errMsg)
        else:
            raise Exception("%s\r\n%s" % (output, err))
    return output
|
tasercake/Crypto_Algotrader | crizzle/patterns/__init__.py | Python | mit | 398 | 0.002513 | from crizzle.patterns.memoize import memoize
from crizzle.patterns.observer import Obse | rvable, Observer
from crizzle.patterns.singleton import Singleto | n
from crizzle.patterns.deprecated import deprecated
from crizzle.patterns.value_checking import assert_none, assert_not_none, assert_in, assert_type, assert_equal
from crizzle.patterns.graph import DiGraph
from crizzle.patterns import conversion
|
mmottahedi/neuralnilm_prototype | scripts/e470.py | Python | mit | 7,017 | 0.005558 | from __future__ import print_function, division
import matplotlib
import logging
from sys import stdout
matplotlib.use('Agg') # Must be before importing matplotlib.pyplot or pylab!
from neuralnilm import (Net, RealApplianceSource,
BLSTMLayer, DimshuffleLayer,
BidirectionalRecurrentLayer)
from neuralnilm.source import (standardise, discretize, fdiff, power_and_fdiff,
RandomSegments, RandomSegmentsInMemory,
SameLocation)
from neuralnilm.experiment import run_experiment, init_experiment
from neuralnilm.net import TrainingError
from neuralnilm.layers import (MixtureDensityLayer, DeConv1DLayer,
SharedWeightsDenseLayer)
from neuralnilm.objectives import (scaled_cost, mdn_nll,
scaled_cost_ignore_inactive, ignore_inactive,
scaled_cost3)
from neuralnilm.plot import MDNPlotter, CentralOutputPlotter, Plotter
from neuralnilm.updates import clipped_nesterov_momentum
from neuralnilm.disaggregate import disaggregate
from lasagne.nonlinearities import sigmoid, rectify, tanh, identity
from lasagne.objectives import mse, binary_crossentropy
from lasagne.init import Uniform, Normal, Identity
from lasagne.layers import (LSTMLayer, DenseLayer, Conv1DLayer,
ReshapeLayer, FeaturePoolLayer, RecurrentLayer)
from lasagne.layers.batch_norm import BatchNormLayer
from lasagne.updates import nesterov_momentum, momentum
from functools import partial
import os
import __main__
from copy import deepcopy
from math import sqrt
import numpy as np
import theano.tensor as T
import gc
"""
447: first attempt at disaggregation
"""
# Experiment name is derived from this script's own filename (e.g. 'e470').
NAME = os.path.splitext(os.path.split(__main__.__file__)[1])[0]
#PATH = "/homes/dk3810/workspace/python/neuralnilm/figures"
PATH = "/data/dk3810/figures"
SAVE_PLOT_INTERVAL = 1000
N_SEQ_PER_BATCH = 64

# Shared keyword arguments for the data source; per-experiment fields
# (target appliance, sequence length, logger) are filled in by exp_a().
source_dict = dict(
    filename='/data/dk3810/ukdale.h5',
    window=("2013-03-18", None),
    train_buildings=[1, 2, 4],
    validation_buildings=[5],
    n_seq_per_batch=N_SEQ_PER_BATCH,
    standardise_input=True,
    standardise_targets=True,
    independently_center_inputs=True,
    ignore_incomplete=True,
    offset_probability=0.5,
    ignore_offset_activations=True
)

# Shared keyword arguments for the Net; the commented alternatives record
# previously tried loss/update configurations.
net_dict = dict(
    save_plot_interval=SAVE_PLOT_INTERVAL,
    # loss_function=partial(ignore_inactive, loss_func=mdn_nll, seq_length=SEQ_LENGTH),
    # loss_function=lambda x, t: mdn_nll(x, t).mean(),
    # loss_function=lambda x, t: (mse(x, t) * MASK).mean(),
    loss_function=lambda x, t: mse(x, t).mean(),
    # loss_function=lambda x, t: binary_crossentropy(x, t).mean(),
    # loss_function=partial(scaled_cost, loss_func=mse),
    # loss_function=ignore_inactive,
    # loss_function=partial(scaled_cost3, ignore_inactive=False),
    # updates_func=momentum,
    updates_func=clipped_nesterov_momentum,
    updates_kwargs={'clip_range': (0, 10)},
    learning_rate=1e-2,
    learning_rate_changes_by_iteration={
        1000: 1e-3,
        5000: 1e-4
    },
    do_save_activations=True,
    auto_reshape=False,
    # plotter=CentralOutputPlotter
    plotter=Plotter(n_seq_to_plot=32)
)
def exp_a(name, target_appliance, seq_length):
    """Build the conv/dense autoencoder Net for one appliance experiment.

    :param name: experiment name (used for the logger and output files)
    :param target_appliance: appliance name(s) passed to the data source
    :param seq_length: input/output sequence length in samples
    :returns: a configured (untrained) Net
    """
    global source
    source_dict_copy = deepcopy(source_dict)
    source_dict_copy.update(dict(
        target_appliance=target_appliance,
        logger=logging.getLogger(name),
        seq_length=seq_length
    ))
    source = SameLocation(**source_dict_copy)
    net_dict_copy = deepcopy(net_dict)
    net_dict_copy.update(dict(
        experiment_name=name,
        source=source
    ))
    NUM_FILTERS = 4
    # Encoder: conv over time -> dense bottleneck; decoder: mirrored dense
    # layers -> transposed conv back to one output channel.
    # BUG FIX: two dict entries below were garbled in the source
    # ("'ty | pe'" and "DenseLayer | ,"); restored to 'type': DenseLayer.
    net_dict_copy['layers_config'] = [
        {
            'type': DimshuffleLayer,
            'pattern': (0, 2, 1)  # (batch, features, time)
        },
        {
            'label': 'conv0',
            'type': Conv1DLayer,  # convolve over the time axis
            'num_filters': NUM_FILTERS,
            'filter_length': 4,
            'stride': 1,
            'nonlinearity': None,
            'border_mode': 'valid'
        },
        {
            'type': DimshuffleLayer,
            'pattern': (0, 2, 1)  # back to (batch, time, features)
        },
        {
            'label': 'dense0',
            'type': DenseLayer,
            'num_units': (seq_length - 3) * NUM_FILTERS,
            'nonlinearity': rectify
        },
        {
            'label': 'dense1',
            'type': DenseLayer,
            'num_units': seq_length,
            'nonlinearity': rectify
        },
        {
            'label': 'dense2',
            'type': DenseLayer,
            'num_units': 128,
            'nonlinearity': rectify
        },
        {
            'label': 'dense3',
            'type': DenseLayer,
            'num_units': seq_length,
            'nonlinearity': rectify
        },
        {
            'type': DenseLayer,
            'num_units': (seq_length - 3) * NUM_FILTERS,
            'nonlinearity': rectify
        },
        {
            'type': ReshapeLayer,
            'shape': (N_SEQ_PER_BATCH, seq_length - 3, NUM_FILTERS)
        },
        {
            'type': DimshuffleLayer,
            'pattern': (0, 2, 1)  # (batch, features, time)
        },
        {
            'type': DeConv1DLayer,
            'num_output_channels': 1,
            'filter_length': 4,
            'stride': 1,
            'nonlinearity': None,
            'border_mode': 'full'
        },
        {
            'type': DimshuffleLayer,
            'pattern': (0, 2, 1)  # back to (batch, time, features)
        }
    ]
    net = Net(**net_dict_copy)
    return net
def main():
    """Run the experiment for the last appliance in the list, logging and
    recovering from per-experiment failures."""
    # (experiment suffix, appliance name(s), sequence length) per run.
    APPLIANCES = [
        ('a', ['fridge freezer', 'fridge', 'freezer'], 800),
        ('b', "'coffee maker'", 512),
        ('c', "'dish washer'", 2000),
        ('d', "'hair dryer'", 256),
        ('e', "'kettle'", 256),
        ('f', "'oven'", 2000),
        ('g', "'toaster'", 256),
        ('h', "'light'", 2000),
        ('i', ['washer dryer', 'washing machine'], 2000)
    ]
    # NOTE: only the last entry is actually run ([-1:]).
    for experiment, appliance, seq_length in APPLIANCES[-1:]:
        full_exp_name = NAME + experiment
        # init_experiment returns a source string like "exp_a(...)"; the
        # extra arguments are appended before it is eval'd below.
        func_call = init_experiment(PATH, 'a', full_exp_name)
        func_call = func_call[:-1] + ", {}, {})".format(appliance, seq_length)
        logger = logging.getLogger(full_exp_name)
        try:
            # NOTE(review): eval of a constructed string -- safe only because
            # all inputs are hard-coded above; do not feed external data here.
            net = eval(func_call)
            run_experiment(net, epochs=None)
        except KeyboardInterrupt:
            logger.info("KeyboardInterrupt")
            break
        except Exception as exception:
            logger.exception("Exception")
            # raise
        else:
            # Free the data source and network before the next experiment.
            del net.source
            del net
            gc.collect()
        finally:
            logging.shutdown()
if __name__ == "__main__":
main()
"""
Emacs variables
Local Variables:
compile-command: "cp /home/jack/workspace/python/neuralnilm/scripts/e470.py /mnt/sshfs/imperial/workspace/python/neuralnilm/scripts/"
End:
"""
|
flavoi/diventi | diventi/ebooks/migrations/0121_auto_20201005_2211.py | Python | apache-2.0 | 834 | 0.003597 | # Generated by Django 2.2.16 on 2020-10-05 20:11
import ckeditor.fie | lds
from django.db import migrations
class Migration(migrations.Migration):
    """Switch the Book summary fields (base + en/it translations) to
    CKEditor RichTextField."""

    dependencies = [
        ('ebooks', '0120_auto_20201005_1453'),
    ]

    operations = [
        migrations.AlterField(
            model_name='book',
            name='summary',
            field=ckeditor.fields.RichTextField(blank=True, verbose_name='summary'),
        ),
        migrations.AlterField(
            model_name='book',
            name='summary_en',
            field=ckeditor.fields.RichTextField(blank=True, null=True, verbose_name='summary'),
        ),
        migrations.AlterField(
            model_name='book',
            name='summary_it',
            field=ckeditor.fields.RichTextField(blank=True, null=True, verbose_name='summary'),
        ),
    ]
|
jdemel/gnuradio | gnuradio-runtime/python/gnuradio/gr/qa_uncaught_exception.py | Python | gpl-3.0 | 2,500 | 0 | #!/usr/bin/env python
#
# Copyright 2018 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# SPDX-License-Identifier: GPL-3.0-or-later
#
#
from gnuradio import gr, gr_unittest, blocks
from multiprocessing import Process
import numpy
# This block just exists to pass data through and throw an exception
# This block just exists to pass data through and throw an exception
class except_block(gr.sync_block):
    """Pass-through block that raises RuntimeError after *except_count*
    items have flowed through it -- but only when *throw_except* is True."""

    def __init__(self, throw_except, except_count=10000):
        gr.sync_block.__init__(
            self,
            name="except_block",
            in_sig=[numpy.complex64],
            out_sig=[numpy.complex64],
        )
        self.throw_except = throw_except
        self.except_count = except_count
        self.count = 0

    def work(self, input_items, output_items):
        output_items[0][:] = input_items[0]
        self.count += len(output_items[0])
        # BUG FIX: throw_except was stored but never consulted, so the
        # "non-throwing" instance raised too; honour the flag.
        if self.throw_except and self.count >= self.except_count:
            raise RuntimeError("Error in except_block")
        return len(output_items[0])
def process_func(catch_exceptions):
    """Build and run a small flowgraph containing one block that throws.

    Run in a child process so the parent test can observe whether the
    uncaught exception terminated the process (catch_exceptions=False)
    or was swallowed by the scheduler (catch_exceptions=True).
    """
    tb = gr.top_block(catch_exceptions=catch_exceptions)
    # some test data
    src_data = [complex(x, x + 1) for x in range(65536)]
    src = blocks.vector_source_c(src_data)
    src.set_repeat(True)
    e_block_1 = except_block(False)
    e_block_2 = except_block(True)
    sink_1 = blocks.null_sink(gr.sizeof_gr_complex)
    # BUG FIX: the two lines below were garbled in the source
    # ("| sink_2" and "e_bl | ock_2"); restored.
    sink_2 = blocks.null_sink(gr.sizeof_gr_complex)
    tb.connect(src, e_block_1)
    tb.connect(src, e_block_2)
    tb.connect(e_block_1, sink_1)
    tb.connect(e_block_2, sink_2)
    tb.run()
class test_uncaught_exception(gr_unittest.TestCase):
    """Verify top_block's behaviour when a Python block raises: the
    flowgraph process must die without catch_exceptions and survive
    with it."""

    def test_exception_throw_uncaught(self):
        # Test to ensure that throwing an exception causes the
        # process running top_block to exit
        p = Process(target=process_func, args=(False,))
        p.daemon = True
        p.start()
        p.join(2.5)  # give the flowgraph time to hit the raising block
        exit_code = p.exitcode  # None means the process is still alive
        self.assertIsNotNone(
            exit_code, "exception did not cause flowgraph exit")

    def test_exception_throw_caught(self):
        # Test to ensure that throwing an exception does not cause the process
        # running top_block to exit (in catch_exceptions mode)
        p = Process(target=process_func, args=(True,))
        p.daemon = True
        p.start()
        p.join(2.5)
        exit_code = p.exitcode
        self.assertIsNone(
            exit_code, "exception caused flowgraph exit")
if __name__ == '__main__':
gr_unittest.run(test_uncaught_exception, "test_uncaught_exception.xml")
|
googleads/googleads-shopping-samples | python/shopping/content/accounts/insert.py | Python | apache-2.0 | 1,326 | 0.008296 | #!/usr/bin/python
#
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This example adds an account to a specified multi-client account."""
from __future__ import print_function
import sys
from shopping.content import common
def main(argv):
    """Create a sub-account under the configured multi-client account."""
    # Authenticate and construct service.
    service, config, _ = common.init(argv, __doc__)
    # BUG FIX: the key lookup was garbled ("config['m | erchantId']").
    merchant_id = config['merchantId']
    common.check_mca(config, True)
    name = 'account%s' % common.get_unique_id()
    account = {'name': name, 'websiteUrl': 'https://%s.example.com/' % name}
    # Add account.
    # BUG FIX: the call chain was garbled ("service | .accounts()").
    request = service.accounts().insert(merchantId=merchant_id, body=account)
    result = request.execute()
    print('Created sub-account ID %s for MCA %d.' % (result['id'], merchant_id))
if __name__ == '__main__':
main(sys.argv)
|
mjpbaywolf/plugin.video.wwen | resources/wwe.py | Python | gpl-3.0 | 18,159 | 0.002478 | import re
from urllib import unquote
import requests
from xml.dom.minidom import parseString
# Endpoints of the WWE Network authentication workflow.
PRE_LOGIN_URL = "https://secure.net.wwe.com/enterworkflow.do?flowId=account.login"
LOGIN_URL = "https://secure.net.wwe.com/workflow.do"
LOGOUT_URL = "https://secure.net.wwe.com/enterworkflow.do?flowId=registration.logout"
# Media service that resolves a content id to a playable stream URL.
VIDEO_URL = "https://ws.media.net.wwe.com/ws/media/mf/op-findUserVerifiedEvent/v-2.3"
# SOAP status-code -> human readable error, as returned by the media service.
SOAPCODES = {
    "1": "OK",
    "-1000": "Requested Media Not Found",
    "-1500": "Other Undocumented Error",
    "-2000": "Authentication Error",
    "-2500": "Blackout Error",
    "-3000": "Identity Error",
    "-3500": "Sign-on Restriction Error",
    "-4000": "System Error",
}
class NetworkItem(object):
    """Plain value object describing one WWE Network catalogue entry.

    Every field starts out empty (or at a sensible default) and is filled
    in by the Network fetch helpers.
    """

    def __init__(self):
        # Classification / naming
        self.item_type = "episode"
        self.show_name = ""
        self.name = ""
        self.title = ""
        self.description = ""
        # Artwork
        self.icon = ""
        self.thumbnail = ""
        self.fan_art = ""
        self.banner = ""
        # Playback / metadata
        self.media_id = ""
        self.air_date = ""
        self.duration = ""
        self.genre = ""
        self.on_watchlist = False
class Network:
def __init__(self, user, password):
self.user = user
self.password = password
self.user_uuid = ''
self.cookies = None
self.logged_in = False
    def login(self):
        """Authenticate against the WWE Network and cache the session
        cookies; raises ValueError when authentication fails."""
        with requests.Session() as s:
            # Priming GET sets the workflow cookies the POST below needs.
            s.get(PRE_LOGIN_URL)
            auth_values = {'registrationAction': 'identify',
                           'emailAddress': self.user,
                           'password': self.password}
            s.post(LOGIN_URL, data=auth_values)
            try:
                # The 'mai' cookie carries "useruuid=[<uuid>]" on success.
                self.user_uuid = unquote(s.cookies['mai']).split('useruuid=')[1].replace('[', '').replace(']', '')
                self.cookies = s.cookies
                self.logged_in = True
            # NOTE(review): bare except also hides unrelated errors
            # (e.g. KeyboardInterrupt) as a failed login.
            except:
                raise ValueError('Login was unsuccessful.')
def set_cookies(self, cookies):
self.user_uuid = unquote(cookies['mai']).split('useruuid=')[1].replace('[', '').replace(']', '')
self.cookies = cookies
self.logged_in = True
    def get_video_url(self, content_id, bit_rate):
        """Resolve an rtmpdump-style playback URL for *content_id*.

        :param content_id: media content id from the EPG/catalogue feeds
        :param bit_rate: bitrate label such as '1000K', matched against the
            SMIL 'system-bitrate' attribute
        :raises ValueError: with a SOAPCODES message when the media service
            rejects the request
        """
        if not self.logged_in:
            self.login()
        query_values = {
            'contentId': content_id,
            'fingerprint': unquote(self.cookies['fprt']),
            'identityPointId': self.cookies['ipid'],
            'playbackScenario': 'FMS_CLOUD',
        }
        with requests.Session() as s:
            s.cookies = self.cookies
            response = s.get(VIDEO_URL, params=query_values).content
            parsed_response = parseString(response)
            status_code = parsed_response.getElementsByTagName('status-code')[0].childNodes[0].data
            if status_code != "1":
                raise ValueError(SOAPCODES[status_code])
            stream_url = parsed_response.getElementsByTagName('url')[0].childNodes[0].data
            # The answer points at a SMIL playlist describing the renditions.
            smil = parseString(s.get(stream_url).content)
            rtmp_base = smil.getElementsByTagName('meta')[0].getAttribute('base')
            auth_pat = re.compile(r'auth=(.*)')
            auth_chunk = '?auth=' + re.search(auth_pat, stream_url).groups()[0]
            # NOTE(review): the edge-server hosts below look hard-coded for
            # on-demand vs live content -- confirm they are still current.
            if 'ondemand' in rtmp_base:
                rtmp_base += '?_fcs_vhost=cp271756.edgefcs.net&akmfv=1.6&aifp=v0004' + auth_chunk
            else:
                rtmp_base += '?_fcs_vhost=cp269217.live.edgefcs.net&akmfv=1.6&aifp=v0004' + auth_chunk
            swf_url = 'http://ui.bamstatic.com/fedapp/video/flash/fvp/wwenetwork/3.0.0/fvp.swf swfVfy=1'
            if 'live' in rtmp_base:
                swf_url += ' live=1'
            # Pick the rendition whose bitrate matches ('500K' -> '500000');
            # if none matches, the last rendition examined is used.
            for elem in smil.getElementsByTagName('video'):
                try:
                    speed = elem.getAttribute('system-bitrate')
                    vid_src = elem.getAttribute('src')
                    if bit_rate.replace('K', '000') == speed:
                        break
                except ValueError:
                    continue
            return rtmp_base + ' Playpath=' + vid_src + auth_chunk + ' swfUrl=' + swf_url
def logout(self):
with requests.Session() as s:
s.cookies = self.cookies
response = s.get(LOGOUT_URL)
pattern = re.compile(r'You are now logged out.')
if not re.search(pattern, response):
raise ValueError("Logout was unsuccessful.")
def get_live_stream(self):
json_object = requests.get('http://epg.media.net.wwe.com/epg_small.json').json()
live_stream = NetworkItem()
for i in json_object['events']:
if i[' | live_media_state'] == 'MEDIA_ON':
live_stream.item_type = 'episode'
live_stream.show_name = i['show_name']
live_stream.name = 'LIVE: ' + i['title']
live_stream.title = i['title']
live_stream.media_id = i['media_playback_ids']['live']['content_id']
live_stream.icon = i['thumbnail_scenarios']['7']
live_stream.thumbnail = i['th | umbnail_scenarios']['35']
live_stream.fan_art = i['thumbnail_scenarios']['67']
live_stream.banner = i['thumbnail_scenarios']['63']
live_stream.description = i['big_blurb']
live_stream.air_date = i['dates_and_times']['air_date_gmt']
live_stream.duration = self.get_length_in_seconds(i['duration'])
live_stream.genre = i['genre']
break
return live_stream
def get_sections(self):
sections = []
ppv_response = requests.get('http://network.wwe.com/gen/content/tag/v1/section/ppv/jsonv4.json').json()
for i in ppv_response['list']:
if i['type'] == 'wwe-section':
temp = NetworkItem()
temp.item_type = 'section'
temp.title = i['title']
temp.name = i['title']
temp.icon = i['thumbnails']['124x70']['src']
temp.thumbnail = i['thumbnails']['400x224']['src']
if '1920x1080' in i['thumbnails']:
temp.fan_art = i['thumbnails']['1920x1080']['src']
else:
temp.fan_art = i['thumbnails']['1280x720']['src']
temp.media_id = i['key']
temp.air_date = i['userDate']
sections.append(temp)
show_response = requests.get('http://network.wwe.com/gen/content/tag/v1/section/shows/jsonv4.json').json()
for i in show_response['list']:
if i['type'] == 'wwe-section':
temp = NetworkItem()
temp.item_type = 'section'
temp.title = i['title']
temp.name = i['title']
temp.icon = i['thumbnails']['124x70']['src']
temp.thumbnail = i['thumbnails']['400x224']['src']
if '1920x1080' in i['thumbnails']:
temp.fan_art = i['thumbnails']['1920x1080']['src']
else:
temp.fan_art = i['thumbnails']['1280x720']['src']
temp.media_id = i['key']
temp.air_date = i['userDate']
sections.append(temp)
return sections
def get_recommended(self):
recommended = []
json_object = requests.get('http://network.wwe.com/gen/content/tag/v1/list/recommended/jsonv4.json').json()
for i in json_object['list']:
if i['type'] == 'wwe-asset' and all(r.name != i['headline'] for r in recommended):
temp = NetworkItem()
temp.item_type = "episode"
temp.show_name = i['show_name']
temp.name = i['headline']
temp.title = i['headline']
temp.description = i['notes']
temp.icon = i['thumbnails']['124x70']['src']
temp.thumbnail = i['thumbnails']['400x224']['src']
if '1920x1080' in i['thumbnails']:
temp.fan_art = i['thumbnails']['1920x1080']['src']
else:
temp.fan_art = i['thumbnails']['1280x720']['src']
|
LucidAi/nlcd | nlcd/wsgi.py | Python | mit | 221 | 0.004525 | # coding: utf-8
# Aut | hor: Vova Zaytsev <zaytsev@usc.edu>
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "nlcd.settings")
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application( | )
|
frePPLe/frePPLe | freppledb/input/migrations/0020_buffer_owner.py | Python | agpl-3.0 | 1,109 | 0 | #
# Copyright (C) 2017 by frePPLe bv
#
# This library is free software; you can redistribute it and/or modify it
# under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation; either version 3 of the License, or
# | (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero
# General Public License for more details.
#
# You should have received a copy of the | GNU Affero General Public
# License along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [("input", "0019_available")]
operations = [
migrations.RemoveField(model_name="buffer", name="lft"),
migrations.RemoveField(model_name="buffer", name="lvl"),
migrations.RemoveField(model_name="buffer", name="owner"),
migrations.RemoveField(model_name="buffer", name="rght"),
]
|
waltherg/PLoSPy | setup.py | Python | bsd-3-clause | 555 | 0 | try:
from setuptools import setup
except ImportError:
from distutils.core import setup
|
kargs = {}
kargs['name'] = 'PLoSPy'
kargs['version'] = ''
kargs['author'] = 'Georg R Walther'
kargs['author_email'] = 'contact@georg.io'
kargs['packages'] = ['plospy']
kargs['package_dir'] = {'plospy': 'plospy'}
kargs['url'] = 'http://georg.io'
kargs['license'] = 'BSD'
kargs['description'] = 'A library for handling PLoS data'
kargs['long_description'] = ''
kargs['test_suite'] = 'plospy.tests'
kargs['install_requires'] = ['BeautifulSo | up']
setup(**kargs)
|
CarlFK/wafer | wafer/schedule/south_migrations/0003_auto__add_day__add_field_slot_day__add_field_slot__order__chg_field_sl.py | Python | isc | 11,509 | 0.007386 | # -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'Day'
db.create_table(u'schedule_day', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('date', self.gf('django.db.models.fields.DateField')(null=True, blank=True)),
))
db.send_create_signal(u'schedule', ['Day'])
# Adding field 'Slot.day'
db.add_column(u'schedule_slot', 'day',
self.gf('django.db.models.fields.related.ForeignKey')(to=orm['schedule.Day'], null=True, blank=True),
keep_default=False)
# Adding field 'Slot._order'
db.add_column(u'schedule_slot', '_order',
self.gf('django.db.models.fields.IntegerField')(default=0),
keep_default=False)
# Changing field 'Slot.end_time'
db.alter_column(u'schedule_slot', 'end_time', self.gf('django.db.models.fields.TimeField')(null=True))
# Changing field 'Slot.start_time'
db.alter_column(u'schedule_slot', 'start_time', self.gf('django.db.models.fields.TimeField')(null=True))
# Adding M2M table for field days on 'Venue'
m2m_table_name = db.shorten_name(u'schedule_venue_days')
db.create_table(m2m_table_name, (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('venue', models.ForeignKey(orm[u'schedule.venue'], null=False)),
('day', models.ForeignKey(orm[u'schedule.day'], null=False))
))
db.create_unique(m2m_table_name, ['venue_id', 'day_id'])
def backwards(self, orm):
# Deleting model 'Day'
db.delete_table(u'schedule_day')
# Deleting field 'Slot.day'
db.delete_column(u'schedule_slot', 'day_id')
# Deleting field 'Slot._order'
db.delete_column(u'schedule_slot', '_order')
# Changing field 'Slot.end_time'
db.alter_column(u'schedule_slot', 'end_time', self.gf('django.db.models.fields.DateTimeField')(null=True))
# Changing field 'Slot.start_time'
db.alter_column(u'schedule_slot', 'start_time', self.gf('django.db.models.fields.DateTimeField')(null=True))
# Removing M2M table for field days on 'Venue'
db.delete_table(db.shorten_name(u'schedule_venue_days'))
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'pages.file': {
'Meta': {'object_name': 'File'},
'description': ('django.db.models.fields.TextField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'item': ('django.db.models.fields.files.FileField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
u'pages.page': {
'Meta': {'object_name': 'Page'},
'_content_rendered': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'content': ('markitup.fields.MarkupField', [], {'no_rendered_field': 'True'}),
'exclude_from_static': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'files': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'pages'", 'null': 'True', 'symmetrical': 'False', 'to': u"orm['pages.File']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'include_in_menu': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['pages.Page']", 'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50'})
},
| u'schedule.day': {
'Meta': {'ordering': "['date']", 'object_name': 'Day'},
'date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
u'schedule.scheduleitem': {
'Meta': {'object_na | me': 'ScheduleItem'},
'details': ('wafer.snippets.markdown_field.MarkdownTextField', [], {'blank': 'True', 'allow_html': 'False'}),
'details_html': ('django.db.models.fields.TextField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'notes': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'page': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['pages.Page']", 'null': 'True', 'blank': 'True'}),
'slots': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['schedule.Slot']", 'symmetric |
EdDev/vdsm | lib/vdsm/v2v.py | Python | gpl-2.0 | 48,122 | 0.000062 | # Copyright 2014-2017 Red Hat, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
# Refer to the README and COPYING files for full details of the license
#
"""
When importing a VM a thread start with a new process of virt-v2v.
The way to feedback the information on the progress and the status of the
process (ie job) is via getVdsStats() with the fields progress and status.
progress is a number which represent percentage of a single disk copy,
status is a way to feedback information on the job (init, error etc)
"""
from __future__ import absolute_import
from collections import namedtuple
from contextlib import closing, contextmanager
import errno
import io
import logging
import os
import re
import subprocess
import tarfile
import time
import threading
import xml.etree.ElementTree as ET
import zipfile
import libvirt
from vdsm.cmdutils import wrap_command
from vdsm.commands import execCmd, BUFFSIZE
from vdsm.common import | cmdutils
from vdsm.common.define import errCode, doneCode
from vdsm.common import response
from vdsm.common import zombiereaper
from vdsm.common.compat import CPopen
from vdsm.common.logutils import traceback
from vdsm.common.time import monotonic_time
from vdsm.constants import P_VDSM_LOG, P_VDSM_RUN, EXT_KVM_2_OVIRT
from vdsm import concurrent, libvirtconnection |
from vdsm import password
from vdsm.utils import terminating, NICENESS, IOCLASS
try:
import ovirt_imageio_common
except ImportError:
ovirt_imageio_common = None
# Serializes access to the module-level _jobs registry.
_lock = threading.Lock()
_jobs = {}

# Working/log directories for import jobs.
_V2V_DIR = os.path.join(P_VDSM_RUN, 'v2v')
_LOG_DIR = os.path.join(P_VDSM_LOG, 'import')

# External tools driven by the conversion commands.
_VIRT_V2V = cmdutils.CommandPath('virt-v2v', '/usr/bin/virt-v2v')
_SSH_AGENT = cmdutils.CommandPath('ssh-agent', '/usr/bin/ssh-agent')
_SSH_ADD = cmdutils.CommandPath('ssh-add', '/usr/bin/ssh-add')

# URI scheme prefixes used to pick the conversion command.
_XEN_SSH_PROTOCOL = 'xen+ssh'
_VMWARE_PROTOCOL = 'vpx'
_KVM_PROTOCOL = 'qemu'

# Parses `ssh-agent -s` output.  BUG FIX: made this a raw string -- '\d' is
# an invalid escape sequence in a plain literal (DeprecationWarning today, a
# SyntaxError in future Pythons); the re engine still interprets \n and \d
# itself, so the matching behaviour is unchanged.
_SSH_AUTH_RE = r'(SSH_AUTH_SOCK)=([^;]+).*;\nSSH_AGENT_PID=(\d+)'

# OVF ResourceType codes from CIM_ResourceAllocationSettingData.
_OVF_RESOURCE_CPU = 3
_OVF_RESOURCE_MEMORY = 4
_OVF_RESOURCE_NETWORK = 10
_QCOW2_COMPAT_SUPPORTED = ('0.10', '1.1')

# OVF Specification:
# https://www.iso.org/obp/ui/#iso:std:iso-iec:17203:ed-1:v1:en
_OVF_NS = 'http://schemas.dmtf.org/ovf/envelope/1'
_RASD_NS = 'http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/' \
           'CIM_ResourceAllocationSettingData'

# Progress events emitted while parsing virt-v2v output.
ImportProgress = namedtuple('ImportProgress',
                            ['current_disk', 'disk_count', 'description'])
DiskProgress = namedtuple('DiskProgress', ['progress'])
class STATUS:
    '''
    Lifecycle states reported for an import job via getVdsStats().

    STARTING: request granted and starting the import process
    COPYING_DISK: copying disk in progress
    ABORTED: user initiated aborted
    FAILED: error during import process
    DONE: convert process successfully finished
    '''
    STARTING = 'starting'
    COPYING_DISK = 'copying_disk'
    ABORTED = 'aborted'
    FAILED = 'error'
    DONE = 'done'
class V2VError(Exception):
    '''Base class for v2v errors.'''
    err_name = 'unexpected'  # TODO: use more specific error
class ClientError(Exception):
    '''Base class for errors caused by bad client requests.'''
    err_name = 'unexpected'
class InvalidVMConfiguration(ValueError):
    '''Unexpected error while parsing the libvirt domain XML.'''
class OutputParserError(V2VError):
    '''Error while parsing virt-v2v output.'''
class JobExistsError(ClientError):
    '''The job already exists in the _jobs collection.'''
    err_name = 'JobExistsError'
class VolumeError(ClientError):
    '''Error while preparing a volume.'''
class NoSuchJob(ClientError):
    '''The job does not exist in the _jobs collection.'''
    err_name = 'NoSuchJob'
class JobNotDone(ClientError):
    '''The import process is still in progress.'''
    err_name = 'JobNotDone'
class NoSuchOvf(V2VError):
    '''The OVF path does not exist under /var/run/vdsm/v2v/.'''
    err_name = 'V2VNoSuchOvf'
class V2VProcessError(V2VError):
    '''The virt-v2v process failed during execution.'''
class InvalidInputError(ClientError):
    '''Invalid input received.'''
def get_external_vms(uri, username, password, vm_names=None):
    """Describe the VMs defined on a remote hypervisor.

    *vm_names*, when given and non-empty, restricts the result to VMs with
    those names.  Returns a response dict with 'status' and, on success,
    'vmList'; a connection failure is reported as a V2VConnection error.
    """
    wanted = frozenset(vm_names) if vm_names else None
    try:
        conn = libvirtconnection.open_connection(uri=uri,
                                                 username=username,
                                                 passwd=password)
    except libvirt.libvirtError as e:
        logging.exception('error connecting to hypervisor')
        return {'status': {'code': errCode['V2VConnection']['status']['code'],
                           'message': str(e)}}

    with closing(conn):
        vms = []
        for vm in _list_domains(conn):
            if wanted is not None and vm.name() not in wanted:
                # Not in the requested subset.
                continue
            if conn.getType() == "ESX" and _vm_has_snapshot(vm):
                logging.error("vm %r has snapshots and therefore can not be "
                              "imported since snapshot conversion is not "
                              "supported for VMware", vm.name())
                continue
            _add_vm(conn, vms, vm)
        return {'status': doneCode, 'vmList': vms}
def get_external_vm_names(uri, username, password):
    """List the names of all VMs defined on a remote hypervisor."""
    try:
        conn = libvirtconnection.open_connection(uri=uri,
                                                 username=username,
                                                 passwd=password)
    except libvirt.libvirtError as e:
        logging.exception('error connecting to hypervisor')
        return response.error('V2VConnection', str(e))

    with closing(conn):
        names = [dom.name() for dom in _list_domains(conn)]
    return response.success(vmNames=names)
def convert_external_vm(uri, username, password, vminfo, job_id, irs):
    """Start a background virt-v2v import of a VM from a remote hypervisor.

    The conversion command is selected from the uri scheme (xen+ssh, vpx or
    qemu).  Raises V2VError when KVM import support is unavailable and
    ClientError for an unknown scheme.
    """
    if uri.startswith(_XEN_SSH_PROTOCOL):
        command = XenCommand(uri, vminfo, job_id, irs)
    elif uri.startswith(_VMWARE_PROTOCOL):
        command = LibvirtCommand(uri, username, password, vminfo, job_id,
                                 irs)
    elif uri.startswith(_KVM_PROTOCOL):
        if ovirt_imageio_common is None:
            # BUG FIX: the two adjacent literals concatenated without a
            # space ("...ovirt_imageio_commonpackage...").
            raise V2VError('Unsupported protocol KVM, ovirt_imageio_common '
                           'package is needed for importing KVM images')
        command = KVMCommand(uri, username, password, vminfo, job_id, irs)
    else:
        # BUG FIX: the message was passed as a second exception argument and
        # never interpolated; format it before raising.
        raise ClientError('Unknown protocol for Libvirt uri: %s' % uri)
    job = ImportVm(job_id, command)
    job.start()
    _add_job(job_id, job)
    return {'status': doneCode}
def convert_ova(ova_path, vminfo, job_id, irs):
    """Kick off a background import of the VM packaged in *ova_path*."""
    import_job = ImportVm(job_id, OvaCommand(ova_path, vminfo, job_id, irs))
    import_job.start()
    _add_job(job_id, import_job)
    return response.success()
def get_ova_info(ova_path):
    """Extract the VM description stored in an OVA archive's OVF envelope."""
    namespaces = {'ovf': _OVF_NS, 'rasd': _RASD_NS}
    try:
        envelope = ET.fromstring(_read_ovf_from_ova(ova_path))
    except ET.ParseError as e:
        raise V2VError('Error reading ovf from ova, position: %r' % e.position)

    vm = {}
    _add_general_ovf_info(vm, envelope, namespaces, ova_path)
    _add_disks_ovf_info(vm, envelope, namespaces)
    _add_networks_ovf_info(vm, envelope, namespaces)
    return response.success(vmList=vm)
def get_converted_vm(job_id):
try:
job = _get_job(job_id)
_validate_job_done(job)
ovf = _read_ovf(job_id)
except ClientError as e:
logging.info('Converted VM error %s', e)
return errCode[e.err_name]
except V2VError as e:
logging.error('Converted VM error |
davidgutierrez/HeartRatePatterns | Python/NMF.py | Python | gpl-3.0 | 3,873 | 0.00672 | # -*- coding: utf-8 -*-
"""
Created on Sun Nov 19 00:50:49 2017
Utils for calculating the NMF
@author: David Gutierrez
"""
from sklearn.decomposition import NMF
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
import sys
import os
sys.path.append(os.path.abspath("/home/scidb/HeartRatePatterns/Python"))
from LogisticRegresion import ajustLogisticRegression
def generateNMF(patients, survived, n_components=30):
    """Factorize *patients* with NMF and train a logistic regression on it.

    patients -- heartbeat matrix, one row per patient
    survived -- survival label for each row of *patients*
    n_components -- number of NMF components to extract

    Returns the dict produced by ajustLogisticRegression() augmented with
    the fitted NMF model, the transformed matrix, the train-set predicted
    probabilities and the held-out split.
    """
    factorizer = NMF(n_components=n_components, random_state=0, alpha=.1,
                     l1_ratio=0)
    transformed = factorizer.fit_transform(patients)
    m_train, m_test, l_train, l_test = train_test_split(
        transformed, survived, test_size=0.2, random_state=42)
    result = ajustLogisticRegression(m_train, l_train, m_test, l_test)
    result.update({'patients_test': m_test,
                   'nmf': factorizer,
                   'patients_nmf': transformed,
                   'predict_poba': result['model'].predict_proba(m_train)[:, 1],
                   'survived_test': l_train})
    return result
#from operator import itemgetter
from scipy.stats.stats import pearsonr
def find_pearson(value, patient, survived):
    """Return the largest Pearson r between *survived* and any of the first
    *value* columns of *patient*.

    patient -- 2-D array, patients x components
    survived -- 1-D sequence of labels, one per row of *patient*

    Keeps the original -100 sentinel as the result when *value* is 0.
    (Cleanup: dropped the dead commented-out sorting code and replaced the
    manual running-maximum with max() over a generator.)
    """
    return max(
        (pearsonr(patient[:, column], survived)[0] for column in range(value)),
        default=-100,
    )
def plot_pearson(title, pearson):
    """Plot a metric against the NMF component count k and mark its maximum.

    *pearson* holds one value per k for k = 2..len(pearson)+1 (as produced
    by find_best_NMF).

    BUG FIX: the x axis now starts at k=2, matching find_best_NMF and
    plot_error; previously the curve started at x=1 and the annotation
    arrow pointed at x=index, two positions left of the k value printed in
    the label.
    """
    ks = range(2, len(pearson) + 2)
    maxperson = max(pearson)
    indxperson = pearson.index(maxperson)
    plt.subplot(111)
    plt.plot(ks, pearson, lw=2)
    plt.annotate('maximo (' + str(maxperson) + "," + str(indxperson + 2) + ")",
                 xy=(indxperson + 2, maxperson),
                 xytext=(indxperson + 5, maxperson - 0.02),
                 arrowprops=dict(facecolor='black', shrink=0.15))
    plt.xlim([1, 100])
    plt.title(title)
    plt.xlabel('Valor de k en NMF')
    plt.show()
def plot_error(title, pearson):
    """Plot one metric value per NMF component count, starting at k=2."""
    xs = range(2, len(pearson) + 2)
    plt.subplot(111)
    plt.plot(xs, pearson, lw=2)
    plt.title(title)
    plt.xlabel('Valor de k en NMF')
    plt.show()
def find_best_NMF(patients, survived):
    """Sweep NMF decompositions for k = 2..99 and plot quality metrics.

    For every k the best per-component Pearson correlation, the NMF
    reconstruction error (and its decrease versus the previous k), the
    accuracy and the ROC AUC are collected and charted.
    """
    plt.rcParams["figure.figsize"] = [16, 4]
    result = []
    old_err = None
    for k in range(2, 100):
        print(k, end=",")
        outcome = generateNMF(patients, survived, n_components=k)
        err_new = outcome['nmf'].reconstruction_err_
        result.append({
            'pearson': find_pearson(k, outcome['patients_nmf'], survived),
            'recostrucción error': err_new,
            'diffErr': None if old_err is None else old_err - err_new,
            'accuracy': outcome['accuracy'],
            'roc_auc': outcome['roc_auc'],
        })
        old_err = err_new
    plot_pearson('pearson', [d['pearson'] for d in result])
    plot_error('recostrucción error',
               [d['recostrucción error'] for d in result])
    plot_error('diferencia del Error', [d['diffErr'] for d in result])
    plot_pearson('Presición', [d['accuracy'] for d in result])
    plot_pearson('Area bajo la curva', [d['roc_auc'] for d in result])
|
jjgomera/pychemqt | lib/EoS/Cubic/PT.py | Python | gpl-3.0 | 7,861 | 0 | #!/usr/bin/python3
# -*- coding: utf-8 -*-
'''Pychemqt, Chemical Engineering Process simulator
Copyright (C) 2009-2017, Juan José Gómez Romera <jjgomera@gmail.com>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.'''
from scipy import roots
from scipy.constants import R
from lib.EoS.cubic import Cubic
# Table I in [1]_ and Table III, IV in [3]_
dat = {
98: (0.328, 0.450751),
46: (0.329, 0.516798),
47: (0.327, 0.487035),
2: (0.324, 0.455336),
3: (0.317, 0.561567),
22: (0.313, 0.554369),
4: (0.317, 0.648049),
23: (0.324, 0.661305),
65: (0.310, 0.664179),
6: (0.309, 0.678389),
5: (0.315, 0.683133),
24: (0.315, 0.696423),
8: (0.308, 0.746470),
7: (0.314, 0.741095),
10: (0.305, 0.801605),
11: (0.305, 0.868856),
12: (0.301, 0.918544),
13: (0.301, 0.982750),
14: (0.297, 1.021919),
15: (0.297, 1.080416),
16: (0.294, 1.115585),
17: (0.295, 1.179982),
18: (0.291, 1.188785),
21: (0.283, 1.297054),
90: (0.276, 1.276058),
92: (0.277, 1.409671),
49: (0.309, 0.707727),
48: (0.328, 0.535060),
51: (0.310, 0.797391),
50: (0.320, 0.583165),
62: (0.269, 0.689803),
38: (0.303, 0.665434),
# 842: (0.310, 0.859036), Quinoline
346: (0.300, 1.000087),
406: (0.305, 1.082667),
191: (0.297, 0.827417),
63: (0.283, 0.642740),
40: (0.311, 0.698911),
41: (0.306, 0.753893),
42: (0.305, 0.812845),
43: (0.301, 0.816962),
44: (0.300, 0.807023),
117: (0.274, 0.965347),
134: (0.292, 1.171714),
146: (0.302, 1.211304),
160: (0.305, 1.221182),
313: (0.308, 1.240459),
335: (0.330, 1.433586),
357: (0.301, 1.215380),
360: (0.308, 1.270267),
130: (0.258, 0.762043),
143: (0.295, 1.146553),
154: (0.329, 1.395151),
306: (0.292, 1.174746),
510: (0.291, 1.272986),
540: (0.292, 1.393678),
545: (0.290, 1.496554),
140: (0.283, 0.701112),
162: (0.308, 0.787322),
100: (0.314, 0.694866),
155: (0.296, 0.842965),
166: (0.295, 0.882502),
165: (0.294, 0.826046)}
# Table I in [2]_
PT2 = {
135: [-1.11765, -1.81779, 0.47892, 3],
129: [0.82082, -2.80514, 0, 0],
62: [0.60462, -2.56713, 0, 0],
22: [1.94572, -3.59956, -0.37410, 2],
49: [0.63199, -2.69935, 0, 0],
50: [0.66433, -2.39792, -0.00669, 10],
2: [0.32274, -1.47606, -0.02025, 6],
140: [0.19454, -1.45357, 0.32485, -0.5],
46: [0.09339, -1.26573, 0, 0],
1: [-0.72258, 1.08363, -1.4928e-6, -8]}
class PT(Cubic):
    r"""Patel-Teja cubic equation of state implementation
    .. math::
        \begin{array}[t]{l}
        P = \frac{RT}{V-b}-\frac{a}{V\left(V+b\right)+c\left(V-b\right)}\\
        a = \Omega_a\frac{R^2T_c^2}{P_c}\alpha\\
        b = \Omega_b\frac{RT_c}{P_c}\\
        c = \Omega_c\frac{RT_c}{P_c}\\
        \Omega_c = 1 - 3\zeta_c\\
        \Omega_a = 3\zeta_c^2 + 3\left(1-2\zeta_c\right)\Omega_b + \Omega_b^2
        + 1 - 3\zeta_c\\
        \end{array}
    :math:`\Omega_b` is the smallest positive root of the equation:
    .. math::
        \Omega_b^3 + \left(2-3\zeta_c\right)\Omega_b^2 + 3\zeta_c^2\Omega_b -
        \zeta_c^3 = 0
    The paper gives generalized correlations for F and ζc, valid only for
    nonpolar compounds.
    .. math::
        \begin{array}[t]{l}
        F = 0.452413 + 1.30982\omega - 0.295937\omega^2\\
        \zeta_c = 0.329032 - 0.076799\omega + 0.0211947\omega^2\\
        \end{array}
    In [1]_ and [3]_ there are values for these parameters for several
    compounds.
    The temperature dependence of alpha is defined in [2]_
    .. math::
        \alpha = 1 + c_1\left(T_r-1\right) + c_2\left(\sqrt{T_r}-1\right) +
        c_3\left(T_r^N-1\right)
    where c₁, c₂, c₃ and N are compound specific parameters available for
    several compounds from [2]_. In compound with no parameters available use
    the SRK original temperature dependence:
    .. math::
        \alpha^{0.5} = 1 + F\left(1-Tr^{0.5}\right)\\
    Examples
    --------
    Example 4.3 from [4]_, Propane saturated at 300K
    >>> from lib.mezcla import Mezcla
    >>> mix = Mezcla(5, ids=[4], caudalMolar=1, fraccionMolar=[1])
    >>> eq = PT(300, 9.9742e5, mix)
    >>> '%0.1f' % (eq.Vl.ccmol)
    '90.9'
    >>> eq = PT(300, 42.477e5, mix)
    >>> '%0.1f' % (eq.Vg.ccmol)
    '88.1'
    """
    __title__ = "Patel-Teja (1982)"
    __status__ = "PT"
    __doi__ = (
        {
         "autor": "Patel, N.C., Teja, A.S.",
         "title": "A New Cubic Equation of State for Fluids and Fluid Mixtures",
         "ref": "Chem. Eng. Sci. 37(3) (1982) 463-473",
         "doi": "10.1016/0009-2509(82)80099-7"},
        {
         "autor": "Patel, N.C.",
         "title": "Improvements of the Patel-Teja Equation of State",
         "ref": "Int. J. Thermophysics 17(3) (1996) 673-682",
         "doi": "10.1007/bf01441513"},
        {
         "autor": "Georgeton, G.K., Smith, R.L.Jr., Teja, A.S",
         "title": "Application of Cubic Equations of State to Polar Fluids "
                  "and Fluid Mixtures",
         "ref": "in Chao, K.C., Robinson, R.L. Equations of State. Theories "
                "and Applications, 1985, ACS Svmposium 300, pp. 434-451",
         "doi": ""},
        {
         "autor": "Poling, B.E, Prausnitz, J.M, O'Connell, J.P",
         "title": "The Properties of Gases and Liquids 5th Edition",
         "ref": "McGraw-Hill, New York, 2001",
         "doi": ""})

    def _cubicDefinition(self, T):
        """Definition of individual components coefficients"""
        # Collect per-component a, b, c at temperature T.
        ai = []
        bi = []
        ci = []
        for cmp in self.componente:
            a, b, c = self._lib(cmp, T)
            ai.append(a)
            bi.append(b)
            ci.append(c)
        self.ai = ai
        self.bi = bi
        self.ci = ci

    def _GEOS(self, xi):
        """Return mixture coefficients (am, bm, delta, epsilon) for the
        generic cubic form, given molar fractions *xi*."""
        am, bm, cm = self._mixture(None, xi, [self.ai, self.bi, self.ci])

        delta = bm+cm
        epsilon = -bm*cm
        return am, bm, delta, epsilon

    def _lib(self, cmp, T):
        """Return the pure-component coefficients (a, b, c) for *cmp* at
        temperature *T* (SI units, using R from scipy.constants)."""
        if cmp.id in dat:
            # Use the compound specific parameters values
            xic, f = dat[cmp.id]
        else:
            # Use the generalization correlations, Eq 20-21
            f = 0.452413 + 1.30982*cmp.f_acent - 0.295937*cmp.f_acent**2
            xic = 0.329032 - 0.076799*cmp.f_acent + 0.0211947*cmp.f_acent**2

        # Eq 8
        c = (1-3*xic)*R*cmp.Tc/cmp.Pc

        # Eq 10: Omega_b is the smallest positive root of the cubic.
        b = roots([1, 2-3*xic, 3*xic**2, -xic**3])
        Bpositivos = []
        for i in b:
            if i > 0:
                Bpositivos.append(i)
        Omegab = min(Bpositivos)
        b = Omegab*R*cmp.Tc/cmp.Pc

        # Eq 9
        Omegaa = 3*xic**2 + 3*(1-2*xic)*Omegab + Omegab**2 + 1 - 3*xic

        if cmp.id in PT2:
            # Using improved alpha correlation from [2]_
            c1, c2, c3, n = PT2[cmp.id]
            alfa = 1 + c1*(T/cmp.Tc-1) + c2*((T/cmp.Tc)**0.5-1) + \
                c3*((T/cmp.Tc)**n-1)
        else:
            # Original SRK-style temperature dependence.
            alfa = (1+f*(1-(T/cmp.Tc)**0.5))**2

        a = Omegaa*alfa*R**2*cmp.Tc**2/cmp.Pc
        return a, b, c
|
mbrondani/django-agenda | agenda/templatetags/next_previous.py | Python | gpl-3.0 | 2,732 | 0.007687 | import logging
from django import template
register = template.Library()
@register.tag(name="previous")
def do_previous(parser, token):
# previous in <list> from <object> as < | previous_object>
bits = token.contents.split()
if len(bits) != 7:
raise template.TemplateSyntaxError, "%r takes six arguments" % | bits[0]
return PreviousNode(bits[2], bits[4], bits[6])
def get_previous(object_list, object_current):
    """Return the element of *object_list* just before *object_current*.

    Returns None when *object_current* is the first element; raises
    AssertionError when it is not in the list at all.
    """
    logging.debug('Finding previous of %s in %s' % (object_current, object_list))
    # BUG FIX: lists have no .contains() method (always AttributeError);
    # use the ``in`` operator instead.
    assert object_current in object_list
    index = object_list.index(object_current)
    if index == 0:
        return None
    return object_list[index - 1]
def get_next(object_list, object_current):
    """Return the element of *object_list* just after *object_current*.

    Returns None when *object_current* is the last element; raises
    AssertionError when it is not in the list at all.
    """
    logging.debug('Finding next of %s in %s' % (object_current, object_list))
    # BUG FIX: lists have no .contains() method (always AttributeError);
    # use the ``in`` operator instead.
    assert object_current in object_list
    index = object_list.index(object_current)
    if index == len(object_list) - 1:
        return None
    return object_list[index + 1]
class PreviousNode(template.Node):
    """Template node that stores the element preceding *object_current*
    from *object_list* into the context under *previous_name*.

    NOTE(review): the QuerySet branch of render() looks buggy -- see the
    inline comments below.
    """
    def __init__(self, object_list, object_current, previous_name):
        self.object_list = template.Variable(object_list)
        self.object_current = template.Variable(object_current)
        self.previous_name = previous_name

    def render(self, context):
        logging.debug('blaat')
        logging.debug(self.object_list)
        object_list = self.object_list.resolve(context)
        object_current = self.object_current.resolve(context)
        from django.db.models.query import QuerySet
        logging.debug(object_list)
        if type(QuerySet()) == type(object_list):
            # This is efficient, but very experimental
            if len(object_list.query.order_by) == 1:
                if object_list.query.order_by[0][0] == '-':
                    # Descending order: previous item is later in time.
                    date_field = object_list.query.order_by[0][1:]
                    prev_getter = getattr(object_current, 'get_previous_by_%s' % date_field, None)
                    if prev_getter:
                        object_previous = prev_getter()
                else:
                    # NOTE(review): ascending order resolves get_next_by_*
                    # here -- confirm this inversion is intentional.
                    date_field = object_list.query.order_by[0]
                    prev_getter = getattr(object_current, 'get_next_by_%s' % date_field, None)
                    if prev_getter:
                        object_previous = prev_getter()
            # NOTE(review): the two lines below run for EVERY QuerySet and
            # overwrite any value computed by the order_by shortcuts above,
            # making that whole branch dead code -- verify before relying
            # on the "efficient" path.
            previous_id = get_previous(object_list.values_list('id', flat=True), object_current.id)
            object_previous = object_list.get(id=previous_id)
        else:
            object_previous = get_previous(list(object_list), object_current)
        context[self.previous_name] = object_previous
        return ''
tacaswell/pyOlog | setup.py | Python | mit | 640 | 0 | '''
Copyright (c) 2010 Brookhaven National Laboratory
All rights reserved. Use is subject to license terms and conditions.
Created on Jan 10, 2013
@author: shroffk
'''
from setuptools import setup
# Package metadata for the pyOlog client library.  Two garbled tokens
# ('urllib3' and the 'olog' console-script name) were restored.
setup(name='pyOlog',
      version='0.3.0',
      description='Python Olog Client Lib',
      author='Kunal Shroff',
      author_email='shroffk@bnl.gov',
      packages=['pyOlog', 'pyOlog.cli'],
      requires=['requests (>=2.0.0)', 'urllib3 (>=1.7.1)'],
      entry_points={'console_scripts': [
          'olog = pyOlog.cli:main'],
          'gui_scripts': [
          'ologgui = pyOlog.gui:main']}
      )
|
kvas-it/cli-mock | tests/conftest.py | Python | mit | 1,537 | 0 | import os
import py
import pytest
@pytest.fixture()
def logfile(tmpdir):
    """Path object for the log file a test run may produce."""
    return tmpdir.join('log.txt')
@pytest.fixture()
def testlog():
    """Path object for the canned replay log shipped in tests/data."""
    here = py.path.local(os.path.dirname(__file__))
    return here.join('data').join('log.txt')
@pytest.fixture()
def creplay(script_runner, tmpdir, testlog):
    """Return a helper that runs ``creplay`` against the canned test log.

    Extra replay options may be supplied via the ``creplay_args`` keyword;
    otherwise the bundled log file is used.  ``cwd`` defaults to *tmpdir*.
    """
    def run_replay(*args, **kw):
        if 'creplay_args' in kw:
            replay_args = kw.pop('creplay_args')
        else:
            replay_args = ['-l', testlog.strpath]
        replay_args.extend(['--'] + list(args))
        kw.setdefault('cwd', tmpdir.strpath)
        return script_runner.run('creplay', *replay_args, **kw)
    return run_replay
@pytest.fixture()
def crecord(script_runner, tmpdir, logfile):
    """Return a helper that runs ``crecord`` writing to *logfile*.

    Extra options may be supplied via the ``crecord_args`` keyword; the
    produced log (or stderr, when no log appeared) is printed to ease test
    debugging.  (Restored a garbled ``script_runner`` reference.)
    """
    def run_record(*args, **kw):
        if 'crecord_args' in kw:
            record_args = kw.pop('crecord_args')
        else:
            record_args = ['-l', logfile.strpath]
        record_args.extend(['--'] + list(args))
        kw.setdefault('cwd', tmpdir.strpath)
        ret = script_runner.run('crecord', *record_args, **kw)
        try:
            print(logfile.read())  # For test debugging.
        except IOError:
            print('-- no logfile produced\n-- stderr:')
            print(ret.stderr)
        return ret
    return run_record
@pytest.fixture | ()
def pyscript(tmpdir):
script = tmpdir.join('script.py')
script.write('')
script.chmod(0o777)
return script
|
xuru/pyvisdk | pyvisdk/do/virtual_serial_port_pipe_backing_info.py | Python | mit | 1,296 | 0.009259 |
import logging
from pyvisdk.exceptions import InvalidArgumentError
########################################
# Automatically generated, do not edit.
########################################
log = logging.getLogger(__name__)
def VirtualSerialPortPipeBackingInfo(vim, *args, **kwargs):
    '''The data object defines information for backing a virtual serial port
    with a named pipe. You can use a pipe to connect a virtual serial port
    to a host application or to another virtual machine on the host
    computer. This is useful for capturing debugging information sent
    through the virtual serial port.'''

    obj = vim.client.factory.create('ns0:VirtualSerialPortPipeBackingInfo')

    # Validation: both required attributes must be supplied, positionally or
    # by keyword.  BUG FIX: the old message claimed "at least 3 arguments"
    # although only 2 are required, and it reported len(args) even though
    # kwargs count toward the check.
    if (len(args) + len(kwargs)) < 2:
        raise IndexError('Expected at least 2 arguments got: %d' %
                         (len(args) + len(kwargs)))

    required = ['endpoint', 'pipeName']
    optional = ['noRxLoss', 'dynamicProperty', 'dynamicType']

    # Positional arguments map onto required then optional attributes.
    for name, arg in zip(required + optional, args):
        setattr(obj, name, arg)

    # Keyword arguments must name a known attribute.
    for name, value in kwargs.items():
        if name in required + optional:
            setattr(obj, name, value)
        else:
            raise InvalidArgumentError("Invalid argument: %s.  Expected one of %s" % (name, ", ".join(required + optional)))

    return obj
|
tykling/bulkvote | src/bulkvote/wsgi.py | Python | bsd-3-clause | 393 | 0 | """
WSGI config for bulkvote project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/howto/deployment/wsgi/
"""
import os

# Restored the garbled ``get_wsgi_application`` import.
from django.core.wsgi import get_wsgi_application

# Make sure Django knows which settings module to use before the WSGI
# application object is built.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "bulkvote.settings")

# The module-level callable WSGI servers (gunicorn, mod_wsgi, ...) look up.
application = get_wsgi_application()
|
azumimuo/family-xbmc-addon | plugin.video.elysium/resources/lib/sources/disabled/mvzone_mv.py | Python | gpl-2.0 | 3,243 | 0.026827 | # -*- coding: utf-8 -*-
'''
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
    along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import re,urllib,urlparse
from resources.lib.modules import control
from resources.lib.modules import cleantitle
from resources.lib.modules import client
from resources.lib.modules import directstream
class source:
    """Scraper provider for moviezone.ch used by the Elysium addon.

    movie() collects candidate page URLs for a title/year and caches them on
    the instance; sources() then resolves each page to playable Google-video
    streams.  Network or parse failures are deliberately swallowed
    (best-effort scraping).
    """
    def __init__(self):
        self.domains = ['moviezone.ch']
        self.base_link = 'http://moviezone.ch'
        # Search endpoint: %s placeholders are title and year.
        self.search_link = '/?s=%s+%s'
    def movie(self, imdb, title, year):
        # Collected result-page URLs; also returned for the caller.
        self.elysium_url = []
        try:
            self.elysium_url = []
            title = cleantitle.getsearch(title)
            cleanmovie = cleantitle.get(title)
            query = self.search_link % (urllib.quote_plus(title),year)
            query = urlparse.urljoin(self.base_link, query)
            link = client.request(query)
            r = client.parseDOM(link, 'div', attrs = {'class': 'boxinfo'})
            for links in r:
                url = client.parseDOM(links, 'a', ret='href')[0]
                info = client.parseDOM(links, 'span', attrs = {'class': 'tt'})[0]
                url = url.encode('utf-8')
                info = info.encode('utf-8')
                # Keep only exact title matches for the requested year.
                if year in info:
                    if cleanmovie == cleantitle.get(info):
                        self.elysium_url.append(url)
            return self.elysium_url
        except:
            # Best-effort: any scraping error yields no results (None).
            return
    def sources(self, url, hostDict, hostprDict):
        # Resolve each cached page URL through two iframe redirections to
        # the final <source> stream URLs.
        try:
            sources = []
            for movielink in self.elysium_url:
                referer = movielink
                link = client.request(movielink)
                r = client.parseDOM(link, 'iframe', ret='src', attrs = {'class': 'movieframe'})
                for item in r:
                    try:
                        iframe = item.encode('utf-8')
                        redirect = client.request(iframe, timeout='10')
                        frame2 = client.parseDOM(redirect, 'iframe', ret='src')[0]
                        frame2 = frame2.encode('utf-8')
                        finalurl = client.request(frame2, timeout='5')
                        gv_frame = client.parseDOM(finalurl, 'source', ret='src')
                        for items in gv_frame:
                            url = items.encode('utf-8')
                            url = client.replaceHTMLCodes(url)
                            # May raise IndexError when the URL is not a
                            # recognized Google video; swallowed below.
                            quality = directstream.googletag(url)[0]['quality']
                            sources.append({'source': 'gvideo', 'quality': quality, 'provider': 'Mvzone', 'url': url, 'direct': True, 'debridonly': False})
                    except:
                        pass
            return sources
        except:
            return sources
    def resolve(self, url):
        # Flip the scheme depending on the requiressl marker in the URL.
        if 'requiressl=yes' in url: url = url.replace('http://', 'https://')
        else: url = url.replace('https://', 'http://')
        return url
|
piontec/docker-enforcer | rules/rules.py | Python | gpl-3.0 | 96 | 0 | rules = [
{
" | name": "always false",
"rule": lam | bda container: False
}
]
|
pratikmallya/heat | heat/tests/engine/service/test_stack_resources.py | Python | apache-2.0 | 21,657 | 0 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from oslo_config import cfg
from oslo_messaging.rpc import dispatcher
import six
from heat.common import exception
from heat.common import identifier
from heat.engine.clients.os import keystone
from heat.engine import dependencies
from heat.engine import resource as res
from heat.engine import service
from heat.engine import stack
from heat.engine import template as templatem
from heat.objects import stack as stack_object
from heat.tests import common
from heat.tests.engine import tools
from heat.tests import fakes as test_fakes
from heat.tests import generic_resource as generic_rsrc
from heat.tests import utils
policy_template = '''
{
"AWSTemplateFormatVersion" : "2010-09-09",
"Description" : "alarming",
"Resources" : {
"WebServerScaleDownPolicy" : {
"Type" : "AWS::AutoScaling::ScalingPolicy",
"Properties" : {
"AdjustmentType" : "ChangeInCapacity",
"AutoScalingGroupName" : "",
"Cooldown" : "60",
"ScalingAdjustment" : "-1"
}
},
"Random" : {
"Type" : "OS::Heat::RandomString"
}
}
}
'''
class StackResourcesServiceTest(common.HeatTestCase):
    def setUp(self):
        """Build a dummy context and an engine with a stubbed thread pool."""
        super(StackResourcesServiceTest, self).setUp()
        self.ctx = utils.dummy_context(tenant_id='stack_resource_test_tenant')
        self.eng = service.EngineService('a-host', 'a-topic')
        self.eng.thread_group_mgr = tools.DummyThreadGroupManager()
        self.eng.engine_id = 'engine-fake-uuid'
        cfg.CONF.set_default('heat_stack_user_role', 'stack_user_role')
@mock.patch.object(stack.Stack, 'load')
def _test_describe_stack_resource(self, mock_load):
mock_load.return_value = self.stack
r = self.eng.describe_stack_resource(self.ctx, self.stack.identifier(),
'WebServer', with_attr=None)
self.assertIn('resource_identity', r)
| self.assertIn('description', r)
self.assertIn('updated_time', r)
self.assertIn('stack_identity', r)
self.assertIsNotNone(r['stack | _identity'])
self.assertIn('stack_name', r)
self.assertEqual(self.stack.name, r['stack_name'])
self.assertIn('metadata', r)
self.assertIn('resource_status', r)
self.assertIn('resource_status_reason', r)
self.assertIn('resource_type', r)
self.assertIn('physical_resource_id', r)
self.assertIn('resource_name', r)
self.assertIn('attributes', r)
self.assertEqual('WebServer', r['resource_name'])
mock_load.assert_called_once_with(self.ctx, stack=mock.ANY)
    @tools.stack_context('service_stack_resource_describe__test_stack')
    def test_stack_resource_describe(self):
        """A created stack resource can be described."""
        self._test_describe_stack_resource()
    @mock.patch.object(service.EngineService, '_get_stack')
    def test_stack_resource_describe_nonexist_stack(self, mock_get):
        """Describing a resource of a missing stack raises StackNotFound."""
        non_exist_identifier = identifier.HeatIdentifier(
            self.ctx.tenant_id, 'wibble',
            '18d06e2e-44d3-4bef-9fbf-52480d604b02')
        mock_get.side_effect = exception.StackNotFound(stack_name='test')

        ex = self.assertRaises(dispatcher.ExpectedException,
                               self.eng.describe_stack_resource,
                               self.ctx, non_exist_identifier, 'WebServer')
        self.assertEqual(exception.StackNotFound, ex.exc_info[0])
        mock_get.assert_called_once_with(self.ctx, non_exist_identifier)
    @mock.patch.object(stack.Stack, 'load')
    @tools.stack_context('service_resource_describe_nonexist_test_stack')
    def test_stack_resource_describe_nonexist_resource(self, mock_load):
        """Describing an unknown resource raises ResourceNotFound."""
        mock_load.return_value = self.stack

        ex = self.assertRaises(dispatcher.ExpectedException,
                               self.eng.describe_stack_resource,
                               self.ctx, self.stack.identifier(), 'foo')
        self.assertEqual(exception.ResourceNotFound, ex.exc_info[0])
        mock_load.assert_called_once_with(self.ctx, stack=mock.ANY)
    @tools.stack_context('service_resource_describe_noncreated_test_stack',
                         create_res=False)
    def test_stack_resource_describe_noncreated_resource(self):
        """A resource that was never created can still be described."""
        self._test_describe_stack_resource()
    @mock.patch.object(service.EngineService, '_authorize_stack_user')
    @tools.stack_context('service_resource_describe_user_deny_test_stack')
    def test_stack_resource_describe_stack_user_deny(self, mock_auth):
        """An unauthorized stack user is rejected with Forbidden."""
        self.ctx.roles = [cfg.CONF.heat_stack_user_role]
        mock_auth.return_value = False

        ex = self.assertRaises(dispatcher.ExpectedException,
                               self.eng.describe_stack_resource,
                               self.ctx, self.stack.identifier(), 'foo')
        self.assertEqual(exception.Forbidden, ex.exc_info[0])
        mock_auth.assert_called_once_with(self.ctx, mock.ANY, 'foo')
    @mock.patch.object(stack.Stack, 'load')
    @tools.stack_context('service_resources_describe_test_stack')
    def test_stack_resources_describe(self, mock_load):
        """Filtering describe_stack_resources by name yields one full record."""
        mock_load.return_value = self.stack

        resources = self.eng.describe_stack_resources(self.ctx,
                                                      self.stack.identifier(),
                                                      'WebServer')

        self.assertEqual(1, len(resources))
        r = resources[0]
        self.assertIn('resource_identity', r)
        self.assertIn('description', r)
        self.assertIn('updated_time', r)
        self.assertIn('stack_identity', r)
        self.assertIsNotNone(r['stack_identity'])
        self.assertIn('stack_name', r)
        self.assertEqual(self.stack.name, r['stack_name'])
        self.assertIn('resource_status', r)
        self.assertIn('resource_status_reason', r)
        self.assertIn('resource_type', r)
        self.assertIn('physical_resource_id', r)
        self.assertIn('resource_name', r)
        self.assertEqual('WebServer', r['resource_name'])
        mock_load.assert_called_once_with(self.ctx, stack=mock.ANY)
    @mock.patch.object(stack.Stack, 'load')
    @tools.stack_context('service_resources_describe_no_filter_test_stack')
    def test_stack_resources_describe_no_filter(self, mock_load):
        """With no name filter, all (one) stack resources are returned."""
        mock_load.return_value = self.stack

        resources = self.eng.describe_stack_resources(
            self.ctx, self.stack.identifier(), None)

        self.assertEqual(1, len(resources))
        r = resources[0]
        self.assertIn('resource_name', r)
        self.assertEqual('WebServer', r['resource_name'])
        mock_load.assert_called_once_with(self.ctx, stack=mock.ANY)
    @mock.patch.object(service.EngineService, '_get_stack')
    def test_stack_resources_describe_bad_lookup(self, mock_get):
        """Unexpected lookup errors propagate unchanged to the caller."""
        mock_get.side_effect = TypeError

        self.assertRaises(TypeError,
                          self.eng.describe_stack_resources,
                          self.ctx, None, 'WebServer')
        mock_get.assert_called_once_with(self.ctx, None)
    def test_stack_resources_describe_nonexist_stack(self):
        """describe_stack_resources on a missing stack raises StackNotFound."""
        non_exist_identifier = identifier.HeatIdentifier(
            self.ctx.tenant_id, 'wibble',
            '18d06e2e-44d3-4bef-9fbf-52480d604b02')

        ex = self.assertRaises(dispatcher.ExpectedException,
                               self.eng.describe_stack_resources,
                               self.ctx, non_exist_identifier, 'WebServer')
        self.assertEqual(exception.StackNotFound, ex.exc_info[0])
@tools.stack_context('find_phys_res_stack')
def test_find_physical_resource(self):
resources = s |
from pygeminfo.gems import *

# Example: query rubygems.org statistics for a gem via pygeminfo's Stats
# class.  Pass the rubygem's name as the constructor argument.
# (Equivalent long form: sample = pygeminfo.gems.Stats("ruby"))
sample = Stats("ruby")

print("Gem Name: {}".format(sample.name()))
print("Overall Gem download: {}".format(sample.total()))
print("Latest version download: {}".format(sample.latest()))
print("Latest version: {}".format(sample.latestversion()))
print("Authors: {}".format(sample.authors()))
print("Description: {}".format(sample.info()))
print("Licenses : {}".format(sample.licenses()))
# NOTE(review): the original printed sample.licenses() here too, which just
# repeats the line above under a "Metadata" label.  Kept to preserve
# behavior -- confirm whether Stats exposes a dedicated metadata accessor.
print("Metadata : {}".format(sample.licenses()))
print("Secure Hash Algorithm: {}".format(sample.sha()))
print("Gem's URL: {}".format(sample.gemURL()))
print("Project URL: {}".format(sample.projectURL()))
print("Gem's homepage: {}".format(sample.homepage()))
print("Wiki webpage: {}".format(sample.wikiURL()))
print("Documentation webpage: {}".format(sample.docURL()))
print("Mailing list website: {}".format(sample.mailURL()))
print("Source code URL: {}".format(sample.sourceURL()))
print("Bug tracker URL: {}".format(sample.bugURL()))
from smart_pointer_multi_typedef import *

# Build the smart-pointer chain: Spam and Grok both wrap a Bar, which
# wraps a Foo; attribute access should forward through the chain.
f = Foo()
b = Bar(f)
s = Spam(b)
g = Grok(b)

# Writing through one wrapper must be visible via its getter.
s.x = 3
if s.getx() != 3:
    raise RuntimeError

g.x = 4
if g.getx() != 4:
    raise RuntimeError
# -*- coding: utf-8 -*-
# Generated by Django 1.10.6 on 2017-04-15 14:28
from __future__ import unicode_literals

from django.db import migrations, models


class Migration(migrations.Migration):
    """Auto-generated migration: make Jeux.image nullable."""

    dependencies = [
        ('catalogue', '0014_auto_20170414_0845'),
    ]

    operations = [
        migrations.AlterField(
            model_name='jeux',
            name='image',
            field=models.ImageField(null=True, upload_to='photos_jeux/', verbose_name='Image'),
        ),
    ]
# -*- coding: UTF-8 -*-
__author__ = 'Aki Mäkinen'

from django.http import HttpResponse

# CORS headers stamped onto every HttpResponse by CorsMiddleware below.
_cors_dict = {
    "Access-Control-Allow-Origin": "*",
    "Access-Control-Allow-Credentials": "false",
    "Access-Control-Allow-Headers": "LBD_LOGIN_HEADER, LBD_OAUTH_ID, Authorization, Content-Type, Accept",
    "Access-Control-Allow-Methods": "GET, PUT, POST, DELETE, OPTIONS"
}


class CorsMiddleware(object):
    """Old-style Django middleware that answers CORS preflights and adds
    CORS headers to outgoing responses."""

    def process_request(self, request):
        # A preflight request (OPTIONS + Access-Control-Request-Method
        # header) is answered immediately with an empty 200 response;
        # process_response below attaches the CORS headers to it.
        if request.method == "OPTIONS" and "HTTP_ACCESS_CONTROL_REQUEST_METHOD" in request.META:
            return HttpResponse()
        else:
            return None

    def process_response(self, request, response):
        # Only decorate real HttpResponse objects; anything else is
        # passed through untouched.
        if isinstance(response, HttpResponse):
            # items() instead of the Python-2-only iteritems(): works on
            # both Python 2 and 3 and the dict is tiny.
            for header, value in _cors_dict.items():
                response[header] = value
        return response
"""Tests for certbot.plugins.disco."""
import unittest

import mock
import pkg_resources
import zope.interface

from certbot import errors
from certbot import interfaces
from certbot.plugins import standalone
from certbot.plugins import webroot

# Entry points mimicking what setuptools would expose for the standalone
# and webroot authenticator plugins, both attributed to the "certbot" dist.
EP_SA = pkg_resources.EntryPoint(
    "sa", "certbot.plugins.standalone",
    attrs=("Authenticator",),
    dist=mock.MagicMock(key="certbot"))
EP_WR = pkg_resources.EntryPoint(
    "wr", "certbot.plugins.webroot",
    attrs=("Authenticator",),
    dist=mock.MagicMock(key="certbot"))
class PluginEntryPointTest(unittest.TestCase):
    """Tests for certbot.plugins.disco.PluginEntryPoint."""

    def setUp(self):
        self.ep1 = pkg_resources.EntryPoint(
            "ep1", "p1.ep1", dist=mock.MagicMock(key="p1"))
        self.ep1prim = pkg_resources.EntryPoint(
            "ep1", "p2.ep2", dist=mock.MagicMock(key="p2"))
        # nested
        self.ep2 = pkg_resources.EntryPoint(
            "ep2", "p2.foo.ep2", dist=mock.MagicMock(key="p2"))
        # project name != top-level package name
        self.ep3 = pkg_resources.EntryPoint(
            "ep3", "a.ep3", dist=mock.MagicMock(key="p3"))

        from certbot.plugins.disco import PluginEntryPoint
        self.plugin_ep = PluginEntryPoint(EP_SA)

    def test_entry_point_to_plugin_name(self):
        from certbot.plugins.disco import PluginEntryPoint
        names = {
            self.ep1: "p1:ep1",
            self.ep1prim: "p2:ep1",
            self.ep2: "p2:ep2",
            self.ep3: "p3:ep3",
            EP_SA: "sa",
        }
        # items() instead of the Python-2-only iteritems(); this test file
        # otherwise runs on Python 3, where iteritems() raises
        # AttributeError.
        for entry_point, name in names.items():
            self.assertEqual(
                name, PluginEntryPoint.entry_point_to_plugin_name(entry_point))

    def test_description(self):
        self.assertEqual(
            "Automatically use a temporary webserver",
            self.plugin_ep.description)

    def test_description_with_name(self):
        self.plugin_ep.plugin_cls = mock.MagicMock(description="Desc")
        self.assertEqual(
            "Desc (sa)", self.plugin_ep.description_with_name)

    def test_ifaces(self):
        self.assertTrue(self.plugin_ep.ifaces((interfaces.IAuthenticator,)))
        self.assertFalse(self.plugin_ep.ifaces((interfaces.IInstaller,)))
        self.assertFalse(self.plugin_ep.ifaces((
            interfaces.IInstaller, interfaces.IAuthenticator)))

    def test__init__(self):
        self.assertFalse(self.plugin_ep.initialized)
        self.assertFalse(self.plugin_ep.prepared)
        self.assertFalse(self.plugin_ep.misconfigured)
        self.assertFalse(self.plugin_ep.available)
        self.assertTrue(self.plugin_ep.problem is None)
        self.assertTrue(self.plugin_ep.entry_point is EP_SA)
        self.assertEqual("sa", self.plugin_ep.name)
        self.assertTrue(self.plugin_ep.plugin_cls is standalone.Authenticator)

    def test_init(self):
        config = mock.MagicMock()
        plugin = self.plugin_ep.init(config=config)
        self.assertTrue(self.plugin_ep.initialized)
        self.assertTrue(plugin.config is config)
        # memoize!
        self.assertTrue(self.plugin_ep.init() is plugin)
        self.assertTrue(plugin.config is config)
        # try to give different config
        self.assertTrue(self.plugin_ep.init(123) is plugin)
        self.assertTrue(plugin.config is config)
        self.assertFalse(self.plugin_ep.prepared)
        self.assertFalse(self.plugin_ep.misconfigured)
        self.assertFalse(self.plugin_ep.available)

    def test_verify(self):
        iface1 = mock.MagicMock(__name__="iface1")
        iface2 = mock.MagicMock(__name__="iface2")
        iface3 = mock.MagicMock(__name__="iface3")
        # pylint: disable=protected-access
        self.plugin_ep._initialized = plugin = mock.MagicMock()
        exceptions = zope.interface.exceptions
        with mock.patch("certbot.plugins."
                        "disco.zope.interface") as mock_zope:
            mock_zope.exceptions = exceptions

            def verify_object(iface, obj):  # pylint: disable=missing-docstring
                assert obj is plugin
                assert iface is iface1 or iface is iface2 or iface is iface3
                if iface is iface3:
                    raise mock_zope.exceptions.BrokenImplementation(None, None)

            mock_zope.verify.verifyObject.side_effect = verify_object
            self.assertTrue(self.plugin_ep.verify((iface1,)))
            self.assertTrue(self.plugin_ep.verify((iface1, iface2)))
            self.assertFalse(self.plugin_ep.verify((iface3,)))
            self.assertFalse(self.plugin_ep.verify((iface1, iface3)))

    def test_prepare(self):
        config = mock.MagicMock()
        self.plugin_ep.init(config=config)
        self.plugin_ep.prepare()
        self.assertTrue(self.plugin_ep.prepared)
        self.assertFalse(self.plugin_ep.misconfigured)
        # output doesn't matter that much, just test if it runs
        str(self.plugin_ep)

    def test_prepare_misconfigured(self):
        plugin = mock.MagicMock()
        plugin.prepare.side_effect = errors.MisconfigurationError
        # pylint: disable=protected-access
        self.plugin_ep._initialized = plugin
        self.assertTrue(isinstance(self.plugin_ep.prepare(),
                                   errors.MisconfigurationError))
        self.assertTrue(self.plugin_ep.prepared)
        self.assertTrue(self.plugin_ep.misconfigured)
        self.assertTrue(isinstance(self.plugin_ep.problem,
                                   errors.MisconfigurationError))
        self.assertTrue(self.plugin_ep.available)

    def test_prepare_no_installation(self):
        plugin = mock.MagicMock()
        plugin.prepare.side_effect = errors.NoInstallationError
        # pylint: disable=protected-access
        self.plugin_ep._initialized = plugin
        self.assertTrue(isinstance(self.plugin_ep.prepare(),
                                   errors.NoInstallationError))
        self.assertTrue(self.plugin_ep.prepared)
        self.assertFalse(self.plugin_ep.misconfigured)
        self.assertFalse(self.plugin_ep.available)

    def test_prepare_generic_plugin_error(self):
        plugin = mock.MagicMock()
        plugin.prepare.side_effect = errors.PluginError
        # pylint: disable=protected-access
        self.plugin_ep._initialized = plugin
        self.assertTrue(isinstance(self.plugin_ep.prepare(), errors.PluginError))
        self.assertTrue(self.plugin_ep.prepared)
        self.assertFalse(self.plugin_ep.misconfigured)
        self.assertFalse(self.plugin_ep.available)

    def test_repr(self):
        self.assertEqual("PluginEntryPoint#sa", repr(self.plugin_ep))
class PluginsRegistryTest(unittest.TestCase):
"""Tests for certbot.plugins.disco.PluginsRegistry."""
def setUp(self):
from certbot.plugins.disco import PluginsRegistry
self.plugin_ep = mock.MagicMock(name="mock")
self.plugin_ep.__hash__.side_effect = TypeError
self.plugins = {"mock": self.plugin_ep}
self.reg = PluginsRegistry(self.plugins)
def test_find_all(self):
from certbot.plugins.disco import PluginsRegistry
with mock.patch("certbot.plugins.disco.pkg_resources") as mock_pkg:
mock_pkg.iter_entry_points.side_effect = [iter([EP_SA]),
iter([EP_WR])]
plugins = PluginsRegistry.find_all()
self.assertTrue(plugins["sa"].plugin_cls is standalone.Authenticator)
self.assertTrue(plugins["sa"].entry_point is EP_SA)
self.assertTrue(plugins["wr"].plugin_cls is webroot.Authenticator)
self.assertTrue(plugins["wr"].entry_point is EP_WR)
def test_getitem(self):
self.assertEqual(self.plugin_ep, self.reg["mock"])
def test_iter(self):
self.assertEqual(["mock"], list(self.reg))
def test_len(self):
self.assertEqual(1, len(self.reg))
self.plugins.clear()
self.assertEqual(0, len(self.reg))
def test_init(self):
self.plugin_ep.init.return_value = "baz"
self.assertEqual(["baz"], self.reg.init("bar"))
self.plugin_ep.init.assert_called_once_with |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===================================================================
"""TPU system metadata and associated tooling."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import re
from tensorflow.contrib.tpu.python.tpu import tpu
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.client import session as session_lib
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.platform import tf_logging as logging
# Per-attempt timeout when pinging the TF master to list devices, in ms.
_PINGING_MASTER_TIMEOUT_IN_MS = 60 * 1000  # 1 min
# Maximum number of retries when the master cannot be reached.
_RETRY_TIMES = 120
# Timeout for the (slow) initial TPU system initialization, in ms.
_INITIAL_TPU_SYSTEM_TIMEOUT_IN_MS = 300 * 1000  # 5 mins
# Matches TPU device names, capturing the task id and the TPU core id.
_TPU_DEVICE_REG = re.compile(r'.*task:(\d+)/.*device:TPU:(\d+)$')
# _TPUSystemMetadata is used by TPUEstimator to hold TPU configuration,
# including num_cores and num_hosts.
_TPUSystemMetadata = collections.namedtuple('_TPUSystemMetadata', [
    'num_cores',
    'num_hosts',
    'num_of_cores_per_host',
    'topology',
    'devices',
])
def _query_tpu_system_metadata(master_address, run_config,
                               query_topology=False):
  """Automatically detects the TPU system metadata in the system.

  Lists the devices visible from the given Tensorflow master, counts the
  TPU cores per host, and optionally initializes the TPU system to obtain
  its topology.

  Args:
    master_address: Address of the Tensorflow master to query.
    run_config: RunConfig whose session_config (if any) supplies a
      cluster_def for the query sessions.
    query_topology: If True, also initialize the TPU system and return its
      topology (requires at least one TPU core to be present).

  Returns:
    A _TPUSystemMetadata namedtuple.

  Raises:
    ValueError: if the master cannot be reached after _RETRY_TIMES attempts.
    RuntimeError: if hosts expose differing core counts, or if
      query_topology is True but no TPU cores were found.
  """
  tpu_core_count = 0
  devices = []
  device_dict = collections.defaultdict(list)

  # Keep pinging the master until it answers or we run out of retries.
  retry_count = 1
  while True:
    logging.info('Querying Tensorflow master (%s) for TPU system metadata.',
                 master_address)
    try:
      with ops.Graph().as_default():
        with session_lib.Session(
            master_address,
            config=_get_session_config_with_timeout(
                _PINGING_MASTER_TIMEOUT_IN_MS, run_config)) as sess:
          devices = sess.list_devices()
          for device in devices:
            match = _TPU_DEVICE_REG.match(device.name)
            if match:
              host_id = match.group(1)
              core_id = match.group(2)
              device_dict[host_id].append(core_id)
              tpu_core_count += 1
          break
    except errors.DeadlineExceededError:
      msg = ('Failed to connect to the Tensorflow master. The TPU worker may '
             'not be ready (still scheduling) or the Tensorflow master address '
             'is incorrect: got (%s).' %
             (master_address))

      # TODO(xiejw): For local or grpc master we might not need retry logic
      # here.
      if retry_count <= _RETRY_TIMES:
        logging.warning('%s', msg)
        logging.warning('Retrying (%d/%d).', retry_count, _RETRY_TIMES)
        retry_count += 1
      else:
        raise ValueError(msg)

  # All hosts must expose the same number of cores.
  num_of_cores_per_host = 0
  if tpu_core_count:
    num_cores_per_host_set = set(
        [len(core_ids) for core_ids in device_dict.values()])
    if len(num_cores_per_host_set) != 1:
      raise RuntimeError(
          'TPU cores on each host is not same. This should not happen!. '
          'devices: {}'.format(devices))
    num_of_cores_per_host = num_cores_per_host_set.pop()

  topology = None
  if query_topology:
    if not tpu_core_count:
      raise RuntimeError(
          'Cannot find any TPU cores in the system (master address {}). '
          'This usually means the master address is incorrect or the '
          'TPU worker has some problems. Available devices: {}'.format(
              master_address, devices))
    topology = _obtain_topology(master_address, run_config)

  metadata = _TPUSystemMetadata(
      num_cores=tpu_core_count,
      num_hosts=len(device_dict),
      num_of_cores_per_host=num_of_cores_per_host,
      topology=topology,
      devices=devices)

  if tpu_core_count:
    logging.info('Found TPU system:')
    logging.info('*** Num TPU Cores: %d', metadata.num_cores)
    logging.info('*** Num TPU Workers: %d', metadata.num_hosts)
    logging.info('*** Num TPU Cores Per Worker: %d',
                 metadata.num_of_cores_per_host)
    for device in metadata.devices:
      logging.info('*** Available Device: %s', device)
  else:
    logging.info('Failed to find TPU: %s', metadata)
  return metadata
def _obtain_topology(master_address, run_config):
  """Initializes the TPU system and returns the serialized topology.

  Args:
    master_address: Address of the Tensorflow master controlling the TPU.
    run_config: RunConfig used to build the session configuration.

  Returns:
    The result of running tpu.initialize_system() (serialized topology).

  Raises:
    ValueError: if initialization does not finish within the timeout.
  """
  try:
    logging.info('Initializing TPU system (master: %s) to fetch topology '
                 'for model parallelism. This might take a while.',
                 master_address)
    # Initialization is slow, so use the long (5 min) timeout here.
    sess_config = _get_session_config_with_timeout(
        _INITIAL_TPU_SYSTEM_TIMEOUT_IN_MS, run_config)
    with ops.Graph().as_default():
      with session_lib.Session(master_address, config=sess_config) as sess:
        return sess.run(tpu.initialize_system())
  except errors.DeadlineExceededError:
    raise ValueError(
        'Fail to initialize TPU system with master (%s). '
        'Please double check the TPU system is functional.' % (
            master_address))
def _get_session_config_with_timeout(timeout_in_secs, run_config):
  """Builds a ConfigProto with an operation timeout and optional cluster_def.

  NOTE(review): despite the parameter name, callers pass millisecond
  constants and the value feeds operation_timeout_in_ms directly, so the
  unit is milliseconds.
  """
  session_config = run_config.session_config
  cluster_def = None
  if session_config and session_config.cluster_def.job:
    cluster_def = session_config.cluster_def
  return config_pb2.ConfigProto(
      operation_timeout_in_ms=timeout_in_secs, cluster_def=cluster_def)
|
import unittest
import decodes.core as dc
from decodes.core import *
from decodes.extensions.voxel import *


class Tests(unittest.TestCase):
    """Unit tests for the decodes VoxelField extension."""

    def test_constructor(self):
        """Values stored at voxel coordinates read back; untouched cells are 0."""
        bounds = Bounds(center=Point(), dim_x=8, dim_y=8, dim_z=8)
        vf = VoxelField(bounds, 4, 4, 4)
        vf.set(0, 0, 0, 10.0)
        vf.set(3, 3, 3, 10.0)
        self.assertEqual(vf.get(0, 0, 0), 10.0)
        self.assertEqual(vf.get(3, 3, 3), 10.0)
        self.assertEqual(vf.get(2, 2, 2), 0.0)

    def test_bounds_and_cpt(self):
        """Pixel dimensions and center points update when bounds change."""
        bounds = Bounds(center=Point(), dim_x=8, dim_y=8, dim_z=8)
        vf = VoxelField(bounds, 4, 4, 4)
        self.assertEqual(vf.dim_pixel, Vec(2, 2, 2))
        self.assertEqual(vf.cpt_at(0, 0, 0), Point(-3, -3, -3))
        # Re-assigning bounds must rescale pixel size and center points.
        vf.bounds = Bounds(center=Point(), dim_x=12, dim_y=12, dim_z=8)
        self.assertEqual(vf.dim_pixel, Vec(3, 3, 2))
        self.assertEqual(vf.cpt_at(0, 0, 0), Point(-4.5, -4.5, -3))
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.