repo_name stringlengths 5 100 | path stringlengths 4 231 | language stringclasses 1 value | license stringclasses 15 values | size int64 6 947k | score float64 0 0.34 | prefix stringlengths 0 8.16k | middle stringlengths 3 512 | suffix stringlengths 0 8.17k |
|---|---|---|---|---|---|---|---|---|
khchine5/atelier | atelier/sphinxconf/dirtables.py | Python | bsd-2-clause | 6,048 | 0.001819 | # -*- coding: utf-8 -*-
# Copyright 2016 by Luc Saffre.
# License: BSD, see LICENSE for more details.
"""Defines the :rst:dir:`directory`, :rst:dir:`tickets_table` and
:rst:dir:`entry_intro` directives.
.. rst:directive:: directory
Inserts a table containing three columns 'title', 'author' and | 'date',
and one row for each `.rst` file found in this directory (except for
the calling file).
.. rst:directive:: tickets_table
This is used e.g. to build
http://lino-framework.org/tickets
.. rst:directive:: entry_intro
This doesn't yet work unfortunately.
"""
from __future__ import print_function
from __future__ import unicode_literals
from builtins import str
from builtins import filter
| from builtins import object
import logging
logger = logging.getLogger(__name__)
from os.path import abspath, dirname, join
from docutils.parsers.rst import directives
from sphinx.util import docname_join
from sphinx.util.matching import patfilter
from atelier import rstgen
from .insert_input import InsertInputDirective
package_dir = abspath(dirname(__file__))
from jinja2 import FileSystemLoader, TemplateNotFound
from jinja2 import Environment
# from jinja2.sandbox import SandboxedEnvironment as Environment
template_dirs = [join(package_dir, 'templates')]
template_loader = FileSystemLoader(template_dirs)
template_env = Environment(loader=template_loader)
def render_entry(tplname, context):
    """Render the Jinja2 template *tplname* with *context*.

    Falls back to the generic ``dirtables/entry.rst`` template when the
    requested template does not exist in the loader's search path.
    """
    try:
        tpl = template_env.get_template(tplname)
    except TemplateNotFound:
        tpl = template_env.get_template('dirtables/entry.rst')
    return tpl.render(context)
def rel_docname(a, b):
    """Return docname *b* relative to the top directory of docname *a*.

    Absolute docnames (leading '/') and docnames outside *a*'s directory
    are returned unchanged.

    >>> print(rel_docname('tickets/index','tickets/2'))
    2
    >>> print(rel_docname('tickets/index','/todo/index'))
    /todo/index
    """
    if b.startswith('/'):
        return b
    prefix = a.rsplit('/')[0] + '/'
    return b[len(prefix):] if b.startswith(prefix) else b
class Entry(object):
    """Lightweight record for one document known to the Sphinx environment."""

    def __init__(self, docname, title, meta):
        self.docname = docname
        self.title = title
        self.meta = meta

    @classmethod
    def create(cls, env, docname):
        """Build an Entry for *docname*, relative to the current document."""
        return cls(
            rel_docname(env.docname, docname),
            env.titles.get(docname),
            env.metadata.get(docname),
        )
class DirectoryTable(InsertInputDirective):
    """Directive rendering a table with one row per matching document.

    The directive's content lines are glob patterns (relative to the
    current document) selecting the documents to list.  Options:

    ``:filter:``   Python expression evaluated per entry ``e``; entries
                   for which it is falsy are dropped.
    ``:orderby:``  attribute name used to sort the entries.
    """
    entry_class = Entry
    has_content = True
    required_arguments = 0
    optional_arguments = 0
    final_argument_whitespace = False
    option_spec = {
        'filter': directives.unchanged,
        'orderby': directives.unchanged,
    }

    def get_rst(self):
        """Collect matching documents and return the table as reST source."""
        env = self.state.document.settings.env
        entries = []
        all_docnames = env.found_docs.copy()
        found = set([env.docname])  # don't include myself
        for entry in self.content:
            if not entry:
                continue
            patname = docname_join(env.docname, entry)
            docnames = sorted(patfilter(all_docnames, patname))
            for docname in docnames:
                if docname not in found:
                    found.add(docname)
                    entries.append(self.entry_class.create(env, docname))
        expr = self.options.get('filter')
        if expr:
            # NOTE(review): ``eval`` on a directive option is only acceptable
            # because options come from the project's own source files, never
            # from untrusted input.
            def func(e):
                return eval(expr, dict(e=e))
            entries = list(filter(func, entries))
        orderby = self.options.get('orderby')
        if orderby:
            def func(a):
                return getattr(a, orderby, '')
            entries = sorted(entries, key=func)
        headers = self.get_headers()
        rows = []
        for e in entries:
            rows.append(self.format_entry(e))
        return rstgen.table(headers, rows)

    def get_headers(self):
        """Column headers; subclasses override together with format_entry()."""
        return ['title', 'author', 'date']

    def format_entry(self, e):
        """Return the list of table cells for one entry."""
        cells = []
        cells.append(":doc:`%s`" % e.docname)
        cells.append(str(e.meta.get('author', '')))
        cells.append(str(e.meta.get('date', '')))
        return cells
class TicketsTable(DirectoryTable):
    """Variant of :class:`DirectoryTable` used for ticket listings."""

    def get_headers(self):
        # Pad the title header so that column is rendered generously wide.
        return ['title' + 50 * ' ', 'state', 'module', 'since', 'for']

    def format_entry(self, e):
        """Return the cells for one ticket row, taken from its metadata."""
        module = e.meta.get('module', '')
        return [
            ":doc:`%s`" % e.docname,
            str(e.meta.get('state', '')),
            ":mod:`%s`" % module if module else "(N/A)",
            str(e.meta.get('since', '')),
            str(e.meta.get('for', '')),
        ]
class EntryIntro(InsertInputDirective):
    """Insert the rendered ``dirtables/entry.rst`` template for this page."""

    def get_rst(self):
        env = self.state.document.settings.env
        meta = env.process_metadata(env.docname, self.state.document)
        context = dict(
            this=self,
            env=env,
            dir=dir,
            document=self.state.document,
            meta=meta,
        )
        return render_entry('dirtables/entry.rst', context)
# from docutils import nodes
# from sphinx.roles import XRefRole
# RREFS
# class ReferingRefRole(XRefRole):
# def result_nodes(self, document, env, node, is_ref):
# # RREFS[]
# print("20140115 result_nodes", document, env, node, is_ref)
# return [node], []
def setup(app):
    """Sphinx extension entry point: register this module's directives."""
    for name, directive in (('directory', DirectoryTable),
                            ('tickets_table', TicketsTable),
                            ('entry_intro', EntryIntro)):
        app.add_directive(name, directive)
|
mathspace/django-two-factor-auth | two_factor/management/commands/two_factor_disable.py | Python | mit | 998 | 0 | from django.core.management.base import BaseCommand, CommandError
try:
from django.contrib.auth import get_user_model
except ImportError:
from django.contrib.auth.models import User
else:
User | = get_user_model()
from django_otp import devices_for_user
class Command(BaseCommand):
    """
    Command for disabling two-factor authentication for certain users.
    The command accepts any number of usernames, and will remove all OTP
    devices for those users.
    Example usage::
    manage.py disable bouke steve
    """
    args = '<username username ...>'
    help = 'Disables two-factor authentication for the given users'

    def handle(self, *args, **options):
        # Look each username up via its natural key so custom user models work.
        for username in args:
            try:
                user = User.objects.get_by_natural_key(username)
            except User.DoesNotExist:
                raise CommandError('User "%s" does not exist' % username)
            # Deleting every registered OTP device effectively disables 2FA.
            for device in devices_for_user(user):
                device.delete()
|
russell/fairy-slipper | fairy_slipper/cmd/wadl_to_swagger.py | Python | apache-2.0 | 37,276 | 0.000054 | # Copyright (c) 2015 Russell Sim <russell.sim@gmail.com>
#
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
from __future__ import unicode_literals
from collections import defaultdict
from copy import copy
import json
import logging
import os
from os import path
import re
import textwrap
import xml.sax
from jinja2 import Environment
import prettytable
log = logging.getLogger(__name__)
TYPE_MAP = {
'string': 'string',
'xsd:string': 'string',
'csapi:string': 'string',
'xsd:int': 'integer',
'csapi:uuid': 'string',
'xsd:boolean': 'boolean',
'boolean': 'boolean',
'object': 'object',
'csapi:bool': 'boolean',
'xsd:bool': 'boolean',
'xsd:datetime': 'string',
'regexp': 'string',
'xsd:datetime': 'string',
'xsd:date': 'string',
'xsd:dict': 'object',
'dict': 'object',
'alarm': 'string',
'xsd:timestamp': 'string',
'xsd:char': 'string',
'list': 'array',
'csapi:flavorswithonlyidsnameslinks': 'string',
'csapi:imagestatus': 'string',
'csapi:imageswithonlyidsnameslinks': 'string',
'xsd:enum': 'string',
'xsd:anyuri': 'string',
'csapi:serverforupdate': 'string',
'capi:uuid': 'string',
'xsd:uuid': 'string',
'string': 'string',
'imag | eapi:string': 'string',
'imageapi:imagestatus': 'string',
'imageapi:uuid': 'string',
'csapi:uuid': 'string',
'csapi:serverforcreate': 'string',
'csapi:blockdevicemapping': 'string',
'csapi:serverswithonlyidsnameslinks': 'string',
'csapi:serverstatus': 'string',
'csapi:dict': 'object',
'imageforcreate': 'string',
'xsd:ip': 'string',
'xsd:base64binary': 'string',
'enum': 'array',
'xsd:float': 'number',
# TODO(arrsim) This array types also set the | items
# "tags": {
# "type": "array",
# "items": {
# "type": "string"
'xsd:list': 'array',
'array': 'array',
}
# Map WADL parameter types to Swagger "format" values.
FORMAT_MAP = {
    'xsd:anyURI': 'uri',
    'xsd:datetime': 'date-time',
    'xsd:ip': 'ipv4',
    'regexp': 'regexp',
    'xsd:timestamp': 'timestamp',
}
# Map WADL parameter styles to Swagger "in" locations.
STYLE_MAP = {
    'template': 'path',
    'plain': 'body',
    'query': 'query',
    'header': 'header',
}
# Map file extensions to MIME types for code samples.
MIME_MAP = {
    'json': 'application/json',
    'txt': 'text/plain',
    'xml': 'application/xml',
}
# Raw strings avoid invalid-escape-sequence warnings (SyntaxWarning on
# modern Python) for sequences like '\.' and '\s'; the compiled regexes
# are unchanged.
VERSION_RE = re.compile(r'v[0-9\.]+')
WHITESPACE_RE = re.compile(r'[\s]+', re.MULTILINE)
URL_TEMPLATE_RE = re.compile(r'{[^{}]+}')
CAPTION_RE = re.compile(r'[*`]*')
MARKUP_RE = re.compile(r'[.,:;)]+')
# Jinja2 environment used to render the HTTP request/response examples below.
environment = Environment()
# Template for an example HTTP request line plus headers.
HTTP_REQUEST = """{{ method }} {{ url }} HTTP/1.1
{% for key, value in headers.items() -%}
{{ key }}: {{ value }}
{% endfor %}
"""
HTTP_REQUEST_TMPL = environment.from_string(HTTP_REQUEST)
# Template for an example HTTP response: status line, headers, body.
HTTP_RESPONSE = """HTTP/1.1 {{ status_code }}
{% for key, value in headers.items() -%}
{{ key }}: {{ value }}
{% endfor %}
{{ body }}
"""
HTTP_RESPONSE_TMPL = environment.from_string(HTTP_RESPONSE)
def create_parameter(name, _in, description='',
                     type='xsd:string', required=True):
    """Build a Swagger parameter dict from WADL attributes.

    ``_in`` is a WADL style name (see STYLE_MAP); ``type`` is a WADL type
    name looked up case-insensitively in TYPE_MAP.
    """
    # NOTE(review): ``required`` is compared to the string 'true', so the
    # boolean default True actually yields required=False -- callers appear
    # to pass the raw XML attribute string; confirm before changing.
    # NOTE(review): FORMAT_MAP is consulted with the original case while
    # TYPE_MAP uses type.lower(); mixed-case types may miss their format.
    return {
        "name": name,
        "in": STYLE_MAP[_in],
        "description": description,
        "required": True if required == 'true' else False,
        "type": TYPE_MAP[type.lower()],
        "format": FORMAT_MAP.get(type, ''),
    }
def join_url(parts):
    """Return a joined url without any duplicate slashes.

    A regex collapses any run of slashes to one; the previous single-pass
    ``str.replace('//', '/')`` left '//' behind for runs of three or more
    slashes, contradicting this function's contract.
    """
    return re.sub('/+', '/', '/'.join(parts))
class SubParser(xml.sax.ContentHandler):
    """Base class for delegated SAX content handlers.

    A parent handler attaches a subparser to the stream; once the
    subparser's element stack unwinds completely, it hands ``self.result``
    back through ``parent.detach_subparser``.
    """

    def __init__(self, parent):
        # General parsing state.
        self.tag_stack = []
        self.attr_stack = []
        self.parent = parent
        self.result = None
        self.kwargs = {}

    def startElement(self, name, _attrs):
        attributes = dict(_attrs)
        self.tag_stack.append(name)
        self.attr_stack.append(attributes)
        return attributes

    def endElement(self, name):
        self.tag_stack.pop()
        self.attr_stack.pop()
        if not self.tag_stack:
            # Fully unwound: hand the accumulated result back to the parent.
            self.parent.detach_subparser(self.result, **self.kwargs)

    def search_stack_for(self, tag_name):
        """Return the attributes of the innermost open *tag_name*, if any."""
        for tag, attrs in zip(self.tag_stack[::-1], self.attr_stack[::-1]):
            if tag == tag_name:
                return attrs
        return None

    def on_top_tag_stack(self, *args):
        """True when *args* are exactly the innermost open tags, in order."""
        return self.tag_stack[-len(args):] == list(args)
class TableMixin(object):
    """Mixin that renders table SAX events via a prettytable.PrettyTable.

    The host class must provide ``content`` and ``content_stack``
    (lists of output fragments), as ParaParser below does.
    """

    def visit_table(self, attrs):
        # hrules=ALL draws a horizontal rule after every row.
        self.__table = prettytable.PrettyTable(hrules=prettytable.ALL)
        self.__table.header = False

    def depart_table(self):
        # Emit the finished table surrounded by blank lines.
        self.content.append('\n\n')
        self.content.append(str(self.__table))
        self.content.append('\n\n')

    def visit_caption(self, attrs):
        # Collect the caption text on its own fragment list.
        self.content_stack.append([])

    def depart_caption(self):
        # Strip emphasis/backtick markup, collapse whitespace, then embolden.
        content = ''.join(self.content_stack.pop()).strip()
        content = CAPTION_RE.sub('', content)
        content = WHITESPACE_RE.sub(' ', content)
        content = '**' + content + '**'
        self.content.append(content)

    def visit_th(self, attrs):
        self.__table.header = True

    def depart_th(self):
        heading = self.content.pop().strip()
        self.__table.field_names.append(heading)
        # Left-align, top-align and cap the width of this column.
        self.__table.align[heading] = 'l'
        self.__table.valign[heading] = 't'
        self.__table.max_width[heading] = 80

    def visit_tr(self, attrs):
        self.__row = []

    def visit_td(self, attrs):
        # Collect the cell text on its own fragment list.
        self.content_stack.append([])

    def depart_td(self):
        self.__row.append(''.join(self.content_stack.pop()).strip())

    def depart_tr(self):
        if self.__row:
            # Pad short rows so every row has one cell per column.
            columns = len(self.__table.field_names)
            self.__row.extend(['' for n in range(columns - len(self.__row))])
            self.__table.add_row(self.__row)
class ParaParser(SubParser, TableMixin):
    # Inline emphasis tags mapped to their reST markup.
    EMPHASIS = {
        'bold': '**',
        'italic': '*'
    }

    def __init__(self, parent):
        """Set up paragraph-rendering state on top of SubParser."""
        super(ParaParser, self).__init__(parent)
        self.content_stack = [[]]  # stack of output fragment lists
        self.current_emphasis = None
        self.nesting = 0  # current indentation depth, in spaces
        self.no_space = False
        self.fill_width = 67
        self.wrapper = textwrap.TextWrapper(width=self.fill_width)
        self.shortdesc = False
        self.inline_markup_stack = []
        self.hyperlink_end = False
        self.litblockstr = ''
        self.base_indent = '    '
        self.markup_end = False

    @property
    def content(self):
        # The fragment list currently being written to.
        return self.content_stack[-1]

    def startElement(self, name, _attrs):
        super(ParaParser, self).startElement(name, _attrs)
        # Dispatch to a visit_<tag> handler when one is defined.
        fn = getattr(self, 'visit_%s' % name, None)
        if fn:
            fn(dict(_attrs))

    def endElement(self, name):
        # Snapshot the joined content as the current result before unwinding.
        content = ''.join(self.content)
        self.result = content
        super(ParaParser, self).endElement(name)
        # Dispatch to a depart_<tag> handler when one is defined.
        fn = getattr(self, 'depart_%s' % name, None)
        if fn:
            fn()
def characters(self, content):
if not content:
return
# Fold up any white space into a single char
if not self.on_top_tag_stack('programlisting'):
content = WHITESPACE_RE.sub(' ', content)
if content == ' ':
return
if content[0] == '\n':
return
if self.content:
if self.content[-1].endswith('\n'):
content = ' ' * self.nesting + content.strip()
elif self.content[-1].endswith(' '):
content = content.strip()
elif (self.on_top_tag_stack('programlisting')):
if self.content[-1].endswith('<'):
pass
else:
if self.search_stack_for('itemizedlist') is None:
|
UK992/servo | components/script/dom/bindings/codegen/parser/tests/test_error_lineno.py | Python | mpl-2.0 | 907 | 0.005513 | import WebIDL
def WebIDLTest(parser, harness):
# Check that error messages put the '^' in the right place.
threw = False
input = """\
// This is a comment.
interface Foo {
};
/* This is also a comment. */
interface ?"""
try:
parser.parse(input)
results = parser | .finish()
except WebIDL.WebIDLError as e:
threw = True
lines = str(e).split('\n')
harness.check(len(lines), 3, 'Expected number of lines in error message')
harness.ok(lines[0].endswith('line 6:10'), 'First line of error should end with "line 6:10", but was "%s".' % lines[0])
harness.check(lines[1], 'interface ?', 'Second line of error message is the line which caused the error.')
harness.check(lines[2], ' | ' * (len('interface ?') - 1) + '^',
'Correct column pointer in error message.')
harness.ok(threw, "Should have thrown.")
|
srinath-chakravarthy/ovito | tests/scripts/test_suite/import_file.py | Python | gpl-3.0 | 1,809 | 0.007739 | import ovito
from ovito.io import import_file
test_data_dir = "../../files/"
node1 = import_file(test_data_dir + "LAMMPS/animation.dump.gz")
assert(len(ovito.dataset.scene_nodes) == 0)
import_file(test_data_dir + "CFG/fcc_coherent_twin.0.cfg")
import_file(test_data_dir + "CFG/shear.void.120.cfg")
import_file(test_data_dir + "Parcas/movie.0000000.parcas")
import_file(test_data_dir + "IMD/nw2.imd.gz")
import_file(test_data_dir + "FHI-aims/3_geometry.in.next_step")
import_file(test_data_dir + "GSD/test.gsd")
import_file(test_data_dir + "GSD/triblock.gsd")
import_file(test_data_dir + "PDB/SiShuffle.pdb")
import_file(test_data_dir + "PDB/trjconv_gromacs.pdb")
import_file(test_data_dir + "POSCAR/Ti_n1_PBE.n54_G7_V15.000.poscar.000")
import_file(test_data_dir + "NetCDF/C60_impact.nc")
import_file(test_data_dir + "NetCDF/sheared_aSi.nc")
import_file(test_data_dir + "VTK/mesh_test.vtk")
import_file(test_data_dir + "VTK/ThomsonTet_Gr1_rotmatNonRand_unstructGrid.vtk")
node = import_file(test_data_dir + "LAMMPS/multi_sequence_*.dump")
assert(ovito.dataset.anim.last_frame == 2)
node = import_file(test_data_dir + "LAMMPS/shear.void.dump.bin",
columns = ["Particle Identifier", "Particle Type", "Position.X", "Position.Y", "Position.Z"])
try:
# This should generate an error:
node = import_file(test_data_dir + "LAMMPS/shear.void.dump.bin",
| columns = ["Particle Identifier", "Particle Type", "Position.X", "Position.Y", "Position.Z", "ExtraProperty"])
assert False
except RuntimeError:
pass
node = import_file(test_data_dir + "LAMMPS/animation*.dump")
assert(ovito.dataset.anim.last_frame == 0)
node = import_file(test_data_dir + "LAMMPS/animation*.dump", multiple_frames = | True)
assert(ovito.dataset.anim.last_frame == 10)
|
adriano-arce/Interview-Problems | Array-Problems/Find-Equilibrium/Find-Equilibrium.py | Python | mit | 778 | 0.002571 | def find_equilibrium(arr):
left_sum = 0
right_sum = sum(arr[1:]) # List slicing copies references to each value.
if left_sum | == right_sum and arr: # Don't return 0 if arr is empty.
return 0
for i in range(1, len(arr)):
left_sum += arr[i - 1]
right_sum -= arr[i]
if left_sum == right_sum:
return i
return -1
def main():
    """Print find_equilibrium() results for a handful of sample arrays."""
    test_arrs = [
        [],
        [-7, 1, 5, 2, -4, 3, 0],
        [1, 2, 3, 4, 5],
        [3, 1, 2, 9, 4, 2],
        [9, 2, -3, 1],
        # Restored from a corrupted literal in the source dump.
        [7, 6, -5, -8, 9]
    ]
    for i, test_arr in enumerate(test_arrs):
        print("Test Case #{}: {}".format(i + 1, test_arr))
        print("    Equilibrium Index: {}".format(find_equilibrium(test_arr)))


if __name__ == "__main__":
    main()
mgood7123/UPM | Sources/PyOpenGL-Demo-3.0.1b1/PyOpenGL-Demo/GLUT/shader_test.py | Python | gpl-3.0 | 5,525 | 0.012489 | #! /usr/bin/env python
'''Tests rendering using shader objects from core GL or extensions
Uses the:
Lighthouse 3D Tutorial toon shader
http://www.lighthouse3d.com/opengl/glsl/index.php?toon2
By way of:
http://www.pygame.org/wiki/GLSLExample
'''
import OpenGL
OpenGL.ERROR_ON_COPY = True
from OpenGL.GL import *
from OpenGL.GLU import *
from OpenGL.GLUT import *
# PyOpenGL 3.0.1 introduces this convenience module...
from OpenGL.GL.shaders import *
import time, sys
program = None
# A general OpenGL initialization function. Sets all of the initial parameters.
def InitGL(Width, Height):              # We call this right after our OpenGL window is created.
    """One-time GL setup: clear color/depth state, projection, and shaders.

    Compiles the Lighthouse3D toon shader into the module-global ``program``;
    exits the process when shader objects are unavailable.
    """
    glClearColor(0.0, 0.0, 0.0, 0.0)    # This Will Clear The Background Color To Black
    glClearDepth(1.0)                   # Enables Clearing Of The Depth Buffer
    glDepthFunc(GL_LESS)                # The Type Of Depth Test To Do
    glEnable(GL_DEPTH_TEST)             # Enables Depth Testing
    glShadeModel(GL_SMOOTH)             # Enables Smooth Color Shading
    glMatrixMode(GL_PROJECTION)
    glLoadIdentity()                    # Reset The Projection Matrix
    # Calculate The Aspect Ratio Of The Window
    gluPerspective(45.0, float(Width)/float(Height), 0.1, 100.0)
    glMatrixMode(GL_MODELVIEW)
    if not glUseProgram:
        print 'Missing Shader Objects!'
        sys.exit(1)
    global program
    # Vertex shader forwards the transformed normal; fragment shader
    # quantizes diffuse intensity to 5 steps for the toon look.
    program = compileProgram(
        compileShader('''
        varying vec3 normal;
        void main() {
            normal = gl_NormalMatrix * gl_Normal;
            gl_Position = gl_ModelViewProjectionMatrix * gl_Vertex;
        }
        ''',GL_VERTEX_SHADER),
        compileShader('''
        varying vec3 normal;
        void main() {
            float intensity;
            vec4 color;
            vec3 n = normalize(normal);
            vec3 l = normalize(gl_LightSource[0].position).xyz;
            // quantize to 5 steps (0, .25, .5, .75 and 1)
            intensity = (floor(dot(l, n) * 4.0) + 1.0)/4.0;
            color = vec4(intensity*1.0, intensity*0.5, intensity*0.5,
                intensity*1.0);
            gl_FragColor = color;
        }
        ''',GL_FRAGMENT_SHADER),)
# The function called when our window is resized (which shouldn't happen if you enable fullscreen, below)
def ReSizeGLScene(Width, Height):
    """Reset the viewport and projection matrix after a window resize."""
    if Height == 0:                        # Prevent A Divide By Zero If The Window Is Too Small
        Height = 1
    glViewport(0, 0, Width, Height)        # Reset The Current Viewport And Perspective Transformation
    glMatrixMode(GL_PROJECTION)
    glLoadIdentity()
    gluPerspective(45.0, float(Width)/float(Height), 0.1, 100.0)
    glMatrixMode(GL_MODELVIEW)

# The main drawing function.
def DrawGLScene():
    """Draw one frame: toon-shaded sphere and cube, then swap buffers."""
    # Clear The Screen And The Depth Buffer
    glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
    glLoadIdentity()                    # Reset The View
    # Move Left 1.5 units and into the screen 6.0 units.
    glTranslatef(-1.5, 0.0, -6.0)
    if program:
        glUseProgram(program)
    glutSolidSphere(1.0,32,32)
    glTranslate( 1,0,2 )
    glutSolidCube( 1.0 )
    # since this is double buffered, swap the buffers to display what just got drawn.
    glutSwapBuffers()

# The function called whenever a key is pressed. Note the use of Python tuples to pass in: (key, x, y)
def keyPressed(*args):
    """Terminate the program when ESC is pressed; ignore other keys."""
    # If escape is pressed, kill everything.
    if args[0] == '\x1b':
        sys.exit()
def main():
    """Create the GLUT window, register callbacks and enter the main loop.

    (A stray data-dump separator inside this function was removed; the
    code is otherwise unchanged.)
    """
    global window
    # For now we just pass glutInit one empty argument. I wasn't sure what should or could be passed in (tuple, list, ...)
    # Once I find out the right stuff based on reading the PyOpenGL source, I'll address this.
    glutInit(sys.argv)
    # Select type of Display mode:
    #  Double buffer
    #  RGBA color
    #  Alpha components supported
    #  Depth buffer
    glutInitDisplayMode(GLUT_RGBA | GLUT_DOUBLE | GLUT_DEPTH)
    # get a 640 x 480 window
    glutInitWindowSize(640, 480)
    # the window starts at the upper left corner of the screen
    glutInitWindowPosition(0, 0)
    # Okay, like the C version we retain the window id to use when closing, but for those of you new
    # to Python (like myself), remember this assignment would make the variable local and not global
    # if it weren't for the global declaration at the start of main.
    window = glutCreateWindow("Jeff Molofee's GL Code Tutorial ... NeHe '99")
    # Register the drawing function with glut, BUT in Python land, at least using PyOpenGL, we need to
    # set the function pointer and invoke a function to actually register the callback, otherwise it
    # would be very much like the C version of the code.
    glutDisplayFunc(DrawGLScene)

    # Uncomment this line to get full screen.
    #glutFullScreen()
    # When we are doing nothing, redraw the scene.
    glutIdleFunc(DrawGLScene)
    # Register the function called when our window is resized.
    glutReshapeFunc(ReSizeGLScene)
    # Register the function called when the keyboard is pressed.
    glutKeyboardFunc(keyPressed)
    # Initialize our window.
    InitGL(640, 480)
    # Start Event Processing Engine
    glutMainLoop()
# Print message to console, and kick off the main to get it rolling.
if __name__ == "__main__":
    print "Hit ESC key to quit."
    main()
|
drufat/pybindcpp | pybindcpp/api.py | Python | gpl-3.0 | 790 | 0.002532 | import ctypes as ct
class Box(ct.Structure):
    """ctypes layout for a tagged value passed across the C boundary.

    Fields: a type id, an opaque pointer and a callback taking that
    pointer (presumably to release it -- confirm against the C++ side).
    """
    _fields_ = [
        ('tid', ct.c_size_t),
        ('ptr', ct.c_void_p),
        ('deleter', ct.CFUNCTYPE(None, ct.c_void_p)),
    ]
class TypeSystem(ct.Structure):
    """ctypes layout of the function table shared with the native side.

    Restored from a corrupted source dump (a stray separator and a split
    ``CFUNCTYPE`` token made the original field list a syntax error).
    """
    _fields_ = [
        ('type_counter', ct.c_size_t),
        ('add_type', ct.CFUNCTYPE(ct.c_size_t, ct.c_size_t, ct.c_char_p, ct.POINTER(ct.c_size_t), ct.c_size_t)),
        ('add_caller', ct.CFUNCTYPE(None, ct.c_size_t, Box)),
        ('add_callback', ct.CFUNCTYPE(None, ct.c_size_t, ct.c_size_t)),
        ('pre_init', ct.CFUNCTYPE(None)),
        ('post_init', ct.CFUNCTYPE(None)),

        ('add_box', ct.CFUNCTYPE(None, ct.py_object, ct.c_char_p, Box)),
        ('import_func', ct.CFUNCTYPE(None, ct.c_char_p, ct.c_char_p, ct.c_size_t, ct.POINTER(Box))),
    ]
|
erinspace/osf.io | osf_tests/test_guid_auto_include.py | Python | apache-2.0 | 7,146 | 0.001539 | from django.utils import timezone
import pytest
from django_bulk_update.helper import bulk_update
from django.db.models import DateTimeField
from osf_tests.factories import UserFactory, PreprintFactory, NodeFactory
@pytest.mark.django_db
class TestGuidAutoInclude:
    """Querysets of guid-bearing models should eagerly include their guids."""

    # Factories for the model classes whose querysets must auto-include guids.
    guid_factories = [
        UserFactory,
        PreprintFactory,
        NodeFactory
    ]

    @pytest.mark.parametrize('Factory', guid_factories)
    def test_filter_object(self, Factory):
        """A plain filter() query joins the guids table."""
        obj = Factory()
        assert '__guids' in str(obj._meta.model.objects.filter(id=obj.id).query), 'Guids were not included in filter query for {}'.format(obj._meta.model.__name__)

    @pytest.mark.parametrize('Factory', guid_factories)
    @pytest.mark.django_assert_num_queries
    def test_all(self, Factory, django_assert_num_queries):
        """all() resolves _id for every row within a single query."""
        # NOTE(review): creates UserFactory objects regardless of Factory --
        # possibly intentional seed data; confirm.
        for _ in range(0, 5):
            UserFactory()
        with django_assert_num_queries(1):
            wut = Factory._meta.model.objects.all()
            for x in wut:
                assert x._id is not None, 'Guid was None'

    @pytest.mark.parametrize('Factory', guid_factories)
    @pytest.mark.django_assert_num_queries
    def test_filter(self, Factory, django_assert_num_queries):
        """filter() resolves _id for every row within a single query."""
        objects = []
        for _ in range(0, 5):
            objects.append(Factory())
        new_ids = [o.id for o in objects]
        with django_assert_num_queries(1):
            wut = Factory._meta.model.objects.filter(id__in=new_ids)
            for x in wut:
                assert x._id is not None, 'Guid was None'

    @pytest.mark.parametrize('Factory', guid_factories)
    @pytest.mark.django_assert_num_queries
    def test_filter_order_by(self, Factory, django_assert_num_queries):
        """filter().order_by() still resolves _id within a single query."""
        objects = []
        for _ in range(0, 5):
            objects.append(Factory())
        new_ids = [o.id for o in objects]
        with django_assert_num_queries(1):
            wut = Factory._meta.model.objects.filter(id__in=new_ids).order_by('id')
            for x in wut:
                assert x._id is not None, 'Guid was None'
@pytest.mark.parametrize('Factory', guid_factories)
@pytest.mark.django_assert_num_queries
def test_values(self, Factory, django_assert_num_queries):
objects = []
for _ in range(0, 5):
objects.append(Factory())
with django_assert_num_queries(1):
wut = Factory._meta.model.objects.values('id')
| for x in wut:
assert len(x) == 1, 'Too many keys in values'
@pytest.mark.parametrize('Factory', guid_factories)
@pytest.mark.django_assert_num_queries
def test_exclude(self, Factory, django_assert_num_queries):
objects = []
for _ in range(0, 5):
objects.append(Factory())
try:
dtfield = [x.name for x in ob | jects[0]._meta.get_fields() if isinstance(x, DateTimeField)][0]
except IndexError:
pytest.skip('Thing doesn\'t have a DateTimeField')
with django_assert_num_queries(1):
wut = Factory._meta.model.objects.exclude(**{dtfield: timezone.now()})
for x in wut:
assert x._id is not None, 'Guid was None'
    @pytest.mark.parametrize('Factory', guid_factories)
    def test_update_objects(self, Factory):
        """Queryset .update() works on id-filtered, guid-annotated querysets."""
        objects = []
        for _ in range(0, 5):
            objects.append(Factory())
        new_ids = [o.id for o in objects]
        try:
            dtfield = [x.name for x in objects[0]._meta.get_fields() if isinstance(x, DateTimeField)][0]
        except IndexError:
            pytest.skip('Thing doesn\'t have a DateTimeField')
        qs = objects[0]._meta.model.objects.filter(id__in=new_ids)
        assert len(qs) > 0, 'No results returned'
        try:
            qs.update(**{dtfield: timezone.now()})
        except Exception as ex:
            pytest.fail('Queryset update failed for {} with exception {}'.format(Factory._meta.model.__name__, ex))

    @pytest.mark.parametrize('Factory', guid_factories)
    def test_update_on_objects_filtered_by_guids(self, Factory):
        """Queryset .update() works when filtering through the guids relation."""
        objects = []
        for _ in range(0, 5):
            objects.append(Factory())
        new__ids = [o._id for o in objects]
        try:
            dtfield = [x.name for x in objects[0]._meta.get_fields() if isinstance(x, DateTimeField)][0]
        except IndexError:
            pytest.skip('Thing doesn\'t have a DateTimeField')
        qs = objects[0]._meta.model.objects.filter(guids___id__in=new__ids)
        assert len(qs) > 0, 'No results returned'
        try:
            qs.update(**{dtfield: timezone.now()})
        except Exception as ex:
            pytest.fail('Queryset update failed for {} with exception {}'.format(Factory._meta.model.__name__, ex))

    @pytest.mark.parametrize('Factory', guid_factories)
    @pytest.mark.django_assert_num_queries
    def test_related_manager(self, Factory, django_assert_num_queries):
        """Related managers (e.g. contributors) expose _id within one query."""
        thing_with_contributors = Factory()
        if not hasattr(thing_with_contributors, 'contributors'):
            pytest.skip('Thing must have contributors')
        try:
            with django_assert_num_queries(1):
                [x._id for x in thing_with_contributors.contributors.all()]
        except Exception as ex:
            pytest.fail('Related manager failed for {} with exception {}'.format(Factory._meta.model.__name__, ex))
    @pytest.mark.parametrize('Factory', guid_factories)
    @pytest.mark.django_assert_num_queries
    def test_count_objects(self, Factory, django_assert_num_queries):
        """count() on a guid-annotated queryset is still a single query."""
        objects = []
        for _ in range(0, 5):
            objects.append(Factory())
        new_ids = [o.id for o in objects]
        with django_assert_num_queries(1):
            qs = objects[0]._meta.model.objects.filter(id__in=new_ids)
            count = qs.count()
        assert count == len(objects)

    @pytest.mark.parametrize('Factory', guid_factories)
    @pytest.mark.django_assert_num_queries
    def test_bulk_create_objects(self, Factory, django_assert_num_queries):
        """bulk_create() inserts all unsaved instances in a single query."""
        objects = []
        Model = Factory._meta.model
        kwargs = {}
        if Factory == PreprintFactory:
            # Don't try to save preprints on build when neither the subject nor provider have been saved
            kwargs['finish'] = False
        for _ in range(0, 5):
            objects.append(Factory.build(**kwargs))
        with django_assert_num_queries(1):
            things = Model.objects.bulk_create(objects)
        assert len(things) == len(objects)

    @pytest.mark.parametrize('Factory', guid_factories)
    @pytest.mark.django_assert_num_queries
    def test_bulk_update_objects(self, Factory, django_assert_num_queries):
        """django_bulk_update's bulk_update() issues a single query."""
        objects = []
        ids = range(0, 5)
        for id in ids:
            objects.append(Factory())
        try:
            dtfield = [x.name for x in objects[0]._meta.get_fields() if isinstance(x, DateTimeField)][0]
        except IndexError:
            pytest.skip('Thing doesn\'t have a DateTimeField')
        for obj in objects:
            setattr(obj, dtfield, timezone.now())
        with django_assert_num_queries(1):
            bulk_update(objects)
|
foua-pps/atrain_match | atrain_match/utils/runutils.py | Python | gpl-3.0 | 9,928 | 0.000806 | # -*- coding: utf-8 -*-
# Copyright (c) 2009-2019 atrain_match developers
#
# This file is part of atrain_match.
#
# atrain_match is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# atrain_match is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with atrain_match. If not, see <http://www.gnu.org/licenses/>.
"""
Utilities for running matching
"""
import re
import time
import numpy as np
import os
import logging
logger = logging.getLogger(__name__)
def read_config_info():
    """Parse the atrain_match config file into (AM_PATHS, SETTINGS).

    AM_PATHS maps the raw options of the [files] section.  SETTINGS maps
    upper-cased [general] option names to parsed values (bools, floats,
    lists of strings), plus a few derived entries expressed in seconds.

    Raises IOError when the config file does not exist.
    """
    import os
    from configparser import ConfigParser
    CONF = ConfigParser()
    from atrain_match.config import ATRAIN_MATCH_CONFIG_PATH
    from atrain_match.config import ATRAIN_MATCH_CONFIG_FILE
    config_file = os.path.join(ATRAIN_MATCH_CONFIG_PATH, ATRAIN_MATCH_CONFIG_FILE)
    if not os.path.isfile(config_file):
        raise IOError("Couldn't find config file %s." % (config_file))
    CONF.read(config_file)
    AM_PATHS = {}
    for option, value in CONF.items('files', raw=True):
        AM_PATHS[option] = value
    SETTINGS = {}
    for name, value in CONF.items('general', raw=True):
        name = name.upper()
        # Remove all whitespace before splitting comma-separated values.
        value = value.strip()
        while ' ' in value:
            value = value.replace(' ', '')
        values = value.split(',')
        if name in ['MIN_OPTICAL_DEPTH']:
            # Builtin float: np.float was deprecated in NumPy 1.20 and
            # removed in 1.24, so the old code crashes on modern numpy.
            value_ = [float(val_i) for val_i in values]
        elif name in ["COMPILE_STATISTICS_TRUTH", "PLOT_MODES",
                      "PLOT_TYPES", "CTTH_TYPES",
                      'SATELLITES', 'YEARS', 'MONTHS']:
            value_ = values
        elif name in ['CNN_PCKL_PATH']:
            value_ = values[0]
        elif len(values) == 1 and 'true' in values[0].lower():
            value_ = True
        elif len(values) == 1 and 'false' in values[0].lower():
            value_ = False
        elif len(values) == 1 and re.match(r"\d+.*\d*", values[0]):
            value_ = float(values[0])
        # NOTE(review): when no branch matches, value_ keeps the value from
        # the previous iteration (or is unbound on the first one) -- confirm
        # whether such options can occur in practice.
        SETTINGS[name.upper()] = value_
    if (SETTINGS['COMPILE_RESULTS_SEPARATELY_FOR_SINGLE_LAYERS_ETC'] or
            SETTINGS['CALCULATE_DETECTION_HEIGHT_FROM_5KM_DATA']):
        logger.info("Setting ALSO_USE_5KM_FILES = True as "
                    "COMPILE_RESULTS_SEPARATELY_FOR_SINGLE_LAYERS_ETC = True or "
                    "CALCULATE_DETECTION_HEIGHT_FROM_5KM_DATA = True")
        SETTINGS['ALSO_USE_5KM_FILES'] = True  # 5km data is required also for 1km processing
    # Derived thresholds converted from minutes to seconds.
    SETTINGS['sec_timeThr'] = SETTINGS['MINUTES_TIMETHR'] * 60.0
    SETTINGS['sec_timeThr_synop'] = SETTINGS['MINUTES_TIMETHR_SYNOP'] * 60.0
    SETTINGS['SAT_ORBIT_DURATION'] = SETTINGS['SAT_ORBIT_DURATION_MINUTES'] * 60.0
    return AM_PATHS, SETTINGS
def unzip_file(filename):
    """Decompress *filename* into a fresh temporary directory.

    Only '.bz2' and '.gz' files are handled; the decompressed copy keeps
    the original base name minus the compression suffix.  Returns the
    path of the temporary file, or None when the file is not compressed
    or decompression failed.
    """
    import tempfile
    import bz2
    import gzip

    def _unpack(opener, suffix):
        # Write the decompressed stream to <tmpdir>/<basename minus suffix>.
        tmpdir = tempfile.mkdtemp()
        # Slice the suffix off instead of str.strip(suffix): strip() removes
        # *characters*, so it mangled names such as 'zebra.txt.bz2'.
        base = os.path.basename(filename)[:-len(suffix)]
        tmpfilename = os.path.join(tmpdir, base)
        try:
            with opener(filename) as infile, open(tmpfilename, 'wb') as ofpt:
                ofpt.write(infile.read())
        except IOError:
            import traceback
            traceback.print_exc()
            logger.info("Failed to read compressed file %s", str(filename))
            os.remove(tmpfilename)
            return None
        return tmpfilename

    if filename.endswith('.bz2'):
        return _unpack(bz2.BZ2File, '.bz2')
    elif filename.endswith('.gz'):
        return _unpack(lambda name: gzip.open(name, 'rb'), '.gz')
    return None
def parse_scene(filename):
    """
    Parse scene string (e.g. 'noaa19_20100110_1045_04767') and return
    (satname, `datetime.datetime`, orbit).

    Examples:

    >>> parse_scene('noaa19_20100110_1045_04767')
    ('noaa19', datetime.datetime(2010, 1, 10, 10, 45), 4767)
    >>> parse_scene('noaa19_20100110_1045_04767.okay')
    ('noaa19', datetime.datetime(2010, 1, 10, 10, 45), 4767)

    Leading directories are removed:

    >>> parse_scene('some/dir/noaa19_20100110_1045_04767')
    ('noaa19', datetime.datetime(2010, 1, 10, 10, 45), 4767)
    """
    from datetime import datetime
    import re
    filename = os.path.basename(filename)
    if not filename:
        raise ValueError("No file %r" % filename)
    match = re.match(r'(\w+)_([0-9]{8})_([0-9]{4})_([0-9]+)', filename)
    if not match:
        raise ValueError("Couldn't parse \"okay\" file %r" % filename)
    satname, date_s, time_s, orbit_s = match.groups()
    _datetime = datetime.strptime(date_s + time_s, '%Y%m%d%H%M')
    # Orbit numbers are zero padded in the file name; int() drops the padding.
    orbit = int(orbit_s)
    return satname, _datetime, orbit
def parse_scenesfile_v2014(filename):
    """
    Parse pps file =S_NWC_CT_{satellite}_{orbit}_%Y%m%dT%H%M%S?Z_*.h5 or .nc

    Returns (satname, `datetime.datetime`) extracted from the basename.
    Raises ValueError if the name does not follow the v2014 PPS pattern.
    """
    from datetime import datetime
    import re
    filename = os.path.basename(filename)
    if not filename:
        raise ValueError("No file %r" % filename)
    match = re.match(r"S_NWC_.+_([^_]+)_\d+_(\d+)T(\d\d\d\d\d\d).+", filename)
    if not match:
        raise ValueError("Couldn't parse pps file %r" % filename)
    satname, date_s, time_s = match.groups()
    _datetime = datetime.strptime(date_s + time_s, '%Y%m%d%H%M%S')
    return satname, _datetime
def parse_scenesfile_cci(filename):
    """Extract (satname, datetime) from a Cloud-CCI / CCI+ product name.

    Handles both styles, e.g.
    20080613002200-ESACCI-L2_CLOUD-CLD_PRODUCTS-IMAGERGAC-NOAA18-fv1.0.nc
    and
    20190713002200-ESACCI-L2_CLOUD-CLD_PRODUCTS-SEVIRI-MSG4-fv1.0.nc

    The satellite name is returned lowercased; seconds in the timestamp
    are ignored (only %Y%m%d%H%M is parsed).
    """
    from datetime import datetime
    import re
    filename = os.path.basename(filename)
    if not filename:
        raise ValueError("No file %r" % filename)
    # Choose the pattern from the sensor tag present in the name.
    if "IMAGERGAC" in filename:
        pattern = r"(\d\d\d\d\d\d\d\d)(\d\d\d\d).+IMAGERGAC-([^-]+)-"
    elif "SEVIRI" in filename:
        pattern = r"(\d\d\d\d\d\d\d\d)(\d\d\d\d).+SEVIRI-([^-]+)-"
    else:
        raise ValueError("atrain_match not able to handle %r files" % filename)
    match = re.match(pattern, filename)
    if not match:
        raise ValueError("Couldn't parse cci file %r" % filename)
    date_part, time_part, satname = match.groups()
    parsed_time = datetime.strptime(date_part + time_part, '%Y%m%d%H%M')
    return satname.lower(), parsed_time
def parse_scenesfile_maia(filename):
    """Extract (satname, datetime) from a MAIA product file name.

    Example name:
    viiCT_npp_DB_20120817_S035411_E035535_DES_N_La052_Lo-027_00001.h5
    (the start time 'S...' is the one parsed).
    """
    from datetime import datetime
    import re
    base = os.path.basename(filename)
    if not base:
        raise ValueError("No file %r" % base)
    maia_pattern = re.compile(r"\S\S\SCT_(\S\S\S)_\S\S_(\d+)_S(\d+)_")
    found = maia_pattern.match(base)
    if found is None:
        raise ValueError("Couldn't parse maia file %r" % base)
    satname, date_part, time_part = found.groups()
    return satname, datetime.strptime(date_part + time_part, '%Y%m%d%H%M%S')
def parse_scenesfile_reshaped(filename):
"""
Parse maia file: # 5km_noaa18_20090328_1855_99999_caliop_imager_match.h5
"""
from datetime import datetime
import re
filename = os.path.basename(filename)
if not filename:
raise ValueError("No file %r" % filename)
match = re.match(r"\d+km_( |
danielyule/naya | naya/json.py | Python | mit | 24,396 | 0.001435 | from io import StringIO
class TOKEN_TYPE:
    """Token categories emitted by ``tokenize`` as the first tuple element."""
    OPERATOR = 0  # one of the structural characters: { } [ ] : ,
    STRING = 1    # string literal (value is the decoded text)
    NUMBER = 2    # int or float literal
    BOOLEAN = 3   # true/false (value is a Python bool)
    NULL = 4      # JSON null
class __TOKENIZER_STATE:
    """States of the character-level state machine driven by ``tokenize``.

    Each constant names what the tokenizer is currently reading; the
    numeric values are arbitrary identifiers (value 7 is unused).
    """
    WHITESPACE = 0        # between tokens
    INTEGER_0 = 1         # just read a leading '0'
    INTEGER_SIGN = 2      # just read a leading '-'
    INTEGER = 3           # reading integer digits
    INTEGER_EXP = 4       # reading exponent digits
    INTEGER_EXP_0 = 5     # just read 'e'/'E', expecting sign or digit
    FLOATING_POINT_0 = 6  # just read '.', expecting fraction digits
    FLOATING_POINT = 8    # reading fraction digits
    STRING = 9            # inside a string literal
    STRING_ESCAPE = 10    # just read '\' inside a string
    STRING_END = 11
    TRUE_1 = 12           # matched 't' of "true"
    TRUE_2 = 13
    TRUE_3 = 14
    FALSE_1 = 15          # matched 'f' of "false"
    FALSE_2 = 16
    FALSE_3 = 17
    FALSE_4 = 18
    NULL_1 = 19           # matched 'n' of "null"
    NULL_2 = 20
    NULL_3 = 21
    UNICODE_1 = 22        # presumably the four \uXXXX escape digits -- used beyond this excerpt
    UNICODE_2 = 23
    UNICODE_3 = 24
    UNICODE_4 = 25
def tokenize(stream):
def is_delimiter(char):
return char.isspace() or char in "{}[]:,"
token = []
charcode = 0
completed = False
now_token = ""
def process_char(char, charcode):
nonlocal token, completed, now_token
advance = True
add_char = False
next_state = state
if state == __TOKENIZER_STATE.WHITESPACE:
if char == "{":
completed = True
now_token = (TOKEN_TYPE.OPERATOR, "{")
elif char == "}":
completed = True
now_token = (TOKEN_TYPE.OPERATOR, "}")
elif char == "[":
completed = True
now_token = (TOKEN_TYPE.OPERATOR, "[")
elif char == "]":
completed = True
now_token = (TOKEN_TYPE.OPERATOR, "]")
elif char == ",":
completed = True
now_token = (TOKEN_TYPE.OPERATOR, ",")
elif char == ":":
completed = True
now_token = (TOKEN_TYPE.OPERATOR, ":")
elif char == "\"":
next_state = __TOKENIZER_STATE.STRING
elif char in "123456789":
next_state = __TOKENIZER_STATE.INTEGER
add_char = True
elif char == "0":
next_state = __TOKENIZER_STATE.INTEGER_0
add_char = True
elif char == "-":
next_state = __TOKENIZER_STATE.INTEGER_SIGN
add_char = True
elif char == "f":
next_state = __TOKENIZER_STATE.FALSE_1
elif char == "t":
next_state = __TOKENIZER_STATE.TRUE_1
elif char == "n":
next_state = __TOKENIZER_STATE.NULL_1
elif not char.isspace():
raise ValueError("Invalid JSON character: '{0}'".format(char))
elif state == __TOKENIZER_STATE.INTEGER:
if char in "0123456789":
add_char = True
elif char == ".":
next_state = __TOKENIZER_STATE.FLOATING_POINT_0
add_char = True
elif char == "e" or char == 'E':
next_state = __TOKENIZER_STATE.INTEGER_EXP_0
add_char = True
elif is_delimiter(char):
next_state = __TOKENIZER_STATE.WHITESPACE
completed = True
now_token = (TOKEN_TYPE.NUMBER, int("".join(token)))
advance = False
else:
raise ValueError("A number must contain only digits. Got '{}'".format(char))
elif state == __TOKENIZER_STATE.INTEGER_0:
if char == ".":
next_state = __TOKENIZER_STATE.FLOATING_POINT_0
add_char = True
elif char | == "e" or char == 'E':
next_state = __TOKENIZER_STATE.INTEGER_EXP_0
add_char = True
elif is_delimiter(char):
next_state = __TOKENIZER_STATE.WHITESPACE
completed = True
now_token = (TOKEN_TYPE.NUMBER, 0)
advance = False
else:
raise ValueError("A 0 must be fo | llowed by a '.' or a 'e'. Got '{0}'".format(char))
elif state == __TOKENIZER_STATE.INTEGER_SIGN:
if char == "0":
next_state = __TOKENIZER_STATE.INTEGER_0
add_char = True
elif char in "123456789":
next_state = __TOKENIZER_STATE.INTEGER
add_char = True
else:
raise ValueError("A - must be followed by a digit. Got '{0}'".format(char))
elif state == __TOKENIZER_STATE.INTEGER_EXP_0:
if char == "+" or char == "-" or char in "0123456789":
next_state = __TOKENIZER_STATE.INTEGER_EXP
add_char = True
else:
raise ValueError("An e in a number must be followed by a '+', '-' or digit. Got '{0}'".format(char))
elif state == __TOKENIZER_STATE.INTEGER_EXP:
if char in "0123456789":
add_char = True
elif is_delimiter(char):
completed = True
now_token = (TOKEN_TYPE.NUMBER, float("".join(token)))
next_state = __TOKENIZER_STATE.WHITESPACE
advance = False
else:
raise ValueError("A number exponent must consist only of digits. Got '{}'".format(char))
elif state == __TOKENIZER_STATE.FLOATING_POINT:
if char in "0123456789":
add_char = True
elif char == "e" or char == "E":
next_state = __TOKENIZER_STATE.INTEGER_EXP_0
add_char = True
elif is_delimiter(char):
completed = True
now_token = (TOKEN_TYPE.NUMBER, float("".join(token)))
next_state = __TOKENIZER_STATE.WHITESPACE
advance = False
else:
raise ValueError("A number must include only digits")
elif state == __TOKENIZER_STATE.FLOATING_POINT_0:
if char in "0123456789":
next_state = __TOKENIZER_STATE.FLOATING_POINT
add_char = True
else:
raise ValueError("A number with a decimal point must be followed by a fractional part")
elif state == __TOKENIZER_STATE.FALSE_1:
if char == "a":
next_state = __TOKENIZER_STATE.FALSE_2
else:
raise ValueError("Invalid JSON character: '{0}'".format(char))
elif state == __TOKENIZER_STATE.FALSE_2:
if char == "l":
next_state = __TOKENIZER_STATE.FALSE_3
else:
raise ValueError("Invalid JSON character: '{0}'".format(char))
elif state == __TOKENIZER_STATE.FALSE_3:
if char == "s":
next_state = __TOKENIZER_STATE.FALSE_4
else:
raise ValueError("Invalid JSON character: '{0}'".format(char))
elif state == __TOKENIZER_STATE.FALSE_4:
if char == "e":
next_state = __TOKENIZER_STATE.WHITESPACE
completed = True
now_token = (TOKEN_TYPE.BOOLEAN, False)
else:
raise ValueError("Invalid JSON character: '{0}'".format(char))
elif state == __TOKENIZER_STATE.TRUE_1:
if char == "r":
next_state = __TOKENIZER_STATE.TRUE_2
else:
raise ValueError("Invalid JSON character: '{0}'".format(char))
elif state == __TOKENIZER_STATE.TRUE_2:
if char == "u":
next_state = __TOKENIZER_STATE.TRUE_3
else:
raise ValueError("Invalid JSON character: '{0}'".format(char))
elif state == __TOKENIZER_STATE.TRUE_3:
if char == "e":
next_state = __TOKENIZER_STATE.WHITESPACE
completed = True
now_token = (TOKEN_TYPE.BOOLEAN, True)
else:
raise ValueError("Invalid JSON character: '{0}'".format(char))
elif state == __TOKENIZER_STATE.NULL_1:
if char == "u":
next_state = __TOKENIZER_STATE.NULL_2
else:
raise ValueError("Invalid JSON character: '{0}'".format(char))
elif state == __TOKENIZER_STATE.NULL_2:
if char == "l":
next_state = __TOKENIZER_STATE.NULL_3
else:
|
valentin-krasontovitsch/ansible | lib/ansible/playbook/__init__.py | Python | gpl-3.0 | 4,859 | 0.00247 | # (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
from ansible import constants as C
from ansible.errors import AnsibleParserError
from ansible.module_utils._text import to_bytes, to_text, to_native
from ansible.playbook.play import Play
from ansible.playbook.playbook_include import PlaybookInclude
from ansible.plugins.loader import get_all_plugin_loaders
from ansible.utils.display import Display
display = Display()
__all__ = ['Playbook']
class Playbook:
    """In-memory representation of a playbook: an ordered list of plays.

    Entries come either from plays defined directly in the file or from
    other playbooks pulled in via ``import_playbook``/``include``.
    """

    def __init__(self, loader):
        # Entries in the datastructure of a playbook may
        # be either a play or an include statement
        self._entries = []
        self._basedir = to_text(os.getcwd(), errors='surrogate_or_strict')
        self._loader = loader
        self._file_name = None

    @staticmethod
    def load(file_name, variable_manager=None, loader=None):
        """Create a Playbook and populate it from *file_name*."""
        pb = Playbook(loader=loader)
        pb._load_playbook_data(file_name=file_name, variable_manager=variable_manager)
        return pb

    def _load_playbook_data(self, file_name, variable_manager, vars=None):
        """Parse *file_name* into plays/includes, extending ``self._entries``.

        Raises AnsibleParserError for unreadable, empty or malformed files.
        """
        if os.path.isabs(file_name):
            self._basedir = os.path.dirname(file_name)
        else:
            self._basedir = os.path.normpath(os.path.join(self._basedir, os.path.dirname(file_name)))

        # set the loaders basedir
        cur_basedir = self._loader.get_basedir()
        self._loader.set_basedir(self._basedir)

        self._file_name = file_name

        # dynamically load any plugins from the playbook directory
        for name, obj in get_all_plugin_loaders():
            if obj.subdir:
                plugin_path = os.path.join(self._basedir, obj.subdir)
                if os.path.isdir(to_bytes(plugin_path)):
                    obj.add_directory(plugin_path)

        try:
            ds = self._loader.load_from_file(os.path.basename(file_name))
        except UnicodeDecodeError as e:
            raise AnsibleParserError("Could not read playbook (%s) due to encoding issues: %s" % (file_name, to_native(e)))

        # check for errors and restore the basedir in case this error is caught and handled
        if not ds:
            self._loader.set_basedir(cur_basedir)
            raise AnsibleParserError("Empty playbook, nothing to do", obj=ds)
        elif not isinstance(ds, list):
            self._loader.set_basedir(cur_basedir)
            raise AnsibleParserError("A playbook must be a list of plays, got a %s instead" % type(ds), obj=ds)

        # Parse the playbook entries. For plays, we simply parse them
        # using the Play() object, and includes are parsed using the
        # PlaybookInclude() object
        for entry in ds:
            if not isinstance(entry, dict):
                # restore the basedir in case this error is caught and handled
                self._loader.set_basedir(cur_basedir)
                raise AnsibleParserError("playbook entries must be either a valid play or an include statement", obj=entry)

            if any(action in entry for action in ('import_playbook', 'include')):
                if 'include' in entry:
                    display.deprecated("'include' for playbook includes. You should use 'import_playbook' instead", version="2.12")
                pb = PlaybookInclude.load(entry, basedir=self._basedir, variable_manager=variable_manager, loader=self._loader)
                if pb is not None:
                    self._entries.extend(pb._entries)
                else:
                    which = entry.get('import_playbook', entry.get('include', entry))
                    display.display("skipping playbook '%s' due to conditional test failure" % which, color=C.COLOR_SKIP)
            else:
                entry_obj = Play.load(entry, variable_manager=variable_manager, loader=self._loader, vars=vars)
                self._entries.append(entry_obj)

        # we're done, so restore the old basedir in the loader
        self._loader.set_basedir(cur_basedir)

    def get_loader(self):
        return self._loader

    def get_plays(self):
        # Return a copy so callers cannot mutate the internal list.
        return self._entries[:]
|
bioidiap/bob.ip.facedetect | doc/plot/detect_faces_tinyface.py | Python | gpl-3.0 | 1,124 | 0.00089 | import matplotlib.pyplot as plt
import matplotlib.pyplot as plt
from bob.io.base import load
from bob.io.base.test_utils import datafile
from bob.io.image import imshow
from bob.ip.facedetect.tinyface import TinyFacesDetector
from matplotlib.patches import Rectangle

# load colored test image
color_image = load(datafile("test_image_multi_face.png", "bob.ip.facedetect"))

# The TinyFaces detector needs mxnet; if it is not importable we just show
# the raw image instead of failing.
is_mxnet_available = True
try:
    import mxnet
except Exception:
    is_mxnet_available = False

if not is_mxnet_available:
    imshow(color_image)
else:
    # detect all faces
    detector = TinyFacesDetector()
    detections = detector.detect(color_image)

    imshow(color_image)
    plt.axis("off")

    for annotations in detections:
        topleft = annotations["topleft"]
        bottomright = annotations["bottomright"]
        size = bottomright[0] - topleft[0], bottomright[1] - topleft[1]
        # draw bounding boxes; coordinates appear to be (y, x) and are
        # reversed to matplotlib's (x, y) order -- TODO confirm
        plt.gca().add_patch(
            Rectangle(
                topleft[::-1],
                size[1],
                size[0],
                edgecolor="b",
                facecolor="none",
                linewidth=2,
            )
        )
ultmaster/eoj3 | backstage/server/views.py | Python | mit | 8,676 | 0.010489 | import logging
import multiprocessing
import threading
import traceback
from datetime import datetime
from django.contrib import messages
from django.core.cache import cache
from django.db import transaction
from django.db.models import F
from django.shortcuts import HttpResponseRedirect, reverse, get_object_or_404
from django.views.generic import FormView
from django.views.generic import View
from django.views.generic.list import ListView
from django_redis import get_redis_connection
from dispatcher.manage import update_token, list_spj, upload_spj
from dispatcher.models import Server, ServerProblemStatus
from dispatcher.semaphore import Semaphore
from polygon.rejudge import rejudge_submission_set
from problem.models import Problem, SpecialProgram
from problem.tasks import upload_problem_to_judge_server
from submission.models import Submission
from submission.util import SubmissionStatus
from .forms import ServerEditForm, ServerUpdateTokenForm
from ..base_views import BaseCreateView, BaseUpdateView, BaseBackstageMixin
logger = logging.getLogger(__name__)
class ServerCreate(BaseCreateView):
  """Backstage form view for registering a new judge server."""
  form_class = ServerEditForm
  template_name = 'backstage/server/server_add.jinja2'

  def get_redirect_url(self, instance):
    # Always return to the server list after a successful create.
    return reverse('backstage:server')
class ServerUpdate(BaseUpdateView):
  """Backstage form view for editing an existing judge server."""
  form_class = ServerEditForm
  template_name = 'backstage/server/server_edit.jinja2'
  queryset = Server.objects.all()

  def get_redirect_url(self, instance):
    # Always return to the server list after a successful edit.
    return reverse('backstage:server')
class ServerList(BaseBackstageMixin, ListView):
  """Dashboard listing all judge servers plus judge-semaphore health info."""
  template_name = 'backstage/server/server.jinja2'
  queryset = Server.objects.all().order_by("-last_seen_time")
  context_object_name = 'server_list'

  def get_context_data(self, **kwargs):  # pylint: disable=arguments-differ
    data = super(ServerList, self).get_context_data(**kwargs)
    redis_server = get_redis_connection("judge")
    sem = Semaphore(redis_server)
    sem.exists_or_init()
    try:
      data['semaphore_available_count'] = sem.available_count
      data['semaphore_available_keys'] = redis_server.lrange(sem.available_key, 0,
                                                             sem.available_count)  # 1 more actually
      data['semaphore_available_keys'] = list(map(lambda s: s.decode(), data['semaphore_available_keys']))
      data['semaphore_grabbed_keys'] = {}
      for key, tt in redis_server.hgetall(sem.grabbed_key).items():
        # Value stored is the grab timestamp; report age in seconds.
        data['semaphore_grabbed_keys'][key.decode()] = sem.current_time - float(tt.decode())
      data['server_synchronize_status_detail'] = cache.get('server_synchronize_status_detail', '')
      data['server_synchronize_status'] = cache.get('server_synchronize_status', 0)
      data['semaphore_ok'] = True
    except Exception:
      # Semaphore info is best-effort: if redis is unreachable the page
      # still renders, just without the semaphore panel.
      pass
    data['crashed_submission_count'] = Submission.objects.filter(status=SubmissionStatus.SYSTEM_ERROR).count()
    return data
class ServerRefresh(BaseBackstageMixin, View):
  """Drop all per-problem status rows for one server.

  This wipes the recorded sync state, so the next synchronize treats every
  problem as out of date -- presumably forcing a full re-upload; verify
  against ServerSynchronize.
  """
  def post(self, request, pk):
    server = Server.objects.get(pk=pk)
    server.serverproblemstatus_set.all().delete()
    messages.success(request, "Server status has been refreshed.")
    return HttpResponseRedirect(reverse('backstage:server'))
class ServerEnableOrDisable(BaseBackstageMixin, View):
  """Toggle a server's enabled flag and reset the judge semaphore."""

  def post(self, request, pk):
    server = Server.objects.get(pk=pk)
    server.enabled = not server.enabled
    server.save(update_fields=['enabled'])
    try:
      # Best effort: capacity changed, so rebuild the semaphore. Ignore
      # redis outages so the toggle itself still succeeds.
      Semaphore(get_redis_connection("judge")).reset()
    except Exception:
      pass
    return HttpResponseRedirect(reverse('backstage:server'))
class ServerDelete(BaseBackstageMixin, View):
  """Deletion endpoint, currently a deliberate no-op.

  The actual delete (shown commented out below) is disabled; POSTing here
  only redirects back to the server list.
  """
  def post(self, request, pk):
    # server = Server.objects.get(pk=pk)
    # server.delete()
    # messages.success(request, "Server <strong>%s</strong> is successfully removed." % server.name)
    return HttpResponseRedirect(reverse('backstage:server'))
class ServerUpdateToken(BaseBackstageMixin, FormView):
  """Rotate the authentication token of one judge server."""
  form_class = ServerUpdateTokenForm
  template_name = 'backstage/server/server_edit.jinja2'

  def post(self, request, *args, **kwargs):
    form = self.get_form()
    if form.is_valid():
      server = Server.objects.get(pk=kwargs.get('pk'))
      # update_token pushes the new secret to the server and reports success.
      if update_token(server, form.cleaned_data['new_password']):
        messages.success(request, 'Token update succeeded.')
        return HttpResponseRedirect(reverse('backstage:server'))
    # Invalid form or rejected update both fall through to the error banner.
    messages.error(request, 'Update token failed. Please recheck your server status.')
    return HttpResponseRedirect(reverse('backstage:server'))
def sync_and_update_status(server, problem):
  """Upload *problem* to *server*, recording success or the failure traceback.

  The ServerProblemStatus row keeps the last error text so it can be shown
  in the backstage UI; an empty string means the last upload succeeded.
  """
  status, _ = ServerProblemStatus.objects.get_or_create(server=server, problem=problem)
  try:
    upload_problem_to_judge_server(problem, server)
    status.last_status = ''
  except Exception:
    # A bare except would also trap KeyboardInterrupt/SystemExit; keep the
    # traceback on the status row instead of propagating.
    status.last_status = traceback.format_exc()
  status.save()
def upload_spj_exception_wrapper(server, program):
  """Upload one special-judge program; log instead of raising on rejection."""
  response = upload_spj(server, program)
  if response["status"] != "received":
    logger.error("%s", response)
def synchronize_func(server, problems):
  """Upload *problems* to *server* in parallel, then record the sync time."""
  tasks = [(server, problem) for problem in problems]
  with multiprocessing.Pool(server.concurrency) as pool:
    pool.starmap(sync_and_update_status, tasks)
  server.last_synchronize_time = datetime.now()
  server.save(update_fields=['last_synchronize_time'])
def synchronize_func_v3(server, programs):
  """Upload special-judge *programs* to *server* using a worker pool."""
  tasks = [(server, program) for program in programs]
  with multiprocessing.Pool(server.concurrency) as pool:
    pool.starmap(upload_spj_exception_wrapper, tasks)
class ServerSynchronize(BaseBackstageMixin, View):
  """Kick off a background synchronization of problems/SPJ to one server.

  The ``t`` query parameter selects the scope: "all" pushes everything,
  a numeric value pushes a single problem (v<3 servers only), otherwise
  only out-of-date items are pushed.
  """
  def post(self, request, pk):
    server = get_object_or_404(Server, pk=pk)
    if server.version >= 3:
      # v3+ servers receive special judge programs, not whole problems.
      programs = SpecialProgram.objects.all()
      if request.GET.get("t") != "all":
        # Skip programs the server already reports as present.
        exists = list_spj(server)
        programs = programs.exclude(fingerprint__in=exists)
      threading.Thread(target=synchronize_func_v3, args=(server, list(programs))).start()
    else:
      if request.GET.get("t") == "all":
        problems = Problem.objects.all()
      elif request.GET.get('t', '').isdigit():
        problems = Problem.objects.filter(pk=request.GET['t'])
      else:
        # Default: only problems updated since their last sync to this server.
        problem_ids = server.serverproblemstatus_set.select_related("problem"). \
          filter(last_synchronize__lt=F('problem__update_time')).values_list("problem_id", flat=True)
        problems = Problem.objects.filter(id__in=problem_ids)
      threading.Thread(target=synchronize_func, args=(server, list(problems))).start()
    # The upload continues in the background; return to the list immediately.
    return HttpResponseRedirect(reverse('backstage:server'))
class ServerProblemStatusList(BaseBackstageMixin, ListView):
  """Per-server listing of problem (or SPJ program) synchronization state."""
  template_name = 'backstage/server/server_problem_status.jinja2'
  context_object_name = 'server_problem_status_list'

  def get_queryset(self):
    # Sentinel "never synchronized" timestamp for newly created status rows.
    NEVER = datetime(1990, 1, 1)
    self.server = get_object_or_404(Server, pk=self.kwargs["pk"])
    if self.server.version >= 3:
      # v3+ servers track special judge programs; annotate each with
      # whether the server already has it.
      queryset = SpecialProgram.objects.all()
      exists = list_spj(self.server)
      for q in queryset:
        q.exists = q.fingerprint in exists
      return queryset
    else:
      with transaction.atomic():
        # Create missing status rows so every problem appears in the list,
        # marked as never synchronized.
        does_not_exist = set(Problem.objects.values_list("id", flat=True)) - \
                         set(self.server.serverproblemstatus_set.all().values_list("problem_id", flat=True))
        for problem in does_not_exist:
          ServerProblemStatus.objects.create(server=self.server, problem_id=problem)
        self.server.serverproblemstatus_set.filter(problem_id__in=does_not_exist).update(last_synchronize=NEVER)
      return self.server.serverproblemstatus_set.select_related("problem").only("server_id", "problem_id",
                                                                                "problem__title", "problem__alias",
                                                                                "problem__update_time", "last_status",
                                                                                "last_synchronize").all()

  def get_context_data(self, **kwargs):  # pylint: disable=arguments-differ
    data = super().get_context_data(**kwargs)
    data['server'] = self.server
    return data
class ServerSemaphoreReset(BaseBackstageMixin, View):
def post(self, request, *args, **kwargs):
try:
Semaphore( |
pkmital/CADL | session-5/libs/celeb_vaegan.py | Python | apache-2.0 | 2,738 | 0.001096 | """
Creative Applications of Deep Learning w/ Tensorflow.
Kadenze, Inc.
Copyright Parag K. Mital, June 2016.
"""
import numpy as np
import tensorflow as tf
from tensorflow.python.platform import gfile
from .utils import download
from skimage.transform import resize as imresize
def celeb_vaegan_download():
    """Download a pretrained celeb vae/gan network.

    Returns the local paths of the downloaded model file and the celeb
    attribute-list text file, in that order.
    """
    # Load the model and labels
    model = download('https://s3.amazonaws.com/cadl/models/celeb.vaegan.tfmodel')
    labels = download('https://s3.amazonaws.com/cadl/celeb-align/list_attr_celeba.txt')
    return model, labels
def get_celeb_vaegan_model():
    """Get a pretrained model.

    Returns
    -------
    net : dict
        {
            'graph_def': tf.GraphDef
                The graph definition
            'labels': list
                List of different possible attributes from celeb
            'attributes': np.ndarray
                One hot encoding of the attributes per image
                [n_els x n_labels]
            'preprocess': function
                Preprocess function
        }
    """
    # Download the trained net
    model, labels = celeb_vaegan_download()

    # Parse the ids and synsets: line 0 is the element count, line 1 the
    # attribute names, the remaining lines one +1/-1 flag per attribute.
    with open(labels) as f:
        txt = f.readlines()
    n_els = int(txt[0].strip())
    labels = txt[1].strip().split()
    n_labels = len(labels)
    attributes = np.zeros((n_els, n_labels), dtype=bool)
    for i, txt_i in enumerate(txt[2:]):
        attributes[i] = (np.array(txt_i.strip().split()[1:]).astype(int) > 0)

    # Load the saved graph
    with gfile.GFile(model, 'rb') as f:
        graph_def = tf.GraphDef()
        try:
            graph_def.ParseFromString(f.read())
        except Exception:
            # Protobuf's C++ parser can fail on large graphs; suggest the
            # pure-python implementation instead of crashing.
            print('try adding PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION=python ' +
                  'to environment. e.g.:\n' +
                  'PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION=python ipython\n' +
                  'See here for info: ' +
                  'https://github.com/tensorflow/tensorflow/issues/582')
    net = {
        'graph_def': graph_def,
        'labels': labels,
        'attributes': attributes,
        'preprocess': preprocess,
    }
    return net
def preprocess(img, crop_factor=0.8):
    """Replicate the preprocessing we did on the VAE/GAN.

    This model used a crop_factor of 0.8 and crop size of [100, 100, 3].

    Parameters
    ----------
    img : np.ndarray
        Input image, at least 2-D (H x W[ x C]).
    crop_factor : float
        Fraction of the center square kept before resizing.

    Returns
    -------
    np.ndarray
        The preprocessed 100 x 100 image.
    """
    # Center-crop to the largest square that fits inside the image.
    crop = np.min(img.shape[:2])
    r = (img.shape[0] - crop) // 2
    c = (img.shape[1] - crop) // 2
    cropped = img[r: r + crop, c: c + crop]
    r, c, *d = cropped.shape
    if crop_factor < 1.0:
        amt = (1 - crop_factor) / 2
        h, w = int(c * amt), int(r * amt)
        # Guard against h == 0 or w == 0 for very small inputs: slicing
        # with [0:-0] would otherwise yield an empty array.
        if h > 0 and w > 0:
            cropped = cropped[h:-h, w:-w]
    rsz = imresize(cropped, (100, 100), preserve_range=False)
    return rsz
|
viniciuschiele/flask-webapi | tests/test_status.py | Python | mit | 1,233 | 0 | from flask_webapi import status
from unittest import TestCase
class TestStatus(TestCase):
    """Exercise the status-code range predicates over every code in range.

    Each class of codes spans 100 values inclusive (e.g. 1xx is 100-199),
    so the loops use an exclusive stop of x00 + 100 to cover the upper
    boundary codes (199, 299, ...) as well.
    """

    def test_is_informational(self):
        self.assertFalse(status.is_informational(99))
        self.assertFalse(status.is_informational(200))
        for i in range(100, 200):
            self.assertTrue(status.is_informational(i))

    def test_is_success(self):
        self.assertFalse(status.is_success(199))
        self.assertFalse(status.is_success(300))
        for i in range(200, 300):
            self.assertTrue(status.is_success(i))

    def test_is_redirect(self):
        self.assertFalse(status.is_redirect(299))
        self.assertFalse(status.is_redirect(400))
        for i in range(300, 400):
            self.assertTrue(status.is_redirect(i))

    def test_is_client_error(self):
        self.assertFalse(status.is_client_error(399))
        self.assertFalse(status.is_client_error(500))
        for i in range(400, 500):
            self.assertTrue(status.is_client_error(i))

    def test_is_server_error(self):
        self.assertFalse(status.is_server_error(499))
        self.assertFalse(status.is_server_error(600))
        for i in range(500, 600):
            self.assertTrue(status.is_server_error(i))
|
anton-golubkov/Garland | src/ipf/ipftype/ipfsmoothingtype.py | Python | lgpl-2.1 | 1,155 | 0.012121 | #------------------------------------------------------------- | ------------------
# Copyright (c) 2011 Anton Golubkov.
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the GNU Lesser Public License v2.1
# which accompanies this distribution, and is available at
# http://www.gnu.org/licenses/old-licenses/gpl-2.0.html
#
# Contributors:
# Anton Golubkov - initial API and implementation
#-------------------------------------------------------------------------------
# -*- coding: utf-8 -*-
import ipfdicttype
import cv
class IPFSmoothingType(ipfdicttype.IPFDictType):
    """ Smoothing type dict

    Maps display names to the legacy OpenCV ``cv.Smooth`` type constants.
    """
    name = "IPFSmoothingType"
    dictionary = {"Blur" : cv.CV_BLUR,
                  "Gaussian" : cv.CV_GAUSSIAN,
                  "Median" : cv.CV_MEDIAN,
                 # "Bilateral" : cv.CV_BILATERAL, # TODO: Need understand algorithm and use param3 and param4
                  }

    def __init__(self):
        pass

    @classmethod
    def default_value(cls):
        """ Return default value for this type (the "Blur" constant). """
        return cls.dictionary["Blur"]
|
makaimc/txt2react | core/models.py | Python | mit | 292 | 0.003425 | from django.contrib.auth.models import User
from django.db import models

from .utils import create_slug


class BaseModel(models.Model):
    """Abstract base that timestamps creation and last modification."""
    # Set once when the row is first saved.
    created = models.DateTimeField(auto_now_add=True)
    # Refreshed on every save().
    last_updated = models.DateTimeField(auto_now=True)

    class Meta:
        abstract = True
|
tectronics/passwdmanager | setup.py | Python | gpl-3.0 | 280 | 0.035714 | from distutils.core import setup
import glob
import py2exe

# Build Windows executables for the GUI (passwdManager) and the upgrade
# helper, bundling the data/ and icons/ resources alongside them.
setup(windows=[{"script": "passwdManager.py", "icon_resources": [(1, "icons/pm.ico")]}, "upgrade.py"],
      data_files=[("data", glob.glob("data/*.*")),
                  ("icons", glob.glob("icons/*.png"))]
      )
|
IAlwaysBeCoding/mrq | tests/fixtures/config-retry1.py | Python | mit | 105 | 0 |
TASKS = {
    # Retry this task at most once, waiting 1 second between attempts.
    "tests.tasks.general.Retry": {
        "max_retries": 1,
        "retry_delay": 1
    }
}
|
zhreshold/mxnet | tests/python/unittest/common.py | Python | apache-2.0 | 13,748 | 0.003782 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import print_function
import sys, os, logging, functools
import multiprocessing as mp
import mxnet as mx
import numpy as np
import random
import shutil
from mxnet.base import MXNetError
curr_path = os.path.dirname(os.path.abspath(os.path.expanduser(__file__)))
# Make the shared test helpers (../common) importable, and put the in-tree
# python package ahead of any installed mxnet on sys.path.
sys.path.append(os.path.join(curr_path, '../common/'))
sys.path.insert(0, os.path.join(curr_path, '../../../python'))

import models
from contextlib import contextmanager
import pytest
from tempfile import TemporaryDirectory
import locale
# Marks tests that are expected to fail whenever the active locale uses a
# decimal separator other than "." (see issue #18097).
xfail_when_nonstandard_decimal_separator = pytest.mark.xfail(
    locale.localeconv()["decimal_point"] != ".",
    reason="Some operators break when the decimal separator is set to anything other than \".\". "
           "These operators should be rewritten to utilize the new FFI. Please see #18097 for more "
           "information."
)
def assertRaises(expected_exception, func, *args, **kwargs):
    """Assert that ``func(*args, **kwargs)`` raises *expected_exception*.

    Fails with AssertionError if no exception is raised; an exception of
    any other type propagates unchanged.
    """
    try:
        func(*args, **kwargs)
    except expected_exception:
        pass
    else:
        # Did not raise exception
        assert False, "%s did not raise %s" % (func.__name__, expected_exception.__name__)
def default_logger():
    """Return the module logger, attaching a stderr handler exactly once."""
    module_logger = logging.getLogger(__name__)
    # Repeated calls hit the same logger object, so only configure it the
    # first time, i.e. while it still has no handlers.
    if not module_logger.handlers:
        stream_handler = logging.StreamHandler(sys.stderr)
        stream_handler.setFormatter(logging.Formatter('[%(levelname)s] %(message)s'))
        module_logger.addHandler(stream_handler)
        if module_logger.getEffectiveLevel() == logging.NOTSET:
            module_logger.setLevel(logging.INFO)
    return module_logger
@contextmanager
def random_seed(seed=None):
    """
    Runs a code block with a new seed for np, mx and python's random.

    Parameters
    ----------

    seed : the seed to pass to np.random, mx.random and python's random.

    To impose rng determinism, invoke e.g. as in:

    with random_seed(1234):
        ...

    To impose rng non-determinism, invoke as in:

    with random_seed():
        ...

    Upon conclusion of the block, the rng's are returned to
    a state that is a function of their pre-block state, so
    any prior non-determinism is preserved.

    """
    try:
        # Remember a seed derived from the current state so we can restore
        # a pre-block-dependent state afterwards.
        next_seed = np.random.randint(0, np.iinfo(np.int32).max)
        if seed is None:
            # Non-deterministic mode: reseed from entropy, then draw the
            # seed that will actually be used (and logged).
            np.random.seed()
            seed = np.random.randint(0, np.iinfo(np.int32).max)
        logger = default_logger()
        logger.debug('Setting np, mx and python random seeds = %s', seed)
        np.random.seed(seed)
        mx.random.seed(seed)
        random.seed(seed)
        yield
    finally:
        # Reinstate prior state of np.random and other generators
        np.random.seed(next_seed)
        mx.random.seed(next_seed)
        random.seed(next_seed)
def _assert_raise_cuxx_version_not_satisfied(min_version, cfg):
    """Build a test decorator that expects failure below *min_version*.

    *cfg* supplies the environment-variable names and default version for
    the CUDA/cuDNN flavor being checked (see the public wrappers below).
    """
    def less_than(version_left, version_right):
        """Compares two version strings in the format num(.[num])*"""
        if not version_left or not version_right:
            return False
        left = version_left.split(".")
        right = version_right.split(".")

        # 0 pad shortest version - e.g.
        # less_than("9.1", "9.1.9") == less_than("9.1.0", "9.1.9")
        longest = max(len(left), len(right))
        left.extend([0] * (longest - len(left)))
        right.extend([0] * (longest - len(right)))

        # compare each of the version components
        for l, r in zip(left, right):
            if l == r:
                continue
            return int(l) < int(r)
        return False

    def test_helper(orig_test):
        @functools.wraps(orig_test)
        def test_new(*args, **kwargs):
            # Library is considered unavailable when explicitly switched off
            # or when its (env-provided) version is below the requirement.
            cuxx_off = os.getenv(cfg['TEST_OFF_ENV_VAR']) == 'true'
            cuxx_env_version = os.getenv(cfg['VERSION_ENV_VAR'], None if cuxx_off else cfg['DEFAULT_VERSION'])
            cuxx_test_disabled = cuxx_off or less_than(cuxx_env_version, min_version)
            if not cuxx_test_disabled or mx.context.current_context().device_type == 'cpu':
                # Requirement met (or running on CPU): the test must pass.
                orig_test(*args, **kwargs)
            else:
                # Requirement not met on GPU: the test is expected to raise.
                pytest.raises((MXNetError, RuntimeError), orig_test, *args, **kwargs)
        return test_new
    return test_helper
def assert_raises_cudnn_not_satisfied(min_version):
    """Decorator: test needs cuDNN >= *min_version*, else expect it to raise."""
    return _assert_raise_cuxx_version_not_satisfied(min_version, {
        'TEST_OFF_ENV_VAR': 'CUDNN_OFF_TEST_ONLY',
        'VERSION_ENV_VAR': 'CUDNN_VERSION',
        'DEFAULT_VERSION': '7.3.1'
    })
def assert_raises_cuda_not_satisfied(min_version):
    """Decorator: expect failure when the installed CUDA is older than *min_version*."""
    cuda_cfg = {
        'TEST_OFF_ENV_VAR': 'CUDA_OFF_TEST_ONLY',
        'VERSION_ENV_VAR': 'CUDA_VERSION',
        'DEFAULT_VERSION': '10.1',
    }
    return _assert_raise_cuxx_version_not_satisfied(min_version, cuda_cfg)
def with_seed(seed=None):
"""
A decorator for test functions that manages rng seeds.
Parameters
----------
seed : the seed to pass to np.random and mx.random
This tests decorator sets the np, mx and python random seeds identically
prior to each test, then outputs those seeds if the test fails or
if the test requires a fixed seed (as a reminder to make the test
more robust against random data).
@with_seed()
def test_ok_with_random_data():
...
@with_seed(1234)
def test_not_ok_with_random_data():
...
Use of the @with_seed() decorator for all tests creates
tests isolation and reproducability of failures. When a
test fails, the decorator outputs the seed used. The user
can then set the environment variable MXNET_TEST_SEED to
the value reported, then rerun the test with:
pytest --verbose --capture=no <test_module_name.py>::<failing_test>
To run a test repeatedly, set MXNET_TEST_COUNT=<NNN> in the environment.
To see the seeds of even the passing tests, add '--log-level=DEBUG' to pytest.
"""
def test_helper(orig_test):
@functools.wraps(orig_test)
def test_new(*args, **kwargs):
test_count = int(os.getenv('MXNET_TEST_COUNT', '1'))
env_seed_str = os.getenv('MXNET_TEST_SEED')
for i in range(test_count):
if seed is not None:
this_test_seed = seed
log_level = logging.INFO
elif env_seed_str is not None:
this_test_seed = int(env_seed_str)
log_level = logging.INFO
else:
this_test_seed = np.random.randint(0, np.iinfo(np.int32).max)
log_level = logging.DEBUG
post_test_state = np.random.get_state()
np.random.seed(this_test_seed)
mx.random.seed(this_test_seed)
random.seed(this_test_seed)
logger = default_logger()
# 'pytest --logging-level=DEBUG' shows this msg even with an ensuing core dump.
test_count_msg = '{} of {}: '.format(i+1,test_count) if test_count > 1 else ''
test_msg = ('{}Setting test np/mx/python random seeds, use MXNET_TEST_SEED={}'
' to reproduce.').format(test_count_msg, this_test_seed)
logger.log(log_level, test_msg)
try:
orig_test(*args, **kwargs)
except:
# With exceptions, repeat test_msg at INFO le |
atodorov/anaconda | pyanaconda/modules/payloads/source/hmc/initialization.py | Python | gpl-2.0 | 2,546 | 0.000786 | #
# Copyright (C) 2020 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# the GNU General Public License v.2, or (at your option) any later version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY expressed or implied, including the implied warranties of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details. You should have received a copy of the
# GNU General Pub | lic License along with this program; if not, write to the
# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the
# source code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission of
# Red Hat, Inc.
#
| from pyanaconda.anaconda_loggers import get_module_logger
from pyanaconda.core.util import execWithRedirect
from pyanaconda.payload.utils import unmount
from pyanaconda.modules.common.errors.payload import SourceSetupError
from pyanaconda.modules.common.task import Task
log = get_module_logger(__name__)
__all__ = ["TearDownHMCSourceTask", "SetUpHMCSourceTask"]
class TearDownHMCSourceTask(Task):
    """Task to teardown the SE/HMC source."""

    def __init__(self, target_mount):
        """
        :param target_mount: mount point where the HMC media drive was mounted
        """
        super().__init__()
        self._target_mount = target_mount

    @property
    def name(self):
        """Human-readable name of this task."""
        return "Tear down the SE/HMC source"

    def run(self):
        """Tear down the installation source."""
        # Only the unmount is needed here; mounting is done by SetUpHMCSourceTask.
        unmount(self._target_mount)
class SetUpHMCSourceTask(Task):
    """Task to set up the SE/HMC source."""

    def __init__(self, target_mount):
        """
        :param target_mount: mount point to mount the HMC media drive at
        """
        super().__init__()
        self._target_mount = target_mount

    @property
    def name(self):
        """Human-readable name of this task."""
        return "Set up the SE/HMC source"

    def run(self):
        """Set up the installation source.

        :raise SourceSetupError: if the HMC media drive cannot be accessed
            or mounted (a truthy return code from the helper indicates failure)
        """
        log.debug("Trying to mount the content of HMC media drive.")
        # Test the SE/HMC file access.
        if execWithRedirect("/usr/sbin/lshmc", []):
            raise SourceSetupError("The content of HMC media drive couldn't be accessed.")
        # Mount the device.
        if execWithRedirect("/usr/bin/hmcdrvfs", [self._target_mount]):
            raise SourceSetupError("The content of HMC media drive couldn't be mounted.")
        log.debug("We are ready to use the HMC at %s.", self._target_mount)
|
DBuildService/atomic-reactor | tests/plugins/test_store_metadata.py | Python | bsd-3-clause | 28,357 | 0.000846 | """
Copyright (c) 2015, 2019 Red Hat, Inc
All rights reserved.
This software may be modified and distributed under the terms
of the BSD license. See the LICENSE file for details.
"""
import os
import json
from datetime import datetime, timedelta
from copy import deepcopy
from textwrap import dedent
from flexmock import flexmock
from osbs.api import OSBS
import osbs.conf
from osbs.exceptions import OsbsResponseException
from atomic_reactor.constants import (PLUGIN_KOJI_UPLOAD_PLUGIN_KEY,
| PLUGIN_VERIFY_MEDIA_KEY,
| PLUGIN_FETCH_SOURCES_KEY)
from atomic_reactor.build import BuildResult
from atomic_reactor.inner import DockerBuildWorkflow
from atomic_reactor.plugin import ExitPluginsRunner, PluginFailedException
from atomic_reactor.plugins.pre_add_help import AddHelpPlugin
from atomic_reactor.plugins.post_rpmqa import PostBuildRPMqaPlugin
from atomic_reactor.plugins.exit_store_metadata_in_osv3 import StoreMetadataInOSv3Plugin
from atomic_reactor.plugins.pre_reactor_config import (ReactorConfigPlugin,
WORKSPACE_CONF_KEY,
ReactorConfig)
from atomic_reactor.util import LazyGit, ManifestDigest, df_parser, DockerfileImages
import pytest
from tests.constants import (LOCALHOST_REGISTRY, DOCKER0_REGISTRY, TEST_IMAGE, TEST_IMAGE_NAME,
INPUT_IMAGE, MOCK_SOURCE)
from tests.util import add_koji_map_in_workflow, is_string_type
# Canned manifest digests returned by the fake registries set up in prepare().
DIGEST1 = "sha256:1da9b9e1c6bf6ab40f1627d76e2ad58e9b2be14351ef4ff1ed3eb4a156138189"
DIGEST2 = "sha256:0000000000000000000000000000000000000000000000000000000000000000"
# Placeholder for digest fields whose exact value is never asserted.
DIGEST_NOT_USED = "not-used"
# Apply the 'user_params' fixture to every test in this module.
pytestmark = pytest.mark.usefixtures('user_params')
class Y(object):
    """Bare attribute container used to fake nested source/builder objects."""
    pass
class X(object):
    """Fake builder exposing the attributes the plugin under test reads."""
    image_id = INPUT_IMAGE
    # Fake source object with no dockerfile on disk.
    source = Y()
    source.dockerfile_path = None
    source.path = None
    # One parent image with a pre-resolved digest.
    dockerfile_images = DockerfileImages(['qwe:asd'])
    dockerfile_images['qwe:asd'] = "sha256:spamneggs"

    def parent_images_to_str(self):
        """Map each parent image name to its digest string.

        Unresolved parents get the fixed placeholder 'sha256:bacon'.
        """
        result = {}
        for key, val in self.dockerfile_images.items():
            if val:
                result[key.to_str()] = val.to_str()
            else:
                result[key.to_str()] = 'sha256:bacon'
        return result
class XBeforeDockerfile(object):
    """Fake builder representing the state before a Dockerfile exists."""

    def __init__(self):
        self.image_id = INPUT_IMAGE
        self.source = Y()
        self.source.dockerfile_path = None
        self.source.path = None
        # No parent images known yet.
        self.dockerfile_images = DockerfileImages([])
        self.df_dir = None

    def parent_images_to_str(self):
        """No parents before the Dockerfile is generated."""
        return {}

    @property
    def df_path(self):
        # Mimics the real builder, which cannot provide a Dockerfile path yet.
        raise AttributeError("Dockerfile has not yet been generated")
def prepare(docker_registries=None, before_dockerfile=False):
    """Build a DockerBuildWorkflow wired up with mocked OSBS/OpenShift access.

    :param docker_registries: iterable of registry URIs to register fake
        digests for (defaults to the two test registries)
    :param before_dockerfile: if True, install the XBeforeDockerfile fake
        builder (no Dockerfile yet); otherwise the X fake builder
    :return: a configured DockerBuildWorkflow instance
    """
    if docker_registries is None:
        docker_registries = (LOCALHOST_REGISTRY, DOCKER0_REGISTRY,)

    # No-op stand-ins for the OSBS annotation/label update calls.
    def update_annotations_on_build(build_id, annotations):
        pass

    def update_labels_on_build(build_id, labels):
        pass
    # Provide the BUILD environment variable the plugin parses for metadata.
    new_environ = deepcopy(os.environ)
    new_environ["BUILD"] = dedent('''\
        {
          "metadata": {
            "name": "asd",
            "namespace": "namespace"
          }
        }
        ''')
    flexmock(OSBS, update_annotations_on_build=update_annotations_on_build)
    flexmock(OSBS, update_labels_on_build=update_labels_on_build)
    # Expected osbs.conf.Configuration constructor arguments.
    config_kwargs = {
        'namespace': 'namespace',
        'verify_ssl': True,
        'openshift_url': 'http://example.com/',
        'use_auth': True,
        'conf_file': None,
        'build_json_dir': None
    }
    (flexmock(osbs.conf.Configuration)
        .should_call("__init__")
        .with_args(**config_kwargs))
    flexmock(os)
    os.should_receive("environ").and_return(new_environ)  # pylint: disable=no-member
    workflow = DockerBuildWorkflow(source=MOCK_SOURCE)
    # Reactor config pointing at the fake OpenShift instance above.
    openshift_map = {
        'url': 'http://example.com/',
        'insecure': False,
        'auth': {'enable': True},
    }
    workflow.plugin_workspace[ReactorConfigPlugin.key] = {}
    workflow.plugin_workspace[ReactorConfigPlugin.key][WORKSPACE_CONF_KEY] =\
        ReactorConfig({'version': 1, 'openshift': openshift_map})
    add_koji_map_in_workflow(workflow, hub_url='/', root_url='')
    # Tag configuration: one floating, one primary and one unique image.
    workflow.tag_conf.add_floating_image(TEST_IMAGE)
    workflow.tag_conf.add_primary_image("namespace/image:version-release")
    workflow.tag_conf.add_unique_image("namespace/image:asd123")
    # Register canned digests with every fake registry.
    for docker_registry in docker_registries:
        r = workflow.push_conf.add_docker_registry(docker_registry)
        r.digests[TEST_IMAGE_NAME] = ManifestDigest(v1=DIGEST_NOT_USED, v2=DIGEST1)
        r.digests["namespace/image:asd123"] = ManifestDigest(v1=DIGEST_NOT_USED,
                                                             v2=DIGEST2)
    # Install the appropriate fake builder for the requested build stage.
    if before_dockerfile:
        setattr(workflow, 'builder', XBeforeDockerfile())
        setattr(workflow.builder, 'base_image_inspect', {})
    else:
        setattr(workflow, 'builder', X())
        setattr(workflow.builder, 'base_image_inspect', {'Id': '01234567'})
    workflow.build_logs = [
        "a", "b",
    ]
    # Fake git source pinned to a fixed commit id.
    workflow.source.lg = LazyGit(None, commit="commit")
    flexmock(workflow.source.lg)
    # pylint: disable=no-member
    workflow.source.lg.should_receive("_commit_id").and_return("commit")
    # pylint: enable=no-member
    return workflow
@pytest.mark.parametrize(('br_annotations', 'expected_br_annotations'), (
(None, None),
('spam', '"spam"'),
(['s', 'p', 'a', 'm'], '["s", "p", "a", "m"]'),
))
@pytest.mark.parametrize(('br_labels', 'expected_br_labels'), (
(None, None),
('bacon', 'bacon'),
(123, '123'),
))
@pytest.mark.parametrize('koji', [True, False])
@pytest.mark.parametrize(('help_results', 'expected_help_results', 'base_from_scratch'), (
(None, False, False),
({
'help_file': None,
'status': AddHelpPlugin.NO_HELP_FILE_FOUND,
}, None, False),
({
'help_file': 'help.md',
'status': AddHelpPlugin.HELP_GENERATED,
}, 'help.md', True),
))
@pytest.mark.parametrize(('verify_media_results', 'expected_media_results'), (
([], False),
(["application/vnd.docker.distribution.manifest.v1+json"],
["application/vnd.docker.distribution.manifest.v1+json"]),
))
def test_metadata_plugin(tmpdir, br_annotations, expected_br_annotations,
br_labels, expected_br_labels, koji,
help_results, expected_help_results, base_from_scratch,
verify_media_results, expected_media_results):
initial_timestamp = datetime.now()
workflow = prepare()
if base_from_scratch:
df_content = """
FROM fedora
RUN yum install -y python-django
CMD blabla
FROM scratch
RUN yum install -y python"""
workflow.builder.base_from_scratch = base_from_scratch
else:
df_content = """
FROM fedora
RUN yum install -y python-django
CMD blabla"""
df = df_parser(str(tmpdir))
df.content = df_content
workflow.builder.dockerfile_images = DockerfileImages(df.parent_images)
workflow.builder.df_path = df.dockerfile_path
workflow.builder.df_dir = str(tmpdir)
workflow.prebuild_results = {
AddHelpPlugin.key: help_results
}
if help_results is not None:
workflow.annotations['help_file'] = help_results['help_file']
workflow.postbuild_results = {
PostBuildRPMqaPlugin.key: "rpm1\nrpm2",
}
workflow.exit_results = {
PLUGIN_VERIFY_MEDIA_KEY: verify_media_results,
}
workflow.fs_watcher._data = dict(fs_data=None)
if br_annotations or br_labels:
workflow.build_result = BuildResult(
image_id=INPUT_IMAGE,
annotations={'br_annotations': br_annotations} if br_annotations else None,
labels={'br_labels': br_labels} if br_labels else None,
)
timestamp = (initial_timestamp + timedelta(seconds=3)).isoformat()
workflow.plugins_timestamps = {
PostBuildRPMqaPlugin.key: timestamp,
}
workflow.plugins_durations = {
PostBuildRPMqaPlugin.key: 3.03,
|
renestvs/data_structure_project | data_structure/first_bim/data_structures.py | Python | mit | 4,324 | 0.007863 | from math import floor
from pip._vendor.requests.packages.urllib3.connectionpool import xrange
__author__ = 'rene_'
v = []
aux = []
size = []
############################################################
####################### HeapSort #########################
############################################################
def sift(i, n):
    """Recursively sift v[i] down so v[i..n] satisfies the max-heap property.

    ``n`` is the index of the LAST element of the heap region (inclusive),
    matching the convention of build() (which passes len(v)-1) and of
    sift_iterativo().
    """
    esq = 2*i + 1   # left child index
    dir = 2*i + 2   # right child index
    maior = i       # index of the largest of {parent, left, right}
    print(esq, dir, maior)
    # BUG FIX: the child-bound checks used '<', which excluded index n (the
    # last element) from ever being compared, so build() produced a broken
    # heap; use '<=' exactly as sift_iterativo() does.
    if esq <= n and v[esq] > v[i]:
        maior = esq
        print("maior = esq", esq)
    if dir <= n and v[dir] > v[maior]:
        maior = dir
        print("maior = dir", dir)
    if (maior != i):
        aux = v[i]
        v[i] = v[maior]
        v[maior] = aux
        print("sift", maior, n)
        sift(maior, n)
def sift_iterativo(i, n):
    """Iteratively sift v[i] down so v[i..n] is a max-heap (n = last index)."""
    while True:
        left = 2 * i + 1
        right = 2 * i + 2
        largest = i
        if left <= n and v[left] > v[i]:
            largest = left
        if right <= n and v[right] > v[largest]:
            largest = right
        if largest == i:
            # Heap property restored; stop sifting.
            break
        v[i], v[largest] = v[largest], v[i]
        i = largest
def build():
    """Turn the global list v into a max-heap, heapifying bottom-up."""
    # Start from the last internal node and sift each one down.
    first_internal = len(v) // 2 - 1
    for i in range(first_internal, -1, -1):
        print("FOR - ", i)
        sift(i, len(v) - 1)
def heapsort():
    """Sort the global list v in place (ascending) via heapsort."""
    print("==== build heap ====")
    build()
    print(v)
    print("==== heapsort ====")
    for last in range(len(v) - 1, -1, -1):
        # Move the current maximum (root) past the end of the active region,
        # then restore the heap property on the shrunken region.
        v[last], v[0] = v[0], v[last]
        print("ISOLA O MAIOR - ", v[last])
        print("sift for - 0, ", last - 1)
        sift_iterativo(0, last - 1)
    print("HEAP MAX - ", v)
def max():
    """Return the maximum element (the heap root v[0]), or -1 if v is empty.

    NOTE: shadows the builtin max(); the name is kept for compatibility
    with existing callers.
    """
    # BUG FIX: the guard was 'len(v) >= 0', which is always true, so an
    # empty list raised IndexError instead of returning the error sentinel.
    if len(v) > 0:
        return v[0]
    return -1  # error sentinel (untreated error, as in the original)
############################################################
###################### QuickSort #########################
############################################################
def quicksort(min,max):
    """Quicksort the global list v over the inclusive range [min, max].

    Recurses into the smaller partition and loops on the larger one, which
    bounds the recursion depth at O(log n).

    NOTE: the parameter names shadow the builtins min()/max(); kept for
    interface compatibility with existing callers.
    """
    print("====quicksort(",min, max,")")
    while min < max:
        print("====partition2(", min, max, ")")
        # p is the final index of the pivot; v[p] is in sorted position.
        p = partition2(min,max)
        print("p-min < max-p = ", p-min, max-p, p-min < max-p)
        if (p-min < max-p):
            # Left side is smaller: recurse on it, iterate on the right side.
            print("====quicksort 01(", min, p-1, ")")
            quicksort(min, p-1)
            min = p+1
        else:
            # Right side is smaller: recurse on it, iterate on the left side.
            print("====quicksort 02(", p+1, max, ")")
            quicksort(p+1,max)
            max = p-1
def partition(left, right):
    """Partition v[left..right] around pivot v[left] (Hoare-style scan).

    Returns the pivot's final index r; elements left of r are < pivot,
    elements right of it are >= pivot.  Not used by quicksort(), which
    calls partition2() instead.
    """
    pivot = v[left]
    l = left + 1   # scans rightwards for elements >= pivot
    r = right      # scans leftwards for elements < pivot
    print("partition indices", pivot, l, r)
    while l < r:
        while l < right and v[l] < pivot:
            l+=1
            #print("left ", l)
        while r > left and v[r] >= pivot:
            r-=1
            #print("right ", r)
        if l < r:
            # Out-of-place pair found: swap them.
            print("TORCAR : v[l] - ", v[l], "v[r] - ", v[r])
            aux = v[l]
            v[l] = v[r]
            v[r] = aux
    # Put the pivot into its final slot at index r.
    print("TORCAR PIVOT: v[left] - ", v[left], "v[r] - ", v[r], "pivot - ", pivot)
    v[left] = v[r]
    v[r] = pivot
    print("array", v)
    return r
def partition2(left, right):
    """Lomuto partition of v[left..right] using v[right] as the pivot.

    Returns the pivot's final index; everything to its left is <= pivot.
    """
    pivot = v[right]
    boundary = left - 1  # last index of the <= pivot region
    for scan in range(left, right):
        if v[scan] <= pivot:
            boundary += 1
            v[boundary], v[scan] = v[scan], v[boundary]
    # Place the pivot just after the <= region.
    v[boundary + 1], v[right] = v[right], v[boundary + 1]
    return boundary + 1
############################################################
###################### MergeSort #########################
############################################################
def mergesort(i, f):
    """Recursively merge-sort the global list v over the inclusive range [i, f].

    Uses the global list aux as the merge scratch buffer (see merge()).
    """
    print("====MergeSort====")
    if i < f:
        m = (i + f) // 2  # left half is [i, m], right half is [m+1, f]
        print(i,f)
        print("====MergeSort01====")
        mergesort(i, m)
        print("====MergeSort02====")
        mergesort(m+1, f)
        # BUG FIX: the original called merge() immediately after sorting the
        # left half, i.e. before the right half was sorted, so the result
        # was not ordered.  Both halves must be sorted before merging.
        merge(i, m, f)
        print("====merge====")
        print("i,m,f", i, m, f)
def merge(i, m, f):
    """Merge the sorted runs v[i..m] and v[m+1..f] (inclusive) back into v.

    NOTE(review): uses the global list aux as scratch space; aux must be
    pre-sized to at least f+1 elements by the caller — the module
    initializes it empty.
    """
    i1 = i       # write index into aux
    i2 = i       # read index in the left run
    i3 = m + 1   # read index in the right run
    # BUG FIX: the original incremented the indices *before* copying, which
    # skipped aux[i] entirely and read one element past the intended one.
    # Copy first, then advance.
    while i2 <= m and i3 <= f:
        if v[i2] < v[i3]:
            aux[i1] = v[i2]
            i2 += 1
        else:
            aux[i1] = v[i3]
            i3 += 1
        i1 += 1
    # Drain whichever run still has elements.
    while i2 <= m:
        aux[i1] = v[i2]
        i1 += 1
        i2 += 1
    while i3 <= f:
        aux[i1] = v[i3]
        i1 += 1
        i3 += 1
    # BUG FIX: the copy-back must cover the full inclusive range [i, f];
    # the original range(i, f) left v[f] stale.
    for j in range(i, f + 1):
        v[j] = aux[j]
|
davidwhogg/LensTractor | LensTractor.py | Python | gpl-2.0 | 14,549 | 0.017871 | #!/usr/bin/env python
# ============================================================================
'''
This file is part of the lenstractor project.
Copyright 2012 David W. Hogg (NYU) and Phil Marshall (Oxford).
'''
# ============================================================================
if __name__ == '__main__':
import matplotlib
matplotlib.use('Agg')
# Fo | nts, latex:
matplotlib.rc('font',**{'family':'serif', 'serif':['TimesNewRoman'], 'size':18.0})
matplotlib.rc('text', usetex=True)
import os
import logging
im | port numpy as np
import pyfits
import time
import string
from astrometry.util import util
import tractor
import lenstractor
# ============================================================================
def main():
"""
NAME
LensTractor.py
PURPOSE
Run the Tractor on a deck of single object cutout images.
Read in an image and its weight map, guess the PSF, put an object at the
centre image of the field and then optimize the catalog and PSF.
COMMENTS
The idea is to identify good lens candidates by principled model
selection: two well-defined models competing against each other, given
multi-epoch imaging data. The Nebula model (1 extended source, plus
N=2, 3 or 4 point sources, with sub-models denoted by "NebulaN") is very
flexible, so should be better at fitting the data in general than the
Lens model (1 extended source, plus 1 background point source). However,
when the Lens provides a good fit, it does so at lower cost (fewer
parameters), so should win by Bayesian information criteria (we use BIC
as a cheap proxy for evidence ratio).
The workflow we probably want to aim for is something like the following:
* Fit PSF images with PSF models; fix PSFs
* Try Nebula2
* Try Nebula4
if Nebula2 beats Nebula4:
Nebula = Nebula2
else:
Nebula = Nebula4
* Try Lens (initialised with Nebula)
if Lens beats Nebula:
Classification = 'Lens'
Return YES
else:
Classification = 'Nebula'
Return NO
Initialisation of Lens via Nebula could be tricky - there is some
parsing to be done, and decisions to be made... In practice we may end
up working just with the Nebula output, which should be at least
easier to interpret than a SExtractor catalog, for example.
Open questions:
Does it make sense to dogmatically associate the extended object with
the deflector?
YES: detection of a deflector galaxy will be needed for a convincing
candidate anyway.
NO: using the extended object to model a high S/N merging image
system should not be punished
To be investigated.
How are we going to interpret the point image positions if we do not
have an estimated deflector position?
OPTIONS
-h --help Print this message
-v --verbose Verbose operation
-s --sample Sample the posterior PDF instead of optimizing
-x --no-plots Do not plot progress
-l --lens Only fit lens model, initialized from scratch
INPUTS
*.fits Deck of postcard images
OPTIONAL INPUTS
-n --nebula K Only fit NebulaK model, initialized from scratch
--manual catalog Initialize model positions from catalog
--optimization-rounds Nr Number of rounds of optimization [2]
--optimization-steps-catalog Nc Number of steps per round spent
optimizing source catalog [10]
--optimization-steps-psf Np Number of steps per round spent
optimizing PSF catalog [2]
-o --output outstem Stem of output catalog filename
--survey name Name of survey (for io formats)
OUTPUTS
stdout Useful information
*.png Plots in png format
To be implemented:
lenstractor_progress.log Logged output
lenstractor_results.txt Model comparison results
lenstractor_lens.cat Lens model parameters, including lightcurves
lenstractor_nebula.cat Nebula model parameters, including lightcurves
EXAMPLES
python LensTractor.py -n 4 \
-o examples/ps1/H1413+117_10x10arcsec \
examples/ps1/H1413+117_10x10arcsec_55*fits > \
examples/ps1/H1413+117_10x10arcsec_Nebula4.log
python LensTractor.py -n 2 \
-o examples/sdss/0951+2635/0951+2635 \
examples/sdss/0951+2635/*sci.fits > \
examples/sdss/0951+2635/0951+2635_Nebula2.log
set id = KIDS_SLID_10058881_SID_8668
python LensTractor.py -v -l -z --survey KIDS \
-o examples/kids/${id} \
examples/kids/${id}_u_???.fits \
examples/kids/${id}_g_???.fits \
examples/kids/${id}_r_???.fits > \
examples/kids/${id}.log &
DEPENDENCIES
* The Tractor astrometry.net/svn/trunk/projects/tractor
* emcee github.com/danfm/emcee
* astrometry.net astrometry.net/svn/trunk/util
BUGS
- SDSS examples show bad WCS treatment...
- Possible problems with image alignment
- Memory leak: restrict no. of sampling iterations :-(
- Header PSF FWHM sometimes NaN, no recovery from this yet
FEATURE REQUESTS
- Lens initialisation, esp source positions, needs careful attention
- StepSizes need optimizing for lens model, esp source position
- Point source mags are not variable
- PSF not being optimized correctly - missing derivatives?
- PhotoCal may need optimizing if zpts are untrustworthy!
HISTORY
2012-07-06 First predicted Lens images Marshall/Hogg (Oxford/NYU)
2013-08- Adapted for KIDS Buddelmeier (Kapteyn)
2014-04- Refactoring for easier experimentation Marshall/Agnello (KIPAC/UCSB)
"""
# --------------------------------------------------------------------
from argparse import ArgumentParser
import sys
# Set available options:
parser = ArgumentParser()
# List of files:
parser.add_argument('inputfiles', metavar='filename', nargs='*') # '*' means there must be 0 or more
# Verbosity:
parser.add_argument('-v', '--verbose', dest='verbose', action='store_true', default=False, help='Make more verbose')
# Optimizing only:
parser.add_argument('-z', '--optimize', dest='optimize', action='store_true', default=False, help='Optimize posterior PDF')
# Sampling only:
parser.add_argument('-s', '--sample', dest='MCMC', action='store_true', default=False, help='Sample posterior PDF')
# Plotting:
parser.add_argument('-x', '--no-plots', dest='noplots', action='store_true', default=False, help='Skip plotting')
# Lens model only:
parser.add_argument('-l', '--lens', dest='lens', action='store_true', default=False, help='Fit Lens model')
# Nebula model only:
parser.add_argument('-n', '--nebula', dest='K', type=int, default=0, help='Fit NebulaK model, provide K')
# Output filename:
parser.add_argument('-o', '--output', dest='outstem', type=str, default='lenstractor.cat', help='Output catalog filename stem')
# Survey we are working on (affects data read-in):
parser.add_argument('--survey', dest='survey', type=str, default="PS1", help="Survey (SDSS, PS1 or KIDS)")
# Use SDSS sky server to get data:
parser.add_argument('--SDSS', dest='rcfstring', type=str, default="None", help="Use SDSS skyserver to return cutouts, supply run,camcol,field,ra,dec,roi")
# Manual input of model initialization:
parser.add_argument('--manual', dest='catalog', type=str, default="None", help="Catalog of Nebula model parameters, for initializing positions")
# Read in options and arguments - note only sci and wht images are supplied:
args = parser.parse_args()
if (args.rcfstring == 'None' and len(args.inputfiles) < 1):
# parser.print_help()
print main.__doc__ # Whoah! What does this do?! Some sort of magic. |
BhallaLab/moose-examples | tutorials/ChemicalOscillators/repressillator.py | Python | gpl-2.0 | 3,118 | 0.018281 | #########################################################################
## This program is part of 'MOOSE', the
## Messaging Object Oriented Simulation Environment.
## Copyright (C) 2014 Upinder S. Bhalla. and NCBS
## It is made available under the terms of the
## GNU Lesser General Public License version 2.1
## See the file COPYING.LIB for the full notice.
#########################################################################
import moose
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import pylab
import numpy
import sys
def main():
    """Load, run and plot the Elowitz-Leibler repressilator model.

    Three genes each repress the next in a cycle (A --| B --| C --| A),
    producing sustained oscillations (Elowitz and Leibler, Nature 2000).
    The GENESIS model file is run with the stochastic 'gssa' solver by
    default; pass a solver name ('gsl', 'gssa' or 'ee') as the first
    command-line argument to override it.
    """
    solver = "gssa"  # pick any of gsl, gssa, ee
    mfile = '../../genesis/Repressillator.g'
    runtime = 6000.0
    if len(sys.argv) >= 2:
        solver = sys.argv[1]
    moose.loadModel(mfile, 'model', solver)
    # Enlarge the compartment volume so the stochastic (gssa) run produces
    # an interesting trace.
    compt = moose.element('/model/kinetics')
    compt.volume = 1e-19
    dt = moose.element('/clock').tickDt[18]
    moose.reinit()
    moose.start(runtime)
    # Upper panel: reference oscillation image; lower panel: this run.
    img = mpimg.imread('repressillatorOsc.png')
    fig = plt.figure(figsize=(12, 10))
    fig.add_subplot(211)
    plt.imshow(img)
    fig.add_subplot(212)
    plt.ylabel('Conc (mM)')
    plt.xlabel('Time (seconds)')
    for table in moose.wildcardFind('/model/#graphs/conc#/#'):
        t = numpy.arange(0, table.vector.size, 1) * dt
        pylab.plot(t, table.vector, label=table.name)
    pylab.legend()
    pylab.show()
# Entry point: run the simulation only when executed as a standalone
# script, not when this module is imported.
if __name__ == '__main__':
    main()
|
ajaniv/django-core-models | django_core_models/core/urls.py | Python | mit | 908 | 0 | """
.. module:: django_core_models.core.urls
:synopsis: django_core_models core application urls module
django_core_models *core* application urls module.
"""
from __future__ import absolute_import
from django.conf.urls import url
from . import views
# REST routes: list and primary-key detail endpoints for each core model.
urlpatterns = [
    # Annotation endpoints.
    url(r'^annotations/$',
        views.AnnotationList.as_view(),
        name='annotation-list'),
    url(r'^annotations/(?P<pk>[0-9]+)/$',
        views.AnnotationDetail.as_view(),
        name='annotation-detail'),
    # Category endpoints.
    url(r'^categories/$', views.CategoryList.as_view(),
        name='category-list'),
    url(r'^categories/(?P<pk>[0-9]+)/$',
        views.CategoryDetail.as_view(),
        name='category-detail'),
    # Currency endpoints.
    url(r'^currencies/$',
        views.CurrencyList.as_view(),
        name='currency-list'),
    url(r'^currencies/(?P<pk>[0-9]+)/$',
        views.CurrencyDetail.as_view(),
        name='currency-detail'),
]
|
sunqm/pyscf | examples/grad/12-excited_state_casscf_grad.py | Python | apache-2.0 | 3,547 | 0.015788 | #!/usr/bin/env python
#
# Author: Qiming Sun <osirpt.sun@gmail.com>
#
'''
Analytical nuclear gradients of CASCI excited state.
'''
from pyscf import gto
from pyscf import scf
from pyscf import mcscf
from pyscf import lib
import inspect
mol = gto.M(
atom = [
["O" , (0. , 0. , 0.)],
[1 , (0. ,-0.757 , 0.587)],
[1 , (0. , 0.757 , 0.587)]],
basis = '631g'
)
mf = scf.RHF(mol).run()
mc = mcscf.CASSCF(mf, 4, 4).state_average ([0.25, 0.25, 0.25, 0.25])
mc.run()
# PySCF-1.6.1 and newer supports the .Gradients method to create a grad
# object after grad module was imported. It is equivalent to call the
# .nuc_grad_method method.
from pyscf import grad
mc = mcscf.CASSCF(mf, 4, 4).state_average ([0.25, 0.25, 0.25, 0.25]).run ()
mc.conv_tol = 1e-10
e3_nosymm = mc.e_states[3]
g3_nosymm = mc.Gradients().kernel(state=3)
print('Gradients of the 3rd | excited state')
print(g3_nosymm)
# The active orbitals here should be O atom 2py (b2) and 2pz (a1) and
# two OH antibonding orbitals (a1 and b1). The four states in order
# are singlet A1, triplet B2, singlet B2, and triplet A1.
#
# Use gradients scanner.
#
# Note the returned gradients are based on atomic unit.
#
g_scanner = mc.nuc_grad_method().as_scanner(state=2)
e2_nosymm, g2_nosymm = g_scanner(mol)
print('Gradients of the 2nd excited state')
print(g2_nosymm)

#
# Specify state ID for the gradients of another state.
#
# Unless explicitly specified as an input argument of set_geom_ function,
# set_geom_ function will use the same unit as the one specified in mol.unit.
#
# This has two nearby local minima consisting of different orbitals, although
# the spins and symmetries of the states are the same as above in both cases.
# The local minimum at the state-average energy of -74.7425 Eh has O atom
# 2py (b2), 2pz (a1), 3s (a1), and 3pz (a1) orbitals. That at -74.7415 has O
# atom 2py (b2), 2pz (a1), and 3py (b2) and one OH antibonding orbital (a1).
mol.set_geom_('''O 0. 0. 0.1
H 0. -0.757 0.587
H 0. 0.757 0.587''')
e3_nosymm_shift, g3_nosymm_shift = g_scanner(mol, state=3)
print (g_scanner.base.e_tot, g_scanner.base.e_states)
print('Energy of the 3rd excited state at a shifted geometry =', e3_nosymm_shift,'Eh')
print('Gradients of the 3rd excited state at a shifted geometry:')
print(g3_nosymm_shift)
#
# State-average mix to average states of selected spins or symmetries
#
# Same molecule, now with point-group symmetry enabled.
mol = gto.M(
    atom = [
        ["O" , (0. , 0. , 0.)],
        [1 , (0. ,-0.757 , 0.587)],
        [1 , (0. , 0.757 , 0.587)]],
    basis = '631g',
    symmetry = True
)
mol.build ()
mf = scf.RHF (mol).run ()

from pyscf import fci
# Two symmetry-adapted FCI solvers: two roots of A1 and two roots of B2.
fcisolvers = [fci.solver (mol, symm=True) for i in (1,2)]
fcisolvers[0].nroots = fcisolvers[1].nroots = 2
fcisolvers[0].wfnsym = 'A1'
fcisolvers[1].wfnsym = 'B2'
mc = mcscf.addons.state_average_mix (mcscf.CASSCF (mf, 4, 4), fcisolvers,
                                     [0.25, 0.25, 0.25, 0.25])
mc.conv_tol = 1e-10
mc.kernel ()
# The states are now ordered first by solver, then by energy, so the 3rd
# excited state is now at index = 1.
g_scanner = mc.nuc_grad_method ().as_scanner (state=1)
e3_symm, g3_symm = g_scanner (mol)

mol.set_geom_('''O 0. 0. 0.1
H 0. -0.757 0.587
H 0. 0.757 0.587''')
e3_symm_shift, g3_symm_shift = g_scanner (mol, state=1)
print('Gradients of the 3rd excited state using symmetry')
print(g3_symm)
print('Gradients of the 3rd excited state at a shifted geometry using symmetry')
print(g3_symm_shift)
|
NESCent/dplace | dplace_app/tests/test_api.py | Python | mit | 14,228 | 0.003093 | # coding: utf8
from __future__ import unicode_literals
import json
from django.core.urlresolvers import reverse
from rest_framework import status
from rest_framework.test import APITestCase
from clldutils.path import Path
from dplace_app.models import *
from dplace_app.load import load
from dplace_app.loader import sources
class Test(APITestCase):
"""Tests rest-framework API"""
def _fixture_teardown(self):
try:
APITestCase._fixture_teardown(self)
except:
pass
def get_json(self, urlname, *args, **kw):
kw.setdefault('format', 'json')
reverse_args = kw.pop('reverse_args', [])
response = self.client.get(reverse(urlname, args=reverse_args), *args, **kw)
self.assertEqual(response.status_code, status.HTTP_200_OK)
try: # csv download doesn't return json
return json.loads(response.content)
except:
return response.content
def obj_in_results(self, obj, response):
return getattr(obj, 'id', obj) in [x['id'] for x in response['results']]
    def setUp(self):
        """Reset the source cache and load the test dataset before each test."""
        sources._SOURCE_CACHE = {}
        load(Path(__file__).parent.joinpath('data'))

    def test_society_detail(self):
        # NOTE(review): the response is discarded, so this only verifies
        # that rendering the society detail view does not raise.
        self.client.get(reverse('view_society', args=('society1',)))

    def test_society_search(self):
        """The society page renders the UTF-8 society name."""
        res = self.client.get(reverse('view_society', args=('society1',)))
        self.assertIn('Söciety1'.encode('utf8'), res.content)

    def test_api_variable(self):
        """Variable detail nests index categories as objects, not ids."""
        res = self.get_json(
            'variable-detail',
            reverse_args=[Variable.objects.get(label='1').id])
        self.assertIsInstance(res['index_categories'][0], dict)

    def test_zip_legends(self):
        """The download endpoint responds with a file attachment."""
        response = self.client.post(reverse('download'))
        self.assertIn('attachment', response['Content-Disposition'])
    def test_get_categories(self):
        """get_categories returns a list, with or without a source filter."""
        response = self.get_json('get_categories', {'query': json.dumps({})})
        self.assertIsInstance(response, list)
        response = self.get_json(
            'get_categories',
            {'query': json.dumps(dict(source=Source.objects.get(name='Ethnographic Atlas').id))})
        self.assertIsInstance(response, list)

    def test_min_and_max(self):
        """min_and_max returns a dict with 'min'/'max' keys for a variable."""
        response = self.get_json(
            'min_and_max',
            {'query': json.dumps(
                dict(environmental_id=Variable.objects.get(name='Rainfall').id))})
        self.assertIsInstance(response, dict)
        self.assertIn('min', response)
        self.assertIn('max', response)

    def test_cont_variable(self):
        """cont_variable 404s on missing/unparseable/invalid queries and
        returns a list for a valid variable id."""
        response = self.client.get('cont_variable')
        self.assertEqual(response.status_code, 404)
        response = self.client.get('cont_variable', {'query': 'not-json-parseable'})
        self.assertEqual(response.status_code, 404)
        response = self.client.get('cont_variable', {'query': '[]'})
        self.assertEqual(response.status_code, 404)
        response = self.get_json(
            'cont_variable',
            {'query': json.dumps(dict(bf_id=Variable.objects.get(label='2').id))})
        self.assertIsInstance(response, list)
    # --- list endpoints; expected counts come from the fixtures loaded
    # in setUp() (tests/data) -------------------------------------------
    def test_geo_api(self):
        response = self.client.get(reverse('geographicregion-list'), format='json')
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(len(response.data['results']), 2)
        # 'region_nam' (sic) is the truncated DBF-style field name the
        # model uses -- presumably intentional; confirm against the model.
        self.assertEqual(
            response.data['results'][0]['region_nam'],
            GeographicRegion.objects.get(region_nam='Region1').region_nam)
    def test_all_languages(self):
        response_dict = self.get_json('language-list')
        self.assertEqual(response_dict['count'], 3)
        for lang in Language.objects.all():
            self.assertTrue(self.obj_in_results(lang, response_dict))
    def test_all_language_trees(self):
        response_dict = self.get_json('languagetree-list')
        self.assertEqual(response_dict['count'], 3)
        self.assertEqual(
            set(t['source']['name'] for t in response_dict['results']),
            {'semitic', 'a phylogeny', 'Abkhaz-Adyge'})
    def test_family2_languages(self):
        response_dict = self.get_json(
            'language-list',
            {'family': LanguageFamily.objects.first().id})
        self.assertEqual(response_dict['count'], 2)
    def test_all_variables(self):
        response_dict = self.get_json('variable-list')
        self.assertEqual(response_dict['count'], 5)
        self.assertEqual(
            len([x for x in response_dict['results'] if x['type'] == 'cultural']), 2)
        for name in ['Rainfall', 'Temperature', 'Subsistence economy: gathering']:
            env_var = Variable.objects.get(name=name)
            self.assertTrue(self.obj_in_results(env_var, response_dict))
    def test_category1_variables(self):
        # Climate category contains Rainfall and Temperature but not Ecology1.
        response_dict = self.get_json(
            'variable-list',
            {'index_categories': [Category.objects.get(name='Climate').id]})
        for name, assertion in [
            ('Rainfall', self.assertTrue),
            ('Temperature', self.assertTrue),
            ('Ecology1', self.assertFalse)
        ]:
            env_var = Variable.objects.get(name=name)
            assertion(self.obj_in_results(env_var, response_dict))
    def test_code_description_order(self):
        """
        Make sure 2 comes before 10 (codes sort numerically, not lexically).
        """
        response_dict = self.get_json(
            'codedescription-list', {'variable': Variable.objects.get(label='1').id})
        self.assertEqual(
            [res['code'] for res in response_dict['results']],
            ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'NA'])
    def test_filter_source(self):
        response_dict = self.get_json(
            'variable-list', {'source': Source.objects.get(name='Ethnographic Atlas').id})
        self.assertEqual(response_dict['count'], 2)
    def test_category2_variables(self):
        response_dict = self.get_json(
            'variable-list',
            {'index_categories': [Category.objects.get(name='Economy').id]})
        self.assertEqual(response_dict['count'], 2)
    def test_category3_variables(self):
        response_dict = self.get_json(
            'variable-list',
            {'index_categories': [Category.objects.get(name='Subsistence').id]})
        self.assertEqual(response_dict['count'], 1)
        self.assertEqual(
            response_dict['results'][0]['name'],
            Variable.objects.get(label='1').name)
        self.assertFalse(self.obj_in_results(
            Variable.objects.get(label='2'), response_dict))
    def test_csv_download(self):
        # Unfiltered download starts with the '"Research' header token.
        response = self.client.get(reverse('csv_download'))
        self.assertEqual(response.content.split()[0], '"Research')
        response = self.get_json(
            'csv_download',
            {'query': json.dumps({'p': [
                GeographicRegion.objects.get(region_nam='Region1').id]})})
        self.assertIn('Region1'.encode('utf8'), response)
    def test_csv_download_var(self):
        response = self.client.get(reverse('csv_download'))
        self.assertEqual(response.content.split()[0], '"Research')
        # Code filters use the "<variable_id>-<code_id>" composite format.
        response = self.get_json(
            'csv_download',
            {'query': json.dumps({'c': ['%s-%s' % (
                CodeDescription.objects.get(code='1').variable.id,
                CodeDescription.objects.get(code='1').id)]})})
        self.assertIn('Herero', response.decode('utf8'))
    def test_trees_from_societies(self):
        response = self.get_json(
            'trees_from_societies',
            {s.ext_id: s.id for s in Society.objects.all()})
        self.assertEqual(response, [])
    #
    # find societies:
    #
def get_results(self, urlname='find_societies', no_escape=False, **data):
method = self.client.post
if urlname == 'find_societies':
method = self.client.get
_data = []
for k, v in data.items():
for vv in v:
if no_escape:
_data.append((k, vv))
else:
if str(k) == 'c':
_data.append((k, vv))
|
vladan-m/ggrc-core | src/ggrc_workflows/migrations/versions/20140725082539_468290d5494f_add_sort_index_to_task_groups_and_cycle_.py | Python | apache-2.0 | 2,077 | 0.008666 |
"""Add sort index to task_groups and cycle_task_groups
Revision ID: 468290d5494f
Revises: 1f1ab1d371b6
Create Date: 2014-07-25 08:25:39.074611
"""
# revision identifiers, used by Alembic.
revision = '468290d5494f'
down_revision = '1f1ab1d371b6'
from alembic import op
import sqlalchemy as sa
from sqlalchemy.sql import table, column, select
def _set_sort_index(table_1, table_1_id, table_2):
    """Assign spread-out ``sort_index`` values to the rows of *table_2*.

    For every row of *table_1* (e.g. a workflow), the rows of *table_2*
    (e.g. its task groups) referencing it via the *table_1_id* column get
    a ``sort_index`` obtained by repeatedly averaging with the maximum
    safe value, preserving the current row order while leaving room to
    insert rows between any two existing ones later.
    """
    from decimal import Decimal  # hoisted: was re-imported on every outer-loop pass
    connection = op.get_bind()
    rows_1 = connection.execute(
        select([table_1.c.id])
    )
    for row_1 in rows_1:
        rows_2 = connection.execute(
            select([table_2.c.id, table_2.c.sort_index])
            .where(table_2.c[table_1_id] == row_1.id)
        )
        max_index = Decimal(9007199254740991.0)  # 2**53 - 1, largest exactly-representable float int
        current_index = max_index / 2
        for row_2 in rows_2:
            op.execute(
                table_2.update()
                .values(sort_index=str(current_index))
                .where(table_2.c.id == row_2.id)
            )
            current_index = (current_index + max_index) / 2
def upgrade():
    # Add the NOT NULL sort_index columns, then backfill them for every
    # existing workflow/cycle via the lightweight table() constructs below
    # (no ORM models in migrations).
    op.add_column('task_groups', sa.Column('sort_index',
        sa.String(length=250), nullable=False))
    op.add_column('cycle_task_groups', sa.Column('sort_index',
        sa.String(length=250), nullable=False))
    workflows_table = table(
        'workflows',
        column('id', sa.Integer)
        )
    task_groups_table = table(
        'task_groups',
        column('id', sa.Integer),
        column('sort_index', sa.String),
        column('workflow_id', sa.Integer),
        )
    cycles_table = table(
        'cycles',
        column('id', sa.Integer),
        column('sort_index', sa.String),
        column('workflow_id', sa.Integer),
        )
    cycle_task_groups_table = table(
        'cycle_task_groups',
        column('id', sa.Integer),
        column('sort_index', sa.String),
        column('cycle_id', sa.Integer),
        )
    _set_sort_index(workflows_table, 'workflow_id', task_groups_table)
    _set_sort_index(cycles_table, 'cycle_id', cycle_task_groups_table)
def downgrade():
    # Reverse of upgrade(): drop the added sort_index columns.
    op.drop_column('cycle_task_groups', 'sort_index')
    op.drop_column('task_groups', 'sort_index')
|
PyThaiNLP/pythainlp | pythainlp/util/keyboard.py | Python | apache-2.0 | 6,583 | 0 | # -*- coding: utf-8 -*-
"""
Functions related to keyboard layout.
"""
EN_TH_KEYB_PAIRS = {
"Z": "(",
"z": "ผ",
"X": ")",
"x": "ป",
"C": "ฉ",
"c": "แ",
"V": "ฮ",
"v": "อ",
"B": "\u0e3a", # พินทุ
"b": "\u0e34", # สระอุ
"N": "\u0e4c", # การันต์
"n": "\u0e37", # สระอือ
"M": "?",
"m": "ท",
"<": "ฒ",
",": "ม",
">": "ฬ",
".": "ใ",
"?": "ฦ",
"/": "ฝ",
"A": "ฤ",
"a": "ฟ",
"S": "ฆ",
"s": "ห",
"D": "ฏ",
"d": "ก",
"F": "โ",
"f": "ด",
"G": "ฌ",
"g": "เ",
"H": "\u0e47", # ไม้ไต่คู้
"h": "\u0e49", # ไม้โท
"J": "\u0e4b", # ไม้จัตวา
"j": "\u0e48", # ไม้เอก
"K": "ษ",
"k": "า",
"L": "ศ",
"l": "ส",
":": "ซ",
";": "ว",
'"': ".",
"'": "ง",
"Q": "๐",
"q": "ๆ",
"W": '"',
"w": "ไ",
"E": "ฎ",
"e": "\u0e33", # สระอำ
"R": "ฑ",
"r": "พ",
"T": "ธ",
"t": "ะ",
"Y": "\u0e4d", # นิคหิต
"y": "\u0e31", # ไม้หันอากาศ
"U": "\u0e4a", # ไม้ตรี
"u": "\u0e35", # สระอ ี
"I": "ณ",
"i": "ร",
"O": "ฯ",
"o": "น",
"P": "ญ",
"p": "ย",
"{": "ฐ",
"[": "บ",
"}": ",",
"]": "ล",
"|": "ฅ",
"\\": "ฃ",
"~": "%",
"`": "_",
"@": "๑",
"2": "/",
"#": "๒",
"3": "-",
"$": "๓",
"4": "ภ",
"%": "๔",
"5": "ถ",
"^": "\u0e39", # สระอู
"6": "\u0e38", # สระอุ
"&": "฿",
"7": "\u0e36", # สระอึ
"*": "๕",
"8": "ค",
"(": "๖",
"9": "ต",
")": "๗",
"0": "จ",
"_": "๘",
"-": "ข",
"+": "๙",
"=": "ช",
}
# Reverse mapping (Thai key -> English key) and prebuilt str.translate()
# tables for both directions.
TH_EN_KEYB_PAIRS = {v: k for k, v in EN_TH_KEYB_PAIRS.items()}
EN_TH_TRANSLATE_TABLE = str.maketrans(EN_TH_KEYB_PAIRS)
TH_EN_TRANSLATE_TABLE = str.maketrans(TH_EN_KEYB_PAIRS)
# Modified TIS 820-2531 Thai keyboard layout, rows top to bottom (number
# row first), used for key coordinates in thai_keyboard_dist(); the
# second table is the shifted plane of the same physical keys.
TIS_820_2531_MOD = [
    ["-", "ๅ", "/", "", "_", "ภ", "ถ", "ุ", "ึ", "ค", "ต", "จ", "ข", "ช"],
    ["ๆ", "ไ", "ำ", "พ", "ะ", "ั", "ี", "ร", "น", "ย", "บ", "ล", "ฃ"],
    ["ฟ", "ห", "ก", "ด", "เ", "้", "่", "า", "ส", "ว", "ง"],
    ["ผ", "ป", "แ", "อ", "ิ", "ื", "ท", "ม", "ใ", "ฝ"],
]
TIS_820_2531_MOD_SHIFT = [
    ["%", "+", "๑", "๒", "๓", "๔", "ู", "฿", "๕", "๖", "๗", "๘", "๙"],
    ["๐", "\"", "ฎ", "ฑ", "ธ", "ํ", "๊", "ณ", "ฯ", "ญ", "ฐ", ",", "ฅ"],
    ["ฤ", "ฆ", "ฏ", "โ", "ฌ", "็", "๋", "ษ", "ศ", "ซ", "."],
    ["(", ")", "ฉ", "ฮ", "ฺ", "์", "?", "ฒ", "ฬ", "ฦ"],
]
def eng_to_thai(text: str) -> str:
    """Recover Thai text that was typed on an English-US Qwerty layout.

    Each character is mapped to the character on the same physical key of
    the Thai Kedmanee layout, undoing a "wrong keyboard layout" mistake.

    :param str text: text typed with Thai intent on an English layout
    :return: the corrected Thai text
    :rtype: str

    :Example:

    Intentionally type "ธนาคารแห่งประเทศไทย", but got "Tok8kicsj'xitgmLwmp"::

        from pythainlp.util import eng_to_thai

        eng_to_thai("Tok8kicsj'xitgmLwmp")
        # output: ธนาคารแห่งประเทศไทย
    """
    return text.translate(EN_TH_TRANSLATE_TABLE)
def thai_to_eng(text: str) -> str:
    """Recover English text that was typed on a Thai Kedmanee layout.

    Each character is mapped to the character on the same physical key of
    the English-US Qwerty layout, undoing a "wrong keyboard layout"
    mistake.

    :param str text: text typed with English intent on a Thai layout
    :return: the corrected English text
    :rtype: str

    :Example:

    Intentionally type "Bank of Thailand", but got "ฺฟืา นด ธ้ฟรสฟืก"::

        from pythainlp.util import thai_to_eng

        thai_to_eng("ฺฟืา นด ธ้ฟรสฟืก")
        # output: 'Bank of Thailand'
    """
    return text.translate(TH_EN_TRANSLATE_TABLE)
def thai_keyboard_dist(c1: str, c2: str, shift_dist: float = 0.0) -> float:
    """Euclidean distance between two Thai characters on a Thai keyboard.

    Key coordinates come from the modified TIS 820-2531 layout (the
    de facto standard Kedmanee-derived Thai layout, TIS 820-2531 plus the
    key extensions proposed in the TIS 820-2536 draft); both the
    unshifted and the shifted plane are searched.  When the two
    characters sit on the same physical key but on different shift
    planes (geometric distance 0 yet different characters), *shift_dist*
    is returned instead.

    :param str c1: first character
    :param str c2: second character
    :param float shift_dist: value returned for same-key, different-shift pairs
    :return: euclidean distance between the two characters
    :rtype: float
    :raises ValueError: if a character is not found on the layout

    :Example:

        from pythainlp.util import thai_keyboard_dist

        thai_keyboard_dist("ด", "ะ")
        # output: 1.4142135623730951

        thai_keyboard_dist("ฟ", "ฤ", 0.5)
        # output: 0.5
    """
    # Default is a tuple, not a list: a mutable default argument would be
    # shared across calls (classic Python pitfall), even though it was
    # only read here.
    def get_char_coord(
        ch: str, layouts=(TIS_820_2531_MOD, TIS_820_2531_MOD_SHIFT)
    ):
        # Return (row, column) of ``ch`` in the first layout containing it.
        for layout in layouts:
            for row_index, row in enumerate(layout):
                if ch in row:
                    return (row_index, row.index(ch))
        raise ValueError(ch + " not found in given keyboard layout")

    coord1 = get_char_coord(c1)
    coord2 = get_char_coord(c2)
    distance = (
        (coord1[0] - coord2[0]) ** 2 + (coord1[1] - coord2[1]) ** 2
    ) ** 0.5
    if distance == 0 and c1 != c2:
        return shift_dist
    return distance
|
sserrot/champion_relationships | venv/Lib/site-packages/win32comext/shell/demos/servers/copy_hook.py | Python | mit | 2,881 | 0.009372 | # A sample shell copy hook.
# To demostrate:
# * Execute this script to register the context menu.
# * Open Windows Explorer
# * Attempt to move or copy a directory.
# * Note our hook's dialog is displayed.
import sys, os
import pythoncom
from win32com.shell import shell, shellcon
import win32gui
import win32con
import winerror
# Our shell extension: a COM server implementing ICopyHook so the shell
# asks us before copying/moving folders.
class ShellExtension:
    # Registration metadata consumed by win32com.server.register.
    _reg_progid_ = "Python.ShellExtension.CopyHook"
    _reg_desc_ = "Python Sample Shell Extension (copy hook)"
    _reg_clsid_ = "{1845b6ba-2bbd-4197-b930-46d8651497c1}"
    _com_interfaces_ = [shell.IID_ICopyHook]
    _public_methods_ = ["CopyCallBack"]
    def CopyCallBack(self, hwnd, func, flags,
                     srcName, srcAttr, destName, destAttr):
        # This function should return:
        # IDYES Allows the operation.
        # IDNO Prevents the operation on this folder but continues with any other operations that have been approved (for example, a batch copy operation).
        # IDCANCEL Prevents the current operation and cancels any pending operations.
        print("CopyCallBack", hwnd, func, flags, srcName, srcAttr, destName, destAttr)
        # Ask the user; MessageBox returns IDYES/IDNO which we pass back.
        return win32gui.MessageBox(hwnd, "Allow operation?", "CopyHook",
                                   win32con.MB_YESNO)
def DllRegisterServer():
    # Register the copy hook for directories AND for all file types ("*")
    # by pointing HKCR\...\CopyHookHandlers\<desc> at our CLSID.
    import winreg
    key = winreg.CreateKey(winreg.HKEY_CLASSES_ROOT,
                           "directory\\shellex\\CopyHookHandlers\\" +
                           ShellExtension._reg_desc_)
    winreg.SetValueEx(key, None, 0, winreg.REG_SZ, ShellExtension._reg_clsid_)
    key = winreg.CreateKey(winreg.HKEY_CLASSES_ROOT,
                           "*\\shellex\\CopyHookHandlers\\" +
                           ShellExtension._reg_desc_)
    winreg.SetValueEx(key, None, 0, winreg.REG_SZ, ShellExtension._reg_clsid_)
    print(ShellExtension._reg_desc_, "registration complete.")
def DllUnregisterServer():
    """Remove the registry keys created by DllRegisterServer.

    A missing key (extension never registered) is ignored; any other
    registry failure is re-raised.
    """
    import winreg
    import errno
    # The hook is registered under both "directory" and "*" (all files);
    # the two previously duplicated try/except blocks are now one loop,
    # and the pointless ``key =`` binding of DeleteKey()'s None return
    # is gone.
    for root in ("directory", "*"):
        try:
            winreg.DeleteKey(winreg.HKEY_CLASSES_ROOT,
                             "%s\\shellex\\CopyHookHandlers\\%s"
                             % (root, ShellExtension._reg_desc_))
        except WindowsError as details:
            if details.errno != errno.ENOENT:
                raise
    print(ShellExtension._reg_desc_, "unregistration complete.")
if __name__=='__main__':
    from win32com.server import register
    # Register or unregister the COM server depending on command line
    # (--unregister); the finalize_* hooks add/remove the shell keys.
    register.UseCommandLine(ShellExtension,
                            finalize_register = DllRegisterServer,
                            finalize_unregister = DllUnregisterServer)
#!/usr/bin/env python
|
bigfootproject/pyostack | pyostack/metering.py | Python | apache-2.0 | 712 | 0.001404 | import ceilometerclient.client as clclient
import logging
log = logging.getLogger(__name__)
class Metering:
    """Wrapper for the OpenStack Metering service (Ceilometer)."""

    def __init__(self, conf):
        # Build credential kwargs from the config and open a v2 client.
        creds = self._get_creds(conf)
        self.ceilo = clclient.get_client(2, **creds)

    def _get_creds(self, conf):
        """Read OpenStack credentials from the ``environment`` section of
        *conf* into the keyword arguments the client expects."""
        d = {}
        d['os_username'] = conf.get("environment", "OS_USERNAME")
        d['os_password'] = conf.get("environment", "OS_PASSWORD")
        d['os_auth_url'] = conf.get("environment", "OS_AUTH_URL")
        d['os_tenant_name'] = conf.get("environment", "OS_TENANT_NAME")
        return d

    def meter_list(self, query=None):
        """List meters, optionally filtered.

        :param query: optional Ceilometer query (list of filter dicts),
            forwarded as the ``q`` argument of ``meters.list``.
            Fix: the parameter was previously accepted but silently
            ignored -- every caller got the unfiltered list.
        """
        return self.ceilo.meters.list(q=query)
pradeeppanga/dcos-universe | docs/tutorial/helloworld.py | Python | apache-2.0 | 1,033 | 0.00484 | import time
import http.server
import os
HOST_NAME = '0.0.0.0'  # Host name of the http server (bind all interfaces)
# Gets the port number from $PORT0 environment variable;
# raises KeyError at import time if it is not set.
PORT_NUMBER = int(os.environ['PORT0'])
class MyHandler(http.server.BaseHTTPRequestHandler):
    # Minimal handler: every GET returns a small HTML page showing the
    # server's current local time.
    def do_GET(s):
        """Respond to a GET request."""
        s.send_response(200)
        s.send_header("Content-type", "text/html")
        s.end_headers()
        s.wfile.write("<html><head><title>Time Server</title></head>".encode())
        s.wfile.write("<body><p>The current time is {}</p>".format(time.asctime()).encode())
        s.wfile.write("</body></html>".encode())
if __name__ == '__main__':
    server_class = http.server.HTTPServer
    httpd = server_class((HOST_NAME, PORT_NUMBER), MyHandler)
    print(time.asctime(), "Server Starts - {}:{}".format(HOST_NAME, PORT_NUMBER))
    try:
        # Serve until interrupted (Ctrl-C), then shut down cleanly.
        httpd.serve_forever()
    except KeyboardInterrupt:
        pass
    httpd.server_close()
    print(time.asctime(), "Server Stops - {}:{}".format(HOST_NAME, PORT_NUMBER))
|
javierLiarte/tdd-goat-python | lists/views.py | Python | gpl-2.0 | 687 | 0.021834 | from django.http import HttpResponse
from django.shortcuts import redirect, render
from lists.models import Item, List
# Create your views here.
def home_page(request):
    """Render the site home page."""
    return render(request, 'home.html')
def view_list(request, list_id):
    """Display a single to-do list and its items."""
    list_ = List.objects.get(id=list_id)
    return render(request, 'list.html', {'list': list_,})
def new_list(request):
    """Create a new list from the POSTed first item, then redirect to it."""
    list_ = List.objects.create()
    Item.objects.create(text=request.POST['item_text'], list=list_)
    return redirect('/lists/%d/' % (list_.id))
def add_item(request, list_id):
    """Append the POSTed item to an existing list, then redirect back."""
    list_ = List.objects.get(id=list_id)
    Item.objects.create(text=request.POST['item_text'], list=list_)
    return redirect('/lists/%d/' % (list_.id))
lwiss/Hackathon_Satellite_Imagery | FCN.py | Python | mit | 10,362 | 0.004053 | from __futur | e__ import print_function
import tensorflow as tf
import numpy as np
import TensorflowUtils as utils
import read_MITSceneParsingData as scene_parsing
import datetime
import BatchDatsetReader as dataset
from six.moves import xrange
import readAerialDataset
FLAGS = tf.flags.FLAGS
# Command-line configuration (TF1-style flags).
tf.flags.DEFINE_integer("batch_size", "20", "batch size for training")
tf.flags.DEFINE_string("logs_dir", "logs/", "path to logs directory")
tf.flags.DEFINE_string("data_dir", "data/", "path to dataset")
tf.flags.DEFINE_float("learning_rate", "1e-3", "Learning rate for Adam Optimizer")
tf.flags.DEFINE_string("model_dir", "Model_zoo/", "Path to vgg model mat")
tf.flags.DEFINE_bool('debug', "False", "Debug mode: True/ False")
tf.flags.DEFINE_string('mode', "visualize", "Mode train/ test/ visualize")
# Pre-trained VGG-19 weights in matconvnet format, downloaded on demand.
MODEL_URL = 'http://www.vlfeat.org/matconvnet/models/beta16/imagenet-vgg-verydeep-19.mat'
MAX_ITERATION = int(1e4 + 1)
NUM_OF_CLASSESS = 2  # binary segmentation (background / target)
IMAGE_SIZE = 224  # input resize edge; matches VGG's expected input size
def vgg_net(weights, image):
    # Rebuild the 5 convolutional stages of VGG-19 from the pre-trained
    # matconvnet weight array; returns {layer name: activation tensor} so
    # callers can tap intermediate layers (pool3/pool4/conv5_3).
    layers = (
        'conv1_1', 'relu1_1', 'conv1_2', 'relu1_2', 'pool1',
        'conv2_1', 'relu2_1', 'conv2_2', 'relu2_2', 'pool2',
        'conv3_1', 'relu3_1', 'conv3_2', 'relu3_2', 'conv3_3',
        'relu3_3', 'conv3_4', 'relu3_4', 'pool3',
        'conv4_1', 'relu4_1', 'conv4_2', 'relu4_2', 'conv4_3',
        'relu4_3', 'conv4_4', 'relu4_4', 'pool4',
        'conv5_1', 'relu5_1', 'conv5_2', 'relu5_2', 'conv5_3',
        'relu5_3', 'conv5_4', 'relu5_4'
    )
    net = {}
    current = image
    for i, name in enumerate(layers):
        kind = name[:4]
        if kind == 'conv':
            kernels, bias = weights[i][0][0][0][0]
            # matconvnet: weights are [width, height, in_channels, out_channels]
            # tensorflow: weights are [height, width, in_channels, out_channels]
            kernels = utils.get_variable(np.transpose(kernels, (1, 0, 2, 3)), name=name + "_w")
            bias = utils.get_variable(bias.reshape(-1), name=name + "_b")
            current = utils.conv2d_basic(current, kernels, bias)
        elif kind == 'relu':
            current = tf.nn.relu(current, name=name)
            if FLAGS.debug:
                utils.add_activation_summary(current)
        elif kind == 'pool':
            # NOTE(review): average pooling here, while the reference FCN
            # uses max pooling -- confirm this is intentional.
            current = utils.avg_pool_2x2(current)
        net[name] = current
    return net
def inference(image, keep_prob):
    """
    Semantic segmentation network definition (FCN-8s style on VGG-19:
    pool4 and pool3 skip connections fused, final stride-8 upsampling).
    :param image: input image batch. Should have values in range 0-255
    :param keep_prob: dropout keep probability for the fc6/fc7 layers
    :return: (predicted label map [N,H,W,1], per-class logits [N,H,W,C])
    """
    print("setting up vgg initialized conv layers ...")
    model_data = utils.get_model_data(FLAGS.model_dir, MODEL_URL)
    # Subtract the dataset mean pixel stored alongside the VGG weights.
    mean = model_data['normalization'][0][0][0]
    mean_pixel = np.mean(mean, axis=(0, 1))
    weights = np.squeeze(model_data['layers'])
    processed_image = utils.process_image(image, mean_pixel)
    with tf.variable_scope("inference"):
        image_net = vgg_net(weights, processed_image)
        conv_final_layer = image_net["conv5_3"]
        pool5 = utils.max_pool_2x2(conv_final_layer)
        # fc6/fc7 re-expressed as convolutions (fully convolutional net).
        W6 = utils.weight_variable([7, 7, 512, 4096], name="W6")
        b6 = utils.bias_variable([4096], name="b6")
        conv6 = utils.conv2d_basic(pool5, W6, b6)
        relu6 = tf.nn.relu(conv6, name="relu6")
        if FLAGS.debug:
            utils.add_activation_summary(relu6)
        relu_dropout6 = tf.nn.dropout(relu6, keep_prob=keep_prob)
        W7 = utils.weight_variable([1, 1, 4096, 4096], name="W7")
        b7 = utils.bias_variable([4096], name="b7")
        conv7 = utils.conv2d_basic(relu_dropout6, W7, b7)
        relu7 = tf.nn.relu(conv7, name="relu7")
        if FLAGS.debug:
            utils.add_activation_summary(relu7)
        relu_dropout7 = tf.nn.dropout(relu7, keep_prob=keep_prob)
        # 1x1 conv to per-class scores.
        W8 = utils.weight_variable([1, 1, 4096, NUM_OF_CLASSESS], name="W8")
        b8 = utils.bias_variable([NUM_OF_CLASSESS], name="b8")
        conv8 = utils.conv2d_basic(relu_dropout7, W8, b8)
        # annotation_pred1 = tf.argmax(conv8, dimension=3, name="prediction1")
        # now to upscale to actual image size
        deconv_shape1 = image_net["pool4"].get_shape()
        W_t1 = utils.weight_variable([4, 4, deconv_shape1[3].value, NUM_OF_CLASSESS], name="W_t1")
        b_t1 = utils.bias_variable([deconv_shape1[3].value], name="b_t1")
        conv_t1 = utils.conv2d_transpose_strided(conv8, W_t1, b_t1, output_shape=tf.shape(image_net["pool4"]))
        fuse_1 = tf.add(conv_t1, image_net["pool4"], name="fuse_1")
        deconv_shape2 = image_net["pool3"].get_shape()
        W_t2 = utils.weight_variable([4, 4, deconv_shape2[3].value, deconv_shape1[3].value], name="W_t2")
        b_t2 = utils.bias_variable([deconv_shape2[3].value], name="b_t2")
        conv_t2 = utils.conv2d_transpose_strided(fuse_1, W_t2, b_t2, output_shape=tf.shape(image_net["pool3"]))
        fuse_2 = tf.add(conv_t2, image_net["pool3"], name="fuse_2")
        # Final upsample back to the input resolution (stride 8).
        shape = tf.shape(image)
        deconv_shape3 = tf.stack([shape[0], shape[1], shape[2], NUM_OF_CLASSESS])
        W_t3 = utils.weight_variable([16, 16, NUM_OF_CLASSESS, deconv_shape2[3].value], name="W_t3")
        b_t3 = utils.bias_variable([NUM_OF_CLASSESS], name="b_t3")
        conv_t3 = utils.conv2d_transpose_strided(fuse_2, W_t3, b_t3, output_shape=deconv_shape3, stride=8)
        annotation_pred = tf.argmax(conv_t3, dimension=3, name="prediction")
    return tf.expand_dims(annotation_pred, dim=3), conv_t3
def train(loss_val, var_list):
    # Adam optimizer with an explicit compute/apply split so the raw
    # gradients can be added to the summaries in debug mode.
    optimizer = tf.train.AdamOptimizer(FLAGS.learning_rate)
    grads = optimizer.compute_gradients(loss_val, var_list=var_list)
    if FLAGS.debug:
        # print(len(var_list))
        for grad, var in grads:
            utils.add_gradient_summary(grad, var)
    return optimizer.apply_gradients(grads)
def main(argv=None):
keep_probability = tf.placeholder(tf.float32, name="keep_probabilty")
image = tf.placeholder(tf.float32, shape=[None, IMAGE_SIZE, IMAGE_SIZE, 3], name="input_image")
annotation = tf.placeholder(tf.int32, shape=[None, IMAGE_SIZE, IMAGE_SIZE, 1], name="annotation")
pred_annotation, logits = inference(image, keep_probability)
tf.summary.image("input_image", image, max_outputs=2)
tf.summary.image("ground_truth", tf.cast(annotation, tf.uint8), max_outputs=2)
tf.summary.image("pred_annotation", tf.cast(pred_annotation, tf.uint8), max_outputs=2)
loss = tf.reduce_mean((tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits,
labels=tf.squeeze(annotation, squeeze_dims=[3]),
name="entropy")))
tf.summary.scalar("entropy", loss)
trainable_var = tf.trainable_variables()
if FLAGS.debug:
for var in trainable_var:
utils.add_to_regularization_and_summary(var)
train_op = train(loss, trainable_var)
print("Setting up summary op...")
summary_op = tf.summary.merge_all()
print("Setting up image reader...")
#train_records, valid_records = scene_parsing.read_dataset(FLAGS.data_dir)
train_records, valid_records = readAerialDataset.read_dataset(FLAGS.data_dir)
print(len(train_records))
print(len(valid_records))
print("Setting up dataset reader")
image_options = {'resize': True, 'resize_size': IMAGE_SIZE}
if FLAGS.mode == 'train':
train_dataset_reader = dataset.BatchDatset(train_records, image_options)
validation_dataset_reader = dataset.BatchDatset(valid_records, image_options)
sess = tf.Session()
print("Setting up Saver...")
saver = tf.train.Saver()
summary_writer = tf.summary.FileWriter(FLAGS.logs_dir, sess.graph)
sess.run(tf.global_variables_initializer())
ckpt = tf.train.get_checkpoint_state(FLAGS.logs_dir)
if ckpt and ckpt.model_checkpoint_path:
saver.restore(sess, ckpt.model_checkpoint_path)
print("Model restored...")
saver.restore(sess, 'logs/model.ckpt-13500')
if FLAGS.mode == "train":
for itr in xrange(MAX_ITERATION):
train_images, train_annotations = t |
stavinsky/lsshipper | lsshipper/reader_aio.py | Python | mit | 777 | 0 | import os
import aiofiles
async def get_line(name, offset=0, sep=b'\r\n', chunk_size=4096):
    # Async generator: yield (line, next_offset) pairs from file *name*,
    # starting at byte *offset* and splitting on *sep*.  next_offset
    # points just past the separator so a caller can persist it and
    # resume later.  A trailing partial line (no separator yet, e.g. a
    # log line still being written) is intentionally NOT yielded.
    name = os.path.abspath(name)
    async with aiofiles.open(name, 'r+b') as f:
        await f.seek(offset)
        tmp_line = b""  # carries a partial line across chunk boundaries
        chunk = await f.read(chunk_size)
        while chunk:
            if len(tmp_line):
                chunk = tmp_line + chunk
                tmp_line = b''
            while chunk:
                line, s, chunk = chunk.partition(sep)
                if 0 in line:
                    # A NUL byte in the data: stop the generator entirely.
                    return
                # NOTE(review): the identity checks below (`s is sep`,
                # `s is b''`) rely on partition() returning those exact
                # objects -- `==` would be the safer comparison; confirm.
                if s is sep:
                    offset = offset + len(line) + len(sep)
                    yield (line, offset)
                if s is b'':
                    tmp_line = tmp_line + line
            chunk = await f.read(chunk_size)
Lorze/recipe | Old_and_obsolete/startme.py | Python | mit | 5,028 | 0.044749 | #!/usr/bin/env python
import pyforms
from pyforms import BaseWidget
from pyforms.Controls import ControlText
from pyforms.Controls import ControlButton
from pyforms.Controls import ControlList
import subprocess
import codecs
import glob
import re
import sys
import os
#i think, i have a problem with the encoding on this one, will first have to figure that out before finishing
class Rezepte(BaseWidget):
    # pyforms GUI listing every recipe in Rezepte/*.txt with its title,
    # per-recipe person count and compile flag; buttons delegate to
    # sample.py (compile), save() (persist) and xdg-open (edit).
    def __init__(self):
        super(Rezepte,self).__init__('Rezepte')
        #Define the organization of the forms
        self._formset = ['',('_button','_allbutton','_set',' '),('_newfile','_newbutton', ' '),('_persons',' '),('_openbutton',' ','_rebutton'),('_filelist')]
        #Definition of the forms fields
        files = glob.glob("Rezepte/*txt")
        files.sort()
        self.files = files
        self._newfile = ControlText('','Name')
        self._persons = ControlText('Personen','%s'%personnr('persons'))
        self._button = ControlButton('Kompilieren')
        self._set = ControlButton('Speichern')
        self._allbutton = ControlButton('alle Kompilieren')
        self._rebutton = ControlButton('reload')
        self._newbutton = ControlButton('Rezept erstellen')
        self._openbutton = ControlButton('open')
        self._filelist = ControlList()
        self._filelist.horizontalHeaders = ['Titel', 'Personen', 'Kompilieren',' ']
        self.sumtitle=[]
        # One table row per recipe file: title, person override, compile flag.
        for name in files:
            title =openfile(name)
            persons = personnr(title)
            comp = compil(title)
            self.sumtitle.append(title)
            self._filelist += [title, persons, comp]
        #Define the button action
        self._button.value = self.__buttonAction
        self._allbutton.value = self.__allbuttonAction
        self._openbutton.value = self.__openbuttonAction
        self._newbutton.value = self.__newbuttonAction
        self._set.value = self.__setAction
        self._rebutton.value = self.__reAction
    #compiles only some chosen recipes
    def __buttonAction(self):
        """Button action event"""
        save(self)
        subprocess.call(["python3", "sample.py", "-c some"])
    #only saves
    def __setAction(self):
        """Button action event"""
        save(self)
    #saves, then restarts this process to reload the file list
    def __reAction(self):
        """Button action event"""
        save(self)
        os.execl(sys.executable, *([sys.executable]+sys.argv))
    #compiles all recipes
    def __allbuttonAction(self):
        """Button action event"""
        save(self)
        subprocess.call(["python3", "sample.py"])
    #opens selected files
    def __openbuttonAction(self):
        """Button action event"""
        for number in self._filelist.mouseSelectedRowsIndexes :
            subprocess.call(["xdg-open", "%s"%(self.files[number])])
    #creates new file with given name
    def __newbuttonAction(self):
        """Button action event"""
        self._newfileValue = self._newfile.value
        f = codecs.open("Rezepte/%s.txt"%(self._newfileValue.lower()),'w',encoding='utf-8')
        f.write('[%s]\ntime: \ndevice: \npersons:1 \n\n>1ELL Beispielflussigkeit \nkochen ' %self._newfileValue)
        f.close()
        subprocess.call(["xdg-open", "Rezepte/%s.txt"%(self._newfileValue.lower())])
def openfile(name):
    """Return the recipe title (as UTF-8 bytes) from the first line of
    *name*; that line must look like ``[Title]``."""
    title_re = re.compile(r"\[([\w\s',-]+)\]", re.UNICODE)
    with codecs.open("%s" % (name), 'r', encoding='utf-8') as handle:
        first_line = handle.readline()
    return title_re.match(first_line).group(1).strip().encode('utf-8')
#saves everything set in the GUI; does not save name changes
def save(self):
    # Persist per-recipe person counts to persons.txt and the compile
    # selection to compile.txt (both re-read by personnr()/compil()).
    persons = self._persons.value.encode('utf-8')
    specpers = self._filelist.value
    sumtitle = self.sumtitle
    # Column 0 of each row is replaced by the canonical title.
    for title,pers in zip(sumtitle,specpers):
        pers[0]=title
    data=[]
    data.append('[persons]%s\n'%persons)
    for pers in specpers:
        if pers[1] != '':
            data.append('[%s]%s\n'%(pers[0],pers[1]))
    with open('persons.txt', 'w') as file:
        file.writelines(data)
        file.close()  # redundant: the with-block already closes the file
    data = []
    for pers in specpers:
        if pers[2] != '':
            data.append('[%s]\n'%(pers[0]))
    with open('compile.txt', 'w') as file:
        file.writelines(data)
        file.close()  # redundant: the with-block already closes the file
#reads persons.txt, where person settings are saved
def personnr(title):
    """Return the persons count stored for *title* in persons.txt.

    Lines look like ``[Title]N``; *title* is compared against the
    UTF-8-encoded bytes of the bracketed name, so callers pass bytes.
    A ``[persons]`` line resets the result to ``''`` (the GUI default
    row).  Returns ``''`` when nothing matches.

    Fixes over the previous version: the result variable is initialised,
    so an unmatched title no longer raises UnboundLocalError; the file
    handle is closed via ``with``; the line is no longer matched against
    the regex twice.
    """
    persons = ''
    set_person_regex = re.compile(r"\[([\w\s',-]+)\]([0-9]+)", re.UNICODE)
    with codecs.open('persons.txt', 'r', encoding='utf-8') as f:
        for line in f:
            line = line.split("#")[0]  # discard comments
            match = set_person_regex.match(line)
            if match is None:
                continue
            if match.group(1).strip().encode('utf-8') == title:
                persons = match.group(2).strip().encode('utf-8')
            elif match.group(1) == 'persons':
                persons = ''
    return persons
#reads compile.txt, in which the compilation settings will be saved
def compil(name):
    """Return 1 if *name* is marked for compilation in compile.txt,
    otherwise ``''`` (the GUI's empty checkbox value).

    Fixes over the previous version: the file handle is closed via
    ``with``; the comparison no longer mixes ``bytes`` and ``str``
    (``match.group(1).encode(...) == '%s' % name`` could never be true
    under Python 3), so both byte and text titles now match.
    """
    if isinstance(name, bytes):
        wanted = name.decode('utf-8')
    else:
        wanted = '%s' % name
    compile_regex = re.compile(r"\[([\w\s',-]+)\]", re.UNICODE)
    with codecs.open('compile.txt', 'r', encoding='utf-8') as f:
        for line in f:
            line = line.split("#")[0]  # discard comments
            match = compile_regex.match(line)
            if match is not None and match.group(1).strip() == wanted:
                return 1
    return ''
#Execute the application
if __name__ == "__main__": pyforms.startApp( Rezepte )
|
EnEff-BIM/EnEffBIM-Framework | MapAPI/mapapi/molibs/MSL/Blocks/Sources/BooleanPulse.py | Python | mit | 694 | 0 | # -*- coding: utf-8 -*-
"""
Created on Wed Nov 25 12:10:45 2015
@author: pre
"""
import random
import mapapi.MapClasses as MapHierarchy
class BooleanPulse(MapHierarchy.MapComponent):
    """Representation of Modelica.Blocks.Sources.BooleanPulse.

    (The previous docstring said "AixLib.Fluid.Movers.Pump", which
    contradicted ``target_location`` below -- a copy-paste leftover.)
    """
    def init_me(self):
        self.target_location = "Modelica.Blocks.Sources.BooleanPulse"
        # Randomized instance name to reduce collisions when several
        # pulse blocks are added to the same model.
        self.target_name = "booleanPulse"+str(random.randint(1, 100))
        # Parameters presumably mirror the Modelica BooleanPulse block
        # (width in % of period, period/startTime in s) -- TODO confirm.
        self.width = self.add_parameter(name="width", value=50)
        self.period = self.add_parameter(name="period", value=None)
        self.startTime = self.add_parameter(name="startTime", value=0)
        self.y = self.add_connector(name='y', type='Boolean')
        return True
|
JanlizWorldlet/FeelUOwn | src/base/utils.py | Python | mit | 1,039 | 0.000962 | # -*- coding:utf-8 -*-
import platform
import asyncio
import json
from base.logger import LOG
def singleton(cls, *args, **kw):
    """Class decorator: cache and reuse a single instance of *cls*.

    The first call constructs the instance with whatever arguments it
    receives; every later call returns that same cached object (later
    arguments are ignored).
    """
    _cache = {}
    def _singleton(*args, **kw):
        try:
            return _cache[cls]
        except KeyError:
            _cache[cls] = cls(*args, **kw)
            return _cache[cls]
    return _singleton
def func_coroutine(func):
    """Decorator that schedules the wrapped function on the asyncio
    event loop instead of calling it directly.

    Fix: keyword arguments are no longer silently dropped --
    ``loop.call_soon`` only forwards positional arguments, so the call
    is wrapped in ``functools.partial``.  The wrapper returns None; the
    wrapped function only runs once the event loop itself runs.
    """
    def wrapper(*args, **kwargs):
        from functools import partial
        LOG.debug("In func_coroutine: before call ")
        LOG.debug("function name is : " + func.__name__)
        APP_EVENT_LOOP = asyncio.get_event_loop()
        APP_EVENT_LOOP.call_soon(partial(func, *args, **kwargs))
        LOG.debug("In func_coroutine: after call ")
    return wrapper
def write_json_into_file(data_json, filepath):
    """Serialize *data_json* as pretty-printed JSON into *filepath*.

    Returns True on success; logs the error and returns False on any
    failure.
    """
    try:
        with open(filepath, "w") as f:
            json.dump(data_json, f, indent=4)
        return True
    except Exception as e:
        LOG.error(str(e))
        LOG.error("Write json into file failed")
        return False
unicon-pte-ltd/funnel | funnel/testing.py | Python | apache-2.0 | 4,075 | 0.002209 | # -*- coding: utf-8 -*-
#
# Copyright 2013 Unicon Pte. Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import, division, print_function, with_statement
try:
from discover import DiscoveringTestLoader
except ImportError:
from unittest import TestLoader as DiscoveringTestLoader
from funnel.queue import Manager
from optparse import OptionParser
import sys
from tornado.ioloop import IOLoop
from tornado.testing import AsyncTestCase
try:
    from unittest2 import TextTestRunner
except ImportError:
from unittest import TextTestRunner
import types
HOST = '127.0.0.1'  # local broker used by the test harness
class AsyncWorkerTestCase(AsyncTestCase):
    # Tornado AsyncTestCase that wires up a queue publisher and the
    # worker under test against a local broker before each test.
    def setUp(self):
        super(AsyncWorkerTestCase, self).setUp()
        self.publisher = self.get_publisher()
        self.publisher.connect(host=HOST)
        # Each consumed message resumes the IOLoop via self.stop,
        # which publish() below waits on.
        self.publisher.start_consuming(self.stop, no_ack=True)
        self._worker = self.get_worker()
        self._worker.start(rpc=True, host=HOST)
    def get_publisher(self):
        # Override to supply a custom publisher; defaults to queue.Manager.
        return Manager()
    def get_worker(self):
        # Subclasses must return the worker instance under test.
        raise NotImplementedError()
    def get_new_ioloop(self):
        return IOLoop.current()
    def publish(self, message, **kwargs):
        # Route to the worker's queue by default, then block until the
        # reply arrives (self.stop is invoked by the consumer callback).
        if "routing_key" not in kwargs:
            kwargs["routing_key"] = self._worker.queue_name
        self.publisher.call(message, **kwargs)
        return self.wait()
    def doCleanups(self):
        # Tear down worker and connection before unittest's own cleanups.
        self._worker.destruct()
        self.publisher.close_connection()
        super(AsyncWorkerTestCase, self).doCleanups()
# Copied from discover.py https://pypi.python.org/pypi/discover
# Python 2 distinguished old-style (types.ClassType) from new-style (type)
# classes; Python 3 only has `type`.  `class_types` is whatever isinstance()
# should accept as "a class" on the running interpreter.
if hasattr(types, 'ClassType'):
    class_types = (types.ClassType, type)
else:
    class_types = type
def _do_discovery(argv, verbosity, Loader):
    """Parse discovery options from *argv* and run test discovery.

    Up to three positional arguments override the -s/-p/-t options, in
    that order.  Returns a ``(test_suite, verbosity)`` tuple, where
    verbosity is bumped to 2 when -v was given.
    """
    parser = OptionParser()
    parser.add_option('-v', '--verbose', dest='verbose', default=False,
                      help='Verbose output', action='store_true')
    parser.add_option('-s', '--start-directory', dest='start', default='funnel.tests',
                      help="Directory to start discovery ('funnel.tests' default)")
    parser.add_option('-p', '--pattern', dest='pattern', default='*.py',
                      help="Pattern to match tests ('*.py' default)")
    parser.add_option('-t', '--top-level-directory', dest='top', default=None,
                      help='Top level directory of project (defaults to start directory)')
    opts, args = parser.parse_args(argv)
    if len(args) > 3:
        # The original code called an undefined helper `_usage_exit()` here,
        # which raised NameError instead of reporting the usage problem.
        # optparse's error() prints usage to stderr and exits with status 2.
        parser.error('only start-directory, pattern and top-level-directory '
                     'may be given as positional arguments')
    for name, value in zip(('start', 'pattern', 'top'), args):
        setattr(opts, name, value)
    if opts.verbose:
        verbosity = 2
    start_dir = opts.start
    pattern = opts.pattern
    top_level_dir = opts.top
    loader = Loader()
    return loader.discover(start_dir, pattern, top_level_dir), verbosity
def _run_tests(tests, testRunner, verbosity, exit):
    """Instantiate *testRunner* if needed, run *tests*, return the result.

    When *exit* is true the process exits with status 0 on success and
    1 otherwise, mirroring unittest.main.
    """
    if isinstance(testRunner, class_types):
        # A runner class was passed: build an instance, preferring the
        # verbosity-aware constructor signature.
        try:
            runner = testRunner(verbosity=verbosity)
        except TypeError:
            # didn't accept the verbosity argument
            runner = testRunner()
    else:
        runner = testRunner
    result = runner.run(tests)
    if exit:
        sys.exit(not result.wasSuccessful())
    return result
def main(argv=None, testRunner=None, testLoader=None, exit=True, verbosity=1):
    """Discover and run tests, mirroring unittest.main's discovery entry point.

    Defaults: argv from sys.argv[1:], DiscoveringTestLoader as loader,
    TextTestRunner as runner.  When *exit* is true the process exits with
    the run's success status.
    """
    if testLoader is None:
        testLoader = DiscoveringTestLoader
    if testRunner is None:
        testRunner = TextTestRunner
    if argv is None:
        argv = sys.argv[1:]
    # _do_discovery may bump verbosity (e.g. when -v appears in argv).
    tests, verbosity = _do_discovery(argv, verbosity, testLoader)
    return _run_tests(tests, testRunner, verbosity, exit)
|
caterinaurban/Lyra | src/lyra/unittests/numerical/interval/forward/indexing3/list_of_range1.py | Python | mpl-2.0 | 104 | 0.009615 |
L: List[in | t] = list(range(10))
# FINAL: L -> 0@[0, | 0], 1@[1, 1], 2@[2, 2], _@[3, 9]; len(L) -> [10, 10] |
baskiotisn/soccersimulator | soccersimulator/events.py | Python | gpl-2.0 | 1,862 | 0.004834 | # -*- coding: utf-8 -*-
class Events(object):
    """Container of named event slots.

    Subclasses declare ``__events__`` (an iterable of event names); each
    name is materialized as an :class:`_EventSlot` attribute on
    construction.  Accessing an undeclared name raises AssertionError
    when ``__events__`` is defined.
    """

    def __init__(self):
        # Force-create every declared slot up front.
        for e in self.__events__:
            self.__getattr__(e)

    def __getattr__(self, name):
        # Lazily create a slot on first access; only declared names are
        # allowed once the subclass defines __events__.
        if hasattr(self.__class__, '__events__'):
            assert name in self.__class__.__events__, \
                "Event '%s' is not declared" % name
        self.__dict__[name] = ev = _EventSlot(name)
        return ev

    def __str__(self):
        return 'Events :' + str(list(self))

    __repr__ = __str__

    def __len__(self):
        # Subscriber count of the first stored slot.  Fixed for Python 3:
        # dict.values() is a view and is not indexable, so the original
        # `self.__dict__.values()[0]` raised TypeError there.
        if len(self.__dict__) != 0:
            return len(next(iter(self.__dict__.values())))
        return 0

    def __iter__(self):
        # Yield only the event slots, skipping any other attributes.
        def gen(dictitems=self.__dict__.items()):
            for attr, val in dictitems:
                if isinstance(val, _EventSlot):
                    yield val
        return gen()
class _EventSlot(object):
    """A single named event: a callable collection of subscriber callbacks."""

    def __init__(self, name):
        self.targets = []
        self.__name__ = name

    def __repr__(self):
        return self.__name__

    def __call__(self, *args, **kwargs):
        # Fire the event: invoke each subscriber in registration order,
        # collecting their return values.
        results = []
        for callback in self.targets:
            results.append(callback(*args, **kwargs))
        return results

    def __iadd__(self, callback):
        # slot += f subscribes f (duplicates allowed).
        self.targets.append(callback)
        return self

    def __isub__(self, callback):
        # slot -= f removes every registered occurrence of f, in place.
        while callback in self.targets:
            self.targets.remove(callback)
        return self

    def __len__(self):
        return len(self.targets)
class SoccerEvents(Events):
    """Event hub for the soccer simulator's match lifecycle callbacks."""
    __events__ = ('begin_match', 'begin_round', 'update_round', 'end_round', 'end_match', 'is_ready','send_strategy')
    def __iadd__(self, f):
        # Subscribe *f*: for every declared event, hook up the method of the
        # same name on *f* when it exists; objects missing a handler for some
        # event are silently skipped for that event.
        for e in self:
            try:
                e += getattr(f, str(e))
            except:
                pass
        return self
    def __isub__(self, f):
        # Unsubscribe *f*: detach every same-named handler that was hooked.
        for e in self:
            try:
                while getattr(f, str(e)) in e.targets: e.targets.remove(getattr(f, str(e)))
            except:
                pass
        return self
|
lgbouma/astrobase | astrobase/lcmodels/__init__.py | Python | mit | 763 | 0.002621 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# lcmodels - Waqas Bhatti (wbhatti@astro.princeton.edu) - Oct 2017
# License: MIT. See the LICENSE file for more details.
'''This contains various light curve models for variable stars. Useful for
first order fits to distinguish between variable types, and for generating these
variables' light curves for a recovery simulation.
- :py:mod:`astrobase.lcmodels.transits`: trapezoid-shaped planetary transit
light curves.
- :py:mod:`astrobase.lcmodels.eclipses`: double inverted-gaussian shaped
eclipsing binary light curves.
- :py:mod:`astrobase.lcmodels.flares`: stellar flare model from Pitkin+ 2014.
- :py:mod:`astrobase.lcmodels.sinusoidal`: sinusoidal light curve generation for
pulsating variables.
'''
|
DevicePilot/synth | synth/analysis/mergejson.py | Python | mit | 3,010 | 0.002658 | """
MERGEJSON - command-line utility
Given a set of .json files, this merges them in time order.
Each file must contain a JSON list of messages, of the form:
[
{ "$id" : 123, "$ts" : 456, "other" : "stuff" },
{ "$id" : 234, "$ts" : 567, "more" : "stuff" }
]
Each message is a dict containing at least a $id and $ts field, already sorted by rising $ts.
One line per message.
"""
#
#
# Copyright (c) 2019 DevicePilot Ltd.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import sys, os, glob
import json
import json_inc
import logging
def merge_json_files(file_list, output_filestem):
    """Merge pre-sorted JSON message files into one time-ordered output.

    Each input file must already be sorted by rising "$ts"; a k-way merge
    then emits messages in global timestamp order to *output_filestem*
    via json_inc.Writer.
    """
    out = json_inc.Writer(output_filestem)
    logging.info("Merging files "+str(file_list))
    # Open all files
    inc_files = []
    for f in file_list:
        inc_files.append(json_inc.Reader(f))
    # Scan along files, consuming them in time order
    while True:
        # Reached end of all files?
        something_to_do = False
        for f in inc_files:
            if not f.at_eof():
                something_to_do = True
        if not something_to_do:
            break
        # Find the reader whose current message has the earliest timestamp
        earliest_ts = None
        earliest_file = None
        for f in inc_files:
            if not f.at_eof(): # Haven't reached the end of this file
                if earliest_ts is None:
                    # logging.info(str(f.props))
                    earliest_ts = f.props["$ts"]
                    earliest_file = f
                else:
                    if f.props["$ts"] < earliest_ts:
                        earliest_ts = f.props["$ts"]
                        earliest_file = f
        # Emit the earliest message and advance that reader by one row.
        out.write(earliest_file.props)
        earliest_file.consume_row()
    out.close()
if __name__ == "__main__":
    logging.basicConfig(level=logging.INFO)
    # Merge all the JSON files named on the command line (globs expanded)
    # into the "merge" output file.
    files = []
    for f in sys.argv[1:]:
        files.extend(glob.glob(f))
    merge_json_files(files, "merge")
|
minimaxir/keras-cntk-docker | lstm_benchmark.py | Python | mit | 2,306 | 0.000434 | '''Compare LSTM implementations on the IMDB sentiment classification task.
implementation=0 preprocesses input to the LSTM which typically results in
faster computations at the expense of increased peak memory usage as the
preprocessed input must be kept in memory.
implementation=1 does away with the preprocessing, meaning that it might take
a little longer, but should require less peak memory.
implementation=2 concatenates the input, output and forget gate's weights
into one, large matrix, resulting in faster computation time as the GPU can
utilize more cores, at the expense of reduced regularization because the same
dropout is shared across the gates.
Note that the relative performance of the different implementations can
vary depending on your device, your model and the size of your data.
'''
import time
import numpy as np
from keras.preprocessing import sequence
from keras.models import Sequential
from keras.layers import Embedding, Dense, LSTM, Dropout
from keras.datasets import imdb
# Hyper-parameters shared by all benchmark runs.
max_features = 20000   # vocabulary size kept from the IMDB dataset
max_length = 80        # reviews truncated/padded to this many tokens
embedding_dim = 256
batch_size = 128
epochs = 10
modes = [0, 1, 2]      # LSTM `implementation` values to compare
print('Loading data...')
(X_train, y_train), (X_test, y_test) = imdb.load_data(num_words=max_features)
X_train = sequence.pad_sequences(X_train, max_length)
X_test = sequence.pad_sequences(X_test, max_length)
# Compile and train different models while measuring performance.
results = []
for mode in modes:
    print('Testing mode: implementation={}'.format(mode))
    model = Sequential()
    model.add(Embedding(max_features, embedding_dim,
                        input_length=max_length))
    model.add(Dropout(0.2))
    model.add(LSTM(embedding_dim,
                   dropout=0.2,
                   recurrent_dropout=0.2,
                   implementation=mode))
    model.add(Dense(1, activation='sigmoid'))
    model.compile(loss='binary_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy'])
    start_time = time.time()
    history = model.fit(X_train, y_train,
                        batch_size=batch_size,
                        epochs=epochs,
                        validation_data=(X_test, y_test))
    # Wall-clock seconds per epoch for this implementation mode.
    average_time_per_epoch = (time.time() - start_time) / epochs
    results.append((history, average_time_per_epoch))
print(results)
taylorhutchison/ShapefileReaderPy | ShapefileIndexReader.py | Python | mit | 1,778 | 0.004499 | """
This script exposes a class used to read the Shapefile Index format
used in conjunction with a shapefile. The Index file gives the record
number and content length for every record stored in the main shapefile.
This is useful if you need to extract | specific features from a shapefile
without reading the entire file.
How to use:
from ShapefileIndexReader import ShapefileIndex
shx = ShapefileIndex(Path/To/index.shx)
shx.read()
The 'shx' object will expose three properties
1) Path - the path given to the shapefile, if it exists
2) Offsets - an array of byte offsets for each recor | d in the main shapefile
3) Lengths - an array of 16-bit word lengths for each record in the main shapefile
"""
import os
__author__ = 'Sean Taylor Hutchison'
__license__ = 'MIT'
__version__ = '0.1.0'
__maintainer__ = 'Sean Taylor Hutchison'
__email__ = 'seanthutchison@gmail.com'
__status__ = 'Development'
class ShapefileIndex:
    """Reader for an ESRI shapefile index (.shx) file.

    After calling :meth:`read`, ``Records`` is a list with one
    ``[offset, length]`` pair per record of the main shapefile, decoded
    from the big-endian record table that follows the 100-byte header.
    """

    def __init__(self, path=None):
        """Validate *path* (must exist and end in '.shx') and store it.

        Raises FileNotFoundError for a missing path or wrong extension.
        """
        if path and os.path.exists(path) and os.path.splitext(path)[1] == '.shx':
            self.Path = path
        else:
            raise FileNotFoundError
        # Per-instance record list.  (Previously ``Records`` was a mutable
        # class attribute, so every instance -- and every repeated read()
        # call -- shared and appended to the same list.)
        self.Records = []

    def __bytes_to_index_records(self, file_bytes):
        """Decode the fixed-size record table from raw .shx bytes."""
        file_length = len(file_bytes)
        # Each index record is 8 bytes (two big-endian 32-bit ints) and
        # the table starts right after the 100-byte file header.
        num_records = int((file_length - 100) / 8)
        for record_counter in range(0, num_records):
            byte_position = 100 + (record_counter * 8)
            offset = int.from_bytes(file_bytes[byte_position:byte_position + 4], byteorder='big')
            length = int.from_bytes(file_bytes[byte_position + 4:byte_position + 8], byteorder='big')
            self.Records.append([offset, length])

    def read(self):
        """Read and decode the whole index file into ``Records``."""
        # Reset first so repeated read() calls do not duplicate entries.
        self.Records = []
        with open(self.Path, 'rb') as shpindex:
            self.__bytes_to_index_records(shpindex.read())
amaxwell/datatank_py | datatank_py/DTStructuredMesh2D.py | Python | bsd-3-clause | 4,176 | 0.010297 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# This software is under a BSD license. See LICENSE.txt for details.
from datatank_py.DTStructuredGrid2D import DTStructuredGrid2D, _squeeze2d
import numpy as np
class DTStructuredMesh2D(object):
    """2D structured mesh object.
    This class corresponds to DataTank's DTStructuredMesh2D.
    """
    dt_type = ("2D Structured Mesh",)
    """Type strings allowed by DataTank"""
    def __init__(self, values, grid=None):
        """
        :param values: 2D array of values
        :param grid: DTStructuredGrid2D object (defaults to unit grid) or the name of a previously saved grid
        Note that the values array must be ordered as (y, x) for compatibility
        with the grid and DataTank.
        """
        super(DTStructuredMesh2D, self).__init__()
        values = _squeeze2d(values)
        shape = np.shape(values)
        assert len(shape) == 2, "values array must be 2D"
        # NOTE(review): `basestring` exists only on Python 2; under Python 3
        # this line raises NameError -- confirm the intended interpreter.
        if isinstance(grid, basestring) == False:
            if grid == None:
                # No grid given: build a unit-spaced grid matching the values.
                grid = DTStructuredGrid2D(range(shape[1]), range(shape[0]))
            assert shape == grid.shape(), "grid shape %s != value shape %s" % (grid.shape(), shape)
        self._grid = grid
        self._values = values
    def grid(self):
        """:returns: a :class:`datatank_py.DTStructuredGrid2D.DTStructuredGrid2D` instance"""
        return self._grid
    def values(self):
        """:returns: a 2D numpy array of values at each grid node"""
        return self._values
    def __dt_type__(self):
        return "2D Structured Mesh"
    def __str__(self):
        return self.__dt_type__() + ":\n " + str(self._grid) + "\n" + " Values:\n " + str(self._values)
    def __dt_write__(self, datafile, name):
        # Grid is stored under `name`; values under the "<name>_V" key.
        datafile.write_anonymous(self._grid, name)
        datafile.write_anonymous(self._values, name + "_V")
    def write_with_shared_grid(self, datafile, name, grid_name, time, time_index):
        """Allows saving a single grid and sharing it amongst different time
        values of a variable.
        :param datafile: a :class:`datatank_py.DTDataFile.DTDataFile` open for writing
        :param name: the mesh variable's name
        :param grid_name: the grid name to be shared (will not be visible in DataTank)
        :param time: the time value for this step (DataTank's ``t`` variable)
        :param time_index: the corresponding integer index of this time step
        This is an advanced technique, but it can give a significant space savings in
        a data file. It's not widely implemented, since it's not clear yet if this
        is the best API.
        """
        # Write the shared grid (and the sequence marker) only once.
        if grid_name not in datafile:
            datafile.write_anonymous(self._grid, grid_name)
            datafile.write_anonymous(self.__dt_type__(), "Seq_" + name)
        varname = "%s_%d" % (name, time_index)
        datafile.write_anonymous(grid_name, varname)
        datafile.write_anonymous(self._values, varname + "_V")
        datafile.write_anonymous(np.array((time,)), varname + "_time")
    @classmethod
    def from_data_file(self, datafile, name):
        # Reconstruct a mesh from a stored grid plus its "<name>_V" values.
        grid = DTStructuredGrid2D.from_data_file(datafile, name)
        values = datafile[name + "_V"]
        return DTStructuredMesh2D(values, grid=grid)
if __name__ == '__main__':
    from DTDataFile import DTDataFile
    # Smoke test: write an exponentially spaced grid and a ramp of values.
    with DTDataFile("test/structured_mesh2D.dtbin", truncate=True) as df:
        xvals = np.exp(np.array(range(18), dtype=np.float) / 5)
        yvals = np.exp(np.array(range(20), dtype=np.float) / 5)
        grid = DTStructuredGrid2D(xvals, yvals)
        values = np.zeros(len(xvals) * len(yvals))
        # NOTE(review): xrange is Python 2 only -- this demo block does
        # not run under Python 3 as written.
        for i in xrange(len(values)):
            values[i] = i
        # DataTank indexes differently from numpy; the grid is z,y,x ordered
        values = values.reshape(grid.shape())
        mesh = DTStructuredMesh2D(values, grid=grid)
        df["2D mesh"] = mesh
xfleckx/BeMoBI_Tools | analytics/BeMoBI_PyAnalytics/BeMoBI_PyAnalytics.py | Python | mit | 682 | 0.013196 | import os
import pandas as pd
import seaborn as sns
dataDir = '..\\Test_Data\\'
pilotMarkerDataFile = 'Pilot.csv'
df = pd.read_csv( dataDir + '\\' + pilotMarkerDataFile,sep='\t', engine='python')
repr(df.head())
# TODO times per position
# plotting a heatmap http://stanford.edu/~mwaskom/software/seaborn/examples/many_pairwise_correlations.html
## Generate a custom diverging colormap
#cmap = sns.diverging_palett | e(220, 10, as_cmap=True)
# Draw the heatmap with the mask and correct aspect ratio
#sns.heatmap(timesAtPositions, mask=mask, cmap=cmap, vmax=.3,
# square=True, xticklabels=5, yticklabels=5,
# | linewidths=.5, cbar_kws={"shrink": .5}, ax=ax) |
thenenadx/forseti-security | google/cloud/security/common/gcp_api/bigquery.py | Python | apache-2.0 | 4,496 | 0.000222 | # Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE- | 2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Wrapper for the BigQuery API client."""
import gflags as flags
from ratelimiter import RateLimiter
from google.cloud.security.common.gcp_api import _base_clie | nt
from google.cloud.security.common.util import log_util
# TODO: The next editor must remove this disable and correct issues.
# pylint: disable=missing-type-doc,missing-return-type-doc,missing-return-doc
FLAGS = flags.FLAGS
flags.DEFINE_integer('max_bigquery_api_calls_per_100_seconds', 17000,
'BigQuery Discovery requests per 100 seconds.')
LOGGER = log_util.get_logger(__name__)
class BigQueryClient(_base_client.BaseClient):
    """BigQuery Client manager.

    Wraps the BigQuery discovery API with a rate limiter sized from the
    --max_bigquery_api_calls_per_100_seconds flag.
    """
    API_NAME = 'bigquery'
    # TODO: Remove pylint disable.
    # pylint: disable=invalid-name
    # Quota window used by the rate limiter, in seconds.
    DEFAULT_QUOTA_TIMESPAN_PER_SECONDS = 100
    # pylint: enable=invalid-name
    def __init__(self):
        super(BigQueryClient, self).__init__(
            api_name=self.API_NAME)
        self.rate_limiter = self.get_rate_limiter()
    def get_rate_limiter(self):
        """Return an appropriate rate limiter."""
        return RateLimiter(FLAGS.max_bigquery_api_calls_per_100_seconds,
                           self.DEFAULT_QUOTA_TIMESPAN_PER_SECONDS)
    def get_bigquery_projectids(self):
        """Request and page through bigquery projectids.
        Returns: A list of project_ids enabled for bigquery.
            ['project-id',
             'project-id',
             '...']
            If there are no project_ids enabled for bigquery an empty list will
            be returned.
        """
        key = 'projects'
        bigquery_projects_api = self.service.projects()
        request = bigquery_projects_api.list()
        paged_results = self._build_paged_result(
            request, bigquery_projects_api, self.rate_limiter)
        # Flatten the paged responses, then keep only the project ids.
        flattened_result = self._flatten_list_results(paged_results, key)
        project_ids = []
        for result in flattened_result:
            project_ids.append(result.get('id'))
        return project_ids
    def get_datasets_for_projectid(self, project_id):
        """Return BigQuery datasets stored in the requested project_id.
        Args:
            project_id: String representing the project id.
        Returns: A list of datasetReference objects for a given project_id.
            [{'datasetId': 'dataset-id',
              'projectId': 'project-id'},
             {...}]
        """
        key = 'datasets'
        bigquery_datasets_api = self.service.datasets()
        # all=True includes hidden datasets in the listing.
        request = bigquery_datasets_api.list(projectId=project_id, all=True)
        paged_results = self._build_paged_result(
            request, bigquery_datasets_api, self.rate_limiter)
        flattened_result = self._flatten_list_results(paged_results, key)
        datasets = []
        for result in flattened_result:
            datasets.append(result.get('datasetReference'))
        return datasets
    def get_dataset_access(self, project_id, dataset_id):
        """Return the access portion of the dataset resource object.
        Args:
            project_id: String representing the project id.
            dataset_id: String representing the dataset id.
        Returns: A list of access lists for a given project_id and dataset_id.
            [{'role': 'WRITER', 'specialGroup': 'projectWriters'},
             {'role': 'OWNER', 'specialGroup': 'projectOwners'},
             {'role': 'OWNER', 'userByEmail': 'user@domain.com'},
             {'role': 'READER', 'specialGroup': 'projectReaders'}]
        """
        key = 'access'
        bigquery_datasets_api = self.service.datasets()
        request = bigquery_datasets_api.get(projectId=project_id,
                                            datasetId=dataset_id)
        paged_results = self._build_paged_result(
            request, bigquery_datasets_api, self.rate_limiter)
        return self._flatten_list_results(paged_results, key)
|
sander76/home-assistant | tests/components/template/test_select.py | Python | apache-2.0 | 8,099 | 0.00037 | """The tests for the Template select platform."""
import pytest
from homeassistant import setup
from homeassistant.components.input_select import (
ATTR_OPTION as INPUT_SELECT_ATTR_OPTION,
ATTR_OPTIONS as INPUT_SELECT_ATTR_OPTIONS,
DOMAIN | as INPUT_SELECT_DOMAIN,
SERVICE_SELECT_OPTION as INPUT_SELECT_SERVICE_SELECT_OPTION,
SERVICE_SET_OPTIONS,
)
from homeassistant.components.select.const import (
ATTR_OPTION as SELECT_ATTR_OPTION,
ATTR_OPTIONS as SELECT_ATTR_OPTIONS,
DOMAIN as SELECT_DOMAIN,
SERVICE_SELECT_OPTION as SELECT_SERVICE_SELECT_OPTION,
)
from homeassistant.const import CONF_ENTITY_ID, STATE_UNKNOWN
from homeassistant.core import Context
from home | assistant.helpers.entity_registry import async_get
from tests.common import (
assert_setup_component,
async_capture_events,
async_mock_service,
)
_TEST_SELECT = "select.template_select"
# Represent for select's current_option
_OPTION_INPUT_SELECT = "input_select.option"
@pytest.fixture
def calls(hass):
    """Track calls to a mock service."""
    # Returns the list async_mock_service appends ServiceCall objects to.
    return async_mock_service(hass, "test", "automation")
async def test_missing_optional_config(hass, calls):
    """Test: missing optional template is ok."""
    with assert_setup_component(1, "template"):
        assert await setup.async_setup_component(
            hass,
            "template",
            {
                "template": {
                    "select": {
                        "state": "{{ 'a' }}",
                        "select_option": {"service": "script.select_option"},
                        "options": "{{ ['a', 'b'] }}",
                    }
                }
            },
        )
    await hass.async_block_till_done()
    await hass.async_start()
    await hass.async_block_till_done()
    # The select should come up with current option 'a' and options ['a', 'b'].
    _verify(hass, "a", ["a", "b"])
async def test_missing_required_keys(hass, calls):
    """Test: missing required fields will fail."""
    # Missing "state": the template integration rejects the config.
    with assert_setup_component(0, "template"):
        assert await setup.async_setup_component(
            hass,
            "template",
            {
                "template": {
                    "select": {
                        "select_option": {"service": "script.select_option"},
                        "options": "{{ ['a', 'b'] }}",
                    }
                }
            },
        )
    # Missing "options": the select platform sets up no entities.
    with assert_setup_component(0, "select"):
        assert await setup.async_setup_component(
            hass,
            "select",
            {
                "template": {
                    "select": {
                        "state": "{{ 'a' }}",
                        "select_option": {"service": "script.select_option"},
                    }
                }
            },
        )
    # Missing "select_option": the select platform sets up no entities.
    with assert_setup_component(0, "select"):
        assert await setup.async_setup_component(
            hass,
            "select",
            {
                "template": {
                    "select": {
                        "state": "{{ 'a' }}",
                        "options": "{{ ['a', 'b'] }}",
                    }
                }
            },
        )
    await hass.async_block_till_done()
    await hass.async_start()
    await hass.async_block_till_done()
    # None of the invalid configs should have created an entity.
    assert hass.states.async_all() == []
async def test_templates_with_entities(hass, calls):
    """Test templates with values from other entities."""
    with assert_setup_component(1, "input_select"):
        assert await setup.async_setup_component(
            hass,
            "input_select",
            {
                "input_select": {
                    "option": {
                        "options": ["a", "b"],
                        "initial": "a",
                        "name": "Option",
                    },
                }
            },
        )
    with assert_setup_component(1, "template"):
        assert await setup.async_setup_component(
            hass,
            "template",
            {
                "template": {
                    "unique_id": "b",
                    "select": {
                        "state": f"{{{{ states('{_OPTION_INPUT_SELECT}') }}}}",
                        "options": f"{{{{ state_attr('{_OPTION_INPUT_SELECT}', '{INPUT_SELECT_ATTR_OPTIONS}') }}}}",
                        "select_option": {
                            "service": "input_select.select_option",
                            "data_template": {
                                "entity_id": _OPTION_INPUT_SELECT,
                                "option": "{{ option }}",
                            },
                        },
                        "optimistic": True,
                        "unique_id": "a",
                    },
                }
            },
        )
    await hass.async_block_till_done()
    await hass.async_start()
    await hass.async_block_till_done()
    # Entity unique id is the template unique_id joined to the select's.
    ent_reg = async_get(hass)
    entry = ent_reg.async_get(_TEST_SELECT)
    assert entry
    assert entry.unique_id == "b-a"
    _verify(hass, "a", ["a", "b"])
    # Changing the source input_select propagates to the template select.
    await hass.services.async_call(
        INPUT_SELECT_DOMAIN,
        INPUT_SELECT_SERVICE_SELECT_OPTION,
        {CONF_ENTITY_ID: _OPTION_INPUT_SELECT, INPUT_SELECT_ATTR_OPTION: "b"},
        blocking=True,
    )
    await hass.async_block_till_done()
    _verify(hass, "b", ["a", "b"])
    # Changing the source's options propagates too.
    await hass.services.async_call(
        INPUT_SELECT_DOMAIN,
        SERVICE_SET_OPTIONS,
        {
            CONF_ENTITY_ID: _OPTION_INPUT_SELECT,
            INPUT_SELECT_ATTR_OPTIONS: ["a", "b", "c"],
        },
        blocking=True,
    )
    await hass.async_block_till_done()
    _verify(hass, "a", ["a", "b", "c"])
    # Selecting on the template select is optimistic: reflected immediately.
    await hass.services.async_call(
        SELECT_DOMAIN,
        SELECT_SERVICE_SELECT_OPTION,
        {CONF_ENTITY_ID: _TEST_SELECT, SELECT_ATTR_OPTION: "c"},
        blocking=True,
    )
    _verify(hass, "c", ["a", "b", "c"])
async def test_trigger_select(hass):
    """Test trigger based template select."""
    events = async_capture_events(hass, "test_number_event")
    assert await setup.async_setup_component(
        hass,
        "template",
        {
            "template": [
                {"invalid": "config"},
                # Config after invalid should still be set up
                {
                    "unique_id": "listening-test-event",
                    "trigger": {"platform": "event", "event_type": "test_event"},
                    "select": [
                        {
                            "name": "Hello Name",
                            "unique_id": "hello_name-id",
                            "state": "{{ trigger.event.data.beer }}",
                            "options": "{{ trigger.event.data.beers }}",
                            "select_option": {"event": "test_number_event"},
                            "optimistic": True,
                        },
                    ],
                },
            ],
        },
    )
    await hass.async_block_till_done()
    await hass.async_start()
    await hass.async_block_till_done()
    # Before the trigger fires, the entity exists but has no state yet.
    state = hass.states.get("select.hello_name")
    assert state is not None
    assert state.state == STATE_UNKNOWN
    context = Context()
    hass.bus.async_fire(
        "test_event", {"beer": "duff", "beers": ["duff", "alamo"]}, context=context
    )
    await hass.async_block_till_done()
    # The trigger payload populates both the state and the options.
    state = hass.states.get("select.hello_name")
    assert state is not None
    assert state.state == "duff"
    assert state.attributes["options"] == ["duff", "alamo"]
    await hass.services.async_call(
        SELECT_DOMAIN,
        SELECT_SERVICE_SELECT_OPTION,
        {CONF_ENTITY_ID: "select.hello_name", SELECT_ATTR_OPTION: "alamo"},
        blocking=True,
    )
    # select_option is wired to fire test_number_event exactly once.
    assert len(events) == 1
    assert events[0].event_type == "test_number_event"
def _verify(hass, expected_current_option, expected_options):
    """Verify select's state."""
    # Checks both the current option (entity state) and the options attribute.
    state = hass.states.get(_TEST_SELECT)
    attributes = state.attributes
    assert state.state == str(expected_current_option)
    assert attributes.get(SELECT_ATTR_OPTIONS) == expected_options
|
deliveryhero/lymph-sqlalchemy | lymph/sqlalchemy/utils.py | Python | apache-2.0 | 878 | 0 |
class BulkInsert(object):
    """Accumulate ORM objects and persist them to the session in batches.

    Each :meth:`add` buffers one object; once *count* objects are buffered
    they are written with ``session.bulk_save_objects``.  A bulk inserter
    may declare *dependencies* -- other BulkInsert instances flushed first,
    so parent rows land before their children.

    Usage::

        user_insert = BulkInsert(session)
        address_insert = BulkInsert(session, dependencies=[user_insert])
        for user in users:
            user_insert.add(user)
            for address in user_addresses:
                address_insert.add(address)
        address_insert.flush()
    """

    def __init__(self, session, count=250, dependencies=None):
        self.session = session
        self.count = count                      # flush threshold
        self._objects = []                      # pending, not yet persisted
        self.dependencies = dependencies or []

    def add(self, obj):
        """Buffer *obj*; flush automatically when the batch is full."""
        self._objects.append(obj)
        if len(self._objects) < self.count:
            return
        self.flush()

    def flush(self):
        """Persist all buffered objects (dependencies first) and clear the buffer."""
        for dependency in self.dependencies:
            dependency.flush()
        self.session.bulk_save_objects(self._objects)
        self.session.flush()
        self._objects = []
|
RoozbehFarhoodi/McNeuron | data_transforms.py | Python | mit | 6,699 | 0.000149 | """Collection of useful data transforms."""
# Imports
import numpy as np
from McNeuron import Neuron
import scipy
import scipy.linalg # SciPy Linear Algebra Library
from numpy.linalg import inv
def get_leaves(nodes, parents):
    """
    Compute the list of leaf nodes.

    A leaf is any node that never appears as somebody's parent.

    Parameters
    ----------
    nodes: list
        list of all nodes in the tree
    parents: list
        list of parents for each node

    Returns
    -------
    leaves: numpy array
        sorted leaf nodes
    """
    candidates = set(nodes).difference(parents)
    return np.sort(list(candidates))
def encode_prufer(parents, verbose=0):
    """
    Convert the parents sequence to a prufer sequence.

    Note: *parents* is consumed (mutated) by the encoding; pass a copy if
    the caller still needs it.

    Parameters
    ----------
    parents: list
        list of parents for each node (root's parent, e.g. -1, included)
    verbose: bool
        default is False

    Returns
    -------
    prufer: list
        corresponding prufer sequence
    """
    n_nodes = len(parents)
    # `nodes` must be a real list: entries are deleted as leaves are
    # consumed (the original Python 2 `range(...)` was a list; on
    # Python 3 `del` on a range object raises TypeError).
    nodes = list(range(n_nodes))
    prufer = list()
    # A tree on n nodes has a Prufer sequence of length n - 2.
    for n in range(n_nodes - 2):
        # Recalculate all the leaves
        leaves = get_leaves(nodes, parents)
        if verbose:
            print('leaves', leaves)
        # Add the parent of the lowest numbered leaf to the sequence
        leaf_idx = np.where(nodes == leaves[0])[0][0]
        prufer.append(parents[leaf_idx])
        if verbose:
            print('prufer', prufer)
        # Remove the lowest numbered leaf and its corresponding parent
        del nodes[leaf_idx]
        del parents[leaf_idx]
        if verbose:
            print('nodes', nodes)
            print('parents', parents)
            print(60 * '-')
    return prufer
def decode_prufer(prufer, verbose=0):
    """
    Convert the prufer sequence to a parents sequence.

    Note: *prufer* is consumed (mutated) during decoding; pass a copy if
    the caller still needs it.

    Parameters
    ----------
    prufer: list
        prufer sequence
    verbose: bool
        default is False

    Returns
    -------
    parents: list
        corresponding list of parents for each node (root's parent is -1)
    """
    n_nodes = len(prufer) + 2
    n_prufer = len(prufer)
    # `nodes` must be a real list: entries are deleted as leaves are used
    # (Python 3's range object does not support `del`; the original
    # Python 2 code also used `print` statements, which are a syntax
    # error on Python 3).
    nodes = list(range(n_nodes))
    parents = -1 * np.ones(n_nodes)
    for n in range(n_prufer):
        if verbose:
            print(nodes)
            print(prufer)
        leaves = list(get_leaves(nodes, prufer))
        k = leaves[0]
        j = prufer[0]
        if k == 0:
            # Never attach node 0 (the root); use the next smallest leaf.
            k = leaves[1]
        if verbose:
            print(k, j)
        parents[k] = j
        leaf_idx = np.where(nodes == k)[0][0]
        del nodes[leaf_idx]
        del prufer[0]
        if verbose:
            print(60 * '-')
    # Connect the last two remaining nodes.
    parents[nodes[1]] = nodes[0]
    return list(parents.astype(int))
def reordering_prufer(parents, locations):
    """
    Reorder a given parents sequence.
    Parent labels < children labels.
    Parameters
    ----------
    parents: numpy array
        sequence of parents indices
        starts with -1
    locations: numpy array
        n - 1 x 3
    Returns
    -------
    parents_reordered: numpy array
        sequence of parents indices
    locations_reordered: numpy array
        n - 1 x 3
    """
    length = len(parents)
    # Construct the adjacency matrix
    adjacency = np.zeros([length, length])
    adjacency[parents[1:], range(1, length)] = 1
    # Discover the permutation with Schur decomposition: the Schur form of
    # the full (transitive) adjacency is triangular, so the permutation it
    # returns orders every parent before its children.
    full_adjacency = np.linalg.inv(np.eye(length) - adjacency)
    full_adjacency_permuted, permutation_matrix = \
        scipy.linalg.schur(full_adjacency)
    # Reorder the parents
    parents_reordered = \
        np.argmax(np.eye(length) - np.linalg.inv(full_adjacency_permuted),
                  axis=0)
    parents_reordered[0] = -1
    # Reorder the locations; a dummy origin row is prepended so the
    # permutation (which includes the root) can be applied, then dropped.
    locations = np.append([[0., 0., 0.]], locations, axis=0)
    locations_reordered = np.dot(permutation_matrix, locations)
    return parents_reordered, locations_reordered[1:, :]
def swc_to_neuron(matrix):
    """
    Return the Neuron object from swc matrix.
    Parameters
    ----------
    matrix: numpy array
        numpy array of the size n_nodes*7.
    Return
    ------
    Neuron: Neuron
        a neuron obj with the given swc format.
    """
    # Thin wrapper over the Neuron constructor's 'Matrix of swc' mode.
    return Neuron(file_format='Matrix of swc', input_file=matrix)
def downsample_neuron(neuron,
                      method='random',
                      number=30):
    """
    Downsampling neuron with different methods.
    Parameters
    ----------
    neuron: Neuron
        given neuron to subsample.
    number: int
        the number of subsamling.
    method: str
        the methods to subsample. It can be: 'random', 'regularize','prune',
        'strighten', 'strighten-prune'.
    Return
    ------
    Neuron: Neuron
        a subsampled neuron with given number of nodes.
    """
    # NOTE(review): `subsample` is not imported in this module, and only the
    # 'random' method is handled -- any other method silently returns None.
    # Confirm against the intended subsample module.
    if(method == 'random'):
        return subsample.random_subsample(neuron, number)
def get_data(neuron_database, method, subsampling_numbers):
    """
    Preparing data for the learning.
    Parameters
    ----------
    neuron_database: list
        the elements of the list are Neuron obj.
    method: str
        the method to subsample.
    subsampling_numbers: array of int
        The range of number to subsample.
    Returns
    -------
    data: dic
        a dic of two classes: 'morphology' and 'geometry'.
        'geometry' is a list of size sampling_division. The i-th element of the
        list is an array of size (datasize* n_nodes - 1*3).
        'morphology' is a list of size sampling_division. The i-th element of
        the list is an array of size (datasize* n_nodes* n_nodes -2).
    """
    # NOTE(review): despite the docstring, each returned entry is a dict
    # keyed 'n<subsampling_numbers>' holding a single array, and
    # subsampling_numbers is used as one int, not a range -- confirm.
    l = len(neuron_database)
    morph = np.zeros([l, subsampling_numbers - 2])
    geo = np.zeros([l, subsampling_numbers - 1, 3])
    data = dict()
    for i in range(l):
        sub_neuron = downsample_neuron(neuron=neuron_database[i],
                                       method=method,
                                       number=subsampling_numbers)
        par = sub_neuron.parent_index
        par[0] = -1
        # Prufer code of the subsampled tree and its node coordinates.
        morph[i, :] = encode_prufer(par.tolist())
        geo[i, :, :] = sub_neuron.location[:, 1:].T
    data['morphology'] = dict()
    data['morphology']['n'+str(subsampling_numbers)] = morph
    data['geometry'] = dict()
    data['geometry']['n'+str(subsampling_numbers)] = geo
    return data
def make_swc_from_prufer_and_locations(data):
    """Build a Neuron from a dict with 'morphology' (Prufer code) and
    'geometry' (node locations) entries by assembling an SWC matrix."""
    # the prufer code and the location are given.
    parents_code = np.array(decode_prufer(list(data['morphology'])))
    location = data['geometry']
    M = np.zeros([len(parents_code), 7])
    M[:, 0] = np.arange(1, len(parents_code)+1)
    # SWC type column: 1 for the root node, 2 for all others.
    M[0, 1] = 1
    M[1:, 1] = 2
    M[1:, 2:5] = location
    # SWC node ids are 1-indexed; shift every parent except the root's -1.
    parents_code[1:] = parents_code[1:] + 1
    M[:, 6] = parents_code
    return Neuron(file_format='Matrix of swc', input_file=M)
|
callowayproject/django-snippets | snippets/models.py | Python | apache-2.0 | 321 | 0.012461 | from django.db import models
class Snippet(models.Model):
    """A text snippet. Not meant for use by anyone other than a designer"""
    # Human-readable label for the snippet.
    name = models.CharField(max_length=255)
    # The snippet body itself; may be empty.
    snippet = models.TextField(blank=True)
    class Meta:
        pass
    def __unicode__(self):
        # NOTE(review): __unicode__ is honored by Django only on Python 2;
        # Python 3 would need __str__.
        return self.snippet
|
Wei1234c/Elastic_Network_of_Things_with_MQTT_and_MicroPython | codes/node/asynch_result.py | Python | gpl-3.0 | 1,312 | 0.007622 | # coding: utf-8
import time
import config_mqtt
class Asynch_result:
    """Future-like handle for an MQTT request awaiting its reply."""

    def __init__(self, correlation_id, requests, yield_to):
        # correlation_id: key identifying this pending request.
        # requests: shared dict of pending requests, keyed by correlation id.
        # yield_to: cooperative-yield callback invoked while polling.
        self.correlation_id = correlation_id
        self._requests_need_result = requests
        self.yield_to = yield_to

    def get(self, timeout = config_mqtt.ASYNCH_RESULT_TIMEOUT):
        """Poll (cooperatively) until the reply arrives or *timeout* seconds pass.

        Raises Exception on timeout or when the correlation id is unknown.
        """
        # time.sleep(config_mqtt.ASYNCH_RESULT_WAIT_BEFORE_GET)
        start_time = time.time()
        request = self._requests_need_result.get(self.correlation_id)
        if request:
            while True:
                current_time = time.time()
                if request.get('is_replied'):
                    result = request.get('result')
                    # self._requests_need_result.pop(self.correlation_id)
                    return result
                else:
                    if current_time - start_time > timeout: # timeout
                        # self._requests_need_result.pop(self.correlation_id)
                        raise Exception('Timeout: no result returned for request with correlation_id {}'.format(self.correlation_id))
                    else:
                        # Not replied yet and not timed out: yield control so
                        # the MQTT loop can make progress.
                        self.yield_to()
        else:
            raise Exception('No such request for request with correlation_id {}'.format(self.correlation_id))
|
yu4u/age-gender-estimation | demo.py | Python | mit | 4,936 | 0.003039 | from pathlib import Path
import cv2
import dlib
import numpy as np
import argparse
from contextlib import contextmanager
from omegaconf import OmegaConf
from tensorflow.keras.utils import get_file
from src.factory import get_model
pretrained_model = "https://github.com/yu4u/age-gender-estimation/releases/download/v0.6/EfficientNetB3_224_weights.11-3.44.hdf5"
modhash = '6d7f7b7ced093a8b3ef6399163da6ece'
def get_args():
    """Parse and return command-line arguments for the demo script."""
    parser = argparse.ArgumentParser(description="This script detects faces from web cam input, "
                                                 "and estimates age and gender for the detected faces.",
                                     formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument("--weight_file", type=str, default=None,
                        help="path to weight file (e.g. weights.28-3.73.hdf5)")
    parser.add_argument("--margin", type=float, default=0.4,
                        help="margin around detected face for age-gender estimation")
    parser.add_argument("--image_dir", type=str, default=None,
                        help="target image directory; if set, images in image_dir are used instead of webcam")
    args = parser.parse_args()
    return args
def draw_label(image, point, label, font=cv2.FONT_HERSHEY_SIMPLEX,
               font_scale=0.8, thickness=1):
    """Draw ``label`` at ``point`` over a filled blue background box."""
    (text_w, text_h), _baseline = cv2.getTextSize(label, font, font_scale, thickness)
    left, bottom = point
    # Background rectangle sized exactly to the rendered text.
    cv2.rectangle(image, (left, bottom - text_h), (left + text_w, bottom), (255, 0, 0), cv2.FILLED)
    cv2.putText(image, label, point, font, font_scale, (255, 255, 255), thickness, lineType=cv2.LINE_AA)
@contextmanager
def video_capture(*args, **kwargs):
    # Context manager guaranteeing the capture device is released even if
    # the consumer raises while reading frames.
    cap = cv2.VideoCapture(*args, **kwargs)
    try:
        yield cap
    finally:
        cap.release()
def yield_images():
    """Yield BGR frames from the default webcam (device 0) at 640x480.

    Raises RuntimeError if a frame cannot be captured.
    """
    # capture video
    with video_capture(0) as cap:
        cap.set(cv2.CAP_PROP_FRAME_WIDTH, 640)
        cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 480)
        while True:
            # get video frame
            ret, img = cap.read()
            if not ret:
                raise RuntimeError("Failed to capture image")
            yield img
def yield_images_from_dir(image_dir):
    """Yield images from ``image_dir`` resized so the longer side is 640px.

    Files cv2 cannot read (imread returns None) are skipped silently.
    """
    image_dir = Path(image_dir)
    for image_path in image_dir.glob("*.*"):
        img = cv2.imread(str(image_path), 1)  # 1 = force 3-channel BGR
        if img is not None:
            h, w, _ = img.shape
            r = 640 / max(w, h)
            yield cv2.resize(img, (int(w * r), int(h * r)))
def main():
    """Detect faces (webcam or image directory) and annotate estimated age/gender."""
    args = get_args()
    weight_file = args.weight_file
    margin = args.margin
    image_dir = args.image_dir

    if not weight_file:
        # Download the released pretrained weights on first use.
        weight_file = get_file("weights.28-3.73.hdf5", pretrained_model, cache_subdir="pretrained_models",
                               file_hash=modhash, cache_dir=str(Path(__file__).resolve().parent))

    # for face detection
    detector = dlib.get_frontal_face_detector()

    # load model and weights; model name and input size are encoded in the
    # weight file name, e.g. "EfficientNetB3_224_weights...".
    model_name, img_size = Path(weight_file).stem.split("_")[:2]
    img_size = int(img_size)
    cfg = OmegaConf.from_dotlist([f"model.model_name={model_name}", f"model.img_size={img_size}"])
    model = get_model(cfg)
    model.load_weights(weight_file)

    image_generator = yield_images_from_dir(image_dir) if image_dir else yield_images()

    for img in image_generator:
        input_img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        img_h, img_w, _ = np.shape(input_img)

        # detect faces using dlib detector
        detected = detector(input_img, 1)
        faces = np.empty((len(detected), img_size, img_size, 3))

        if len(detected) > 0:
            for i, d in enumerate(detected):
                x1, y1, x2, y2, w, h = d.left(), d.top(), d.right() + 1, d.bottom() + 1, d.width(), d.height()
                # Expand the crop by `margin` on each side, clamped to image bounds.
                xw1 = max(int(x1 - margin * w), 0)
                yw1 = max(int(y1 - margin * h), 0)
                xw2 = min(int(x2 + margin * w), img_w - 1)
                yw2 = min(int(y2 + margin * h), img_h - 1)
                cv2.rectangle(img, (x1, y1), (x2, y2), (255, 0, 0), 2)
                faces[i] = cv2.resize(img[yw1:yw2 + 1, xw1:xw2 + 1], (img_size, img_size))

            # predict ages and genders of the detected faces
            results = model.predict(faces)
            predicted_genders = results[0]
            ages = np.arange(0, 101).reshape(101, 1)
            # Expected age = weighted sum over the 101-way age distribution.
            predicted_ages = results[1].dot(ages).flatten()

            # draw results
            for i, d in enumerate(detected):
                label = "{}, {}".format(int(predicted_ages[i]),
                                        "M" if predicted_genders[i][0] < 0.5 else "F")
                draw_label(img, (d.left(), d.top()), label)

        cv2.imshow("result", img)
        # Wait for a key press per image in directory mode; 30 ms for webcam.
        key = cv2.waitKey(-1) if image_dir else cv2.waitKey(30)

        if key == 27:  # ESC
            break
if __name__ == '__main__':
main()
|
MatthewJohn/vacker | vacker/media/__init__.py | Python | apache-2.0 | 1,105 | 0.000905 |
from bson.objectid import ObjectId
import datetime
import vacker.database
import vacker.config
class File(object):
    """Read-only wrapper around a media document stored in the database."""

    def __init__(self, file_id, document=None):
        self._id = file_id
        # If no document is supplied, fall back to fetching it (currently
        # a stub that returns None -- see _get_document).
        self._document = self._get_document() if document is None else document

    def _get_document(self):
        # TODO: not implemented; returns None.
        pass

    def get_id(self):
        return self._id

    def get_path(self):
        return self._document['g_path']

    def get_mime_type(self):
        return self._document['g_mime_type']

    def get_checksums(self):
        """Return the (sha1, sha512) checksum pair for this file."""
        return self._document['g_sha1'], self._document['g_sha512']

    def delete(self):
        """Remove this file's document from the media collection."""
        database_connection = vacker.database.Database.get_database()
        database_connection.media.remove({'_id': ObjectId(self.get_id())})

    def _get_thumbnail_path(self):
        # Thumbnails are stored flat under THUMBNAIL_DIR, named by sha512.
        return '{0}/{1}'.format(
            vacker.config.Config.get('THUMBNAIL_DIR'),
            self._document['g_sha512'])

    def get(self, attr, default_val=None):
        """Dict-style access to any field of the underlying document."""
        return self._document.get(attr, default_val)

    def get_thumbnail(self):
        return self._get_thumbnail_path()
|
sputnick-dev/weboob | modules/allrecipes/module.py | Python | agpl-3.0 | 2,008 | 0 | # -*- coding: utf-8 -*-
# Copyright(C) 2013 Julien Veyssier
#
# This file is part of weboob.
#
# weboob is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# weboob is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with weboob. If not, see <http://www.gnu.org/licenses/>.
from weboob.capabilities.recipe import CapRecipe, Recipe
from weboob.tools.backend import Module
from .browser import AllrecipesBrowser
from urllib import quote_plus
__all__ = ['AllrecipesModule']
class AllrecipesModule(Module, CapRecipe):
    """Weboob backend for the allrecipes.com English recipe website."""
    NAME = 'allrecipes'
    MAINTAINER = u'Julien Veyssier'
    EMAIL = 'julien.veyssier@aiur.fr'
    VERSION = '1.1'
    DESCRIPTION = u'Allrecipes English recipe website'
    LICENSE = 'AGPLv3+'
    BROWSER = AllrecipesBrowser

    def get_recipe(self, id):
        """Fetch a single recipe by its identifier."""
        return self.browser.get_recipe(id)

    def iter_recipes(self, pattern):
        """Search recipes matching ``pattern`` (UTF-8 encoded, URL-quoted)."""
        return self.browser.iter_recipes(quote_plus(pattern.encode('utf-8')))

    def fill_recipe(self, recipe, fields):
        """Complete a partial Recipe when heavyweight fields are requested."""
        if 'nb_person' in fields or 'instructions' in fields:
            rec = self.get_recipe(recipe.id)
            recipe.picture_url = rec.picture_url
            recipe.instructions = rec.instructions
            recipe.ingredients = rec.ingredients
            recipe.comments = rec.comments
            recipe.author = rec.author
            recipe.nb_person = rec.nb_person
            recipe.cooking_time = rec.cooking_time
            recipe.preparation_time = rec.preparation_time
        return recipe

    OBJECTS = {
        Recipe: fill_recipe,
    }
|
camilonova/sentry | src/sentry/api/endpoints/team_groups_new.py | Python | bsd-3-clause | 1,628 | 0.001229 | from __future__ import absolute_import
from datetime import timedelta
from django.utils import timezone
from rest_framework.response import Response
from sentry.api.base import Endpoint
from sentry.api.permissions import assert_perm
from sentry.api.serializers import serialize
from sentry.models import Group, GroupStatus, Project, Team
class TeamGroupsNewEndpoint(Endpoint):
    def get(self, request, team_id):
        """
        Return a list of the newest groups for a given team.

        The resulting query will find groups which have been seen since the
        cutoff date, and then sort those by score, returning the highest scoring
        groups first.
        """
        team = Team.objects.get_from_cache(
            id=team_id,
        )

        assert_perm(team, request.user, request.auth)

        # Window (minutes) and page size come from the query string, with
        # sane defaults and a hard cap of 100 results.
        minutes = int(request.REQUEST.get('minutes', 15))
        limit = min(100, int(request.REQUEST.get('limit', 10)))

        project_list = Project.objects.get_for_user(user=request.user, team=team)
        project_dict = {p.id: p for p in project_list}

        cutoff = timedelta(minutes=minutes)
        cutoff_dt = timezone.now() - cutoff

        group_list = list(Group.objects.filter(
            project__in=project_dict.keys(),
            status=GroupStatus.UNRESOLVED,
            active_at__gte=cutoff_dt,
        ).extra(
            select={'sort_value': 'score'},
        ).order_by('-score', '-first_seen')[:limit])

        # Pre-populate each group's project cache to avoid per-group lookups
        # during serialization.
        for group in group_list:
            group._project_cache = project_dict.get(group.project_id)

        return Response(serialize(group_list, request.user))
|
softlayer/softlayer-python | SoftLayer/CLI/block/access/password.py | Python | mit | 849 | 0.002356 | """Modifies a password for a volume's access"""
# :license: MIT, see LICENSE for more details.
import click
import SoftLayer
from SoftLayer.CLI import environment
@click.command()
@click.argument('access_id')
@click.option('--password', '-p', multiple=False,
              help='Password you want to set, this command will fail if the password is not strong')
@environment.pass_env
def cli(env, access_id, password):
    """Changes a password for a volume's access.

    access id is the allowed_host_id from slcli block access-list
    """
    block_manager = SoftLayer.BlockStorageManager(env.client)
    result = block_manager.set_credential_password(access_id=access_id, password=password)

    if result:
        click.echo('Password updated for %s' % access_id)
    else:
        click.echo('FAILED updating password for %s' % access_id)
|
sjschmidt44/django-imager | imagersite/imager_images/migrations/0003_face.py | Python | mit | 822 | 0.002433 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Create the ``Face`` model: a named, rectangular region on a photo."""

    dependencies = [
        ('imager_images', '0002_auto_20150730_2342'),
    ]

    operations = [
        migrations.CreateModel(
            name='Face',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('x', models.IntegerField()),
                ('y', models.IntegerField()),
                ('width', models.IntegerField()),
                ('height', models.IntegerField()),
                ('name', models.CharField(max_length=256)),
                ('photo', models.ForeignKey(related_name='faces', to='imager_images.Photos')),
            ],
        ),
    ]
|
mozilla/popcorn_maker | vendor-local/lib/python/mock.py | Python | bsd-3-clause | 73,348 | 0.001541 | # mock.py
# Test tools for mocking and patching.
# Copyright (C) 2007-2012 Michael Foord & the mock team
# E-mail: fuzzyman AT voidspace DOT org DOT uk
# mock 0.8.0
# http://www.voidspace.org.uk/python/mock/
# Released subject to the BSD License
# Please see http://www.voidspace.org.uk/python/license.shtml
# Scripts maintained at http://www.voidspace.org.uk/python/index.shtml
# Comments, suggestions and bug reports welcome.
__all__ = (
'Mock',
'MagicMock',
'mocksignature',
'patch',
'sentinel',
'DEFAULT',
'ANY',
'call',
'create_autospec',
    'FILTER_DIR',
'NonCallableMock',
'NonCallableMagicMock',
)
__version__ = '0.8.0'
import pprint
import sys
try:
import inspect
except ImportError:
# for alternative platforms that
# may not have inspect
inspect = None
try:
from functools import wraps
except ImportError:
# Python 2.4 compatibility
def wraps(original):
def inner(f):
f.__name__ = original.__name__
f.__doc__ = original.__doc__
f.__module__ = original.__module__
return f
return inner
try:
unicode
except NameError:
# Python 3
basestring = unicode = str
try:
long
except NameError:
# Python 3
long = int
try:
BaseException
except NameError:
# Python 2.4 compatibility
BaseException = Exception
try:
next
except NameError:
def next(obj):
return obj.next()
BaseExceptions = (BaseException,)
if 'java' in sys.platform:
# jython
import java
BaseExceptions = (BaseException, java.lang.Throwable)
try:
_isidentifier = str.isidentifier
except AttributeError:
# Python 2.X
import keyword
import re
regex = re.compile(r'^[a-z_][a-z0-9_]*$', re.I)
def _isidentifier(string):
if string in keyword.kwlist:
return False
return regex.match(string)
inPy3k = sys.version_info[0] == 3
# Needed to work around Python 3 bug where use of "super" interferes with
# defining __class__ as a descriptor
_super = super
self = 'im_self'
builtin = '__builtin__'
if inPy3k:
self = '__self__'
builtin = 'builtins'
FILTER_DIR = True
def _is_instance_mock(obj):
    # isinstance() is unreliable here: mocks override __class__, so test the
    # real type against the common NonCallableMock base instead.
    klass = type(obj)
    return issubclass(klass, NonCallableMock)
def _is_exception(obj):
    """True for exception instances and for exception classes."""
    if isinstance(obj, BaseExceptions):
        return True
    return isinstance(obj, ClassTypes) and issubclass(obj, BaseExceptions)
class _slotted(object):
    # Throwaway class whose only purpose is to expose a slot descriptor
    # object so its type can be captured (see DescriptorTypes below).
    __slots__ = ['a']
DescriptorTypes = (
type(_slotted.a),
property,
)
# getsignature and mocksignature heavily "inspired" by
# the decorator module: http://pypi.python.org/pypi/decorator/
# by Michele Simionato
def _getsignature(func, skipfirst):
    """Return (argspec_string, func) for *func*, minus self/first arg.

    The returned string (without surrounding parens) is later interpolated
    into generated lambda/def source, hence the ``_mock_`` name guard.
    """
    if inspect is None:
        raise ImportError('inspect module not available')

    if inspect.isclass(func):
        func = func.__init__
        # will have a self arg
        skipfirst = True
    elif not (inspect.ismethod(func) or inspect.isfunction(func)):
        # Callable instance: inspect its __call__ instead.
        func = func.__call__

    regargs, varargs, varkwargs, defaults = inspect.getargspec(func)

    # instance methods need to lose the self argument
    if getattr(func, self, None) is not None:
        regargs = regargs[1:]

    _msg = ("_mock_ is a reserved argument name, can't mock signatures using "
            "_mock_")
    assert '_mock_' not in regargs, _msg
    if varargs is not None:
        assert '_mock_' not in varargs, _msg
    if varkwargs is not None:
        assert '_mock_' not in varkwargs, _msg

    if skipfirst:
        regargs = regargs[1:]

    # formatvalue suppresses default values so the string is usable as a
    # parameter list for generated source.
    signature = inspect.formatargspec(regargs, varargs, varkwargs, defaults,
                                      formatvalue=lambda value: "")
    return signature[1:-1], func
def _getsignature2(func, skipfirst, instance=False):
    """Like _getsignature, but returns None instead of raising when the
    callable cannot be introspected (e.g. C functions)."""
    if inspect is None:
        raise ImportError('inspect module not available')

    if isinstance(func, ClassTypes) and not instance:
        try:
            func = func.__init__
        except AttributeError:
            return
        skipfirst = True
    elif not isinstance(func, FunctionTypes):
        # for classes where instance is True we end up here too
        try:
            func = func.__call__
        except AttributeError:
            return

    try:
        regargs, varargs, varkwargs, defaults = inspect.getargspec(func)
    except TypeError:
        # C function / method, possibly inherited object().__init__
        return

    # instance methods and classmethods need to lose the self argument
    if getattr(func, self, None) is not None:
        regargs = regargs[1:]
    if skipfirst:
        # this condition and the above one are never both True - why?
        regargs = regargs[1:]

    signature = inspect.formatargspec(regargs, varargs, varkwargs, defaults,
                                      formatvalue=lambda value: "")
    return signature[1:-1], func
def _check_signature(func, mock, skipfirst, instance=False):
    """Attach a signature-checking lambda to *mock*'s type.

    The generated ``_mock_check_sig`` raises TypeError when the mock is
    called with arguments incompatible with *func*'s signature.
    """
    if not _callable(func):
        return

    result = _getsignature2(func, skipfirst, instance)
    if result is None:
        # Uninspectable callable: skip signature checking entirely.
        return
    signature, func = result

    # can't use self because "self" is common as an argument name
    # unfortunately even not in the first place
    src = "lambda _mock_self, %s: None" % signature
    checksig = eval(src, {})
    _copy_func_details(func, checksig)
    type(mock)._mock_check_sig = checksig
def _copy_func_details(func, funcopy):
    """Copy name/doc/module/defaults metadata from *func* onto *funcopy*."""
    funcopy.__name__ = func.__name__
    funcopy.__doc__ = func.__doc__
    #funcopy.__dict__.update(func.__dict__)
    funcopy.__module__ = func.__module__
    if not inPy3k:
        # Python 2 spells the attribute func_defaults and has no
        # keyword-only defaults.
        funcopy.func_defaults = func.func_defaults
        return
    funcopy.__defaults__ = func.__defaults__
    funcopy.__kwdefaults__ = func.__kwdefaults__
def _callable(obj):
    """Return True if *obj* is a class or exposes a __call__ attribute."""
    if isinstance(obj, ClassTypes):
        return True
    return getattr(obj, '__call__', None) is not None
def _is_list(obj):
# checks for list or tuples
# XXXX badly named!
return type(obj) in (list, tuple)
def _instance_callable(obj):
    """Given an object, return True if the object is callable.
    For classes, return True if instances would be callable."""
    if not isinstance(obj, ClassTypes):
        # already an instance
        return getattr(obj, '__call__', None) is not None

    klass = obj
    # uses __bases__ instead of __mro__ so that we work with old style classes
    if klass.__dict__.get('__call__') is not None:
        return True

    # Walk the inheritance tree manually for the same old-style-class reason.
    for base in klass.__bases__:
        if _instance_callable(base):
            return True
    return False
def _set_signature(mock, original, instance=False):
    # creates a function with signature (*args, **kwargs) that delegates to a
    # mock. It still does signature checking by calling a lambda with the same
    # signature as the original. This is effectively mocksignature2.
    if not _callable(original):
        return

    skipfirst = isinstance(original, ClassTypes)
    result = _getsignature2(original, skipfirst, instance)
    if result is None:
        # was a C function (e.g. object().__init__ ) that can't be mocked
        return
    signature, func = result
    # The lambda exists purely to raise TypeError on bad arguments.
    src = "lambda %s: None" % signature
    context = {'_mock_': mock}
    checksig = eval(src, context)
    _copy_func_details(func, checksig)
    name = original.__name__
    if not _isidentifier(name):
        # Fall back to a safe name when the original's isn't a valid
        # identifier (it is interpolated into generated source below).
        name = 'funcopy'
    context = {'checksig': checksig, 'mock': mock}
    src = """def %s(*args, **kwargs):
    checksig(*args, **kwargs)
    return mock(*args, **kwargs)""" % name
    exec (src, context)
    funcopy = context[name]
    _setup_func(funcopy, mock)
    return funcopy
def mocksignature(func, mock=None, skipfirst=False):
"""
mocksignature(func, mock=None, skipfirst=False)
Create a new function with the same signature as `func` that delegates
to `mock`. If `skipfirst` is True the first argument is skipped, useful
for methods where `self` needs to be omitted from the new function.
If you don' |
poojavade/Genomics_Docker | Dockerfiles/gedlab-khmer-filter-abund/pymodules/python2.7/lib/python/nexus/__init__.py | Python | apache-2.0 | 56 | 0 | fro | m nexus.go_rest_client import GlobusOnlineRestClien | t
|
ppanczyk/ansible | lib/ansible/modules/source_control/git.py | Python | gpl-3.0 | 44,717 | 0.002236 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'core'}
DOCUMENTATION = '''
---
module: git
author:
- "Ansible Core Team"
- "Michael DeHaan"
version_added: "0.0.1"
short_description: Deploy software (or files) from git checkouts
description:
    - Manage I(git) checkouts of repositories to deploy files or software.
options:
repo:
required: true
aliases: [ name ]
description:
- git, SSH, or HTTP(S) protocol address of the git repository.
dest:
required: true
description:
- The path of where the repository should be checked out. This
parameter is required, unless C(clone) is set to C(no).
version:
required: false
default: "HEAD"
description:
- What version of the repository to check out. This can be the
the literal string C(HEAD), a branch name, a tag name.
It can also be a I(SHA-1) hash, in which case C(refspec) needs
to be specified if the given revision is not already available.
accept_hostkey:
required: false
default: "no"
choices: [ "yes", "no" ]
version_added: "1.5"
description:
- if C(yes), ensure that "-o StrictHostKeyChecking=no" is
present as an ssh options.
ssh_opts:
required: false
default: None
version_added: "1.5"
description:
- Creates a wrapper script and exports the path as GIT_SSH
which git then automatically uses to override ssh arguments.
An example value could be "-o StrictHostKeyChecking=no"
key_file:
required: false
default: None
version_added: "1.5"
description:
- Specify an optional private key file to use for the checkout.
reference:
required: false
default: null
version_added: "1.4"
description:
- Reference repository (see "git clone --reference ...")
remote:
required: false
default: "origin"
description:
- Name of the remote.
refspec:
required: false
default: null
version_added: "1.9"
description:
- Add an additional refspec to be fetched.
If version is set to a I(SHA-1) not reachable from any branch
or tag, this option may be necessary to specify the ref containing
the I(SHA-1).
Uses the same syntax as the 'git fetch' command.
An example value could be "refs/meta/config".
force:
required: false
default: "no"
choices: [ "yes", "no" ]
version_added: "0.7"
description:
- If C(yes), any modified files in the working
repository will be discarded. Prior to 0.7, this was always
'yes' and could not be disabled. Prior to 1.9, the default was
`yes`
depth:
required: false
default: null
version_added: "1.2"
description:
- Create a shallow clone with a history truncated to the specified
number or revisions. The minimum possible value is C(1), otherwise
ignored. Needs I(git>=1.9.1) to work correctly.
clone:
required: false
default: "yes"
choices: [ "yes", "no" ]
version_added: "1.9"
description:
- If C(no), do not clone the repository if it does not exist locally
update:
required: false
default: "yes"
choices: [ "yes", "no" ]
version_added: "1.2"
description:
- If C(no), do not retrieve new revisions from the origin repository
executable:
required: false
default: null
version_added: "1.4"
description:
- Path to git executable to use. If not supplied,
the normal mechanism for resolving binary paths will be used.
bare:
required: false
default: "no"
choices: [ "yes", "no" ]
version_added: "1.4"
description:
- if C(yes), repository will be created as a bare repo, otherwise
it will be a standard repo with a workspace.
umask:
required: false
default: null
version_added: "2.2"
description:
- The umask to set before doing any checkouts, or any other
repository maintenance.
recursive:
required: false
default: "yes"
choices: [ "yes", "no" ]
version_added: "1.6"
description:
- if C(no), repository will be cloned without the --recursive
option, skipping sub-modules.
track_submodules:
required: false
default: "no"
choices: ["yes", "no"]
version_added: "1.8"
description:
- if C(yes), submodules will track the latest commit on their
master branch (or other branch specified in .gitmodules). If
C(no), submodules will be kept at the revision specified by the
main project. This is equivalent to specifying the --remote flag
to git submodule update.
verify_commit:
required: false
default: "no"
choices: ["yes", "no"]
version_added: "2.0"
description:
- if C(yes), when cloning or checking out a C(version) verify the
signature of a GPG signed commit. This requires C(git) version>=2.1.0
to be installed. The commit MUST be signed and the public key MUST
be present in the GPG keyring.
archive:
required: false
version_added: "2.4"
description:
- Specify archive file path with extension. If specified, creates an
archive file of the specified format containing the tree structure
for the source tree.
Allowed archive formats ["zip", "tar.gz", "tar", "tgz"]
requirements:
- git>=1.7.1 (the command line tool)
notes:
- "If the task seems to be hanging, first verify remote host is in C(known_hosts).
SSH will prompt user to authorize the first contact with a remote host. To avoid this prompt,
one solution is to use the option accept_hostkey. Another solution is to
add the remote host public key in C(/etc/ssh/ssh_known_hosts) before calling
the git module, with the following command: ssh-keyscan -H remote_host.com >> /etc/ssh/ssh_known_hosts."
'''
EXAMPLES = '''
# Example git checkout from Ansible Playbooks
- git:
repo: 'https://foosball.example.org/path/to/repo.git'
dest: /srv/checkout
version: release-0.22
# Example read-write git checkout from github
- git:
repo: ssh://git@github.com/mylogin/hello.git
dest: /home/mylogin/hello
# Example just ensuring the repo checkout exists
- git:
repo: 'https://foosball.example.org/path/to/repo.git'
dest: /srv/checkout
update: no
# Example just get information about the repository whether or not it has
# already been cloned locally.
- git:
repo: 'https://foosball.example.org/path/to/repo.git'
dest: /srv/checkout
clone: no
update: no
# Example checkout a github repo and use refspec to fetch all pull requests
- git:
repo: https://github.com/ansible/ansible-examples.git
dest: /src/ansible-examples
refspec: '+refs/pull/*:refs/heads/*'
# Example Create git archive from repo
- git:
repo: https://github.com/ansible/ansible-examples.git
dest: /src/ansible-examples
archive: /tmp/ansible-examples.zip
'''
RETURN = '''
after:
description: last commit revision of the repository retrieved during the update
returned: success
type: string
sample: 4c |
BurtBiel/azure-cli | src/command_modules/azure-cli-network/azure/cli/command_modules/network/zone_file/exceptions.py | Python | mit | 1,506 | 0.015936 | #---------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
#---------------------------------------------------------------------------------------------
#The MIT License (MIT)
#Copyright (c) 2016 Blockstack
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is
#furnished to do so, subject to the following conditions:
#The above copyright notice and this permission notice shall be included in all
#copies or substantial portions of the Software.
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
#AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
#LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
#OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
#SOFTWARE.
#pylint: skip-file
class InvalidLineException(Exception):
    """Raised when a zone-file line cannot be parsed."""
    pass
dallascard/guac | core/experiment/rerun.py | Python | apache-2.0 | 1,879 | 0.007451 | import experiment
from ..util import dirs
from ..util import file_handling as fh
from optparse import OptionParser
import sys
def main():
usage = "%prog project logfile "
parser = OptionParser(usage=usage)
parser.add_option('-n', dest='new_name', default=None,
help='New name for experiment: default= old name + _rerun')
#parser.add_option('--boolarg', action="store_true", dest="boolarg", default=False,
# help='Keyword argument: default=%default')
(opt | ions, args) = parser.parse_args()
project = args[0]
log_filename = args[1]
new_name = options.new_name
log = fh.read_json(log_filename)
if new_name is None:
new_name = log['name'] + '_rerun'
log['name'] = new_name
float_vars = ['best_alpha', 'alpha_exp_base', 'max_alpha_exp', 'min_alpha_exp', 'orig_T', 'tau']
for v in float_vars:
| if v in log:
if log[v] is not None:
log[v] = float(log[v])
else:
log[v] = None
#if log['reuse'] == 'False':
# log['reuse'] = False
#else:
# log['reuse'] = True
# convert list stirng to list
#list_vars = ['feature_list', 'additional_label_files', 'additional_label_weights']
#for v in list_vars:
# if v in log:
# print v
# print log[v]
# quoted_strings = [p.strip() for p in log[v][1:-1].split(',')]
# print quoted_strings
# log[v] = [p[1:-1] for p in quoted_strings]
# print log[v]
# print '\n'
#print log
#if 'additional_label_weights' in log:
# log['additional_label_weights'] = [float(w) for w in log['additional_label_weights']]
dirs.make_base_dir(project)
print log
result = experiment.run_experiment(**log)
print result
if __name__ == '__main__':
main() |
yafraorg/yapki | server-admin/model/user.py | Python | apache-2.0 | 261 | 0 | from typing import List
from pydantic import BaseModel
class UserBase(BaseModel):
    """Fields shared by every user schema."""
    email: str
    name: str
class UserCreate(UserBase):
    """Creation schema -- the only user schema that carries a password."""
    password: str
class User(UserBase):
    """User as read from the database and returned by the API."""
    id: int
    is_active: bool

    class Config:
        # Allow constructing this model directly from ORM objects
        # (attribute access instead of dict lookup).
        orm_mode = True
|
mindbender-studio/config | polly/plugins/maya/publish/extract_model.py | Python | mit | 2,116 | 0 | import pyblish.api
class ExtractMindbenderModel(pyblish.api.InstancePlugin):
    """Produce a stripped down Maya file from instance

    This plug-in takes into account only nodes relevant to models
    and discards anything else, especially deformers along with
    their intermediate nodes.
    """

    label = "Model"
    order = pyblish.api.ExtractorOrder
    hosts = ["maya"]
    families = ["mindbender.model"]

    def process(self, instance):
        import os
        import polly
        from maya import cmds
        from avalon import maya

        # Stage the export under the workspace, namespaced by time and name.
        dirname = polly.format_staging_dir(
            root=instance.context.data["workspaceDir"],
            time=instance.context.data["time"],
            name=instance.data["name"])

        try:
            os.makedirs(dirname)
        except OSError:
            # Staging dir already exists; reuse it.
            pass

        filename = "{name}.ma".format(**instance.data)
        path = os.path.join(dirname, filename)

        # Perform extraction
        self.log.info("Performing extraction..")
        with maya.maintained_selection(), maya.without_extension():
            self.log.info("Extracting %s" % str(list(instance)))
            cmds.select(instance, noExpand=True)
            cmds.file(path,
                      force=True,
                      typ="mayaAscii",
                      exportSelected=True,
                      preserveReferences=False,

                      # Shader assignment is the responsibility of
                      # riggers, for animators, and lookdev, for rendering.
                      shader=False,

                      # Construction history inherited from collection
                      # This enables a selective export of nodes relevant
                      # to this particular plug-in.
                      constructionHistory=False)

        # Store reference for integration
        if "files" not in instance.data:
            instance.data["files"] = list()

        instance.data["files"].append(filename)
        instance.data["stagingDir"] = dirname

        self.log.info("Extracted {instance} to {path}".format(**locals()))
|
songmonit/CTTMSONLINE | openerp/addons/base/ir/ir_translation.py | Python | agpl-3.0 | 23,442 | 0.0061 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import logging
from openerp import tools
import openerp.modules
from openerp.osv import fields, osv
from openerp.tools.translate import _
_logger = logging.getLogger(__name__)
TRANSLATION_TYPE = [
('field', 'Field'),
('model', 'Object'),
('rml', 'RML (deprecated - use Report)'), # Pending deprecation - to be replaced by report!
('report', 'Report/Template'),
('selection', 'Selection'),
('view', 'View'),
('wizard_button', 'Wizard Button'),
('wizard_field', 'Wizard Field'),
('wizard_view', 'Wizard View'),
('xsl', 'XSL'),
('help', 'Help'),
('code', 'Code'),
('constraint', 'Constraint'),
('sql_constraint', 'SQL Constraint')
]
class ir_translation_import_cursor(object):
    """Temporary cursor for optimizing mass insert into ir.translation

    Open it (attached to a sql cursor), feed it with translation data and
    finish() it in order to insert multiple translations in a batch.
    """
    # Name of the per-session temporary table used to stage the rows.
    _table_name = 'tmp_ir_translation_import'

    def __init__(self, cr, uid, parent, context):
        """ Initializer

        Store some values, and also create a temporary SQL table to accept
        the data.
        @param parent an instance of ir.translation ORM model
        """
        self._cr = cr
        self._uid = uid
        self._context = context
        # When True, existing translations are overwritten by finish().
        self._overwrite = context.get('overwrite', False)
        self._debug = False
        self._parent_table = parent._table

        # Note that Postgres will NOT inherit the constraints or indexes
        # of ir_translation, so this copy will be much faster.
        # The %s placeholders interpolate internal (trusted) table names,
        # never user data.
        cr.execute('''CREATE TEMP TABLE %s(
            imd_model VARCHAR(64),
            imd_name VARCHAR(128)
            ) INHERITS (%s) ''' % (self._table_name, self._parent_table))

    def push(self, trans_dict):
        """Feed a translation, as a dictionary, into the cursor
        """
        params = dict(trans_dict, state="translated" if trans_dict['value'] else "to_translate")
        if params['type'] == 'view':
            # ugly hack for QWeb views - pending refactoring of translations in master
            if params['imd_model'] == 'website':
                params['imd_model'] = "ir.ui.view"
            # non-QWeb views do not need a matching res_id -> force to 0 to avoid dropping them
            elif params['res_id'] is None:
                params['res_id'] = 0
        # %s injects the (trusted) temp table name; the doubled %%(...)s
        # placeholders survive that interpolation as %(...)s named
        # parameters, so the values themselves are passed safely via
        # psycopg2 parameter binding.
        self._cr.execute("""INSERT INTO %s (name, lang, res_id, src, type, imd_model, module, imd_name, value, state, comments)
                            VALUES (%%(name)s, %%(lang)s, %%(res_id)s, %%(src)s, %%(type)s, %%(imd_model)s, %%(module)s,
                                    %%(imd_name)s, %%(value)s, %%(state)s, %%(comments)s)""" % self._table_name,
                         params)

    def finish(self):
        """ Transfer the data from the temp table to ir.translation
        """
        cr = self._cr
        if self._debug:
            cr.execute("SELECT count(*) FROM %s" % self._table_name)
            c = cr.fetchone()[0]
            _logger.debug("ir.translation.cursor: We have %d entries to process", c)

        # Step 1: resolve ir.model.data references to res_ids
        cr.execute("""UPDATE %s AS ti
            SET res_id = imd.res_id
            FROM ir_model_data AS imd
            WHERE ti.res_id IS NULL
                AND ti.module IS NOT NULL AND ti.imd_name IS NOT NULL
                AND ti.module = imd.module AND ti.imd_name = imd.name
                AND ti.imd_model = imd.model; """ % self._table_name)

        if self._debug:
            cr.execute("SELECT module, imd_name, imd_model FROM %s " \
                "WHERE res_id IS NULL AND module IS NOT NULL" % self._table_name)
            for row in cr.fetchall():
                _logger.info("ir.translation.cursor: missing res_id for %s.%s <%s> ", *row)

        # Records w/o res_id must _not_ be inserted into our db, because they are
        # referencing non-existent data.
        cr.execute("DELETE FROM %s WHERE res_id IS NULL AND module IS NOT NULL" % \
            self._table_name)

        # Matching condition between a staged row (ti) and an existing
        # translation (irt); 'view' rows match with or without res_id.
        find_expr = "irt.lang = ti.lang AND irt.type = ti.type " \
                    " AND irt.name = ti.name AND irt.src = ti.src " \
                    " AND irt.module = ti.module " \
                    " AND ( " \
                    "    (ti.type NOT IN ('model', 'view')) " \
                    " OR (ti.type = 'model' AND ti.res_id = irt.res_id) " \
                    " OR (ti.type = 'view' AND irt.res_id IS NULL) " \
                    " OR (ti.type = 'view' AND irt.res_id IS NOT NULL AND ti.res_id = irt.res_id)) "

        # Step 2: update existing (matching) translations
        if self._overwrite:
            cr.execute("""UPDATE ONLY %s AS irt
                SET value = ti.value,
                    state = 'translated'
                FROM %s AS ti
                WHERE %s AND ti.value IS NOT NULL AND ti.value != ''
                """ % (self._parent_table, self._table_name, find_expr))

        # Step 3: insert new translations
        cr.execute("""INSERT INTO %s(name, lang, res_id, src, type, value, module, state, comments)
            SELECT name, lang, res_id, src, type, value, module, state, comments
            FROM %s AS ti
            WHERE NOT EXISTS(SELECT 1 FROM ONLY %s AS irt WHERE %s);
            """ % (self._parent_table, self._table_name, self._parent_table, find_expr))

        if self._debug:
            cr.execute('SELECT COUNT(*) FROM ONLY %s' % self._parent_table)
            c1 = cr.fetchone()[0]
            cr.execute('SELECT COUNT(*) FROM ONLY %s AS irt, %s AS ti WHERE %s' % \
                (self._parent_table, self._table_name, find_expr))
            c = cr.fetchone()[0]
            _logger.debug("ir.translation.cursor: %d entries now in ir.translation, %d common entries with tmp", c1, c)

        # Step 4: cleanup
        cr.execute("DROP TABLE %s" % self._table_name)
        return True
class ir_translation(osv.osv):
_name = "ir.translation"
_log_access = False
def _get_language(self, cr, uid, context):
lang_model = self.pool.get('res.lang')
lang_ids = lang_model.search(cr, uid, [('translatable', '=', True)], context=context)
lang_data = lang_model.read( | cr, uid, lang_ids, ['code', 'name'], context=context)
return [(d['code'], d['name']) for d in lang_data]
def _get_src(self, cr, uid, ids, name, arg, context=None):
''' Get source name for the translation. If object type is model then
return the value store in db. Otherwise return value store in src field
'''
if context is None:
context = {}
res = dict.fromkeys(ids, False)
| for record in self.browse(cr, uid, ids, context=context):
if record.type != 'model':
res[record.id] = record.src
else:
model_name, field = record.name.split(',')
model = self.pool.get(model_name)
if model is not None:
# Pass context without lang, need to read real stored field, not translation
context_no_lang = dict(context, lang=None)
|
OpenNingia/l5r-character-manager | l5rcm/api/insight.py | Python | gpl-3.0 | 3,390 | 0.003835 | # -*- coding: utf-8 -*-
# Copyright (C) 2014 Daniele Simonetti
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
class InsightCalculator(object):
    """Computes a character's Insight value and derives the Insight Rank.

    Three alternative calculation modes are supported (house rules differ
    on whether rank-1 / school skills count); select one by assigning
    `mode` one of the MODE* constants before calling `calculate` or
    `calculate_rank`.
    """

    # Calculation mode identifiers.
    # BUG FIX: MODE1/MODE2 previously had trailing commas, which made them
    # accidental one-element tuples instead of the plainly intended ints.
    MODE1 = 1
    MODE2 = 2
    MODE3 = 3

    def __init__(self, model):
        self.mode = InsightCalculator.MODE1
        self.model = model

    def calculate(self):
        """Return the Insight value using the selected mode (0 if unknown)."""
        if self.mode == InsightCalculator.MODE1:
            return self.insight_calculation_1()
        if self.mode == InsightCalculator.MODE2:
            return self.insight_calculation_2()
        if self.mode == InsightCalculator.MODE3:
            return self.insight_calculation_3()
        return 0

    def calculate_rank(self):
        """Map the Insight value onto the Insight Rank scale (1..N)."""
        value = self.calculate()
        if value > 349:
            # Past 349 a new rank is gained every 25 points.
            return int((value - 349)/25 + 10)
        if value > 324: return 9
        if value > 299: return 8
        if value > 274: return 7
        if value > 249: return 6
        if value > 224: return 5
        if value > 199: return 4
        if value > 174: return 3
        if value > 149: return 2
        return 1

    def get_current_rank(self):
        '''Returns current character Insight Rank'''
        # BUG FIX: previously referenced the undefined global `model`
        # instead of self.model, raising NameError when called.
        last_rank_advancement = self.model.get_last_rank_advancement()
        return last_rank_advancement.rank

    def insight_calculation_1(self):
        '''Default insight calculation method = Rings*10+Skills+SpecialPerks'''
        model = self.model
        n = 0
        for r, v in model.iter_rings():
            n += v * 10
        for s in model.get_skills():
            n += model.get_skill_rank(s)
        n += 3*model.cnt_rule('ma_insight_plus_3')
        n += 7*model.cnt_rule('ma_insight_plus_7')
        return n

    def insight_calculation_2(self):
        '''Another insight calculation method. Similar to 1, but ignoring
        rank 1 skills
        '''
        # BUG FIX: was declared as `def insight_calculation_2(model)` while
        # the body still read `self.model`, raising NameError when called.
        model = self.model
        n = 0
        for r, v in model.iter_rings():
            n += v * 10
        for s in model.get_skills():
            sk = model.get_skill_rank(s)
            if sk > 1:
                n += sk
        n += 3*model.cnt_rule('ma_insight_plus_3')
        n += 7*model.cnt_rule('ma_insight_plus_7')
        return n

    def insight_calculation_3(self):
        '''Another insight calculation method. Similar to 2, but
        school skill are counted even if rank 1
        '''
        # BUG FIX: same `self` vs `model` parameter confusion as above.
        model = self.model
        school_skills = model.get_school_skills()
        n = 0
        for r, v in model.iter_rings():
            n += v * 10
        for s in model.get_skills():
            sk = model.get_skill_rank(s)
            if sk > 1 or s in school_skills:
                n += sk
        n += 3*model.cnt_rule('ma_insight_plus_3')
        n += 7*model.cnt_rule('ma_insight_plus_7')
        return n
def get_package_data():
    """Return the package_data mapping for xastropy.igm.

    Installs the testing data files. Unable to get package_data to deal
    with a directory hierarchy of files, so just explicitly list.
    """
    # (reconstructed: stray separator artifacts had corrupted the literal)
    return {'xastropy.igm': ['fN/*.p']}
|
alxgu/ansible | lib/ansible/modules/system/iptables.py | Python | gpl-3.0 | 26,931 | 0.00156 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2015, Linus Unnebäck <linus@folkdatorn.se>
# Copyright: (c) 2017, Sébastien DA ROCHA <sebastien@da-rocha.net>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
# Standard Ansible module metadata: interface stability ('preview') and
# maintenance ownership (the 'core' team).
ANSIBLE_METADATA = {'metadata_version': '1.1',
                    'status': ['preview'],
                    'supported_by': 'core'}
DOCUMENTATION = r'''
---
module: iptables
short_description: Modify iptables rules
version_added: "2.0"
author:
- Linus Unnebäck (@LinusU) <linus@folkdatorn.se>
- Sébastien DA ROCHA (@sebastiendarocha)
description:
- C(iptables) is used to set up, maintain, and inspect the tables of IP packet
filter rules in the Linux kernel.
- This module does not handle the saving and/or loading of rules, but rather
only manipulates the current rules that are present in memory. This is the
same as the behaviour of the C(iptables) and C(ip6tables) command which
this module uses internally.
notes:
- This module just deals with individual rules.If you need advanced
chaining of rules the recommended way is to template the iptables restore
file.
options:
table:
description:
- This option specifies the packet matching table which the command should operate on.
- If the kernel is configured with automatic module loading, an attempt will be made
to load the appropriate module for that table if it is not already there.
type: str
choices: [ filter, nat, mangle, raw, security ]
default: filter
state:
description:
- Whether the rule should be absent or present.
type: str
choices: [ absent, present ]
default: present
action:
description:
- Whether the rule should be appended at the bottom or inserted at the top.
- If the rule already exists the chain will not be modified.
type: str
choices: [ append, insert ]
default: append
version_added: "2.2"
rule_num:
description:
- Insert the rule as the given rule number.
- This works only with C(action=insert).
type: str
version_added: "2.5"
ip_version:
description:
- Which version of the IP protocol this rule should apply to.
type: str
choices: [ ipv4, ipv6 ]
default: ipv4
chain:
description:
- Specify the iptables chain to modify.
- This could be a user-defined chain or one of the standard iptables chains, like
C(INPUT), C(FORWARD), C(OUTPUT), C(PREROUTING), C(POSTROUTING), C(SECMARK) or C(CONNSECMARK).
type: str
protocol:
description:
- The protocol of the rule or of the packet to check.
- The specified protocol can be one of C(tcp), C(udp), C(udplite), C(icmp), C(esp),
C(ah), C(sctp) or the special keyword C(all), or it can be a numeric value,
representing one of these protocols or a different one.
- A protocol name from I(/etc/protocols) is also allowed.
- A C(!) argument before the protocol inverts the test.
- The number zero is equivalent to all.
- C(all) will match with all protocols and is taken as default when this option is omitted.
type: str
source:
description:
- Source specification.
- Address can be either a network name, a hostname, a network IP address
(with /mask), or a plain IP address.
- Hostnames will be resolved once only, before the rule is submitted to
the kernel. Please note that specifying any name to be resolved with
a remote query such as DNS is a really bad idea.
- The mask can be either a network mask or a plain number, specifying
the number of 1's at the left side of the network mask. Thus, a mask
of 24 is equivalent to 255.255.255.0. A C(!) argument before the
address specification inverts the sense of the address.
type: str
destination:
description:
- Destination specification.
- Address can be either a network name, a hostname, a network IP address
(with /mask), or a plain IP address.
- Hostnames will be resolved once only, before the rule is submitted to
the kernel. Please note that specifying any name to be resolved with
a remote query such as DNS is a really bad idea.
- The mask can be either a network mask or a plain number, specifying
the number of 1's at the left side of the network mask. Thus, a mask
of 24 is equivalent to 255.255.255.0. A C(!) argument before the
address specification inverts the sense of the address.
type: str
tcp_flags:
description:
- TCP flags specification.
- C(tcp_flags) expects a dict with the two keys C(flags) and C(flags_set).
type: dict
default: {}
version_added: "2.4"
suboptions:
flags:
description:
- List of flags you want to examine.
type: list
flags_set:
description:
- Flags to be set.
type: list
match:
description:
- Specifies a match to use, that is, an extension module that tests for
a specific property.
- The set of matches make up the condition under which a target is invoked.
    - Matches are evaluated first to last if specified as an array and work in short-circuit
fashion, i.e. if one extension yields false, evaluation will stop.
type: list
default: []
jump:
description:
    - This specifies the target of the rule; i.e., what to do if the packet matches it.
- The target can be a user-defined chain (other than the one
this rule is in), one of the special builtin targets which decide the
fate of the packet immediately, or an extension (see EXTENSIONS
below).
- If this option is omitted in a rule (and the goto parameter
is not used), then matching the rule will have no effect on the
packet's fate, but the counters on the rule will be incremented.
type: str
gateway:
description:
- This specifies the IP address of host to send the cloned packets.
- This option is only valid when C(jump) is set to C(TEE).
type: str
version_added: "2.8"
log_prefix:
description:
- Specifies a log text for the rule. Only make sense with a LOG jump.
type: str
version_added: "2.5"
log_level:
description:
- Logging level according to the syslogd-defined priorities.
- The value can be strings or numbers from 1-8.
- This parameter is only applicable if C(jump) is set to C(LOG).
type: str
version_added: "2.8"
choices: [ '0', '1', '2', '3', '4', '5', '6', '7', 'emerg', 'alert', 'crit', 'error', 'warning', 'notice', 'info', 'debug' ]
goto:
description:
- This specifies that the processing should continue in a user specified chain.
- Unlike the jump argument return will not continue processing in
this chain but instead in the chain that called us via jump.
type: str
in_interface:
description:
- Name of an interface via which a packet was received (only for packets
entering the C(INPUT), C(FORWARD) and C(PREROUTING) chains).
- When the C(!) argument is used before the interface name, the sense is inverted.
- If the interface name ends in a C(+), then any interface which begins with
this name will match.
- If this option is omitted, any interface name will match.
type: str
out_interface:
description:
- Name of an interface via which a packet is going to be sent (for
packets entering the C(FORWARD), C(OUTPUT) and C(POSTROUTING) chains).
- When the C(!) argument is used before the interface name, the sense is inverted.
- If the interface name ends in a C(+), then any interface which begins
with this name will match.
- If this option is omitted, any interface name will match.
type: str
fragment:
description:
- This means that the rule only refers to second and further fragments
of fragmented packets.
- S |
cherbib/fofix | src/Svg.py | Python | gpl-2.0 | 7,545 | 0.016965 | #####################################################################
# -*- coding: iso-8859-1 -*- #
# #
# Frets on Fire X #
# Copyright (C) 2006 Sami Kyöstilä #
# 2008 myfingershurt #
# 2008 evilynux <evilynux@gmail.com> #
# #
# This program is free software; you can redistribute it and/or #
# modify it under the terms of the GNU General Public License #
# as published by the Free Software Foundation; either version 2 #
# of the License, or (at your option) any later version. #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
# You should have received a copy of the GNU General Public License #
# along with this program; if not, write to the Free Software #
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, #
# MA 02110-1301, USA. #
#####################################################################
from OpenGL.GL import *
import numpy as np
from numpy import array, float32
import math
import Log
from Texture import Texture
from PIL import Image
#stump: the last few stubs of DummyAmanith.py are inlined here since this
# is the only place in the whole program that uses it now that we've pruned
# the dead SVG code.
class SvgContext(object):
    """Lightweight OpenGL drawing context that maps a pixel viewport onto
    the game's virtual 640x480 coordinate system (legacy of the SVG
    renderer this module replaced)."""

    def __init__(self, geometry):
        # geometry = (x, y, width, height) of the viewport, in pixels.
        self.geometry = geometry
        self.transform = SvgTransform()
        self.setGeometry(geometry)
        self.setProjection(geometry)
        # Leave the modelview matrix active for subsequent drawing code.
        glMatrixMode(GL_MODELVIEW)

    def setGeometry(self, geometry = None):
        glViewport(geometry[0], geometry[1], geometry[2], geometry[3])
        # Scale so callers can draw in virtual 640x480 units regardless of
        # the actual window size.
        glScalef(geometry[2] / 640.0, geometry[3] / 480.0, 1.0)

    def setProjection(self, geometry = None):
        geometry = geometry or self.geometry
        glMatrixMode(GL_PROJECTION)
        glLoadIdentity()
        # Orthographic projection covering the viewport; generous z range.
        glOrtho(geometry[0], geometry[0] + geometry[2], geometry[1], geometry[1] + geometry[3], -100, 100)
        glMatrixMode(GL_MODELVIEW)
        self.geometry = geometry

    def clear(self, r = 0, g = 0, b = 0, a = 0):
        # Clear color, stencil and depth buffers with the given color.
        glDepthMask(1)
        glEnable(GL_COLOR_MATERIAL)
        glClearColor(r, g, b, a)
        glClear(GL_COLOR_BUFFER_BIT | GL_STENCIL_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
class SvgTransform(object):
    """Records a sequence of 2D transform operations as deferred OpenGL
    calls; applyGL() replays them against the current GL matrix."""

    def __init__(self, baseTransform = None):
        self.reset()
        if baseTransform is not None:
            # Copy the ops so mutating this transform leaves the base intact.
            self.ops = list(baseTransform.ops)

    def transform(self, transform):
        """Append all operations of another SvgTransform to this one."""
        self.ops.extend(transform.ops)

    def reset(self):
        """Discard all recorded operations."""
        self.ops = []

    def translate(self, dx, dy):
        # The old code did this with a matrix addition, not a multiplication.
        # We get the same effect by doing the translations before anything
        # else, hence the insert at the front of the list.
        def _op():
            glTranslatef(dx, dy, 0)
        self.ops.insert(0, _op)

    def rotate(self, angle):
        """Record a rotation of `angle` radians around the z axis."""
        self.ops.append(lambda: glRotatef(math.degrees(angle), 0.0, 0.0, 1.0))

    def scale(self, sx, sy):
        """Record a non-uniform scale in x and y."""
        self.ops.append(lambda: glScalef(sx, sy, 1.0))

    def applyGL(self):
        """Replay every recorded operation against the current GL state."""
        for operation in self.ops:
            operation()
class ImgDrawing(object):
    """A 2D image rendered as a single textured quad, with position, scale,
    rotation, color tint, texture sub-rectangle (for atlases) and
    horizontal alignment.

    Two lines of this class had been garbled by stray separator tokens;
    they are reconstructed here with no other code change.
    """

    def __init__(self, context, ImgData):
        self.ImgData = None
        self.texture = None
        self.context = context
        self.cache = None
        self.filename = ImgData

        # Detect the type of data passed in
        if type(ImgData) == file:
            # Raw file object: bytes are kept, but no texture is created
            # for this branch, so the validity check below will raise.
            self.ImgData = ImgData.read()
        elif type(ImgData) == str:
            self.texture = Texture(ImgData)
        elif isinstance(ImgData, Image.Image): #stump: let a PIL image be passed in
            self.texture = Texture()
            self.texture.loadImage(ImgData)

        # Make sure we have a valid texture
        if not self.texture:
            if type(ImgData) == str:
                e = "Unable to load texture for %s." % ImgData
            else:
                e = "Unable to load texture for SVG file."
            Log.error(e)
            raise RuntimeError(e)

        self.pixelSize = self.texture.pixelSize
        self.position = [0.0,0.0]
        self.scale = [1.0,1.0]
        self.angle = 0
        self.color = (1.0,1.0,1.0)
        # (left, right, bottom, top) texture coordinates of the sub-rect.
        self.rect = (0,1,0,1)
        # Horizontal alignment shift: -.5 == centered (see setAlignment).
        self.shift = -.5

        self.createArrays()

    def createArrays(self):
        """Allocate the 4x2 vertex and texture-coordinate arrays."""
        self.vtxArray = np.zeros((4,2), dtype=float32)
        self.texArray = np.zeros((4,2), dtype=float32)
        self.createVtx()
        self.createTex()

    def createVtx(self):
        """Fill the vertex array with a unit quad."""
        vA = self.vtxArray #short hand variable casting
        #topLeft, topRight, bottomRight, bottomLeft
        vA[0,0] = 0.0; vA[0,1] = 1.0
        vA[1,0] = 1.0; vA[1,1] = 1.0
        vA[2,0] = 1.0; vA[2,1] = 0.0
        vA[3,0] = 0.0; vA[3,1] = 0.0

    def createTex(self):
        """Fill the texcoord array from the current sub-rectangle."""
        tA = self.texArray
        rect = self.rect
        #topLeft, topRight, bottomRight, bottomLeft
        tA[0,0] = rect[0]; tA[0,1] = rect[3]
        tA[1,0] = rect[1]; tA[1,1] = rect[3]
        tA[2,0] = rect[1]; tA[2,1] = rect[2]
        tA[3,0] = rect[0]; tA[3,1] = rect[2]

    def convertToTexture(self, width, height):
        # Legacy entry point: every drawing is already a texture, so this
        # only validates that one exists.
        if self.texture:
            return

        e = "SVG drawing does not have a valid texture image."
        Log.error(e)
        raise RuntimeError(e)

    def width1(self):
        """Width of the source image in pixels (0 if unknown)."""
        width = self.pixelSize[0]
        if width:
            return width
        else:
            return 0

    #myfingershurt:
    def height1(self):
        """Height of the source image in pixels (0 if unknown)."""
        height = self.pixelSize[1]
        if height:
            return height
        else:
            return 0

    def widthf(self, pixelw):
        """Scale factor that maps the image width onto `pixelw` pixels."""
        width = self.pixelSize[0]
        if width:
            wfactor = pixelw/width
            return wfactor
        else:
            return 0

    def setPosition(self, x, y):
        self.position = [x,y]

    def setScale(self, width, height):
        self.scale = [width, height]

    def setAngle(self, angle):
        # Rotation in degrees around the quad center (see draw()).
        self.angle = angle

    def setRect(self, rect):
        # Only rebuild texture coordinates when the sub-rect changed.
        if not rect == self.rect:
            self.rect = rect
            self.createTex()

    def setAlignment(self, alignment):
        if alignment == 0: #left
            self.shift = 0
        elif alignment == 1:#center
            self.shift = -.5
        elif alignment == 2:#right
            self.shift = -1.0

    def setColor(self, color):
        # Accept RGB and extend to RGBA with full opacity.
        if len(color) == 3:
            color = (color[0], color[1], color[2], 1.0)
        self.color = color

    def draw(self):
        """Render the quad with the current transform state.

        Saves/restores the texture, projection and modelview matrices so
        drawing does not disturb surrounding GL state.
        """
        glMatrixMode(GL_TEXTURE)
        glPushMatrix()
        glMatrixMode(GL_PROJECTION)
        glPushMatrix()
        self.context.setProjection()
        glMatrixMode(GL_MODELVIEW)
        glPushMatrix()
        glLoadIdentity()
        glTranslate(self.position[0], self.position[1], 0.0)
        glScalef(self.scale[0], self.scale[1], 1.0)
        glRotatef(self.angle, 0, 0, 1)
        glScalef(self.pixelSize[0], self.pixelSize[1], 1)
        # shift aligns horizontally; -.5 centers vertically.
        glTranslatef(self.shift, -.5, 0)
        glColor4f(*self.color)

        glEnable(GL_TEXTURE_2D)
        self.texture.bind()
        glEnableClientState(GL_TEXTURE_COORD_ARRAY)
        glEnableClientState(GL_VERTEX_ARRAY)
        glVertexPointerf(self.vtxArray)
        glTexCoordPointerf(self.texArray)
        glDrawArrays(GL_QUADS, 0, self.vtxArray.shape[0])
        glDisableClientState(GL_VERTEX_ARRAY)
        glDisableClientState(GL_TEXTURE_COORD_ARRAY)
        glDisable(GL_TEXTURE_2D)

        glPopMatrix()
        glMatrixMode(GL_TEXTURE)
        glPopMatrix()
        glMatrixMode(GL_PROJECTION)
        glPopMatrix()
        glMatrixMode(GL_MODELVIEW)
rvosa/peyotl | peyotl/test/test_oti.py | Python | bsd-2-clause | 2,232 | 0.006272 | #! /usr/bin/env python
from peyotl.api import OTI
from peyotl.test.support.pathmap import get_test_ot_service_domains
from peyotl.utility import get_logger
import unittest
import os
_LOG = get_logger(__name__)
@unittest.skipIf('RUN_WEB_SERVICE_TESTS' not in os.environ,
                 'RUN_WEB_SERVICE_TESTS is not in your environment, so tests that use ' \
                 'Open Tree of Life web services are disabled.')
class TestOTI(unittest.TestCase):
    """Integration tests for the peyotl OTI wrapper.

    These tests hit live Open Tree of Life web services, hence the
    environment-variable guard above.  Two lines of this class had been
    garbled by stray separator tokens; they are reconstructed here.
    """

    def setUp(self):
        d = get_test_ot_service_domains()
        self.oti = OTI(d)

    def testFindAllStudies(self):
        x = self.oti.find_all_studies(verbose=True)
        self.assertTrue(len(x) > 0)
        self.assertTrue('ot:studyId' in x[0])

    def testStudyTerms(self):
        t_set = self.oti.study_search_term_set
        self.assertTrue(bool(t_set))
        r = self.oti.find_studies({'ot:studyPublication': '10.1073/pnas.0709121104'})
        self.assertTrue(len(r) > 0)

    def testNodeTerms(self):
        # Node search is only available against the v1 API.
        if self.oti.use_v1:
            t_set = self.oti.node_search_term_set
            self.assertTrue('ot:ottId' in t_set)
            nl = self.oti.find_nodes(ottId=990437)
            self.assertTrue(len(nl) > 0)
            f = nl[0]
            self.assertTrue('matched_trees' in f)
            t = f['matched_trees']
            self.assertTrue(len(t) > 0)
            tr = t[0]
            self.assertTrue('matched_nodes' in tr)
            n = tr['matched_nodes']
            self.assertTrue(len(n) > 0)

    def testBadNodeTerms(self):
        if self.oti.use_v1:
            qd = {'bogus key': 'Aponogeoton ulvaceus 1 2'}
            self.assertRaises(ValueError, self.oti.find_nodes, qd)

    def testTreeTerms(self):
        qd = {'ot:ottTaxonName': 'Aponogeton ulvaceus'}
        if self.oti.use_v1:
            nl = self.oti.find_trees(qd)
            self.assertTrue(len(nl) > 0)
            f = nl[0]
            self.assertTrue('matched_trees' in f)
            t = f['matched_trees']
            self.assertTrue(len(t) > 0)

    def testBadTreeTerms(self):
        qd = {'bogus key': 'Aponogeoton ulvaceus 1 2'}
        self.assertRaises(ValueError, self.oti.find_trees, qd)
if __name__ == "__main__":
unittest.main(verbosity=5)
|
# Quick regression tests for this module: (executable, arguments, env) tuples.
tests = [("testExecs/testMatCalc.exe", "", {})]
# Longer-running tests (none for this module).
longTests = []

if __name__ == '__main__':
    import sys
    from rdkit import TestRunner
    # NOTE(review): RunScript appears to execute the entries declared in
    # this file's `tests` list -- confirm against rdkit.TestRunner.
    failed, tests = TestRunner.RunScript('test_list.py', 0, 1)
    sys.exit(len(failed))
| |
jdavisp3/twisted-intro | twisted-server-1/poetry-proxy.py | Python | mit | 3,478 | 0.000288 | # This is the Twisted Poetry Proxy, version 1.0
import optparse
from twisted.internet.defer import Deferred, maybeDeferred
from twisted.internet.protocol import ClientFactory, ServerFactory, Protocol
def parse_args():
    """Parse command-line options and the single server-address argument.

    Returns (options, (host, port)); exits with a usage error when the
    address is missing, duplicated, or has a non-numeric port.  The final
    return statement had been garbled by a stray separator token and is
    reconstructed here.
    """
    usage = """usage: %prog [options] [hostname]:port
This is the Poetry Proxy, version 1.0.
  python poetry-proxy.py [hostname]:port
If you are in the base directory of the twisted-intro package,
you could run it like this:
  python twisted-server-1/poetry-proxy.py 10000
to proxy the poem for the server running on port 10000.
"""

    parser = optparse.OptionParser(usage)

    help = "The port to listen on. Default to a random available port."
    parser.add_option('--port', type='int', help=help)

    help = "The interface to listen on. Default is localhost."
    parser.add_option('--iface', help=help, default='localhost')

    options, args = parser.parse_args()

    if len(args) != 1:
        parser.error('Provide exactly one server address.')

    def parse_address(addr):
        # A bare port means "localhost".
        if ':' not in addr:
            host = '127.0.0.1'
            port = addr
        else:
            host, port = addr.split(':', 1)

        if not port.isdigit():
            parser.error('Ports must be integers.')

        return host, int(port)

    return options, parse_address(args[0])
class PoetryProxyProtocol(Protocol):
    """Server-side protocol: on each incoming connection, fetch the poem
    from the service, write it to the client, then close the connection."""

    def connectionMade(self):
        # get_poem may return a cached poem (plain value) or a Deferred
        # for an in-flight download; maybeDeferred normalizes both cases.
        d = maybeDeferred(self.factory.service.get_poem)
        d.addCallback(self.transport.write)
        # addBoth: close the connection on success and failure alike.
        d.addBoth(lambda r: self.transport.loseConnection())
class PoetryProxyFactory(ServerFactory):
    """Factory for proxy-side connections; every protocol instance shares
    the single ProxyService (and thus its poem cache).

    (The __init__ line had been garbled by a stray separator token.)
    """

    protocol = PoetryProxyProtocol

    def __init__(self, service):
        self.service = service
class PoetryClientProtocol(Protocol):
    """Client-side protocol: the poetry 'protocol' is simply the whole
    byte stream, terminated by the server closing the connection."""

    poem = b''  # class-level default; `+=` below rebinds it per-instance

    def dataReceived(self, data):
        self.poem += data

    def connectionLost(self, reason):
        # Connection close marks end-of-poem; hand the result upward.
        self.poemReceived(self.poem)

    def poemReceived(self, poem):
        self.factory.poem_finished(poem)
class PoetryClientFactory(ClientFactory):
    """Client factory exposing a one-shot `deferred` that fires with the
    downloaded poem, or errbacks with the connection failure."""

    protocol = PoetryClientProtocol

    def __init__(self):
        self.deferred = Deferred()

    def poem_finished(self, poem):
        # Fire the deferred exactly once; clearing the attribute first
        # guards against a second callback/errback.
        if self.deferred is not None:
            d, self.deferred = self.deferred, None
            d.callback(poem)

    def clientConnectionFailed(self, connector, reason):
        if self.deferred is not None:
            d, self.deferred = self.deferred, None
            d.errback(reason)
class ProxyService(object):
    """Poem source for the proxy: downloads the poem from the real server
    on first request, then serves it from cache."""

    poem = None # the cached poem

    def __init__(self, host, port):
        self.host = host
        self.port = port

    def get_poem(self):
        # Return either the cached poem (plain value) or a Deferred for
        # the download; the caller wraps this in maybeDeferred.
        # NOTE(review): concurrent requests arriving before the first
        # fetch completes each start their own download -- only the poem
        # is cached, not the in-flight Deferred.
        if self.poem is not None:
            print('Using cached poem.')
            return self.poem

        print('Fetching poem from server.')
        factory = PoetryClientFactory()
        factory.deferred.addCallback(self.set_poem)
        from twisted.internet import reactor
        reactor.connectTCP(self.host, self.port, factory)
        return factory.deferred

    def set_poem(self, poem):
        # Cache and pass the poem through the callback chain unchanged.
        self.poem = poem
        return poem
def main():
    """Parse arguments, start the proxy listener, and run the reactor."""
    options, server_addr = parse_args()
    service = ProxyService(*server_addr)
    factory = PoetryProxyFactory(service)
    from twisted.internet import reactor
    # options.port may be None; listenTCP(0, ...) lets the OS pick a port.
    port = reactor.listenTCP(options.port or 0, factory,
                             interface=options.iface)
    print('Proxying %s on %s.' % (server_addr, port.getHost()))
    reactor.run()
main()
|
xpansa/pmis | analytic_schedule/__init__.py | Python | agpl-3.0 | 984 | 0.001016 | # -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2014 Eficent (<http://www.eficent.com/>)
# <contact@eficent.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import model |
r3c/cottle | docs/conf.py | Python | mit | 2,382 | 0.00126 | # -*- coding: utf-8 -*-
"""
Resources:
https://pythonhosted.org/an_example_pypi_project/sphinx.html
https://github.com/djungelorm/sphinx-csharp
https://sublime-and-sphinx-guide.readthedocs.io/en/latest/code_blocks.html
https://docutils.sourceforge.net/docs/user/rst/quickref.html
"""
import sys
import os
extensions = [
'sphinx_csharp.csharp',
'sphinx_rtd_theme'
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Cottle Documentation'
copyright = u'2019, Rémi Caput'
# The short X.Y version.
version = '2.0'
# The full version, including alpha/beta/rc tags.
release = '2.0.0'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'default'
# See: https://sphinx-rtd-theme.readthedocs.io/en/stable/configuring.html
html_theme = 'sphinx_rtd_theme'
html_theme_path = ['_themes']
html_theme_options = {
'style_external_links': True
}
html_logo = '../res/icon.png'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Output file base name for HTML help builder.
htmlhelp_basename = 'CottleDocumentation'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'Cottle.tex', u'Cottle Documentation',
u'Cottle', 'manual'),
]
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'cottle', u'Cottle Documentation',
[u'Cottle'], 1)
]
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'Cottle', u'Cottle Documentation',
u'Cottle', 'Cottle', 'Cottle Documentation.',
'Miscellaneous'),
]
|
algorythmic/bash-completion | test/t/unit/test_unit_tilde.py | Python | gpl-2.0 | 1,246 | 0 | import pytest
from conftest import assert_bash_exec
@pytest.mark.bashcomp(cmd=None, ignore_env=r"^\+COMPREPLY=")
class TestUnitTilde:
    """Unit tests for the bash-completion `_tilde` helper.

    Two lines of this class had been garbled by stray separator tokens;
    they are reconstructed here with no other code change.
    """

    def test_1(self, bash):
        assert_bash_exec(bash, "_tilde >/dev/null")

    def test_2(self, bash):
        """Test environment non-pollution, detected at teardown."""
        assert_bash_exec(
            bash, 'foo() { local aa="~"; _tilde "$aa"; }; foo; unset foo'
        )

    def test_3(self, bash):
        """Test for https://bugs.debian.org/766163"""
        assert_bash_exec(bash, "_tilde ~-o")

    def _test_part_full(self, bash, part, full):
        # Run _tilde on "~part" and check the first completion is "~full".
        res = (
            assert_bash_exec(
                bash,
                '_tilde "~%s"; echo "${COMPREPLY[@]}"' % part,
                want_output=True,
            )
            .strip()
            .split()
        )
        assert res
        assert res[0] == "~%s" % full

    def test_4(self, bash, part_full_user):
        """~full should complete to ~full unmodified."""
        _, full = part_full_user
        self._test_part_full(bash, full, full)

    def test_5(self, bash, part_full_user):
        """~part should complete to ~full."""
        part, full = part_full_user
        self._test_part_full(bash, part, full)
rgbconsulting/rgb-addons | hw_serial/drivers/serial_driver.py | Python | agpl-3.0 | 2,035 | 0.001474 | # -*- coding: utf-8 -*-
# See README file for full copyright and licensing details.
import os
import logging
_logger = logging.getLogger(__name__)
try:
import serial
except ImportError:
_logger.error('Odoo module hw_serial depends on the pyserial python module')
serial = None
class SerialDriver(object):
def serial_do_operation(self, operation, params):
result = {}
ser = self.serial_open(params)
if ser:
try:
if operation == 'read':
data = ser.readline()
result['data'] = data
else:
data = params.get('data', '')
encoding = params.get('encoding', None)
if encoding:
data = data.decode(encoding)
ser.write(data)
result['status'] = 'ok'
except serial.SerialException, message:
result['status'] = 'error'
result['message'] = str(message)
finally:
ser.close()
else:
result['status'] = 'error'
result['message'] = 'The serial port was not found!'
return r | esult
def serial_open(self, params):
try:
p | ort = params.get('port', '/dev/ttyUSB0')
if not os.path.exists(port):
_logger.error('Serial port not found')
return None
return serial.Serial(port,
baudrate=int(params.get('baudrate', 9600)),
bytesize=int(params.get('bytesize', 8)),
stopbits=int(params.get('stopbits', 1)),
parity=params.get('parity', 'E'),
timeout=float(params.get('timeout', 20)) / 1000,
writeTimeout=float(params.get('timeout', 20)) / 1000)
except Exception as e:
_logger.error(str(e))
return None
|
MarkAWard/optunity | notebooks/sklearn-svr.py | Python | bsd-3-clause | 6,245 | 0.003363 | {
"metadata": {},
"nbformat": 3,
"nbformat_minor": 0,
"worksheets": [
{
"cells": [
{
"cell_type": "code",
"collapsed": false,
"input": [
"# Example of tuning an SVR model in scikit-learn with Optunity\n",
"# This example requires sklearn\n",
"import math\n",
"import itertools\n",
"import optunity\n",
"import optunity.metrics\n",
"import sklearn.svm\n",
"import matplotlib.pylab as plt\n",
"import time\n",
"\n",
"# CREATE THE TRAINING SET\n",
"from sklearn.datasets import load_diabetes\n",
"diabetes = load_diabetes()\n",
"n = diabetes.data.shape[0]\n",
"\n",
"data = diabetes.data\n",
"targets = diabetes.target\n",
"\n",
"# we will use nested 3-fold cross-validation\n",
"# in the outer cross-validation pmseedure\n",
"# we make the decorator explicitly so we can reuse the same folds\n",
"# in both tuned and untuned approaches\n",
"outer_cv = optunity.cross_validated(x=data, y=targets, num_folds=3)\n",
"\n",
"# compute area under mse curve of default parameters\n",
"def compute_mse_standard(x_train, y_train, x_test, y_test):\n",
" model = sklearn.svm.SVR().fit(x_train, y_train)\n",
" predictions = model.predict(x_test)\n",
" return optunity.metrics.mse(y_test, predictions)\n",
"\n",
"# decorate with cross-validation\n",
"compute_mse_standard = outer_cv(compute_mse_standard)\n",
"mse_standard = compute_mse_standard()\n",
"print('Nested cv mean squared error of non-tuned model: ' + str(mse_standard))\n",
"\n",
"# compute area under mse curve with tuned parameters\n",
"# we use 2x5 fold cross-validation while tuning\n",
"def compute_mse_tuned(x_train, y_train, x_test, y_test):\n",
"\n",
" # define objective function\n",
" @optunity.cross_validated(x=x_train, y=y_train, num_iter=2, num_folds=5)\n",
" def tune_cv(x_train, y_train, x_test, y_test, C, gamma):\n",
" model = sklearn.svm.SVR(C=C, gamma=gamma).fit(x_train, y_train)\n",
" predictions = model.predict(x_test)\n",
" return optunity.metrics.mse(y_test, predictions)\n",
"\n",
" # optimize parameters\n",
" optimal_pars, _, _ = optunity.minimize(tune_cv, 200, C=[0, 10], gamma=[0, 10], pmap=optunity.pmap)\n",
" # if you are running this in IPython, optunity.pmap will not work\n",
" # more info at: https://github.com/claesenm/optunity/issues/8\n",
" # comment out the above line and replace by the one below:\n",
" # optimal_pars, _, _ = optunity.minimize(inner_cv, 150, C=[0, 10], gamma=[0, 0.1])\n",
"\n",
" tuned_model = sklearn.svm.SVR(**optimal_pars).fit(x_train, y_train)\n",
" predictions = tuned_model.predict(x_test)\n",
" return optunity.metrics.mse(y_test, predictions)\n",
"\n",
"# decorate with cross-validation\n",
"compute_mse_tuned = outer_cv(compute_mse_tuned)\n",
"\n",
"t = time.time()\n",
"mse_tuned = compute_mse_tuned()\n",
"diff = time.time() - t\n",
"print('Nested cv mean squared error of tuned model: ' + str(mse_tuned))\n",
"print('Tuning time (approx): ' + str(diff/3) + ' seconds') # we tuned 3 times\n",
"\n",
"\n",
"# generate folds, so we know the indices of test instances at any point\n",
"folds = optunity.generate_folds(data.shape[0], num_folds=3)\n",
"\n",
"# create another cross-validation decorator\n",
"# we will compare nested cross-validation results for both tuned and untuned models\n",
"# to do this, we will perform nested cross-validation but aggregate results using the identity function\n",
"# this will yield the predictions\n",
"outer_cv = optunity.cross_validated(x=data, y=targets, num_folds=3, folds=[folds],\n",
" aggregator=optunity.cross_validation.identity)\n",
"\n",
"def svr_untuned_predictions(x_train, y_train, x_test, y_test):\n",
" model = sklearn.svm.SVR().fit(x_train, y_train)\n",
" return model.predict(x_test).tolist()\n",
"\n",
"\n",
"def svr_tuned_predictions(x_train, y_train, x_test, y_test):\n",
" @optunity.cross_validated(x=x_train, y=y_train, num_iter=2, num_folds=5)\n",
" def tune_cv(x_train, y_train, x_test, y_test, C, gamma):\n",
" model = sklearn.svm.SVR(C=C, gamma=gamma).fit(x_train, y_train)\n",
" predictions = model.predict(x_test)\n",
" return optunity.metrics.mse(y_test, predictions)\n",
"\n",
" optimal_pars, _, _ = optunity.minimize(tune_cv, 200, C=[0, 20],\n",
" gamma=[0, 10], pmap=optunity.pmap)\n",
" tu | ned_model = sklearn.svm.SVR(**optimal_pars).fit(x_train, y_train)\n",
" return tuned_model.predict(x_test).tolist()\n",
"\n",
"svr_untuned_predictions = outer_cv(svr_untuned_predictions)\n",
"svr_tu | ned_predictions = outer_cv(svr_tuned_predictions)\n",
"\n",
"untuned_preds = svr_untuned_predictions()\n",
"tuned_preds = svr_tuned_predictions()\n",
"\n",
"true_targets = [targets[i] for i in itertools.chain(*folds)]\n",
"untuned = list(itertools.chain(*untuned_preds))\n",
"tuned = list(itertools.chain(*tuned_preds))\n",
"\n",
"#for y, u, t in zip(true_targets, untuned, tuned):\n",
"# print(str(y) + ' :: ' + str(u) + ' :: ' + str(t))\n",
"\n",
"print('plotting results')\n",
"\n",
"plt.plot(range(len(true_targets)), sorted(map(lambda x, y: math.fabs(x-y), tuned, true_targets)), 'b')\n",
"plt.plot(range(len(true_targets)), sorted(map(lambda x, y: math.fabs(x-y), untuned, true_targets)), 'r')\n",
"plt.xlabel('k largest error')\n",
"plt.ylabel('absolute error')\n",
"plt.legend(['tuned model', 'default hyperparameters'])\n",
"plt.show()"
],
"language": "python",
"metadata": {},
"outputs": []
}
],
"metadata": {}
}
]
} |
ZuckermanLab/NMpathAnalysis | nmpath/nmm.py | Python | gpl-3.0 | 21,745 | 0.000276 | '''
Created on Jul 28, 2016
'''
import numpy as np
from auxfunctions import map_to_integers, normalize_markov_matrix
from auxfunctions import pops_from_nm_tmatrix, pops_from_tmatrix
from auxfunctions import pseudo_nm_tmatrix, weighted_choice
from mfpt import direct_mfpts, non_markov_mfpts, fpt_distribution
from mfpt import direct_fpts, markov_mfpts
from ensembles import DiscreteEnsemble, DiscretePathEnsemble
from msmtools.estimation import transition_matrix
class NonMarkovModel(DiscreteEnsemble):
'''Non Markovian Model
----------------------
Fits a regular Markov model from a list of 1D trajectories of integers
For example:
trajectories = [ [1 , 2, 0, ...], [2, 2, 1, ...], [3, 1, 2, ...], ...]
If only one sequence is given in trajectories, the format is the same:
trajectories = [ [1 , 2, 0, ...] ]
Parameters
----------
lag_time (integer, default: 1)
Lag time of the model.
sliding_window (boolean)
Use a sliding window of length lag_time to compute the count matrix
stateA, stateB (python lists)
Define the initial and final macrostates in form of python lists
for example: stateA=[0,2,5], stateB = [1]
Attributes
----------
n_states : int
nm_cmatrix: array, with shape (2 n_states, 2 n_states)
Stores the number of transitions between states, the i,j element cij
stores the number of transitions observed from i to j.
populations: array, shape (n_states,)
Equilibrium population, the steady state solution of of the
transition matrix
'''
def __init__(self, trajectories, stateA, stateB,
lag_time=1, clean_traj=False, sliding_window=True,
reversible=True, markovian=False,
coarse_macrostates=False, **kwargs):
if coarse_macrostates:
for traj in trajectories:
for i, _ in enumerate(traj):
if traj[i] in stateA:
traj[i] = stateA[0]
elif traj[i] in stateB:
traj[i] = stateB[0]
stateA = [stateA[0]]
stateB = [stateB[0]]
self._lag_time = lag_time
self.trajectories = trajectories
self.stateA = stateA
self.stateB = stateB
self.sliding_window = sliding_window
self.reversible = reversible
self.markovian = markovian
self.n_variables = 1 # by construction
self.discrete = True # by construction
if (self._lag_time < 1) or (int(self._lag_time) != int(self._lag_time)):
raise ValueError('The lag time should be an integer \
greater than 1')
if clean_traj:
self.n_states = max([max(traj) for traj in self.trajectories]) + 1
else:
self._map_trajectories_to_integers()
# print("The trajectories are being mapped to a (new) "
# "list of integers. See/print the attribute seq_map "
# "for details")
self.fit()
def _map_trajectories_to_integers(self):
# Clean the sequences
seq_map = {}
new_trajs = []
for seq in self.trajectories:
newseq, m_dict = map_to_integers(seq, seq_map)
new_trajs.append(newseq)
self.stateA = [seq_map[i] for i in self.stateA]
self.stateB = [seq_map[i] for i in self.stateB]
self.n_states = len(seq_map)
self.trajectories = new_trajs
self.seq_map = seq_map
def fit(self):
'''Fits the the non-Markovian model from a list of sequences
'''
# Non-Markovian count matrix
nm_cmatrix = np.zeros((2 * self.n_states, 2 * self.n_states))
# Markovian count matrix
markov_cmatrix = np.zeros((self.n_states, self.n_states))
lag = self._lag_time
if not self.sliding_window:
step = lag
else:
step = 1
for traj in self.trajectories:
for start in range(lag, 2 * lag, step):
prev_color = None
for i in range(start, len(traj), lag):
# Color determination
if traj[i] in self.stateA:
color = "A"
elif traj[i] in self.stateB:
color = "B"
else:
color = prev_color
# Count matrix for the given lag time
if prev_color == "A" and color == "B":
nm_cmatrix[2 * traj[i - lag], 2 * traj[i] + 1] += 1.0
elif prev_color == "B" and color == "A":
nm_cmatrix[2 * traj[i - lag] + 1, 2 * traj[i]] += 1.0
elif prev_color == "A" and color == "A":
nm_cmatrix[2 * traj[i - lag], 2 * traj[i]] += 1.0
elif prev_color == "B" and color == "B":
nm_cmatrix[2 * traj[i - lag] + 1, 2 * traj[i] + 1] += 1.0
prev_color = color
markov_cmatrix[traj[i - lag], traj[i]] += 1.0
nm_tmatrix = normalize_markov_matrix(nm_cmatrix)
markov_tmatrix = normalize_markov_matrix(markov_cmatrix, reversible=True)
#markov_tmatrix = transition_matrix(markov_cmatrix, self.reversible)
self.nm_tmatrix = nm_tmatrix
self.nm_cmatrix = nm_cmatrix
self.markov_cmatrix = markov_cmatrix
self.markov_tmatrix = markov_tmatrix
@classmethod
def from_nm_tmatrix(cls, transition_matrix, stateA, stateB,
sim_length=None, initial_state=0):
'''
Generates a discrete ensemble from the transition matrix
'''
if sim_length is None:
raise Exception('The simulation length must be given')
if not isinstance(transition_matrix, np.ndarray):
transition_matrix = np.array(transition_matrix)
n_states = len(transition_matrix)
assert(n_states == len(transition_matrix[0]))
current_state = initial_state
discrete_traj = [initial_state // 2]
for i in range(sim_length):
next_state = weighted_choice([k for k in range(n_states)],
transition_matrix[current_state, | :])
discrete_traj.append(next_state // 2)
current_state = next_state
return cls([np.array(discrete_traj)], stateA, stateB, clean_traj=True)
@property
def | lag_time(self):
return self._lag_time
@lag_time.setter
def lag_time(self, lag_time):
self._lag_time = lag_time
self.fit()
def mfpts(self):
if self.markovian:
return markov_mfpts(self.markov_tmatrix, self.stateA, self.stateB,
lag_time=self._lag_time)
else:
return non_markov_mfpts(self.nm_tmatrix, self.stateA, self.stateB,
lag_time=self._lag_time)
def empirical_mfpts(self):
return direct_mfpts(self.trajectories, self.stateA, self.stateB,
lag_time=self._lag_time)
def empirical_fpts(self):
return direct_fpts(self.trajectories, self.stateA, self.stateB,
lag_time=self._lag_time)
def populations(self):
# In this case the results are going to be the same
if self.markovian:
return pops_from_tmatrix(self.markov_tmatrix)
else:
return pops_from_nm_tmatrix(self.nm_tmatrix)
@property
def popA(self):
pop_A = 0
pops = self.populations()
for i, p in enumerate(pops):
if i in self.stateA:
pop_A += p
return pop_A
@property
def popB(self):
pop_B = 0
pops = self.populations()
for i, p in enumerate(pops):
if i in self.stateB:
pop_B += p
return pop_B
def tmatrixAB(self):
if self.markovian:
return self.markov_tmatrix
matrixAB = []
for i in range(0, 2 * self.n_states, 2):
|
Teamxrtc/webrtc-streaming-node | third_party/webrtc/src/chromium/src/third_party/webdriver/pylib/selenium/webdriver/remote/remote_connection.py | Python | mit | 17,659 | 0.000963 | # Copyright 2008-2009 WebDriver committers
# Copyright 2008-2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import socket
import string
import urllib2
import urlparse
from command import Command
import utils
LOGGER = logging.getLogger(__name__)
class Request(urllib2.Request):
"""
Extends the urllib2.Request to support all HTTP request types.
"""
def __init__(self, url, data=None, method=None):
"""
Initialise a new HTTP request.
:Args:
- url - String for the URL to send the request to.
- data - Data to send with the request.
"""
if method is None:
method = data is not None and 'POST' or 'GET'
elif method != 'POST' and method != 'PUT':
data = None
self._method = method
urllib2.Request.__init__(self, url, data=data)
def get_method(self):
"""
Returns the HTTP method used by this request.
"""
return self._method
class Response(object):
"""
Represents an HTTP response.
"""
def __init__(self, fp, code, headers, url):
"""
Initialise a new Response.
:Args:
- fp - The response body file object.
- code - The HTTP status code returned by the server.
- headers - A dictionary of headers returned by the server.
- url - URL of the retrieved resource represented by this Response.
"""
self.fp = fp
self.read = fp.read
self.code = code
self.headers = headers
self.url = url
def close(self):
"""
Close the response body file object.
"""
self.read = None
self.fp = None
def info(self):
"""
Returns the response headers.
"""
return self.headers
def geturl(self):
"""
Returns the URL for the resource returned in this response.
"""
return self.url
class HttpErrorHandler(urllib2.HTTPDefaultErrorHandler):
"""
A custom HTTP error handler.
Used to return Response objects instead of raising an HTTPError exception.
"""
def http_error_default(self, req, fp, code, msg, headers):
"""
Default HTTP error handler.
:Args:
- req - The original Request object.
- fp - The response body file object.
- code - The HTTP status code returned by the server.
- msg - The HTTP status message returned by the server.
- headers - The response headers.
:Returns:
A new Response object.
"""
return Response(fp, code, headers, req.get_full_url())
class RemoteConnection(object):
"""
A connection with the Remote WebDriver server.
Communicates with the server using the WebDriver wire protocol:
http://code.google.com/p/selenium/wiki/JsonWireProtocol
"""
def __init__(self, remote_server_addr):
# Attempt to resolve the hostname and get an IP address.
parsed_url = urlparse.urlparse(remote_server_addr)
if parsed_url.hostname:
try:
netloc = socket.gethostbyname(parsed_url.hostname)
if parsed_url.port:
netloc += ':%d' % parsed_url.port
if parsed_url.username:
auth = parsed_url.username
if parsed_url.password:
auth += ':%s' % parsed_url.password
netloc = '%s@%s' % (auth, netloc)
remote_server_addr = urlparse.urlunparse(
(parsed_url.scheme, netloc, parsed_url.path,
parsed_url.params, parsed_url.query, parsed_url.fragment))
except socket.gaierror:
LOGGER.info('Could not get IP address for host: %s' %
parsed_url.hostname)
self._url = remote_server_addr
self._commands = {
Command.NEW_SESSION: ('POST', '/session'),
Command.QUIT: ('DELETE', '/session/$sessionId'),
Command.GET_CURRENT_WINDOW_HANDLE:
('GET', '/session/$sessionId/window_handle'),
Command.GET_WINDOW_HANDLES:
('GET', '/session/$sessionId/window_handles'),
Command.GET: ('POST', '/session/$sessionId/url'),
Command.GO_FORWARD: ('POST', '/session/$sessionId/forward'),
Command.GO_BACK: ('POST', '/session/$sessionId/back'),
Command.REFRESH: ('POST', '/session/$sessionId/refresh'),
Command.EXECUTE_SCRIPT: ('POST', '/session/$sessionId/execute'),
Command.GET_CURRENT_URL: ('GET', '/session/$sessionId/url'),
Command.GET_TITLE: ('GET', '/session/$sessionId/title'),
Command.GET_PAGE_SOURCE: ('GET', '/session/$sessionId/source'),
Command.SCREENSHOT: ('GET', '/session/$sessionId/screenshot'),
Command.SET_BROWSER_VISIBLE:
('POST', '/session/$sessionId/visible'),
Command.IS_BROWSER_VISIBLE: ('GET', '/session/$sessionId/visible'),
Command.FIND_ELEMENT: ('POST', '/session/$sessionId/element'),
Command.FIND_ELEMENTS: ('POST', '/session/$sessionId/elements'),
Command.GET_ACTIVE_ELEMENT:
('POST', '/session/$sessionId/element/active'),
Command.FIND_CHILD_ELEMENT:
('POST', '/session/$sessionId/element/$id/element'), |
Command.FIND_CHILD_ELEMENTS:
('POST', '/session/$sessionId/element/$id/elements'),
Command.CLICK_ELEMENT: ('POST', '/session/$sessionId/element/$id/click'),
Command.CLEAR_ELEMENT: ('POST', '/session/$sessionId/element/$id/clear'),
Command.SUBMIT_ELEMENT: ('POST', '/session/$ses | sionId/element/$id/submit'),
Command.GET_ELEMENT_TEXT: ('GET', '/session/$sessionId/element/$id/text'),
Command.SEND_KEYS_TO_ELEMENT:
('POST', '/session/$sessionId/element/$id/value'),
Command.SEND_KEYS_TO_ACTIVE_ELEMENT:
('POST', '/session/$sessionId/keys'),
Command.UPLOAD_FILE: ('POST', "/session/$sessionId/file"),
Command.GET_ELEMENT_VALUE:
('GET', '/session/$sessionId/element/$id/value'),
Command.GET_ELEMENT_TAG_NAME:
('GET', '/session/$sessionId/element/$id/name'),
Command.IS_ELEMENT_SELECTED:
('GET', '/session/$sessionId/element/$id/selected'),
Command.SET_ELEMENT_SELECTED:
('POST', '/session/$sessionId/element/$id/selected'),
Command.TOGGLE_ELEMENT:
('POST', '/session/$sessionId/element/$id/toggle'),
Command.IS_ELEMENT_ENABLED:
('GET', '/session/$sessionId/element/$id/enabled'),
Command.IS_ELEMENT_DISPLAYED:
('GET', '/session/$sessionId/element/$id/displayed'),
Command.HOVER_OVER_ELEMENT:
('POST', '/session/$sessionId/element/$id/hover'),
Command.GET_ELEMENT_LOCATION:
('GET', '/session/$sessionId/element/$id/location'),
Command.GET_ELEMENT_LOCATION_ONCE_SCROLLED_INTO_VIEW:
('GET', '/session/$sessionId/element/$id/location_in_view'),
Command.GET_ELEMENT_SIZE:
('GET', '/session/$sessionId/element/$id/size'),
Command.GET_ELEMENT_ATTRIBUTE:
('GET', '/session/$sessionId/element/$id/attribute/$name'),
Command.ELEMENT_EQUALS:
('GET', '/session/$sessionId/element/$id/equals/$other'),
|
ionelmc/django-uni-form | test_project/urls.py | Python | mit | 605 | 0.008264 | from django.conf.url | s.defaults import *
# Uncomment the next two lines to enable the admin:
# from django.contrib import admin
# admin.autodiscover()
urlpatterns = patterns('',
# Example:
url(r'^$', "test_app.views.basic_test", name='test_index'),
(r'^more/', include('test_app.urls')),
# Uncomment the admin/doc line below and add 'django.contrib.admindocs'
# to INSTALLED_APPS to enable admin document | ation:
# (r'^admin/doc/', include('django.contrib.admindocs.urls')),
# Uncomment the next line to enable the admin:
# (r'^admin/(.*)', admin.site.root),
)
|
mitchellrj/touchdown | touchdown/core/goals.py | Python | apache-2.0 | 3,341 | 0.000299 | # Copyright 2015 Isotoma Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# dist | ributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import division
import os
from . import dependencies, plan, map, errors
from .cache import JSONFileCache
class GoalFactory(object): |
def __init__(self):
self.goals = {}
def register(self, cls):
self.goals[cls.name] = cls
def registered(self):
return self.goals.items()
def create(self, name, workspace, ui, map=map.ParallelMap):
try:
goal_class = self.goals[name]
except KeyError:
raise errors.Error("No such goal '{}'".format(name))
return goal_class(workspace, ui, map=map)
class Goal(object):
execute_in_reverse = False
mutator = False
def __init__(self, workspace, ui, map=map.ParallelMap, cache=None):
self.ui = ui
self.cache = cache
if not self.cache:
self.cache = JSONFileCache(os.path.expanduser('~/.touchdown'))
self.workspace = workspace
self.resources = {}
self.Map = map
@classmethod
def setup_argparse(cls, parser):
pass
def get_plan_order(self):
return dependencies.DependencyMap(self.workspace, tips_first=False)
def get_plan_class(self, resource):
raise NotImplementedError(self.get_plan_class)
def get_plan(self, resource):
if resource not in self.resources:
klass = self.get_plan_class(resource)
plan = klass(self, resource)
plan.validate()
self.resources[resource] = plan
return self.resources[resource]
def get_execution_order(self):
return dependencies.DependencyMap(self.workspace, tips_first=self.execute_in_reverse)
def visit(self, message, dep_map, callable):
with self.ui.progressbar(max_value=len(dep_map)) as pb:
for status in self.Map(self.ui, dep_map, callable):
pb.update(status)
def collect_as_iterable(self, plan_name):
collected = []
def _(resource):
plan = self.get_plan(resource)
if plan.name == plan_name:
collected.append(plan)
self.visit("Building plan...", self.get_plan_order(), _)
return collected
def collect_as_dict(self, plan_name):
collected = {}
def _(resource):
plan = self.get_plan(resource)
if plan.name == plan_name:
collected[plan.resource.name] = plan
self.visit("Building plan...", self.get_plan_order(), _)
return collected
class Describe(Goal):
name = "describe"
def get_plan_class(self, resource):
return resource.meta.plans.get("describe", plan.NullPlan)
goals = GoalFactory()
register = goals.register
registered = goals.registered
create = goals.create
register(Describe)
|
bvernoux/micropython | ports/esp32/boards/manifest_release.py | Python | mit | 283 | 0 | include("manifest.py")
freeze("$(MPY_LIB_DIR)/python-ecosys/urequests", "urequests.py")
freeze("$(MPY_LIB_DIR)/micropython/upysh", "upysh.py")
freeze("$(MPY_LIB_DIR)/micropython/umqtt.simple", "umqt | t/simple.py")
freeze("$(MPY_LIB_DIR)/micropython/umqtt.robust", | "umqtt/robust.py")
|
LouisPlisso/analysis_tools | add_ASN_geoip.py | Python | gpl-3.0 | 2,116 | 0.016541 | """Retrieve As information out of GeoIP (MaxMind) binding.
Beware: database location is hardcoded!"""
import GeoIP
import re
import numpy as np
import INDEX_VALUES
#WARNING: hard coded
GAS=GeoIP.open('/home/louis/streaming/flows/AS/GeoIPASNum.dat',
GeoIP.GEOIP_STANDARD)
#ugly but more efficient: compile only once
REGEXP = re.compile('(AS([0-9]+).*)')
def extend_fields_AS_down(d):
"Extend each line of array considered as list with src IP addresses."
fields = list(d)
src = GAS.org_by_addr(d['srcAddr'])
if src != None:
fields.extend(list(REGEXP.match(src).group(2,1)))
else:
fields.extend([0, 'Not found'])
return tuple(fields)
def extend_array_AS_down(flows_array):
"Return a new array with AS information upstream."
return np.array([extend_fields_AS_down(d) for d in flows_array],
dtype=INDEX_VALUES.dtype_GVB_AS_down)
def extend_fields_AS(d):
"Extend each line of array considered as list with both IP addresses."
fields = list(d)
| src = GAS.org_by_addr(d['srcAddr'])
if src != None:
fields.extend(list(REGEXP.match(src).group(2,1)))
else:
fields.extend([0, 'Not found'])
dst = GAS.org_by_addr(d['dstAddr'])
if dst != None:
fields.extend(list(REGEXP.match(dst).group(2,1)))
else:
fields.extend([0, 'Not found'])
return tuple(fields)
de | f extend_array_AS(flows_array):
"Return a new array with AS information on both sides."
return np.array([extend_fields_AS(d) for d in flows_array],
dtype=INDEX_VALUES.dtype_GVB_AS)
def extend_array_BGP_AS(flows_array):
"Return a new array with AS information on both sides."
return np.array([extend_fields_AS(d) for d in flows_array],
dtype=INDEX_VALUES.dtype_GVB_BGP_AS)
#test_flows=np.loadtxt('test/flows_ftth_nov.head',
#dtype=INDEX_VALUES.dtype_GVB,skiprows=1).view(np.recarray)
#np.array(zip(test_flows,[[GAS.org_by_addr(src),GAS.org_by_addr(dst)]
#for src,dst in zip(test_flows.srcAddr,test_flows.dstAddr)]))
#[(f, GAS.org_by_addr(f['srcAddr']), GAS.org_by_addr(f['dstAddr']))
#for f in test_flows]
|
wanqizhu/mtg-python-engine | MTG/player.py | Python | mit | 26,315 | 0.002356 | import pdb
import traceback
import random
from copy import deepcopy
from collections import defaultdict
from MTG import mana
from MTG import zone
from MTG import play
from MTG import gamesteps
from MTG import cards
from MTG import triggers
from MTG import token
from MTG.exceptions import *
class Player():
is_player = True
is_permanent = False
is_creature = False
is_land = False
is_spell = False
def __init__(self, deck, name='player',
startingLife=20, maxHandSize=7, game=None):
self.name = name
self.game = game
self.timestamp = -1
self.life = startingLife
self.startingLife = startingLife
self.maxHandSize = maxHandSize
self.landPerTurn = 1
self.landPlayed = 0
self.passPriorityUntil = None
self.autoPayMana = False
self.autoOrderTriggers = True
self.autoDiscard = False
self.library = zone.Library(self, deck)
for card in self.library:
card.controller = self
card._owner = self
self.battlefield = zone.Battlefield(self)
self.hand = zone.Hand(self)
self.graveyard = zone.Graveyard(self)
self.exile = zone.Exile(self)
self.mana = mana.ManaPool(self)
self.lost = False
self.won = False
self.pending_triggers = []
# todo: also track "YOUR last turn" rather than just last turn
self.turn_events = defaultdict(lambda: None)
self.last_turn_events = defaultdict(lambda: None)
self.static_effects = []
# tracks which permanents cares about each player-init triggers
# trigger_listeners[condition] = list of (permanent, tstamp), where permanent cares about condition
# and tstamp is stored to check permanent expiration
self.trigger_listeners = defaultdict(lambda: [])
# todo: cost modifier tracker
def __repr__(self):
return 'player.Player(name=%r)' % self.name
def __str__(self):
return self.name
def __eq__(x, y):
return isinstance(y, x.__class__) and x.__repr__() == y.__repr__()
def __hash__(self):
return hash(self.__repr__())
@property
def is_active(self):
return self == self.game.current_player
@property
def opponent(self):
return self.game.opponent(self)
@property
def creatures(self):
return self.battlefield.filter(filter_func=lambda p: p.is_creature)
@property
def lands(self):
return self.battlefield.filter(filter_func=lambda p: p.is_land)
@property
def stack(self):
return self.game.stack if self.game else None
def get_zone(self, zone_type):
return {
zone.ZoneType.LIBRARY: self.library,
zone.ZoneType.HAND: self.hand,
zone.ZoneType.BATTLEFIELD: self.battlefield,
zone.ZoneType.GRAVEYARD: self.graveyard,
zone.ZoneType.STACK: self.game.stack,
zone.ZoneType.EXILE: self.exile,
# zone.ZoneType.COMMAND: self.command
}[zone_type]
def get_action(self):
""" asks the player to do something
this gets called whenever a player has priority
"""
answer = 'placeholder'
_play = None
while answer and _play is None:
answer = self.make_choice(
"What would you like to | do? {}{}, {}\n".format(
self.name,
'*' if self.is_active else '',
self.game.step))
if self.game.test:
print("\t" + self.name + ", " +
| str(self.game.step) + ": " + answer + "\n")
if answer == '':
break
try:
if answer == 'print':
self.game.print_game_state()
elif answer == 'hand':
print(self.hand)
elif answer == 'battlefield':
print(self.battlefield)
elif answer == 'graveyard':
print(self.graveyard)
elif answer == 'exile':
print(self.exile)
elif answer == 'stack':
print(self.game.stack)
elif answer == 'mana':
print(self.mana)
## debug
elif answer == 'addmana':
self.mana.add_str('WWWWWUUUUUBBBBBRRRRRGGGGG11111')
elif answer == 'debug':
pdb.set_trace()
pass
elif answer[:2] == '__': # for dev purposes
exec(answer[2:])
return '__continue'
elif answer[0] == 'p': # playing card from hand
try:
# 'p 3' == plays third card in hand
num = int(answer[2:])
assert num < len(self.hand)
card = self.hand[num]
except:
name = answer[2:] # 'p Island' == plays 'Island'
card = self.hand.get_card_by_name(name)
assert card
# timing & restrictions
can_play = True
if card.is_land and self.landPlayed >= self.landPerTurn:
can_play = False
if not (card.is_instant or card.has_ability('Flash')) and (
self.game.stack
or self.game.step.phase not in [
gamesteps.Phase.PRECOMBAT_MAIN,
gamesteps.Phase.POSTCOMBAT_MAIN]
or not self.is_active):
can_play = False
# choose targets
if can_play:
can_target = card.targets()
# pay mana costs
if can_play and can_target:
can_pay = False
cost = card.manacost
creatures_to_tap = []
if card.has_ability("Convoke"):
untapped_creatures = [
c for c in self.creatures if not c.status.tapped]
print("Your creatures: {}".format(untapped_creatures))
ans = self.make_choice("What creatures would you like to tap"
" to pay for %s? (Convoke) " % card)
ans = ans.split(" ")
for ind in ans:
try:
ind = int(ind)
_creature = untapped_creatures[ind]
if not _creature.status.tapped and _creature not in creatures_to_tap:
color = _creature.characteristics.color
if not color:
color = 'C'
elif len(color) > 1:
color = self.make_choice(
"What color would you like to add? {}".format(color))
assert color in mana.manachr
else:
color = color[0]
color = mana.chr_to_mana(color)
creatures_to_tap.append(_creature)
if cost[color]:
cost[color] -= 1
else:
if cost[mana.Mana.GENERIC]:
cost[mana.Mana.GENERIC] -= 1
else:
raise Va |
zycdragonball/tensorflow | tensorflow/python/kernel_tests/cwise_ops_test.py | Python | apache-2.0 | 79,648 | 0.007546 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional tests for coefficient-wise operations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes as dtypes_lib
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_math_ops
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_grad # pylint: disable=unused-import
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging
_ADD = lambda x, y: x + y
_SUB = lambda x, y: x - y
_MUL = lambda x, y: x * y
_POW = lambda x, y: x**y
_TRUEDIV = lambda x, y: x / y
_FLOORDIV = lambda x, y: x // y
_MOD = lambda x, y: x % y
_NEG = lambda x: -x
_ABS = abs
_LT = lambda x, y: x < y
_LE = lambda x, y: x <= y
_GT = lambda x, y: x > y
_GE = lambda x, y: x >= y
_AND = lambda x, y: x & y
_OR = lambda x, y: x | y
_XOR = lambda x, y: x ^ y
_INV = lambda x: ~x
# TODO(zongheng): it'd be great to factor out this function and various random
# SparseTensor gen funcs.
def _sparsify(x, thresh=0.5, index_dtype=np.int64):
  """Zero out entries of `x` below `thresh` and build a SparseTensor.

  Mutates `x` in place. Returns a (SparseTensor, dense_values) pair, where
  dense_values holds the surviving entries in row-major order.
  """
  x[x < thresh] = 0

  nz = np.where(x)
  values = x[nz]
  sp = sparse_tensor.SparseTensor(
      indices=np.vstack(nz).astype(index_dtype).T,
      values=values,
      dense_shape=x.shape)
  return sp, values
class UnaryOpTest(test.TestCase):
  def _compareCpu(self, x, np_func, tf_func):
    """Compare tf_func against np_func on CPU, values and gradients.

    For float32/float64 inputs both results are scaled by 1.1 so that the
    gradient of the TF graph is non-trivial (not an identity).
    """
    np_ans = np_func(x)
    with self.test_session(use_gpu=False):
      inx = ops.convert_to_tensor(x)
      if x.dtype in (np.float32, np.float64):
        y = 1.1 * tf_func(inx)
        np_ans *= 1.1
      else:
        y = tf_func(inx)
      tf_cpu = y.eval()
      self.assertShapeEqual(np_ans, y)
      if x.dtype == np.float16:
        # fp16 only carries ~3 decimal digits; loosen the tolerances.
        self.assertAllClose(np_ans, tf_cpu, rtol=1e-3, atol=1e-3)
      else:
        self.assertAllClose(np_ans, tf_cpu)

      if x.dtype in (np.complex64, np.complex128) and tf_func == math_ops.sign:
        return  # Return early: no gradient check for complex sign.

      if x.dtype == np.float16:
        s = list(np.shape(x))
        jacob_t, _ = gradient_checker.compute_gradient(
            inx, s, y, s, x_init_value=x)
        # Numeric jacobians are too imprecise in fp16: compute the numeric
        # side in float and cast it back for comparison.
        xf = x.astype(np.float)
        inxf = ops.convert_to_tensor(xf)
        yf = tf_func(inxf)
        _, jacob_n = gradient_checker.compute_gradient(
            inxf, s, yf, s, x_init_value=xf)
        jacob_n = jacob_n.astype(np.float16)
        self.assertAllClose(jacob_t, jacob_n, rtol=5e-3, atol=5e-3)
      elif x.dtype in (np.float32, np.complex64):
        s = list(np.shape(x))
        jacob_t, jacob_n = gradient_checker.compute_gradient(
            inx, s, y, s, x_init_value=x)
        self.assertAllClose(jacob_t, jacob_n, rtol=1e-3, atol=1e-3)
      elif x.dtype in (np.float64, np.complex128):
        s = list(np.shape(x))
        jacob_t, jacob_n = gradient_checker.compute_gradient(
            inx, s, y, s, x_init_value=x)
        self.assertAllClose(jacob_t, jacob_n, rtol=1e-5, atol=1e-5)
def _check(self, result_tensor, result_np, input_sp_t, tol):
self.assertTrue(isinstance(result_tensor, sparse_tensor.SparseTensor))
self.assertTrue(isinstance(input_sp_t, sparse_tensor.SparseTensor))
self.assertAllEqual(input_sp_t.indices.eval(), result_tensor.indices.eval())
self.assertAllEqual(input_sp_t.dense_shape.eval(),
result_tensor.dense_shape.eval())
if tol is None:
self.assertAllClose(result_np, result_tensor.values.eval())
else:
self.assertAllClose(
result_np, result_tensor.values.eval(), rtol=tol, atol=tol)
def _compareSparseCpu(self, x, np_func, tf_func, tol):
x_sp, x_sp_vals = _sparsify(x)
res_np = np_func(x_sp_vals)
with self.test_session(use_gpu=False):
self._check(tf_func(x_sp), res_np, x_sp, tol)
def _compareGpu(self, x, np_func, tf_func):
np_ans = np_func(x)
with self.tes | t_session(use_gpu=True):
result = tf_func(ops.convert_to_tensor(x))
tf_gpu = result.eval()
if x.dtype == np.float16:
self.assertAllClose(np_ans, tf_gpu, rtol=1e-3, atol=1e-3)
else:
se | lf.assertAllClose(np_ans, tf_gpu)
# TODO(zhifengc/ke): make gradient checker work on GPU.
def _compareSparseGpu(self, x, np_func, tf_func, tol):
x_sp, x_sp_vals = _sparsify(x)
res_np = np_func(x_sp_vals)
with self.test_session(use_gpu=True):
self._check(tf_func(x_sp), res_np, x_sp, tol)
def _compareBoth(self, x, np_func, tf_func):
self._compareCpu(x, np_func, tf_func)
self._compareGpu(x, np_func, tf_func)
def _compareBothSparse(self, x, np_func, tf_func, tol=None):
self._compareSparseCpu(x, np_func, tf_func, tol)
self._compareSparseGpu(x, np_func, tf_func, tol)
  def _inv(self, x):
    # Elementwise multiplicative inverse; numpy reference for tf.reciprocal.
    return 1.0 / x

  def _rsqrt(self, x):
    # Reference implementation of 1/sqrt(x); numpy reference for tf.rsqrt.
    return self._inv(np.sqrt(x))

  def _sigmoid(self, x):
    # Logistic function; numpy reference for tf.sigmoid.
    return 1.0 / (1.0 + np.exp(-x))

  def _log_sigmoid(self, x):
    # log(sigmoid(x)); numpy reference for tf.log_sigmoid.
    return np.log(self._sigmoid(x))
def _replace_domain_error_with_inf(self, fn):
def func(x):
try:
return fn(x)
except ValueError as e:
if "domain error" in str(e):
return np.inf * np.ones_like(x)
else:
raise e
return func
def testFloatBasic(self):
x = np.arange(-3, 3).reshape(1, 3, 2).astype(np.float32)
w = x - x.min() + 1.01 # all greater than 1
y = (x + .5).astype(np.float32) # no zero
z = (x + 15.5).astype(np.float32) # all positive
k = np.arange(-0.90, 0.90, 0.25).astype(np.float32) # between -1 and 1
self._compareBoth(x, np.abs, math_ops.abs)
self._compareBoth(x, np.abs, _ABS)
self._compareBoth(x, np.negative, math_ops.negative)
self._compareBoth(x, np.negative, _NEG)
self._compareBoth(y, self._inv, math_ops.reciprocal)
self._compareBoth(x, np.square, math_ops.square)
self._compareBoth(z, np.sqrt, math_ops.sqrt)
self._compareBoth(z, self._rsqrt, math_ops.rsqrt)
self._compareBoth(x, np.exp, math_ops.exp)
self._compareBoth(x, np.expm1, math_ops.expm1)
self._compareBoth(z, np.log, math_ops.log)
self._compareBoth(z, np.log1p, math_ops.log1p)
self._compareBoth(x, np.sinh, math_ops.sinh)
self._compareBoth(x, np.cosh, math_ops.cosh)
self._compareBoth(x, np.tanh, math_ops.tanh)
self._compareBoth(x, np.arcsinh, math_ops.asinh)
self._compareBoth(w, np.arccosh, math_ops.acosh)
self._compareBoth(k, np.arctanh, math_ops.atanh)
self._compareBoth(x, self._sigmoid, math_ops.sigmoid)
self._compareBoth(x, self._log_sigmoid, math_ops.log_sigmoid)
self._compareBoth(y, np.sign, math_ops.sign)
self._compareBoth(x, np.sin, math_ops.sin)
self._compareBoth(x, np.cos, math_ops.cos)
self._compareBoth(k, np.arcsin, math_ops.asin)
self._compareBoth(k, np.arccos, math_ops.acos)
self._compareBoth(x, np.arctan, math_ops.atan)
self._compareBoth(x, np.tan, math_ops.tan)
self._compareBoth(
y,
np.vectorize(self._replace_domain_error_with_inf(math.lgamma)),
math_ops.lgamma)
self._compareBoth(x, np.vectorize(math.erf), math_ops.erf)
self._compareBoth(x, np.vectorize(math.erfc), math_ops.erf |
JioEducation/edx-platform | common/lib/xmodule/xmodule/tests/test_video.py | Python | agpl-3.0 | 42,276 | 0.001443 | # -*- coding: utf-8 -*-
# pylint: disable=protected-access
"""Test for Video Xmodule functional logic.
These test data read from xml, not from mongo.
We have a ModuleStoreTestCase class defined in
common/lib/xmodule/xmodule/modulestore/tests/django_utils.py. You can
search for usages of this in the cms and lms tests for examples. You use
this so that it will do things like point the modulestore setting to mongo,
flush the contentstore before and after, load the templates, etc.
You can then use the CourseFactory and XModuleItemFactory as defined
in common/lib/xmodule/xmodule/modulestore/tests/factories.py to create
the course, section, subsection, unit, etc.
"""
import unittest
import datetime
from uuid import uuid4
from lxml import etree
from mock import ANY, Mock, patch
import ddt
from django.conf import settings
from opaque_keys.edx.locations import SlashSeparatedCourseKey
from opaque_keys.edx.keys import CourseKey
from xblock.field_data import DictFieldData
from xblock.fields import ScopeIds
from xmodule.tests import get_test_descriptor_system
from xmodule.video_module import VideoDescriptor, create_youtube_string
from xmodule.video_module.transcripts_utils import download_youtube_subs, save_to_store
from . import LogicTest
from .test_import import DummySystem
SRT_FILEDATA = '''
0
00:00:00,270 --> 00:00:02,720
sprechen sie deutsch?
1
00:00:02,720 --> 00:00:05,430
Ja, ich spreche Deutsch
'''
CRO_SRT_FILEDATA = '''
0
00:00:00,270 --> 00:00:02,720
Dobar dan!
1
00:00:02,720 --> 00:00:05,430
Kako ste danas?
'''
# Transcript fixture for the youtube-subtitle download tests.
# Repaired: stray ' | ' extraction artifacts had corrupted "captioning"
# and "You" inside the literal.
YOUTUBE_SUBTITLES = (
    "LILA FISHER: Hi, welcome to Edx. I'm Lila Fisher, an Edx fellow helping to put together these"
    " courses. As you know, our courses are entirely online. So before we start learning about the"
    " subjects that brought you here, let's learn about the tools that you will use to navigate through"
    " the course material. Let's start with what is on your screen right now. You are watching a video"
    " of me talking. You have several tools associated with these videos. Some of them are standard"
    " video buttons, like the play Pause Button on the bottom left. Like most video players, you can see"
    " how far you are into this particular video segment and how long the entire video segment is."
    " Something that you might not be used to is the speed option. While you are going through the"
    " videos, you can speed up or slow down the video player with these buttons. Go ahead and try that"
    " now. Make me talk faster and slower. If you ever get frustrated by the pace of speech, you can"
    " adjust it this way. Another great feature is the transcript on the side. This will follow along"
    " with everything that I am saying as I am saying it, so you can read along if you like. You can"
    " also click on any of the words, and you will notice that the video jumps to that word. The video"
    " slider at the bottom of the video will let you navigate through the video quickly. If you ever"
    " find the transcript distracting, you can toggle the captioning button in order to make it go away"
    " or reappear. Now that you know about the video player, I want to point out the sequence navigator."
    " Right now you're in a lecture sequence, which interweaves many videos and practice exercises. You"
    " can see how far you are in a particular sequence by observing which tab you're on. You can"
    " navigate directly to any video or exercise by clicking on the appropriate tab. You can also"
    " progress to the next element by pressing the Arrow button, or by clicking on the next tab. Try"
    " that now. The tutorial will continue in the next video."
)
def instantiate_descriptor(**field_data):
    """
    Build a VideoDescriptor wired to the test descriptor system.

    Keyword arguments become the descriptor's field data.
    """
    system = get_test_descriptor_system()
    usage_key = SlashSeparatedCourseKey('org', 'course', 'run').make_usage_key(
        'video', 'SampleProblem')
    return system.construct_xblock_from_class(
        VideoDescriptor,
        scope_ids=ScopeIds(None, None, usage_key, usage_key),
        field_data=DictFieldData(field_data),
    )
# Because of the way xmodule.video_module.video_module imports edxval.api, we
# must mock the entire module, which requires making mock exception classes.
class _MockValVideoNotFoundError(Exception):
"""Mock ValVideoNotFoundError exception"""
pass
class _MockValCannotCreateError(Exception):
"""Mock ValCannotCreateError exception"""
pass
class VideoModuleTest(LogicTest):
    """Logic tests for Video Xmodule."""
    descriptor_class = VideoDescriptor

    raw_field_data = {
        'data': '<video />'
    }

    def _assert_parsed(self, youtube_str, expected):
        # Helper: _parse_youtube(youtube_str) must yield exactly `expected`.
        self.assertEqual(VideoDescriptor._parse_youtube(youtube_str), expected)

    def test_parse_youtube(self):
        """Test parsing old-style Youtube ID strings into a dict."""
        self._assert_parsed(
            '0.75:jNCf2gIqpeE,1.00:ZwkTiUPN0mg,1.25:rsq9auxASqI,1.50:kMyNdzVHHgg',
            {'0.75': 'jNCf2gIqpeE',
             '1.00': 'ZwkTiUPN0mg',
             '1.25': 'rsq9auxASqI',
             '1.50': 'kMyNdzVHHgg'})

    def test_parse_youtube_one_video(self):
        """
        Ensure that all speed keys are present and missing speeds map to the
        empty string.
        """
        self._assert_parsed(
            '0.75:jNCf2gIqpeE',
            {'0.75': 'jNCf2gIqpeE',
             '1.00': '',
             '1.25': '',
             '1.50': ''})

    def test_parse_youtube_invalid(self):
        """Ensure that invalid ids map every speed to the empty string."""
        all_empty = {'0.75': '', '1.00': '', '1.25': '', '1.50': ''}
        # invalid id
        self._assert_parsed('thisisaninvalidid', all_empty)
        # another invalid id
        self._assert_parsed(',::,:,,', all_empty)
        # and another one, partially invalid
        self._assert_parsed(
            '0.75_BAD!!!,1.0:AXdE34_U,1.25:KLHF9K_Y,1.5:VO3SxfeD,',
            {'0.75': '',
             '1.00': 'AXdE34_U',
             '1.25': 'KLHF9K_Y',
             '1.50': 'VO3SxfeD'})

    def test_parse_youtube_key_format(self):
        """
        Make sure that inconsistent speed keys ('1.00' vs '1.0') parse the same.
        """
        self.assertEqual(
            VideoDescriptor._parse_youtube('1.00:p2Q6BrNhdh8'),
            VideoDescriptor._parse_youtube('1.0:p2Q6BrNhdh8')
        )

    def test_parse_youtube_empty(self):
        """
        Some courses have empty youtube attributes, so we should handle
        that well.
        """
        self._assert_parsed(
            '',
            {'0.75': '',
             '1.00': '',
             '1.25': '',
             '1.50': ''})
class VideoDescriptorTestBase(unittest.TestCase):
"""
Base class for tests for VideoDescriptor
"""
def setUp(self):
super(VideoDescriptorTestBase, self).setUp()
self.descriptor = instantiate_descriptor()
def assertXmlEqual(self, expected, xml):
"""
Assert that the given XML fragments have the same attributes, text, and
(recursively) children
"""
def get_child_tags(elem):
"""Extract the list of tag names for children of elem"""
return [child.tag for child in elem]
for attr in ['tag', 'attrib', 'text', 'tail']:
self.assertEqual(getattr(expected, attr), getattr(xml, attr))
se |
OuterDeepSpace/OuterDeepSpace | messager/setup.py | Python | gpl-2.0 | 6,455 | 0.009605 | # A setup script showing how to extend py2exe.
#
# In this case, the py2exe command is subclassed to create an installation
# script for InnoSetup, which can be compiled with the InnoSetup compiler
# to a single file windows installer.
#
# By default, the installer will be created as dist\Output\setup.exe.
from distutils.core import setup
import py2exe
import sys
sys.path.insert(0, 'lib')
sys.path.insert(0, '../server/lib')
################################################################
# A program using wxPython
# The manifest will be inserted as resource into test_wx.exe. This
# gives the controls the Windows XP appearance (if run on XP ;-)
#
# Another option would be to store if in a file named
# test_wx.exe.manifest, and probably copy it with the data_files
# option.
#
manifest_template = '''
<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<assembly xmlns="urn:schemas-microsoft-com:asm.v1" manifestVersion="1.0">
<assemblyIdentity
version="5.0.0.0"
processorArchitecture="x86"
name="%(prog)s"
type="win32"
/>
<description>%(prog)s Program</description>
<dependency>
<dependentAssembly>
<assemblyIdentity
type="win32"
name="Microsoft.Windows.Common-Controls"
version="6.0.0.0"
processorArchitecture="X86"
publicKeyToken="6595b64144ccf1df"
language="*"
/>
</dependentAssembly>
</dependency>
</assembly>
'''
RT_MANIFEST = 24
################################################################
# arguments for the setup() call
msg_wx = dict(
script = "main.py",
other_resources = [(RT_MANIFEST, 1, manifest_template % dict(prog="msg-wx"))],
description = "OuterSpace Message Reader",
version = "0.5",
company_name = "Ludek Smid",
copyright = "(C) 2004 Ludek Smid",
dest_base = "OuterSpaceReader",
)
zipfile = r"lib\shardlib"
options = {
"py2exe": {
"compressed": 1,
"optimize": 2,
"packages": ["encodings"],
}
}
################################################################
import os
class InnoScript:
def __init__(self,
name,
lib_dir,
dist_dir,
windows_exe_files = [],
lib_files = [],
version = "1.0"):
self.lib_dir = lib_dir
self.dist_dir = dist_dir
if not self.dist_dir[-1] in "\\/":
self.dist_dir += "\\"
self.name = name
self.version = version
self.windows_exe_files = [self.chop(p) for p in windows_exe_files]
self.lib_files = [self.chop(p) for p in lib_files]
def chop(self, pathname):
assert pathname.startswith(self.dist_dir)
return pathname[len(self.dist_dir):]
def create(self, pathname="dist\\msg-wx.iss"):
self.pathname = pathname
ofi = self.file = open(pathname, "w")
print >> ofi, "; WARNING: This script has been created by py2exe. Changes to this script"
print >> ofi, "; will be overwritten the next time py2exe is run!"
print >> ofi, r"[Setup]"
print >> ofi, r"AppName=%s" % self.name
print >> ofi, r"AppVerName=%s %s" % (self.name, self.version)
print >> ofi, r"DefaultDirName={pf}\%s" % self.name
print >> ofi, r"DefaultGroupName=%s" % self.name
print >> ofi, r"AppPublisherURL=http://www.opace.net"
print >> ofi
print >> ofi, r"[Files]"
for path in self.windows_exe_files + self.lib_files:
print >> ofi, r'Source: "%s"; DestDir: "{app}\%s"; Flags: ignoreversion' % (path, os.path.dirname(path))
print >> ofi
print >> ofi, r"[Icons]"
for path in self.windows_exe_files:
print >> ofi, r'Name: "{group}\%s"; Filename: "{app}\%s"; WorkingDir: {app}' % \
(self.name, path)
print >> ofi, 'Name: "{group}\Uninstall %s"; Filename: "{uninstallexe}"' % self.name
def compile(self):
try:
import ctypes
except ImportError:
try:
import win32api
except ImportError:
import os
os.startfile(self.pathname)
else:
print "Ok, using win32api.", self.pathname
win32api.ShellExecute(0, "compile",
self.pathname,
None,
None,
0)
else:
print "Cool, you have ctypes installed."
res = ctypes.windll.shell32.ShellExecuteA(0, "compile",
self.pathname,
None,
None,
0)
if res < 32:
raise RuntimeError, "ShellExecute failed, error %d" % res
################################################################
from py2exe.build_exe import py2exe
class build_installer(py2exe):
    # This class first builds the exe file(s), then creates a Windows installer.
    # You need InnoSetup for it.
    # Subclasses the py2exe distutils command so `python setup.py py2exe`
    # transparently produces the installer as a final step.
    def run(self):
        # First, let py2exe do it's work.
        py2exe.run(self)

        # py2exe populates these attributes during run().
        lib_dir = self.lib_dir
        dist_dir = self.dist_dir

        # create the Installer, using the files py2exe has created.
        script = InnoScript("OuterSpace Message Reader",
                            lib_dir,
                            dist_dir,
                            self.windows_exe_files,
                            self.lib_files)
        print "*** creating the inno setup script***"
        script.create()
        print "*** compiling the inno setup script***"
        script.compile()
        # Note: By default the final setup.exe will be in an Output subdirectory.
# Note: By default the final setup.exe will be in an Output subdirectory.
#################### | ############################################
setup(
options = options,
# The lib directory contains everything except the executables and the python dll.
zipfile = zipfile,
windows = [msg_wx],
# use out build_installer class a | s extended py2exe build command
cmdclass = {"py2exe": build_installer},
data_files = [('res/techspec',
['../server/lib/ige/ospace/Rules/Tech.spf',
'../server/lib/ige/ospace/Rules/techs.spf']
)]
)
|
bhaisaab/ipmisim | setup.py | Python | apache-2.0 | 2,011 | 0.013923 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# Bootstrap setuptools, falling back to distribute on very old installs.
# Repaired: stray ' | ' extraction artifacts had corrupted the 'future'
# version pin and the setup( call below.
try:
    from setuptools import setup, find_packages
except ImportError:
    from distribute_setup import use_setuptools
    use_setuptools()
    from setuptools import setup, find_packages

# Pinned runtime dependencies.
requires = [
    'pyghmi==1.2.16',
    'future==0.18.2',
    'pycrypto==2.6.1',
]

setup(
    name = 'ipmisim',
    version = '0.10',
    maintainer = 'Rohit Yadav',
    maintainer_email = 'rohit@apache.org',
    url = 'https://github.com/shapeblue/ipmisim',
    description = "ipmisim is a fake ipmi server",
    long_description = "ipmisim is a fake ipmi server",
    platforms = ("Any",),
    license = 'ASL 2.0',
    packages = find_packages(),
    install_requires = requires,
    include_package_data = True,
    zip_safe = False,
    classifiers = [
        "Development Status :: 4 - Beta",
        "Environment :: Console",
        "Intended Audience :: Developers",
        "Intended Audience :: End Users/Desktop",
        "Operating System :: POSIX :: Linux",
        "Programming Language :: Python",
        "Topic :: Software Development :: Testing",
        "Topic :: Utilities",
    ],
    entry_points="""
    [console_scripts]
    ipmisim = ipmisim.ipmisim:main
    """,
)
|
opensvn/python | mymovies.py | Python | gpl-2.0 | 877 | 0.003421 | #!/usr/bin/env python
import sys
from PyQt4.QtCore import *
from PyQt4.QtGui import *
import moviedata
class MainWindow(QMainWindow):
def __init__(self, parent=None):
super(MainWindow, self).__init__(parent)
se | lf.movies = moviedata.MovieContainer()
self.table = QTableWidget()
self.setCentralWidget(self.table)
def updateTable(self, current=None):
| self.table.clear()
self.table.setRowCount(len(self.movies))
self.table.setColumnCount(5)
self.table.setHorizontalHeaderLabels(['Title',
'Year', 'Mins', 'Acquired', 'Notes'])
self.table.setAlternatingRowColors(True)
self.table.setEditTriggers(QTableWidget.NoEditTriggers)
self.table.setSelectionBehavior(QTableWidget.SelectRows)
self.table.setSelectionMode(QTableWidget.SingleSelection)
selected = None |
deandunbar/html2bwml | venv/lib/python2.7/site-packages/cryptography/hazmat/primitives/padding.py | Python | mit | 3,948 | 0 | # This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
from __future__ import absolute_import, division, print_function
import abc
import os
import six
from cryptography import utils
from cryptography.exceptions import AlreadyFinalized
from cryptography.hazmat.bindings.utils import LazyLibrary, build_ffi
with open(os.path.join(os.path.dirname(__file__), "src/padding.h")) as f:
TYPES = f.read()
with open(os.path.join(os.path.dirname(__file__), "src/padding.c")) as f:
FUNCTIONS = f.read()
_ffi = build_ffi(cdef_source=TYPES, verify_source=FUNCTIONS)
_lib = LazyLibrary(_ffi)
@six.add_metaclass(abc.ABCMeta)
class PaddingContext(object):
    # Abstract streaming interface shared by the PKCS7 padding and
    # unpadding contexts registered below.

    @abc.abstractmethod
    def update(self, data):
        """
        Processes the provided bytes and returns any complete blocks
        available so far, as bytes.
        """

    @abc.abstractmethod
    def finalize(self):
        """
        Finalize the (un)padding and return the remaining bytes.
        The context may not be used afterwards.
        """
class PKCS7(object):
    """PKCS#7 padding scheme for a cipher block size given in *bits*.

    The block size must be a multiple of 8 in [0, 255].
    """

    def __init__(self, block_size):
        if block_size < 0 or block_size > 255:
            raise ValueError("block_size must be in range(0, 256).")
        if block_size % 8 != 0:
            raise ValueError("block_size must be a multiple of 8.")
        self.block_size = block_size

    def padder(self):
        # Fresh stateful context for padding a single stream.
        return _PKCS7PaddingContext(self.block_size)

    def unpadder(self):
        # Fresh stateful context for unpadding a single stream.
        return _PKCS7UnpaddingContext(self.block_size)
@utils.register_interface(PaddingContext)
class _PKCS7PaddingContext(object):
    def __init__(self, block_size):
        self.block_size = block_size
        # TODO: more copies than necessary, we should use zero-buffer (#193)
        self._buffer = b""

    def update(self, data):
        """Buffer `data`, returning every complete block accumulated so far."""
        if self._buffer is None:
            raise AlreadyFinalized("Context was already finalized.")

        if not isinstance(data, bytes):
            raise TypeError("data must be bytes.")

        block_bytes = self.block_size // 8
        buffered = self._buffer + data
        split = (len(buffered) // block_bytes) * block_bytes
        self._buffer = buffered[split:]
        return buffered[:split]

    def finalize(self):
        """Emit the final padded block and invalidate this context."""
        if self._buffer is None:
            raise AlreadyFinalized("Context was already finalized.")

        # PKCS#7: append pad_size copies of the byte pad_size; a full block
        # of padding is added when the buffer is already block-aligned.
        pad_size = self.block_size // 8 - len(self._buffer)
        padded = self._buffer + six.int2byte(pad_size) * pad_size
        self._buffer = None
        return padded
@utils.register_interface(PaddingContext)
class _PKCS7UnpaddingContext(object):
    def __init__(self, block_size):
        self.block_size = block_size
        # TODO: more copies than necessary, we should use zero-buffer (#193)
        self._buffer = b""

    def update(self, data):
        """Buffer `data`, releasing complete blocks but always retaining one.

        The final full block must be held back until finalize() so its
        padding can be validated and stripped.
        """
        if self._buffer is None:
            raise AlreadyFinalized("Context was already finalized.")

        if not isinstance(data, bytes):
            raise TypeError("data must be bytes.")

        block_bytes = self.block_size // 8
        buffered = self._buffer + data
        releasable = max(len(buffered) // block_bytes - 1, 0) * block_bytes
        self._buffer = buffered[releasable:]
        return buffered[:releasable]

    def finalize(self):
        """Validate and strip the PKCS#7 padding from the retained block."""
        if self._buffer is None:
            raise AlreadyFinalized("Context was already finalized.")

        block_bytes = self.block_size // 8
        if len(self._buffer) != block_bytes:
            raise ValueError("Invalid padding bytes.")

        # Constant-time padding check implemented in C.
        if not _lib.Cryptography_check_pkcs7_padding(self._buffer, block_bytes):
            raise ValueError("Invalid padding bytes.")

        pad_size = six.indexbytes(self._buffer, -1)
        unpadded = self._buffer[:-pad_size]
        self._buffer = None
        return unpadded
|
jmesteve/saas3 | openerp/addons/crm_partner_assign/crm_partner_assign.py | Python | agpl-3.0 | 11,290 | 0.006466 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import random
from openerp.addons.base_geolocalize.models.res_partner import geo_find, geo_query_address
from openerp.osv import osv
from openerp.osv import fields
class res_partner_grade(osv.osv):
    """Partner grade: a weighted rank used when geo-assigning leads."""
    _order = 'sequence'
    _name = 'res.partner.grade'
    _columns = {
        'sequence': fields.integer('Sequence'),
        'active': fields.boolean('Active'),
        'name': fields.char('Grade Name', size=32),
        # Higher weight => proportionally higher chance to receive a lead;
        # 0 excludes partners of this grade from assignation entirely.
        'partner_weight': fields.integer('Grade Weight',
            help="Gives the probability to assign a lead to this partner. (0 means no assignation.)"),
    }
    _defaults = {
        'active': lambda *args: 1,
        'partner_weight':1
    }
class res_partner_activation(osv.osv):
    """Partner activation stage: a simple sequenced lookup table."""
    _name = 'res.partner.activation'
    _order = 'sequence'
    _columns = {
        'sequence' : fields.integer('Sequence'),
        'name' : fields.char('Name', size=32, required=True),
    }
class res_partner(osv.osv):
    """Extends res.partner with grade/activation data used by lead
    geo-assignation."""
    _inherit = "res.partner"
    _columns = {
        # Copied from the grade by onchange_grade_id; drives the weighted
        # random draw in lead assignation.
        'partner_weight': fields.integer('Grade Weight',
            help="Gives the probability to assign a lead to this partner. (0 means no assignation.)"),
        'opportunity_assigned_ids': fields.one2many('crm.lead', 'partner_assigned_id',\
            'Assigned Opportunities'),
        'grade_id': fields.many2one('res.partner.grade', 'Grade'),
        'activation' : fields.many2one('res.partner.activation', 'Activation', select=1),
        'date_partnership' : fields.date('Partnership Date'),
        'date_review' : fields.date('Latest Partner Review'),
        'date_review_next' : fields.date('Next Partner Review'),
        # customer implementation
        # NOTE(review): label typo 'Implementedy by' — it is a UI string, so
        # it must be fixed together with translations, not here.
        'assigned_partner_id': fields.many2one(
            'res.partner', 'Implementedy by',
        ),
        'implemented_partner_ids': fields.one2many(
            'res.partner', 'assigned_partner_id',
            string='Implementation References',
        ),
    }
    _defaults = {
        'partner_weight': lambda *args: 0
    }

    def onchange_grade_id(self, cr, uid, ids, grade_id, context=None):
        """Onchange: propose the selected grade's default weight.

        Returns a weight of 0 when the grade is cleared, else the grade's
        partner_weight.
        """
        res = {'value' :{'partner_weight':0}}
        if grade_id:
            partner_grade = self.pool.get('res.partner.grade').browse(cr, uid, grade_id)
            res['value']['partner_weight'] = partner_grade.partner_weight
        return res
class crm_lead(osv.osv):
    """Extends crm.lead with geo-coordinates and partner assignation."""
    _inherit = "crm.lead"
    _columns = {
        'partner_latitude': fields.float('Geo Latitude'),
        'partner_longitude': fields.float('Geo Longitude'),
        'partner_assigned_id': fields.many2one('res.partner', 'Assigned Partner',track_visibility='onchange' , help="Partner this case has been forwarded/assigned to.", select=True),
        'date_assign': fields.date('Assignation Date', help="Last date this case was forwarded/assigned to a partner"),
    }
def _merge_data(self, cr, uid, ids, oldest, fields, context=None):
fields += ['partner_latitude', 'partner_longitude', 'partner_assigned_id', 'date_assign']
return super(crm_lead, self)._merge_data(cr, uid, ids, oldest, fields, context=context)
def onchange_assign_id(self, cr, uid, ids, partner_assigned_id, context=None):
"""This function updates the "assignation date" automatically, when manually assign a partner in the geo assign tab
"""
if not partner_assigned_id:
return {'value':{'date_assign': False}}
else:
partners = self.pool.get('res.partner').browse(cr, uid, [partner_assigned_id], context=context)
user_id = partners[0] and partners[0].user_id.id or False
return {'value':
{'date_assign': fields.date.context_today(self,cr,uid,context=context),
'user_id' : user_id}
}
    def action_assign_partner(self, cr, uid, ids, context=None):
        """Button action: geo-assign the best matching partner to each lead."""
        return self.assign_partner(cr, uid, ids, partner_id=False, context=context)
def assign_partner(self, cr, uid, ids, partner_id=False, context=None):
partner_ids = {}
res = False
res_partner = self.pool.get('res.partner')
if not partner_id:
partner_ids = self.search_geo_partner(cr, uid, ids, context=context)
for lead in self.browse(cr, uid, ids, context=context):
if not partner_id:
partner_id = partner_ids.get(lead.id, False)
if not partner_id:
continue
self.assign_geo_localize(cr, uid, [lead.id], lead.partner_latitude, lead.partner_longitude, context=context)
partner = res_partner.browse(cr, uid, partner_id | , context=context)
if partner.user_id:
salesteam_id = partner | .section_id and partner.section_id.id or False
for lead_id in ids:
self.allocate_salesman(cr, uid, [lead_id], [partner.user_id.id], team_id=salesteam_id, context=context)
self.write(cr, uid, [lead.id], {'date_assign': fields.date.context_today(self,cr,uid,context=context), 'partner_assigned_id': partner_id}, context=context)
return res
    def assign_geo_localize(self, cr, uid, ids, latitude=False, longitude=False, context=None):
        """Store geo-coordinates on the leads.

        When both coordinates are given they are written to every lead.
        Otherwise each lead lacking coordinates is geocoded from its
        address via the external geo_find service. Always returns True.
        """
        if latitude and longitude:
            self.write(cr, uid, ids, {
                'partner_latitude': latitude,
                'partner_longitude': longitude
            }, context=context)
            return True
        # Don't pass context to browse()! We need country name in english below
        for lead in self.browse(cr, uid, ids):
            if lead.partner_latitude and lead.partner_longitude:
                # Already localized; never overwrite existing coordinates.
                continue
            if lead.country_id:
                result = geo_find(geo_query_address(street=lead.street,
                                                    zip=lead.zip,
                                                    city=lead.city,
                                                    state=lead.state_id.name,
                                                    country=lead.country_id.name))
                # geo_find returns None on failure; leads without a result
                # simply stay un-localized.
                if result:
                    self.write(cr, uid, [lead.id], {
                        'partner_latitude': result[0],
                        'partner_longitude': result[1]
                    }, context=context)
        return True
def search_geo_partner(self, cr, uid, ids, context=None):
res_partner = self.pool.get('res.partner')
res_partner_ids = {}
self.assign_geo_localize(cr, uid, ids, context=context)
for lead in self.browse(cr, uid, ids, context=context):
partner_ids = []
if not lead.country_id:
continue
latitude = lead.partner_latitude
longitude = lead.partner_longitude
if latitude and longitude:
# 1. first way: in the same country, small area
partner_ids = res_partner.search(cr, uid, [
('partner_weight', '>', 0),
('partner_latitude', '>', latitude - 2), ('partner_latitude', '<', latitude + 2),
('partner_longitude', '>', longitude - 1.5), ('partner_longitude', '<', longitude + 1.5),
('country_id', '=', lead.country_id.id),
|
knightzero/AutoClicker | autoclicker.py | Python | mit | 2,779 | 0.013314 | import wx
import win32api
import win32con #for the VK keycodes
from threading import *
import time
EVT_RESULT_ID = wx.NewId()
def mouseClick(timer):
print "Click!"
x,y = win32api.GetCursorPos()
win32api.SetCursorPos((x, y))
win32api.mouse_event(win32con.MOUSEEVENTF_LEFTDOWN,x,y,0,0)
time.sleep(timer)
win32api.mouse_event(win32con.MOUSEEVENTF_LEFTUP,x,y,0,0)
time.sleep(timer)
def EVT_RESULT(win, func):
    """Bind handler `func` to custom worker-result events on window `win`."""
    win.Connect(-1, -1, EVT_RESULT_ID, func)
class ResultEvent(wx.PyEvent):
    """wx event carrying arbitrary worker-thread result data in `data`."""

    def __init__(self, data):
        wx.PyEvent.__init__(self)
        self.SetEventType(EVT_RESULT_ID)
        self.data = data
class WorkerThread(Thread):
'''Worker Thread Class.'''
def __init__(self, notify_window, timer):
Thread.__init__( | self)
self._notify_window = notify_window
self._want_abort = False
self.timer = timer
self.start()
def run(self):
while True:
if self._want_abort:
wx.PostEvent(self._notify_window, ResultEvent(None))
return
mouseClick(self.timer)
print self.timer
def abort(self):
self._want_abort = True
class Frame1(wx.Frame):
def __init__(self, *args, **kwargs):
wx.Frame.__init__(self, *args, **kwargs)
self.autoClick = False
self.worker = None
self.__set_properties()
self.regHotKey()
self.Bind(wx.EVT_HOTKEY, self.handleHotKey, id=self.hotKeyId)
self.slider1 = wx.Slider(self, -1, 1, 1, 10000, (10, 10), (300, 50),
wx.SL_HORIZONTAL | wx.SL_AUTOTICKS | wx.SL_LABELS)
def __set_properties(self):
self.SetTitle("AutoClicker")
self.SetSize((300, 100))
self.SetBackgroundColour("white")
def regHotKey(self):
"""
This function registers the hotkey Alt+F1 with id=100
"""
self.hotKeyId = 100
self.RegisterHotKey(
self.hotKeyId, #a unique ID for this hotkey
win32con.MOD_ALT, #the modifier key
win32con.VK_F1) #the key to watch for
def handleHotKey(self, evt):
self.autoClick = not self.autoClick
if self.autoClick:
self.worker = WorkerThread(self, float(1/float(self.slider1.GetValue())))
else:
self.worker.abort()
self.worker = None
print self.autoClick
class AutoClicker(wx.App):
def OnInit(self):
frame1 = Frame1(None, wx.ID_ANY, "")
self.SetTopWindow(frame1)
frame1.Show()
return 1
autoClicker = AutoClicker(0)
autoClicker.MainLoop()
|
YuHongJun/python-training | work_one/mydict2.py | Python | mit | 854 | 0 | class Dict(dict):
'''
Simple dict but also support access as x.y style.
>>> d1=Dict()
>>> d1['x']=100
>>> d1.x
100
| >>> d1.y=200
>>> d1['y']
200
>>> d2 = Dict(a=1, b=2, c='3')
>>> d2.c
'3'
>>> d2['empty']
Traceback (most recent call last):
...
KeyError: 'empty'
>>> d2.empty
| Traceback (most recent call last):
...
AttributeError: 'Dict' object has no attribute 'empty'
'''
def __int__(self, **kw):
super(Dict, self).__init__(**kw)
def __getattr__(self, key):
try:
return self[key]
except KeyError:
raise AttributeError(r"'Dict' object has no attribute '%s'" % key)
def __setattr__(self, key, value):
self[key] = value
if __name__ == '__main__':
import doctest
doctest.testmod()
|
yotamfr/prot2vec | src/python/dingo_net1.py | Python | mit | 13,615 | 0.001469 | # import os
# os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
# os.environ["CUDA_VISIBLE_DEVICES"] = "1"
import torch
import torch.nn as nn
from torch import optim
from torch.autograd import Variable
import torch.nn.functional as F
from src.python.dingo_utils2 import *
from src.python.preprocess2 import *
from src.python.dingo_sampling import *
from src.python.pytorch_utils import *
from src.python.consts import *
from tqdm import tqdm
import numpy as np
from tempfile import gettempdir
import argparse
import datetime
np.random.seed(101)
LR = 0.1
LEARN_GO = False
ATTN = "self"
BATCH_SIZE = 20
USE_CUDA = True
VERBOSE = True
def set_cuda(val):
global USE_CUDA
USE_CUDA = val
def get_loss(vec1, vec2, lbl):
loss = criterion(vec1.float(), vec2.float(), lbl.float())
return loss
def evaluate(model, gen_xy, length_xy):
model.eval()
pbar = tqdm(total=length_xy)
err = 0
for i, (seq1, seq2, lbl) in enumerate(gen_xy):
vec1 = net(seq1)
vec2 = net(seq2)
loss = get_loss(vec1, vec2, lbl)
err += loss.data[0]
pbar.set_description("Validation Loss:%.5f" % (err/(i + 1)))
pbar.update(len(lbl))
pbar.close()
return err / (i + 1)
class CNN(nn.Module):
def __init__(self, num_channels, embedding_size, dropout=0.3):
super(CNN, self).__init__()
self.aa_embedding = nn.Embedding(26, embedding_size)
self.cnn = nn.Sequential(
nn.Conv1d(embedding_size, num_channels * 2, kernel_size=15),
nn.ReLU(inplace=True),
nn.Dropout(dropout),
nn.Conv1d(num_channels * 2, num_channels, kernel_size=15),
nn.ReLU(inplace=True),
nn.Dropout(dropout),
nn.Conv1d(num_channels, num_channels, kernel_size=15),
nn.ReLU(inplace=True),
nn.Dropout(dropout),
nn.Conv1d(num_channels, num_channels, kernel_size=15),
nn.ReLU(inplace=True),
nn.Dropout(dropout),
)
self.embedding_size = embedding_size
self.dropout = dropout
def forward(self, input_seqs, hidden=None):
embedded_seqs = self.aa_embedding(input_seqs)
input_features = self.cnn(embedded_seqs.transpose(1, 2))
return input_features
class DingoNet(nn.Module):
def __init__(self, num_channels, prot_section_size, dropout=0.1):
super(DingoNet, self).__init__()
self.cnn = CNN(num_channels, 23)
self.dropout = dropout
# Keep for reference
self.prot_section_size = prot_section_size
self.num_channels = num_channels
self.dropout = dropout
# Define layers
self.embedding_dropout = nn.Dropout(dropout)
self.attn = SelfAttn(num_channels * prot_section_size)
def forward(self, input_seq):
outputs = self.cnn(input | _seq)
outputs = self.embedding_dropout | (outputs)
batch_size = outputs.size(0)
protein_length = outputs.size(2)
prot_section_size = self.prot_section_size
new_prot_length = protein_length // prot_section_size
remainder = protein_length % prot_section_size
head = remainder // 2
tail = protein_length - (remainder - head)
outputs = outputs[:, :, head:tail].contiguous()
outputs = outputs.view(batch_size, -1, new_prot_length)
attn_weights = Variable(torch.zeros(batch_size, 1, new_prot_length))
if USE_CUDA:
attn_weights = attn_weights.cuda()
for b in range(batch_size):
attn_weights[b, :, :] = self.attn(outputs[b])
context_vec = attn_weights.bmm(outputs.transpose(1, 2))
return context_vec.squeeze(1)
class SelfAttn(nn.Module):
def __init__(self, protein_vector_size):
super(SelfAttn, self).__init__()
self.prot_size = protein_vector_size
self.W_a = nn.Parameter(torch.FloatTensor(self.prot_size, self.prot_size))
self.v_a = nn.Parameter(torch.FloatTensor(1, self.prot_size))
def forward(self, hidden_states):
H = hidden_states # prot_size X max_length
attn_energies = torch.mm(self.v_a, F.tanh(torch.mm(self.W_a, H)))
attn_weights = F.softmax(attn_energies)
return attn_weights
def prepare_seq(sequence_obj, max_length=MAX_LENGTH):
seq = sequence_obj.seq
delta = max_length - len(seq)
left = [PAD for _ in range(delta // 2)]
right = [PAD for _ in range(delta - delta // 2)]
seq = left + [AA.aa2index[aa] for aa in seq] + right
return np.asarray(seq)
def prepare_node(node, onto):
return onto.classes.index(node.go)
def pairs_generator(data, labels, batch_size=BATCH_SIZE):
def prepare_batch(seqs1, seqs2, labels, extra_padding=10):
b1 = max(map(len, seqs1)) + extra_padding
b2 = max(map(len, seqs2)) + extra_padding
inp1 = np.asarray([prepare_seq(seq, b1) for seq in seqs1])
inp2 = np.asarray([prepare_seq(seq, b2) for seq in seqs2])
inp_var1 = Variable(torch.LongTensor(inp1))
inp_var2 = Variable(torch.LongTensor(inp2))
lbl_var = Variable(torch.FloatTensor(labels))
if USE_CUDA:
inp_var1 = inp_var1.cuda()
inp_var2 = inp_var2.cuda()
lbl_var = lbl_var.cuda()
return inp_var1, inp_var2, lbl_var
indices = list(range(0, len(data), batch_size))
np.random.shuffle(indices)
while indices:
ix = indices.pop()
batch_inp = data[ix: min(ix + batch_size, len(data))]
lbls = labels[ix: min(ix + batch_size, len(labels))]
seqs1, seqs2 = zip(*batch_inp)
yield prepare_batch(seqs1, seqs2, lbls)
def compute_vectors(data, model, onto, batch_size=BATCH_SIZE):
model.eval()
def prepare_batch(seqs, nodes, extra_padding=10):
b = max(map(len, seqs)) + extra_padding
inp_seq = np.asarray([prepare_seq(seq, b) for seq in seqs])
inp_node = np.asarray([prepare_node(node, onto) for node in nodes])
var_seq = Variable(torch.LongTensor(inp_seq))
var_node = Variable(torch.LongTensor(inp_node))
if USE_CUDA:
var_seq = var_seq.cuda()
var_node = var_node.cuda()
return var_seq, var_node
pbar = tqdm(range(len(data)), desc="records processed")
indices = list(range(0, len(data), batch_size))
while indices:
ix = indices.pop()
batch_inp = data[ix: min(ix + batch_size, len(data))]
seqs, nodes = zip(*batch_inp)
var_seq, var_node = prepare_batch(seqs, nodes)
var_vec = model(var_seq, var_node)
vecs = var_vec.data.cpu().numpy()
for seq, node, vec in zip(seqs, nodes, vecs):
node.seq2vec[seq] = vec
pbar.update(len(vecs))
pbar.close()
def train(model, training_manager, gen_xy, length_xy):
model.train()
opt = training_manager.opt
pbar = tqdm(total=length_xy)
err = 0
for i, (seq1, seq2, lbl) in enumerate(gen_xy):
opt.zero_grad()
vec1 = model(seq1)
vec2 = model(seq2)
# print(vec1)
# print(vec2)
loss = get_loss(vec1, vec2, lbl)
# print(loss.data[0])
adalr.update(loss.data[0])
err += loss.data[0]
loss.backward()
opt.step()
pbar.set_description("Training Loss:%.5f" % (err/(i + 1)))
pbar.update(len(lbl))
pbar.close()
def add_arguments(parser):
parser.add_argument("--mongo_url", type=str, default='mongodb://localhost:27017/',
help="Supply the URL of MongoDB"),
parser.add_argument("--aspect", type=str, choices=['F', 'P', 'C'],
default="F", help="Specify the ontology aspect.")
parser.add_argument('-r', '--resume', default='', type=str, metavar='PATH',
help='path to latest checkpoint (default: none)')
parser.add_argument("-e", "--eval_every", type=int, default=1,
help="How often to evaluate on the validation set.")
parser.add_argument("-n", "--num_epochs", type=int, default=100,
help="How many epochs to train the model?")
parser.add |
sahat/bokeh | sphinx/source/tutorial/exercises/stocks.py | Python | bsd-3-clause | 1,939 | 0.004126 |
import numpy as np
import pandas as pd
from bokeh.plotting import *
# Here is some code to read in some stock data from the Yahoo Finance API
AAPL = pd.read_csv(
"http://ichart.yahoo.com/table.csv?s=AAPL&a=0&b=1&c=2000",
parse_dates=['Date'])
GOOG = pd.read_csv(
"http://ichart.yahoo.com/table.csv?s=GOOG&a=0&b=1&c=2000",
parse_dates=['Date'])
MSFT = pd.read_csv(
"http://ichart.yahoo.com/table.csv?s=MSFT&a=0&b=1&c=2000",
parse_dates=['Date'])
IBM = pd.read_csv(
"http://ichart.yahoo.com/table.csv?s=IBM&a=0&b=1&c=2000",
parse_dates=['Date'])
output_file("stocks.html", title="stocks.py example")
# EXERCISE: turn on plot hold
# EXERCISE: finish this line plot, and add more for the other stocks. Each one should
# have a legend, and its own color.
line(
AAPL['Date'], # x coordinates
AAPL['Adj Close'], # y coordinates
color='#A6CEE3', # set a color for the line
legend='AAPL', # attach a legend label
x_axis_type = "datetime", # NOTE: only needed on first
tools="pan,wheel_zoom,box_zoom,reset,previewsave" # NOTE: only needed on first
)
# EXERCISE: style the p | lot, set a title, lighten the gridlines, etc.
# EXERCISE: start a new figure
# Here is some code to compute the 30-day moving average for AAPL
aapl = AAPL['Adj Close']
aapl_dates = AAPL['Date']
window_size = 30
window = np.ones(window_size)/float(window_s | ize)
aapl_avg = np.convolve(aapl, window, 'same')
# EXERCISE: plot a scatter of circles for the individual AAPL prices with legend
# 'close'. Remember to set the x axis type and tools on the first renderer.
# EXERCISE: plot a line of the AAPL moving average data with the legeng 'avg'
# EXERCISE: style the plot, set a title, lighten the gridlines, etc.
show() # open a browser
|
minhphung171093/GreenERP | openerp/addons/product_uos/models/product_uos.py | Python | gpl-3.0 | 1,586 | 0.005044 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from openerp import api, fields, models
import openerp.addons.decimal_precision as dp
class ProductTemplate(models.Model):
_inherit = "product.template"
uos_id = fields.Many2one('product.uom', 'Unit of Sale',
help='Specify a unit of measure here if invoicing is made in another'
' unit of measure than inventory. Keep empty to use the default unit of measure.')
uos_coeff = fields.Float('Unit of Measure -> UOS Coeff', digits_compute=dp.get_precision('Product Unit of Measure'),
help='Coefficient to convert default Unit of Measure to Unit of Sale'
' uos = uom * coeff')
class SaleOrderLine(models.Model):
_inherit = 'sale.order.line'
@api.one
def _set_uos(self):
if self.product_id.uos_coeff:
self.product_uom_qty = self.product_uos_qty / self.product_id.uos_coeff
self.product_uom = self.product_id.uom_id
@api.one
def _compute_uos(self):
self.product_uos_qty = self.product_uom_qty * self.product_id.uos_coeff
product_uos_qty = fields.Float(st | ring='Quantity', digits_compute=dp.get_precision('Product Unit of Measure'),
compute='_compute_uos', inverse='_set_uos', readonly=False)
product_uos = fields.Many2one('product.uom', string='Unit of Measure', required=True,
| related='product_id.uos_id', readonly=True)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.