repo_name stringlengths 5 100 | path stringlengths 4 231 | language stringclasses 1 value | license stringclasses 15 values | size int64 6 947k | score float64 0 0.34 | prefix stringlengths 0 8.16k | middle stringlengths 3 512 | suffix stringlengths 0 8.17k |
|---|---|---|---|---|---|---|---|---|
dannyperry571/theapprentice | script.module.pydevd/lib/interpreterInfo.py | Python | gpl-2.0 | 8,047 | 0.006462 | '''
This module was created to get information available in the interpreter, such as libraries,
paths, etc.
what is what:
sys.builtin_module_names: contains the builtin modules embeeded in python (rigth now, we specify all manually).
sys.prefix: A string giving the site-specific directory prefix where the platform independent Python files are installed
format is something as
EXECUTABLE:python.exe|libs@compiled_dlls$builtin_mods
all internal are separated by |
'''
import sys
try:
import os.path
def fullyNormalizePath(path):
'''fixes the path so that the format of the path really reflects the directories in the system
'''
return os.path.normpath(path)
join = os.path.join
except: # ImportError or AttributeError.
# See: http://stackoverflow.com/questions/10254353/error-while-installing-jython-for-pydev
def fullyNormalizePath(path):
'''fixes the path so that the format of the path really reflects the directories in the system
'''
return path
def join(a, b):
if a.endswith('/') or a.endswith('\\'):
return a + b
return a + '/' + b
IS_PYTHON_3K = 0
try:
if sys.version_info[0] == 3:
IS_PYTHON_3K = 1
except:
# That's OK, not all versions of python have sys.version_info
pass
try:
# Just check if False and True are defined (depends on version, not whether it's jython/python)
False
True
except:
exec ('True, False = 1,0') # An exec is used so that python 3k does not give a syntax error
if sys.platform == "cygwin":
try:
import ctypes # use from the system if available
except ImportError:
sys.path.append(join(sys.path[0], 'third_party/wrapped_for_pydev'))
import ctypes
def nativePath(path):
MAX_PATH = 512 # On cygwin NT, its 260 lately, but just need BIG ENOUGH buffer
'''Get the native form of the path, like c:\\Foo for /cygdrive/c/Foo'''
retval = ctypes.create_string_buffer(MAX_PATH)
path = fullyNormalizePath(path)
path = tobytes(path)
CCP_POSIX_TO_WIN_A = 0
ctypes.cdll.cygwin1.cygwin_conv_path(CCP_POSIX_TO_WIN_A, path, retval, MAX_PATH)
return retval.value
else:
def nativePath(path):
return fullyNormalizePath(path)
def __getfilesystemencoding():
'''
Note: there's a copy of this method in _pydev_filesystem_encoding.py
'''
try:
ret = sys.getfilesystemencoding()
if not ret:
raise RuntimeError('Unable to get encoding.')
return ret
except:
try:
# Handle Jython
from java.lang import System
env = System.getProperty("os.name").lower()
if env.find('win') != -1:
return 'ISO-8859-1' # mbcs does not work on Jython, so, use a (hopefully) suitable replacement
return 'utf-8'
except:
pass
# Only available from 2.3 onwards.
if sys.platform == 'win32':
| return 'mbcs'
return 'utf-8'
def getfilesystemencoding():
try:
ret = __getfilesystemencoding()
#Check i | f the encoding is actually there to be used!
if hasattr('', 'encode'):
''.encode(ret)
if hasattr('', 'decode'):
''.decode(ret)
return ret
except:
return 'utf-8'
file_system_encoding = getfilesystemencoding()
if IS_PYTHON_3K:
unicode_type = str
bytes_type = bytes
else:
unicode_type = unicode
bytes_type = str
def tounicode(s):
if hasattr(s, 'decode'):
if not isinstance(s, unicode_type):
# Depending on the platform variant we may have decode on string or not.
return s.decode(file_system_encoding)
return s
def tobytes(s):
if hasattr(s, 'encode'):
if not isinstance(s, bytes_type):
return s.encode(file_system_encoding)
return s
def toasciimxl(s):
# output for xml without a declared encoding
# As the output is xml, we have to encode chars (< and > are ok as they're not accepted in the filesystem name --
# if it was allowed, we'd have to do things more selectively so that < and > don't get wrongly replaced).
s = s.replace("&", "&")
try:
ret = s.encode('ascii', 'xmlcharrefreplace')
except:
# use workaround
ret = ''
for c in s:
try:
ret += c.encode('ascii')
except:
try:
# Python 2: unicode is a valid identifier
ret += unicode("&#%d;") % ord(c)
except:
# Python 3: a string is already unicode, so, just doing it directly should work.
ret += "&#%d;" % ord(c)
return ret
if __name__ == '__main__':
try:
# just give some time to get the reading threads attached (just in case)
import time
time.sleep(0.1)
except:
pass
try:
executable = tounicode(nativePath(sys.executable))
except:
executable = tounicode(sys.executable)
if sys.platform == "cygwin" and not executable.endswith(tounicode('.exe')):
executable += tounicode('.exe')
try:
major = str(sys.version_info[0])
minor = str(sys.version_info[1])
except AttributeError:
# older versions of python don't have version_info
import string
s = string.split(sys.version, ' ')[0]
s = string.split(s, '.')
major = s[0]
minor = s[1]
s = tounicode('%s.%s') % (tounicode(major), tounicode(minor))
contents = [tounicode('<xml>')]
contents.append(tounicode('<version>%s</version>') % (tounicode(s),))
contents.append(tounicode('<executable>%s</executable>') % tounicode(executable))
# this is the new implementation to get the system folders
# (still need to check if it works in linux)
# (previously, we were getting the executable dir, but that is not always correct...)
prefix = tounicode(nativePath(sys.prefix))
# print_ 'prefix is', prefix
result = []
path_used = sys.path
try:
path_used = path_used[1:] # Use a copy (and don't include the directory of this script as a path.)
except:
pass # just ignore it...
for p in path_used:
p = tounicode(nativePath(p))
try:
import string # to be compatible with older versions
if string.find(p, prefix) == 0: # was startswith
result.append((p, True))
else:
result.append((p, False))
except (ImportError, AttributeError):
# python 3k also does not have it
# jython may not have it (depending on how are things configured)
if p.startswith(prefix): # was startswith
result.append((p, True))
else:
result.append((p, False))
for p, b in result:
if b:
contents.append(tounicode('<lib path="ins">%s</lib>') % (p,))
else:
contents.append(tounicode('<lib path="out">%s</lib>') % (p,))
# no compiled libs
# nor forced libs
for builtinMod in sys.builtin_module_names:
contents.append(tounicode('<forced_lib>%s</forced_lib>') % tounicode(builtinMod))
contents.append(tounicode('</xml>'))
unic = tounicode('\n').join(contents)
inasciixml = toasciimxl(unic)
if IS_PYTHON_3K:
# This is the 'official' way of writing binary output in Py3K (see: http://bugs.python.org/issue4571)
sys.stdout.buffer.write(inasciixml)
else:
sys.stdout.write(inasciixml)
try:
sys.stdout.flush()
sys.stderr.flush()
# and give some time to let it read things (just in case)
import time
time.sleep(0.1)
except:
pass
raise RuntimeError('Ok, this is so that it shows the output (ugly hack for some platforms, so that it releases the output).')
|
Daniel-CA/odoo-addons | account_invoice_manual_analytic/__openerp__.py | Python | agpl-3.0 | 669 | 0 | # -*- coding: utf-8 -*-
# Copyright 2017 Alfredo de la Fuente - AvanzOSC
# License AGPL-3 - See http://www.gnu.org/licenses/agpl-3.0.html
{
"name": "Account Invoice Manual Analytic",
"version": "8.0.1.0.0",
"category": "Accounting & Finance",
"license": "AGPL-3",
"author": "AvanzOSC",
"website": "http://www.avanzosc.es",
"contributors": [
"Ana Juaristi <ajuaristio@gmail.com>",
"Alfredo de la Fuente <alfredodelafuente@avanzosc.es>",
],
"depends": [
"account",
],
"data": [
"views/account_invoice_view.xml",
| "views/account_analytic_line_view.xml",
],
"installable": Tr | ue,
}
|
edofic/ggrc-core | src/ggrc/models/access_group.py | Python | apache-2.0 | 777 | 0.002574 | # Copyright (C) 2016 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
from ggrc import db
from ggrc.models.mixins import BusinessObject, Timeboxed, CustomAttributable
from ggrc.models.object_document import Documentable
from ggrc.models.object_owner import Ownable
from ggrc.models.object_person import Personable
from ggrc.models.relationship import Relatable
from ggrc.models.track_object_state import HasObjectState, track_state_for_class
class AccessGroup(HasObjectS | tate,
CustomAttributable, Personable, Documentable, Relatab | le,
Timeboxed, Ownable, BusinessObject, db.Model):
__tablename__ = 'access_groups'
_aliases = {"url": "Access Group URL"}
track_state_for_class(AccessGroup)
|
MPTCP-smartphone-thesis/pcap-measurement | example_graph.py | Python | gpl-3.0 | 2,787 | 0.005023 | #! /usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2015 Quentin De Coninck
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
#
# To install on this machine: matplotlib, numpy
from __fu | ture__ import print_function
import argparse
import common as co
import common_graph as cog
import matplotlib
# Do not use any X11 backend
matplotlib.use('Agg')
matplotlib.rcParams['pdf.fonttype'] = 42
matplotlib. | rcParams['ps.fonttype'] = 42
import matplotlib.pyplot as plt
import mptcp
import numpy as np
import os
import tcp
##################################################
## ARGUMENTS ##
##################################################
parser = argparse.ArgumentParser(
description="Summarize stat files generated by analyze")
parser.add_argument("-s",
"--stat", help="directory where the stat files are stored", default=co.DEF_STAT_DIR + '_' + co.DEF_IFACE)
parser.add_argument('-S',
"--sums", help="directory where the summary graphs will be stored", default=co.DEF_SUMS_DIR + '_' + co.DEF_IFACE)
parser.add_argument("-d",
"--dirs", help="list of directories to aggregate", nargs="+")
args = parser.parse_args()
stat_dir_exp = os.path.abspath(os.path.expanduser(args.stat))
sums_dir_exp = os.path.abspath(os.path.expanduser(args.sums))
co.check_directory_exists(sums_dir_exp)
##################################################
## GET THE DATA ##
##################################################
connections = cog.fetch_valid_data(stat_dir_exp, args)
multiflow_connections, singleflow_connections = cog.get_multiflow_connections(connections)
##################################################
## PLOTTING RESULTS ##
##################################################
data = {co.C2S: {'all': {'Connections': [1, 2, 3]}}, co.S2C: {'all': {'Connections': [4, 5, 6]}}}
color = {'Connections': 'orange'}
base_graph_name = "fog_bytes"
co.scatter_plot_with_direction(data, "Bytes on Wi-Fi", "Bytes on cellular", color, sums_dir_exp, base_graph_name)
|
IV-GII/SocialCookies | ENV1/webcookies/socialcookies/forms.py | Python | gpl-2.0 | 1,986 | 0.019153 | #encoding:utf-8
# Copyright (C) 2014 SocialCookies @IV/GII
# @anaprados @oskyar @torresj @josemlp91
# @franciscomanuel @rogegg @pedroag @melero90
# Aplicacion web, para gestionar pedidos de galletas,
# con fotos de Instagram y Twitter.
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
from django.forms import ModelForm
from django import forms
from django.utils import html
class ContactForm(forms.Form):
nombre = forms.CharField(label='Nombre completo', widget=forms.TextInput(attrs={'class':'form-control'}))
correo = forms.EmailField(label='Correo electrónico',widget=forms.TextInput(attrs={'class':'form-control'}))
mensaje = forms.CharField(widget=forms.Textarea(attrs={'class':'form-control'}))
class PedidoForm(forms.Form):
nombre = forms.CharField(label='Nombre completo', widget=forms.TextInput(attrs={'cl | ass':'form | -control'}))
direccion = forms.CharField(label='Direccion', widget=forms.TextInput(attrs={'class':'form-control'}))
telefono = forms.IntegerField(label='Telefono de contacto', widget=forms.TextInput(attrs={'class':'form-control'}))
correo = forms.EmailField(label='Correo electrónico',widget=forms.TextInput(attrs={'class':'form-control'}))
class fotosPedido(forms.Form):
hidden_field0 = forms.CharField(required=False, widget=forms.HiddenInput(attrs={'id':'galletas'}))
|
jgmanzanas/CMNT_004_15 | project-addons/sale_display_stock/__openerp__.py | Python | agpl-3.0 | 1,737 | 0 | # -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2004-2014 Pexego Sistemas Informáticos All Rights Reserved
# $Marta Vázquez Rodríguez$ <marta@pexego.es>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
"name": "Sale display stock",
"version": "1.0",
"author": "Pexego",
'website': 'www.pexego.es',
"category": "Sales",
"description": """
Sales display st | ock
========================================
* Displays the real stock of product at each sale order line.
""",
"depends": ["base", "sale", "stock", "account",
"product_virtual_stock_conservative",
"stock_reserve"],
"data": [
"sale_view.xml",
"product_view.xml",
"report/sale_order_line_report.xml",
"security/ir.model.access.csv",
"secu | rity/sale_display_stock_security.xml"
],
"demo": [],
'auto_install': False,
"installable": True,
'images': [],
}
|
timvideos/flumotion | flumotion/test/test_icalbouncer.py | Python | lgpl-2.1 | 17,170 | 0.002446 | # -*- Mode: Python; -*-
# vi:si:et:sw=4:sts=4:ts=4
# Flumotion - a streaming media server
# Copyright (C) 2004,2005,2006,2007,2008,2009 Fluendo, S.L.
# Copyright (C) 2010,2011 Flumotion Services, S.A.
# All rights reserved.
#
# This file may be distributed and/or modified under the te | rms of
# the GNU Lesser General Public License version 2.1 as published by
# the Free Software Foundation.
# This file is distributed without any warranty; without even the implied
# warranty of merchantability or fitness for a particular purpose.
# See "LICENSE.LGPL" in the source distribution for more information.
#
# Headers in this file shall remain intact.
HAS_MODULES = False
try:
| from icalendar import vDatetime
HAS_MODULES = True
except ImportError:
pass
import os
import tempfile
import time
from datetime import datetime, timedelta
from flumotion.common import testsuite
from twisted.trial import unittest
from twisted.internet import defer
from flumotion.common import keycards
from flumotion.common.planet import moods
from flumotion.component.bouncers import icalbouncer
from flumotion.component.bouncers.algorithms import icalbouncer as \
icalbounceralgorithm
from flumotion.component.base import scheduler
from flumotion.common import eventcalendar
from flumotion.common import tz
def _get_config(path=None):
props = {'name': 'testbouncer',
'plugs': {},
'properties': {}}
if path:
props['properties']['file'] = path
return props
def get_iCalScheduler(bouncer):
return bouncer.get_main_algorithm().iCalScheduler
class RequiredModulesMixin(object):
if not HAS_MODULES:
skip = 'This test requires the icalendar and dateutil modules'
class TestIcalBouncerSetup(testsuite.TestCase, RequiredModulesMixin):
def setUp(self):
self.bouncer = None
self.path = os.path.join(os.path.split(__file__)[0],
'test-google.ics')
def tearDown(self):
if self.bouncer:
self.bouncer.stop()
def testNoFileProperty(self):
conf = _get_config()
self.bouncer = icalbouncer.IcalBouncer(conf)
self.assertEquals(self.bouncer.getMood(), moods.sad.value)
def testNonexistentIcalFile(self):
conf = _get_config('/you/dont/have/that/file')
self.bouncer = icalbouncer.IcalBouncer(conf)
self.assertEquals(self.bouncer.getMood(), moods.sad.value)
def testMalformedIcalFile(self):
conf = _get_config(__file__)
self.bouncer = icalbouncer.IcalBouncer(conf)
self.assertEquals(self.bouncer.getMood(), moods.sad.value)
def testSuccessfulSetup(self):
conf = _get_config(self.path)
self.bouncer = icalbouncer.IcalBouncer(conf)
self.assertEquals(self.bouncer.getMood(), moods.happy.value)
class TestIcalBouncerRunning(testsuite.TestCase, RequiredModulesMixin):
def setUp(self):
self.bouncer = None
self.now = datetime.now(tz.UTC)
self.a_day_ago = self.now - timedelta(days=1)
self.half_an_hour_ago = self.now - timedelta(minutes=30)
self.in_half_an_hour = self.now + timedelta(minutes=30)
self.ical_template = """
BEGIN:VCALENDAR
PRODID:-//Flumotion Fake Calendar Creator//flumotion.com//
VERSION:2.0
BEGIN:VTIMEZONE
TZID:Asia/Shanghai
BEGIN:STANDARD
TZOFFSETFROM:+0800
TZOFFSETTO:+0800
TZNAME:CET
DTSTART:19701025T030000
RRULE:FREQ=YEARLY;BYMONTH=10;BYDAY=-1SU
END:STANDARD
END:VTIMEZONE
BEGIN:VTIMEZONE
TZID:America/Guatemala
BEGIN:STANDARD
TZOFFSETFROM:-0600
TZOFFSETTO:-0600
TZNAME:CET
DTSTART:19701025T030000
RRULE:FREQ=YEARLY;BYMONTH=10;BYDAY=-1SU
END:STANDARD
END:VTIMEZONE
BEGIN:VEVENT
DTSTART%(dtstart-tzid)s:%(dtstart)s
DTEND%(dtend-tzid)s:%(dtend)s
SUMMARY:Test calendar
UID:uid
END:VEVENT
END:VCALENDAR
"""
def tearDown(self):
if self.bouncer:
self.bouncer.stop()
def bouncer_from_ical(self, data):
tmp = tempfile.NamedTemporaryFile()
tmp.write(data)
tmp.flush()
conf = _get_config(tmp.name)
return icalbouncer.IcalBouncer(conf)
def ical_from_specs(self, dtstart_tzid, dtstart, dtend_tzid, dtend):
return self.ical_template % {'dtstart-tzid': dtstart_tzid,
'dtstart': vDatetime(dtstart).ical(),
'dtend-tzid': dtend_tzid,
'dtend': vDatetime(dtend).ical()}
def _approved_callback(self, keycard):
self.failUnless(keycard)
self.assertEquals(keycard.state, keycards.AUTHENTICATED)
def _denied_callback(self, keycard):
self.failIf(keycard)
class TestIcalBouncerUTC(TestIcalBouncerRunning, RequiredModulesMixin):
def testDeniedUTC(self):
data = self.ical_from_specs('', self.a_day_ago,
'', self.half_an_hour_ago)
self.bouncer = self.bouncer_from_ical(data)
keycard = keycards.KeycardGeneric()
d = defer.maybeDeferred(self.bouncer.authenticate, keycard)
d.addCallback(self._denied_callback)
return d
def testApprovedUTC(self):
data = self.ical_from_specs('', self.a_day_ago,
'', self.in_half_an_hour)
self.bouncer = self.bouncer_from_ical(data)
keycard = keycards.KeycardGeneric()
d = defer.maybeDeferred(self.bouncer.authenticate, keycard)
d.addCallback(self._approved_callback)
return d
class TestIcalBouncerTZID(TestIcalBouncerRunning, RequiredModulesMixin):
def setUp(self):
TestIcalBouncerRunning.setUp(self)
# Beijing is UTC+8
self.beijing_tz = tz.gettz('Asia/Shanghai')
if self.beijing_tz is None:
raise unittest.SkipTest("Could not find tzinfo data "
"for the Asia/Shanghai timezone")
# Guatemala is UTC+8
self.guatemala_tz = tz.gettz('America/Guatemala')
if self.guatemala_tz is None:
raise unittest.SkipTest("Could not find tzinfo data "
"for the America/Guatemala timezone")
def testIncorrectTimeTZID(self):
naive_new_end = self.in_half_an_hour.replace(tzinfo=None)
# This will fail the assertion that an event can't start after
# it ended (if our timezone handling is correct)
data = self.ical_from_specs('', self.half_an_hour_ago,
';TZID=Asia/Shanghai', naive_new_end)
self.bouncer = self.bouncer_from_ical(data)
self.assertEquals(self.bouncer.getMood(), moods.sad.value)
def testDeniedTZID(self):
new_end = self.half_an_hour_ago.astimezone(self.beijing_tz)
naive_new_end = new_end.replace(tzinfo=None)
data = self.ical_from_specs('', self.a_day_ago,
';TZID=Asia/Shanghai', naive_new_end)
self.bouncer = self.bouncer_from_ical(data)
keycard = keycards.KeycardGeneric()
d = defer.maybeDeferred(self.bouncer.authenticate, keycard)
d.addCallback(self._denied_callback)
return d
def testDeniedIfNotDefinedTZID(self):
new_end = self.half_an_hour_ago.astimezone(self.beijing_tz)
naive_new_end = new_end.replace(tzinfo=None)
data = self.ical_from_specs('', self.a_day_ago,
';TZID=/some/obscure/path/Asia/Shanghai',
naive_new_end)
try:
self.bouncer = self.bouncer_from_ical(data)
except NotCompilantError:
pass
else:
self.assert_(True)
def testApprovedBothTZID(self):
new_start = self.half_an_hour_ago.astimezone(self.beijing_tz)
naive_new_start = new_start.replace(tzinfo=None)
new_end = self.in_half_an_hour.astimezone(self.guatemala_tz)
naive_new_end = new_end.replace(tzinfo=None)
data = self.ical_from_specs(';TZID=Asia/Shanghai', naive_new_start,
';TZID=America/Guatemala', naive_new_end)
self.bouncer = self.bouncer_from_ical(data)
keycard = |
scorphus/politicos | tests/unit/handlers/test_political_party.py | Python | agpl-3.0 | 4,973 | 0 | # -*- coding: utf-8 -*-
#
# Copyright (c) 2015, Marcelo Jorge Vieira <metal@alucinados.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from ujson import loads, dumps
from preggy import expect
from tornado.testing import gen_test
from tornado.httpclient import HTTPError
from tests.unit.base import ApiTestCase
from tests.fixtures import PoliticalPartyFactory
class TestPoliticalPartyHandler(ApiTestCase):
@gen_test
def test_cannot_get_political_party_info(self):
response = yield self.anonymous_fetch(
'/political-parties/PBA',
method='GET'
)
expect(response.code).to_equal(200)
political_party = loads(response.body)
expect(political_party).to_equal({})
expect(political_party).to_length(0)
@gen_test
def test_can_get_political_party_info(self):
PoliticalPartyFactory.create(name='Partido Blah', siglum='PBA')
response = yield self.anonymous_fetch(
'/political-parties/PBA',
method='GET'
)
expect(response.code).to_equal(200)
political_party = loads(response.body)
expect(political_party).to_length(7)
expect(political_party.get('name')).to_equal('Partido Blah')
expect(political_party.get('siglum')).to_equal('PBA')
class TestAllPoliticalPartyHandler(ApiTestCase):
@gen_test
def test_cannot_get_political_party_info(self):
response = yield self.anonymous_fetch(
'/political-parties/',
method='GET'
)
expect(response.code).to_equal(200)
political_party = loads(response.body)
expect(political_party).to_equal({})
expect(political_party).to_length(0)
@gen_test
def test_can_get_all_political_parties(self):
political_parties = []
for x in range(5):
party = PoliticalPartyFactory.create(
name='Partido %s' % x,
siglum='%s' % x,
founded_date=None,
)
| political_parties.append(party.to_dict())
response = yield self | .anonymous_fetch(
'/political-parties/',
method='GET'
)
expect(response.code).to_equal(200)
political_parties_loaded = loads(response.body)
expect(political_parties_loaded).to_length(5)
expect(political_parties_loaded).to_be_like(political_parties)
@gen_test
def test_can_add_political_party(self):
response = yield self.anonymous_fetch(
'/political-parties/',
method='POST',
body=dumps({'name': 'Partido Heavy Metal', 'siglum': 'PHM'})
)
expect(response.code).to_equal(200)
data = loads(response.body)
expect(data.get('siglum')).to_equal('PHM')
@gen_test
def test_cannot_add_political_party_twice(self):
yield self.anonymous_fetch(
'/political-parties/',
method='POST',
body=dumps({'name': 'Partido Heavy Metal', 'siglum': 'PHM'})
)
try:
yield self.anonymous_fetch(
'/political-parties/',
method='POST',
body=dumps({'name': 'Partido Heavy Metal', 'siglum': 'PHM'})
)
except HTTPError as e:
expect(e).not_to_be_null()
expect(e.code).to_equal(500)
expect(e.response.reason).to_be_like('Internal Server Error')
@gen_test
def test_cannot_add_political_party_without_name(self):
try:
yield self.anonymous_fetch(
'/political-parties/',
method='POST',
body=dumps({'siglum': 'PHM'})
)
except HTTPError as e:
expect(e).not_to_be_null()
expect(e.code).to_equal(400)
expect(e.response.reason).to_be_like('Invalid political party.')
@gen_test
def test_cannot_add_political_party_without_siglum(self):
try:
yield self.anonymous_fetch(
'/political-parties/',
method='POST',
body=dumps({'name': 'Partido Heavy Metal'})
)
except HTTPError as e:
expect(e).not_to_be_null()
expect(e.code).to_equal(400)
expect(e.response.reason).to_be_like('Invalid political party.')
|
lmmentel/ase-espresso | espresso/worldstub.py | Python | gpl-3.0 | 616 | 0 | # -*- coding: utf-8 -*-
# ****************************************************************************
# Original work Copyright (C) 2013-2015 SUNCAT
# Modified work Copyright 2015-2017 Lukasz Mentel
#
# This file is distributed un | der the terms of the
# GNU General Public License. See the file 'COPYING'
# in the root directory of the present di | stribution,
# or http://www.gnu.org/copyleft/gpl.txt .
# ****************************************************************************
# mpi stub for object having number of nodes as "size" attribute
class world:
def __init__(self, size):
self.size = size
|
joaduo/rel_imp | rel_imp.py | Python | mit | 8,202 | 0.000488 | '''
Copyright (c) 2014 Joaquin Duo - File under MIT License
Import this module to enable explicit relative importing on a submodule or
sub-package running it as a main module. Doing so is useful for running smoke
tests or small scripts within the module.
Usage:
------
To enable explicit relative importing in __main__, you simply import
this package before any relative import
import rel_imp; rel_imp.init()
from .my_pkg import foo, bar
...
Make sure your PYTHONPATH is correctly set to solve the relative path of the
submodule/subpackage.
'''
from inspect import currentframe
from os import path
import importlib
import sys
import os
import traceback
__all__ = ['init']
def _get_search_path(main_file_dir, sys_path):
'''
Find the parent python path that contains the __main__'s file directory
:param main_file_dir: __main__'s file directory
:param sys_path: paths list to match directory against (like sys.path)
'''
# List to gather candidate parent paths
paths = []
# look for paths containing the directory
for pth in sys_path:
# convert relative path to absolute
pth = path.abspath(pth)
# filter out __main__'s file directory, it will be in the sys.path
# filter in parent paths containing the package
if (pth != main_file_dir
and pth == path.commonprefix((pth, main_file_dir))):
paths.append(pth)
# check if we have results
if paths:
# we found candidates, look for the largest(closest) parent search path
paths.sort()
return paths[-1]
def _print_exc(e):
'''
Log exception as error.
:param e: exception to be logged.
'''
msg = ('Exception enabling relative_import for __main__. Ignoring it: %r'
'\n relative_import won\'t be enabled.')
_log_error(msg % e)
def _try_search_paths(main_globals):
'''
Try different strategies to found the path containing the __main__'s file.
Will try strategies, in the following order:
1. Building file's path with PWD env var.
2. Building file's path from absolute file's path.
3. Buidling file's path from real file's path.
:param main_globals: globals dictionary in __main__
'''
# try with abspath
fl = main_globals['__file__']
search_path = None
if not path.isabs(fl) and os.getenv('PWD'):
# Build absolute path from PWD if possible
cwd_fl = path.abspath(path.join(os.getenv('PWD'), fl))
main_dir = path.dirname(cwd_fl)
search_path = _get_search_path(main_dir, sys.path)
if not search_path:
# try absolute strategy (will fail on some symlinks configs)
main_dir = path.dirname(path.abspath(fl))
search_path = _get_search_path(main_dir, sys.path)
if not search_path:
# try real path strategy
main_dir = path.dirname(path.realpath(fl))
sys_path = [path.realpath(p) for p in sys.path]
search_path = _get_search_path(main_dir, sys_path)
return main_dir, search_path
def _solve_pkg(main_globals):
'''
Find parent python path of __main__. From there solve the package
containing __main__, import it and set __package__ variable.
:param main_globals: globals dictionary in __main__
'''
# find __main__'s file directory and search path
main_dir, search_path = _try_search_paths(main_globals)
if not search_path:
_log_debug('Could not solve parent python path for %r' % main_dir)
# no candidates for search path, return
return
# solve package name from search path
pkg_str = path.relpath(main_dir, search_path).replace(path.sep, '.')
# Remove wrong starting string for site-packages
site_pkgs = 'site-packages.'
if pkg_str.startswith(site_pkgs):
pkg_str = pkg_str[len(site_pkgs):]
assert pkg_str
_log_debug('pkg_str=%r' % pkg_str)
# import the package in order to set __package__ value later
try:
if '__init__.py' in main_globals['__file__']:
_log_debug('__init__ script. This module is its own package')
# The __main__ is __init__.py => its own package
# If we treat it as a normal module it would be imported twice
# So we simply reuse it
sys.modules[pkg_str] = sys.modules['__main__']
# We need to set __path__ because its needed for
# relative importing
sys.modules[pkg_str].__path__ = [main_dir]
# We need to import parent package, something that would
# happen automatically in non-faked import
parent_pkg_str = '.'.join(pkg_str.split('.')[:-1])
if parent_pkg_str:
importlib.import_module(parent_pkg_str)
else:
_log_debug('Importing package %r' % pkg_str)
# we need to import the package to be available
importlib.import_module(pkg_str)
# finally enable relative import
main_globals['__package__'] = pkg_str
return pkg_str
except ImportError as e:
# In many situations we won't care if it fails, simply report error
# main will fail anyway if finds an explicit relative import
_print_exc(e)
def _log(msg):
'''
Central log function (all levels)
:param msg: message to log
'''
sys.stderr.write(msg + '\n')
sys.stderr.flush()
def _log_debug(msg):
'''
Log at debug level
:param msg: message to log
'''
if _log_level <= DEBUG:
if _log_level == TRACE:
traceback.print_stack()
_log(msg)
def _log_error(msg):
'''
Log at error level
:param msg: message to log
'''
if _log_level <= ERROR:
_log(msg)
# Logging constants
ERROR = 40
DEBUG = 10
TRACE = 5
# Set default level
_log_level = ERROR
# Keeps track of rel_imp initialization
_initialized = False
def init(log_level=ERROR):
'''
Enables explicit relative import in sub-modules when ran as __main__
:param log_level: module's inner logger level (equivalent to logging pkg)
Use PYTHON_DISABLE_REL_IMP environment variable to disable the initialization
'''
global _initialized
if _initialized:
_log_debug('Initialized. Doing nothing')
return
elif 'PYTHON_DISABLE_REL_IMP' in os.environ:
_log_debug('PYTHON_DISABLE_REL_IMP environment variable present. Doing nothing')
return
else:
_initialized = True
# find caller's frame
frame = currentframe()
# go 1 frame back to find who imported us
frame = frame.f_back
_init(frame, log_level)
def init_implicitly(log_level=ERROR):
'''
| Use PYTHON_DISABLE_REL_IMP environment variable to disable the initialization
'''
global _initialized
if _initialized:
_log_debug('Initialized. Doing nothing')
return
elif 'PYTHON_DISABLE_REL_IMP' in os.environ:
_log_debug('PYTHON_DISABLE_REL_IMP environment variable present. Doing nothing')
return
else:
_initialized = True
# find caller's frame
frame = currentframe()
while frame.f_globals['__name__'] != '_ | _main__':
frame = frame.f_back
_init(frame, log_level)
def _init(frame, log_level=ERROR):
'''
Enables explicit relative import in sub-modules when ran as __main__
:param log_level: module's inner logger level (equivalent to logging pkg)
'''
global _log_level
_log_level = log_level
# now we have access to the module globals
main_globals = frame.f_globals
# If __package__ set or it isn't the __main__, stop and return.
# (in some cases relative_import could be called once from outside
# __main__ if it was not called in __main__)
# (also a reload of relative_import could trigger this function)
pkg = main_globals.get('__package__')
file_ = main_globals.get('__file__')
if pkg or not file_:
_log_debug('Package solved or init was called from interactive '
'console. __package__=%r, __file__=%r' % (pkg, file_))
return
try:
_solve_pkg(main_globals)
except Exception as e:
|
jacksonllee/lxa5 | linguistica/datasets/__init__.py | Python | mit | 179 | 0 | # -*- encoding: utf8 -*-
import os
brown = os.path.join(os.path. | dirname(__file__), 'english-brown.txt')
cmudict = os.path.join(os.path.dirname(__file__), | 'english-cmudict.dx1')
|
USGSDenverPychron/pychron | docs/user_guide/operation/scripts/examples/argus/extraction/apis/PrepareForApis.py | Python | apache-2.0 | 702 | 0.014245 | def main():
info('Evacuate Microbone')
close(description='Jan Inlet')
open(description='Jan Io | n Pump')
#close(description='Minibone to Bone')
open(description='Minibone to Bone')
#close(description='Microbone to Minibone')
open(description='Microbone to Minibone')
open('C')
close('P')
close(description='Microbone to CO2 Laser')
open(description='Microbone to Turbo')
open(description='Microbo | ne to Inlet Pipette')
open(description='Microbone to Getter NP-10C')
#evacuate apis section
#info('evacuate apis')
open(description='Microbone to Getter NP-10H')
#sleep(15)
#close(description='Microbone to Getter NP-10H') |
satanas/libturpial | tests/models/test_media.py | Python | gpl-3.0 | 1,767 | 0.007923 | import pytest
import __builtin__
from libturpial.api.models.media import Media
from tests.helpers import DummyFileHandler
class TestMedia:
@classmethod
@pytest.fixture(autouse=True)
def setup_class(self, monkeypatch):
monkeypatch.setattr(__builtin__, 'open', lambda x, y: DummyFileHandler())
def test_init(self, monkeypatch):
media = Media(Media.IMAGE, 'foo', 'binary', path='/path/to/ble/', info='lorem ipsum')
assert media.path == '/path/to/ble/'
assert media.content == 'binary'
media = Media(Media.IMAGE, 'foo', 'binary')
assert media.path != None
assert media.info == None
assert media.content == 'binary'
def test_new_image(self, monkeypatch):
media = Media.new_image('foo', 'binary', path='/path/to/ble', info='lorem ipsum')
as | sert isinstance(media, Media)
def test_save_content(self, monkeypatch):
media = Media(Media.IMAGE, 'foo', 'binary', path='/path/to/ble/', info='lorem ipsum')
# TODO: How test that this works?
assert media.save_content() == None
def raise_ex():
| raise KeyError
monkeypatch.setattr(__builtin__, 'open', lambda x, y: raise_ex())
media = Media(Media.IMAGE, 'foo', 'binary', path='/path/to/ble/', info='lorem ipsum')
assert media.save_content() == None
def test_type(self):
media = Media(Media.VIDEO, 'foo', 'binary', path='/path/to/ble/', info='lorem ipsum')
assert media.is_video()
media = Media(Media.IMAGE, 'foo', 'binary', path='/path/to/ble/', info='lorem ipsum')
assert media.is_image()
media = Media(Media.MAP, 'foo', 'binary', path='/path/to/ble/', info='lorem ipsum')
assert media.is_map()
|
donaldinou/nuked-gamer | make.py | Python | gpl-2.0 | 988 | 0.004049 | #!/usr/bin/python
import os
import shutil
import zipfile
import zlib
import os.path
def _ignore(src, name ):
if src == './UPLOAD/':
print name
return ['RESTDIR', 'conf.inc.php', 'Nuked-Klan.zip', '.hg', 'make.py']
else:
return []
def _RecImport(src, dst, zip | ):
elements = os.listdir(src)
ignored = _ignore('.' + dst, elements)
for delete in ignored:
try:
elements.remove(delete)
except ValueError:
pass
for ele in elements:
if os.path.isfile(src + ele):
zip.write(src + ele, dst + ele)
print dst + ele
elif os.path.isdir(src + ele):
_RecImport(src | + ele + '/', dst + ele + '/', zip)
try:
shutil.rmtree('tmp')
except OSError:
pass
file = 'Nuked-Klan.zip'
try:
os.remove(file)
except OSError:
pass
zip = zipfile.ZipFile(file, 'w', zipfile.ZIP_DEFLATED, False)
_RecImport('RESTDIR/', '/', zip)
_RecImport('./', '/UPLOAD/', zip)
|
stachenov/PyLeetCode | problems/integer_replacement.py | Python | unlicense | 421 | 0 | class Solution(object):
def integerReplacement(self, n):
"""
:type n: int
:rtype: int
| """
if n < 4:
return [0, 0, 1, 2][n]
elif (n & 0b01) == 0b00:
return self.integerReplacement(n / 2) + 1
elif (n & 0b11) == | 0b01:
return self.integerReplacement(n - 1) + 1
else:
return self.integerReplacement(n + 1) + 1
|
synsun/robotframework | utest/running/test_userhandlers.py | Python | apache-2.0 | 6,437 | 0.001243 | import unittest
from robot.errors import DataError
from robot.model import Keywords
from robot.running.userkeyword import (EmbeddedArgs, EmbeddedArgsTemplate,
UserKeywordHandler)
from robot.running.arguments import EmbeddedArguments, UserKeywordArgumentParser
from robot.utils.asserts import assert_equal, assert_true, assert_raises
class Fake(object):
value = ''
message = ''
def __iter__(self):
return iter([])
class FakeArgs(object):
def __init__(self, args):
self.value = args
def __nonzero__(self):
return bool(self.value)
def __iter__(self):
return iter(self.value)
class HandlerDataMock:
def __init__(self, name, args=[]):
self.name = name
self.args = FakeArgs(args)
self.metadata = {}
self.keywords = Keywords()
self.defaults = []
self.varargs = None
self.minargs = 0
self.maxargs = 0
self.return_value = None
self.doc = Fake()
self.timeout = Fake()
self.return_ = Fake()
self.tags = ()
self.teardown = None
def EAT(name, args=[]):
handler = HandlerDataMock(name, args)
embedded = EmbeddedArguments(name)
return EmbeddedArgsTemplate(handler, 'resource', embedded)
class TestEmbeddedArgs(unittest.TestCase):
def setUp(self):
self.tmp1 = EAT('User selects ${item} from list')
self.tmp2 = EAT('${x} * ${y} from "${z}"')
def test_no_embedded_args(self):
assert_true(not EmbeddedArguments('No embedded args here'))
assert_true(EmbeddedArguments('${Yes} embedded args here'))
def test_get_embedded_arg_and_regexp(self):
assert_equal(self.tmp1.embedded_args, ['item'])
assert_equal(self.tmp1.embedded_name.pattern,
'^User\\ selects\\ (.*?)\\ from\\ list$')
assert_equal(self.tmp1.name, 'User selects ${item} from list')
def test_get_multiple_embedded_args_and_regexp(self):
assert_equal(self.tmp2.embedded_args, ['x', 'y', 'z'])
assert_equal(self.tmp2.embedded_name.pattern,
'^(.*?)\\ \\*\\ (.*?)\\ from\\ \\"(.*?)\\"$')
def test_create_handler_when_no_match(self):
assert_raises(ValueError, EmbeddedArgs, 'Not matching', self.tmp1)
def test_create_handler_with_one_embedded_arg(self):
handler = EmbeddedArgs('User selects book from list', self.tmp1)
assert_equal(handler.embedded_args, [('item', 'book')])
assert_equal(handler.name, 'User selects book from list')
assert_equal(handler.longname, 'resource.User selects book from list')
handler = EmbeddedArgs('User selects radio from list', self.tmp1)
assert_equal(handler.embedded_args, [('item', 'radio')])
assert_equal(handler.name, 'User selects radio from list')
assert_equal(handler.longname, 'resource.User selects radio from list')
def test_create_handler_with_many_embedded_args(self):
handler = EmbeddedArgs('User * book from "list"', self.tmp2)
assert_equal(handler.embedded_args,
[('x', 'User'), ('y', 'book'), ('z', 'list')])
def test_create_handler_with_empty_embedded_arg(self):
handler = EmbeddedArgs('User selects from list', self.tmp1)
assert_equal(handler.embedded_args, [('item', '')])
def test_create_handler_with_special_characters_in_embedded_args(self):
handler = EmbeddedArgs('Janne & Heikki * "enjoy" from """', self.tmp2)
assert_equal(handler.embedded_args,
[('x', 'Janne & Heikki'), ('y', '"enjoy"'), ('z', '"')])
def test_embedded_args_without_separators(self):
template = EAT('This ${does}${not} work so well')
handler = EmbeddedArgs('This doesnot work so well', template)
assert_equal(handler.embedded_args, [('does', ''), ('not', 'doesnot')])
def test_embedded_args_with_separators_in_values(self):
template = EAT('This ${could} ${work}-${OK}')
handler = EmbeddedArgs("This doesn't really work---", template)
assert_equal(handler.embedded_args,
[('could', "doesn't"), ('work', 'really work'), ('OK', '--')])
def test_creating_handlers_is_case_insensitive(self):
handler = EmbeddedArgs('User SELECts book frOm liST', self.tmp1)
assert_equal(handler.embedded_args, [('item', 'book')])
assert_equal(handler.name, 'User SELECts book frOm liST')
assert_equal(handler.longname, 'resource.User SELECts book frOm liST')
def test_embedded_args_handler_has_all_needed_attributes(self):
normal = UserKeywordHandler(HandlerDataMock('My name'), None)
embedded = EmbeddedArgs('My name', EAT('My ${name}'))
for attr in dir(normal):
assert_true(hasattr(embedded, attr), "'%s' mis | sing" % attr)
class TestGetArgSpec(unittest.TestCase):
def test_no_args(self):
self._verify('')
def test_one_arg(self):
self._verify('${arg1}', ['arg1',])
def test_one_vararg(self):
self._verify('@{varargs}', exp_varargs='varargs')
def test_one_default(self):
self._verify('${arg1} ${arg2}=default @{varargs}',
['arg1', 'arg2'], ['default'], 'varargs')
def test | _one_empty_default(self):
self._verify('${arg1} ${arg2}= @{varargs}',
['arg1', 'arg2'], [''], 'varargs')
def test_many_defaults(self):
self._verify('${arg1}=default1 ${arg2}=default2 ${arg3}=default3',
['arg1', 'arg2', 'arg3'],
['default1', 'default2', 'default3'])
def _verify(self, in_args, exp_args=[], exp_defaults=[], exp_varargs=None):
argspec = self._parse(in_args)
assert_equal(argspec.positional, exp_args)
assert_equal(argspec.defaults, exp_defaults)
assert_equal(argspec.varargs, exp_varargs)
def _parse(self, in_args):
return UserKeywordArgumentParser().parse(in_args.split())
def test_many_varargs_raises(self):
assert_raises(DataError, self._parse, '@{varargs} @{varargs2}')
def test_args_after_varargs_raises(self):
assert_raises(DataError, self._parse, '@{varargs} ${arg1}')
def test_get_defaults_before_args_raises(self):
assert_raises(DataError, self._parse, '${args1}=default ${arg2}')
if __name__ == '__main__':
unittest.main()
|
google-research/remixmatch | ablation/ab_cta_remixmatch_noweak.py | Python | apache-2.0 | 3,534 | 0.001698 | # Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""ReMixMatch training, changes from MixMatch are:
- Add distribution matching.
"""
import os
import numpy as np
from absl import app
from absl import flags
from ablation.ab_cta_remixmatch import ABCTAReMixMatch
from libml import utils, data, ctaugment
from libml.augment import AugmentPoolCTA
FLAGS = flags.FLAGS
cla | ss AugmentPoolCTANoWeak(AugmentPoolCTA):
@staticmethod
def numpy_apply_policies(arglist):
x, cta, probe = arglist
if x.ndim == 3:
assert probe
policy = cta.policy(probe=True)
return dict(policy=policy,
probe=ctaugment.apply(x, policy),
image=ctaugment.apply(x, cta.policy(p | robe=False)))
assert not probe
return dict(image=np.stack([ctaugment.apply(y, cta.policy(probe=False)) for y in x]).astype('f'))
class ABCTAReMixMatchNoWeak(ABCTAReMixMatch):
AUGMENT_POOL_CLASS = AugmentPoolCTANoWeak
def main(argv):
utils.setup_main()
del argv # Unused.
dataset = data.MANY_DATASETS()[FLAGS.dataset]()
log_width = utils.ilog2(dataset.width)
model = ABCTAReMixMatchNoWeak(
os.path.join(FLAGS.train_dir, dataset.name, ABCTAReMixMatchNoWeak.cta_name()),
dataset,
lr=FLAGS.lr,
wd=FLAGS.wd,
arch=FLAGS.arch,
batch=FLAGS.batch,
nclass=dataset.nclass,
K=FLAGS.K,
beta=FLAGS.beta,
w_kl=FLAGS.w_kl,
w_match=FLAGS.w_match,
w_rot=FLAGS.w_rot,
redux=FLAGS.redux,
use_dm=FLAGS.use_dm,
use_xe=FLAGS.use_xe,
warmup_kimg=FLAGS.warmup_kimg,
scales=FLAGS.scales or (log_width - 2),
filters=FLAGS.filters,
repeat=FLAGS.repeat)
model.train(FLAGS.train_kimg << 10, FLAGS.report_kimg << 10)
if __name__ == '__main__':
utils.setup_tf()
flags.DEFINE_float('wd', 0.02, 'Weight decay.')
flags.DEFINE_float('beta', 0.75, 'Mixup beta distribution.')
flags.DEFINE_float('w_kl', 0.5, 'Weight for KL loss.')
flags.DEFINE_float('w_match', 1.5, 'Weight for distribution matching loss.')
flags.DEFINE_float('w_rot', 0.5, 'Weight for rotation loss.')
flags.DEFINE_integer('scales', 0, 'Number of 2x2 downscalings in the classifier.')
flags.DEFINE_integer('filters', 32, 'Filter size of convolutions.')
flags.DEFINE_integer('repeat', 4, 'Number of residual layers per stage.')
flags.DEFINE_integer('warmup_kimg', 1024, 'Unannealing duration for SSL loss.')
flags.DEFINE_enum('redux', '1st', 'swap mean 1st'.split(), 'Logit selection.')
flags.DEFINE_bool('use_dm', True, 'Whether to use distribution matching.')
flags.DEFINE_bool('use_xe', True, 'Whether to use cross-entropy or Brier.')
FLAGS.set_default('augment', 'd.d.d')
FLAGS.set_default('dataset', 'cifar10.3@250-5000')
FLAGS.set_default('batch', 64)
FLAGS.set_default('lr', 0.002)
FLAGS.set_default('train_kimg', 1 << 16)
app.run(main)
|
Azure/azure-sdk-for-python | sdk/sql/azure-mgmt-sql/azure/mgmt/sql/operations/_operations.py | Python | mit | 5,294 | 0.004156 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import functools
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.mgmt.core.exceptions import ARMErrorFormat
from msrest import Serializer
from .. import models as _models
from .._vendor import _convert_request
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
def build_list_request(
**kwargs: Any
) -> HttpRequest:
api_version = "2020-11-01-preview"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/providers/Microsoft.Sql/operations')
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
class Operations(object):
"""Operations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.sql.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__ | (self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
@distributed_trace
def list(
self,
**kwargs: Any
) -> Iterable["_models.OperationListResult"]:
"""Lists all of the available SQL Rest API operations.
:keyword callable cls: A custom type or function that will be pass | ed the direct response
:return: An iterator like instance of either OperationListResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.sql.models.OperationListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.OperationListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_list_request(
template_url=self.list.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_list_request(
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
def extract_data(pipeline_response):
deserialized = self._deserialize("OperationListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/providers/Microsoft.Sql/operations'} # type: ignore
|
jeryfast/piflyer | piflyer/servo_handler.py | Python | apache-2.0 | 5,739 | 0.002265 | from __future__ import division
import number_range as n
import time
import Adafruit_PCA9685
import delays
MIN = 0
MAX = 180
# RPi code
# !/usr/bin/python
# Initialise the PWM device using the default address
pwm = Adafruit_PCA9685.PCA9685(0x41)
# Note if you'd like more debug output you can instead run:
# pwm = PWM(0x40, debug=True)
# Set frequency to 60 Hz
pwm.set_pwm_freq(60)
# default:min 150, max 600
servoMin = 150 # Min pulse length out of 4096
servoMax = 600 # Max pulse length out of 4096
on = 0
class servo_handler:
def __init__(self, channel):
# servo position variables
self.up = MIN
self.down = MAX
self.neutral = MAX / 2
self.position = 0
self.oldRange = [0, 0]
# mobile phone tilt reference - dont`t change
self.upTilt = 45
self.downTilt = -45
# upAdd can be 1 or -1
self.upAdd = -1
# input intensitymultiplier
self.multiplier = 1
self.t = 0
self.channel = channel
self.timer=0
self.setServoValue(self.channel, MIN + MAX / 2)
#hardware
def setServoPulse(channel, pulse):
pulseLength = 1000000 # 1,000,000 us per second
pulseLength /= 60 # 60 Hz
print("%d us per period" % pulseLength)
pulseLength /= 4096 # 12 bits of resolution
print("%d us per bit" % pulseLength)
pulse *= 1000
pulse /= pulseLength
pwm.set_pwm(channel, 0, pulse)
print(pulse)
def setServoValue(self, channel, value):
t = time.time()
if (t - self.timer > delays.SERVO_COMMAND_REFRESH_DELAY):
self.timer = t
value = n.arduino_map(value, 0, 180, servoMin, servoMax)
pwm.set_pwm(channel, on, int(value))
#software
def getPosition(self):
return self.position
def getUpPosition(self):
return self.up
def getDownPosition(self):
return self.down
# converter
def tiltToPosition(self, tilt):
return n.arduino_map(tilt, self.downTilt, self.upTilt, self.down, self.up)
# converter with limits
def tiltToPositionWithLimits(self, tilt, upTilt, downTilt):
return n.arduino_map(tilt, downTilt, upTilt, self.down, self.up)
# converter
def positionToTilt(self, position):
return n.arduino_map(position, self.down, self.up, self.downTilt, self.upTilt)
"""
# Should not be used
# reference limits of tilt controls
def setTiltLimits(self, up, down):
self.upTilt = up
self.downTilt = down
# reference limits of tilt control
def getTiltLimits(self):
return [self.upTilt,self.downTilt]
"""
# resolution: 100, from servosettings
def setPositionPercent(self, position):
position = n.arduino_map(position, 0, 100, MIN, MAX)
self.setPosition(position)
def setPositionFromTilt(self, tiltPosition):
self.setPosition(self.tiltToPosition(tiltPosition * self.multiplier))
# resolution: 180 or abs(MAX-MIN)
def setPosition(self, position):
if (position != self.position):
if(position<MIN):
position=MIN
elif(position>MAX):
position=MAX
#print("position: ",position)
self.position = position
self.setServoValue(self.channel, position)
# servo movement range limits - tested!
def setUpDownLimit(self, up, down):
# save history for live servo update - to know which value to update - top/left or bottom/right
a, b = self.oldRange
u, d = False, False
if (up != a):
u = True
self.oldRange[0] = up
if (down != b):
d = True
self.oldRange[1] = down
"""
Sets up and down servo limitations in percentage values: range - 0 to 100,
useful for flap of airbrake definition, control surface offsets for
airplane neutral point correction
Correct upDirection values must be set to work properly
up=0, down=100 -> full range
"""
if (self.upAdd == -1):
self.up = n.arduino_map(up, 0, 100, MIN, MAX)
self.down = n. | arduino_map(down, 0, 100, MIN, MAX)
if (u):
self.setPosition(self.up)
print("Servo position: %d" % (self.up))
u | = False
elif (d):
self.setPosition(self.down)
print("Servo position: %d" % (self.down))
d = False
elif (self.upAdd == 1):
self.up = n.arduino_map(up, 0, 100, MAX, MIN)
self.down = n.arduino_map(down, 0, 100, MAX, MIN)
if (u):
self.setPosition(self.up)
print("Servo position: %d" % (self.up))
u = False
elif (d):
self.setPosition(self.down)
print("Servo position: %d" % (self.down))
d = False
# servo directions settings - tested
def setUpDirection(self, val):
if (val > 50):
self.up = MAX
self.down = MIN
self.upAdd = 1
else:
self.up = MIN
self.down = MAX
self.upAdd = -1
self.setPositionPercent(val)
print("Servo position: %d" % (val))
def setMultiplier(self, multiplier):
self.multiplier = multiplier
# not tested yet
def add(self, val=None):
if (not val):
val = 1
self.setPosition(self.position + val * self.upAdd)
# not tested yet
def sub(self, val=None):
if (not val):
val = 1
self.setPosition(self.position - val * self.upAdd)
|
kaday/cylc | lib/parsec/example/cfgspec.py | Python | gpl-3.0 | 1,963 | 0.037188 | #!/usr/bin/env python
# THIS FILE IS PART OF THE CYLC SUITE ENGINE.
# Copyright (C) 2008-2015 NIWA
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from parsec.validate import validator as vdr
"""
Legal it | ems and validators for the parsec test config file.
"""
SPEC = {
'title' : vdr( vtype="string" ),
| 'single values' :
{
'integers' : { '__MANY__' : vdr( vtype="integer" ) },
'booleans' : { '__MANY__' : vdr( vtype="boolean" ) },
'floats' : { '__MANY__' : vdr( vtype="float" ) },
'strings' : { '__MANY__' : vdr( vtype="string" ) },
'strings with internal comments' : { '__MANY__' : vdr( vtype="string" ) },
'multiline strings' : { '__MANY__' : vdr( vtype="string" ) },
'multiline strings with internal comments' : { '__MANY__' : vdr( vtype="string" ) },
},
'list values' :
{
'string lists' :
{
'__MANY__' : vdr( vtype="string_list" ),
'compulsory' : vdr( vtype="string_list", default=["jumped","over","the"], compulsory=True )
},
'integer lists' : { '__MANY__' : vdr( vtype="integer_list", allow_zeroes=False ) },
'float lists' : { '__MANY__' : vdr( vtype="float_list", allow_zeroes=False ) },
},
}
|
ocefpaf/biggus | biggus/tests/test_adapter.py | Python | gpl-3.0 | 8,309 | 0 | # (C) British Crown Copyright 2012 - 2013, Met Office
#
# This file is part of Biggus.
#
# Biggus is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Biggus is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Biggus. If not, see <http://www.gnu.org/licenses/>.
import unittest
import numpy as np
import biggus
class _TestAdapter(object):
longMessage = True
def test_dtype(self):
dtypes = ['f4', 'i1', 'O', 'm8', '<f4', '>f4', '=f4']
keys = [(), (5,), (slice(1, 3),)]
for dtype in dtypes:
for key in keys:
array = self.zeros_adapter([10], dtype=dtype, keys=key)
self.assertEqual(array.dtype, np.dtype(dtype))
def test_shape_0d(self):
pairs = [
[(), ()],
]
for key, shape in pairs:
array = self.zeros_adapter((), keys=key)
self.assertEqual(array.shape, shape)
def test_shape_1d(self):
pairs = [
[(), (10,)],
[(5,), ()],
[(slice(1, 3),), (2,)],
]
for key, shape in pairs:
array = self.zeros_adapter([10], keys=key)
self.assertEqual(array.shape, shape)
def test_shape_2d(self):
pairs = [
[(), (30, 40)],
[(5,), (40,)],
[(slice(1, 3),), (2, 40)],
[(slice(None, None),), (30, 40)],
[(5, 3), ()],
[(5, slice(2, 6)), (4,)],
[(slice(2, 3), slice(2, 6)), (1, 4)],
]
for key, shape in pairs:
array = self.zeros_adapter((30, 40), keys=key)
self.assertEqual(array.shape, shape)
def test_getitem(self):
# Sequence of tests, defined as:
# 1. Original array shape,
# 2. sequence of indexing operations to apply,
# 3. expected result shape or exception.
tests = [
[(30, 40), [], (30, 40)],
[(30, 40), [5], (40,)],
[(30, 40), [(5,)], (40,)],
[(30, 40), [5, 3], ()],
[(30, 40), [(5,), (4,)], ()],
[(30, 40), [(slice(None, None), 6)], (30,)],
[(30, 40), [(slice(None, None), slice(1, 5))], (30, 4)],
[(30, 40), [(slice(None, None),), 4], (40,)],
[(30, 40), [5, (slice(None, None),)], (40,)],
[(30, 40), [(slice(None, 10),)], (10, 40)],
[(30, 40), [(slice(None, None),)], (30, 40)],
[(30, 40), [(slice(None, None, -2),)], (15, 40)],
[(30, 40), [(slice(None, 10),), 5], (40,)],
[(30, 40), [(slice(None, 10),), (slice(None, 3),)], (3, 40)],
[(30, 40), [(slice(None, 10),), (slice(None, None, 2),)], (5, 40)],
[(30, 40), [(slice(5, 10),),
(slice(None, None), slice(2, 6))], (5, 4)],
[(30, 40), [(slice(None, None), slice(2, 6)),
(slice(5, 10),)], (5, 4)],
[(30, 40), [3.5], TypeError],
[(30, 40), ['foo'], TypeError],
[(30, 40), [object()], TypeError],
# Fancy indexing
[(21, 5, 70, 30, 40), [((1, 5), 0, (2, 5, 10), slice(None, 15))],
(2, 3, 15, 40)],
[(21, 5, 2, 70, 30, 40), [(0, (1, 4), 1, (2, 5, 10),
slice(None, 15))], (2, 3, 15, 40)],
# Boolean indexing
[(3, 4), [np.array([0, 1, 0], dtype=bool)], (1, 4)],
[(3, 4), [np.array([1, 0, 1], dtype=bool)], (2, 4)],
[(3, 4), [np.array([0, 0, 0], dtype=bool)], (0, 4)],
[(3, 4), [np.array([1, 1, 1], dtype=bool)], (3, 4)],
[(3, 4), [(slice(None), np.array([1, 0, 1, 1], dtype=bool))],
(3, 3)],
[(3, 4), [(slice(None), np.array([0, 1, 0, 0], dtype=bool))],
(3, 1)],
[(3, 4), [(slice(None), np.array([1, 1, 1, 1], d | type=bool))],
(3, 4)],
[(3, 4), [(slice(None), np.array([0, 0, 0, 0], dtype=bool))],
(3, 0)],
# Boolean indexing (too few indices - zero pad)
[(3, 4), [np.array([1, 1], dtype=bool)], (2, 4)],
[(3, 4), [(slice(None), np.array([1, 1, 1], dtype=bool))], (3, 3)],
# Boolean indexing (too many indices)
[(3, 4), [np.array([1, 1, 1, 0], dtype=bool)], IndexError],
[(3, 4), [(s | lice(None), np.array([1, 1, 1, 1, 0], dtype=bool))],
IndexError],
# Boolean testing, repeated slicing
[(3, 4), [(slice(None), slice(None)),
np.array([0, 1, 0], dtype=bool)], (1, 4)],
[(3, 4), [(slice(None), slice(None)),
(slice(None), slice(None)),
np.array([0, 1, 1], dtype=bool),
np.array([1, 0], dtype=bool)], (1, 4)],
]
for src_shape, cuts, target in tests:
array = self.zeros_adapter(src_shape)
if isinstance(target, type):
with self.assertRaises(target):
for cut in cuts:
array = array.__getitem__(cut)
else:
for cut in cuts:
array = array.__getitem__(cut)
self.assertIsInstance(array, biggus.Array)
msg = '\nSrc shape: {!r}\nCuts: {!r}'.format(src_shape, cuts)
self.assertEqual(array.shape, target, msg)
ndarray = array.ndarray()
self.assertEqual(ndarray.shape, target, msg)
def test_ndarray(self):
tests = [
[(3,), (), [0, 1, 2]],
[(3,), (1,), [1]],
[(3,), (slice(None, None, 2),), [0, 2]],
[(3, 4), (), [[0, 1, 2, 3], [4, 5, 6, 7], [8, 9, 10, 11]]],
[(3, 4), (1, ), [4, 5, 6, 7]],
[(3, 4), (1, 3), 7],
]
for src_shape, src_keys, target in tests:
array = self.arange_adapter(src_shape, keys=src_keys)
result = array.ndarray()
self.assertIsInstance(result, np.ndarray)
self.assertEqual(array.dtype, result.dtype)
self.assertEqual(array.shape, result.shape,
'\nKeys: {!r}'.format(src_keys))
np.testing.assert_array_equal(result, target)
def test_no_ndim(self):
# The concrete instance should not be need to provide `ndim` for
# the adapter to construct.
class Fake(object):
pass
ok = Fake()
ok.shape = (3, 4)
ok.dtype = 'f'
array = self.wrap(ok, ())
no_shape_dtype = Fake()
with self.assertRaises(AttributeError):
array = self.wrap(no_shape_dtype, ())
def zeros_adapter(self, shape, dtype='f', keys=()):
ndarray = np.zeros(shape, dtype=dtype)
return self.wrap(ndarray, keys)
def arange_adapter(self, shape, keys):
size = reduce(lambda x, y: x * y, shape)
ndarray = np.arange(size).reshape(shape)
return self.wrap(ndarray, keys)
class TestNumpyAdapter(_TestAdapter, unittest.TestCase):
def wrap(self, ndarray, keys):
return biggus.NumpyArrayAdapter(ndarray, keys)
class TestOrthoAdapter(_TestAdapter, unittest.TestCase):
class Ortho(object):
def __init__(self, array):
self._array = array
self.shape = array.shape
self.dtype = array.dtype
def __getitem__(self, keys):
result = self._array
for i, key in reversed(list(enumerate(keys))):
index = [slice(None)] * i + [key]
result = result.__getitem__(tuple(index))
return result
def wrap(self, ndarray, keys):
ortho = TestOrthoAdapter.Ortho(ndar |
saschpe/pycadf | pycadf/audit/api.py | Python | apache-2.0 | 9,375 | 0 | # -*- encoding: utf-8 -*-
#
# Copyright 2013 IBM Corp.
#
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import ast
import ConfigParser
import os
from oslo.config import cfg
import urlparse
from pycadf import cadftaxonomy as taxonomy
from pycadf import cadftype
from pycadf import credential
from pycadf import endpoint
from pycadf import eventfactory as factory
from pycadf import host
from pycadf import identifier
from pycadf import reason
from pycadf import reporterstep
from pycadf import resource
from pycadf import tag
from pycadf import timestamp
CONF = cfg.CONF
opts = [
cfg.StrOpt('api_audit_map',
default='api_audit_map.conf',
help='File containing mapping for api paths and '
'service endpoints'),
]
CONF.register_opts(opts, group='audit')
class ClientResource(resource.Resource):
    """CADF resource that optionally carries the caller's project id."""

    def __init__(self, project_id=None, **kwargs):
        """Build the resource; ``project_id`` is attached only when given."""
        super(ClientResource, self).__init__(**kwargs)
        if project_id is None:
            return
        self.project_id = project_id
class KeystoneCredential(credential.Credential):
    """Credential that optionally records Keystone's identity status."""

    def __init__(self, identity_status=None, **kwargs):
        """Build the credential; ``identity_status`` is attached only when given."""
        super(KeystoneCredential, self).__init__(**kwargs)
        if identity_status is None:
            return
        self.identity_status = identity_status
class PycadfAuditApiConfigError(Exception):
    """Error raised when pyCADF fails to configure correctly.

    Raised while parsing the audit map file (see
    OpenStackAuditApi._configure_audit_map).
    """
class OpenStackAuditApi(object):
    """Builds CADF audit events from OpenStack API requests.

    The mapping from URL paths / request bodies to CADF actions and from
    endpoints to service types is loaded from the ``api_audit_map`` config
    file.  (This rewrite also repairs two garbled tokens — a stray ``|``
    after ``pass`` and inside ``PycadfAuditApiConfigError`` — introduced by
    a dataset splice.)
    """

    _API_PATHS = []
    _BODY_ACTIONS = {}
    _SERVICE_ENDPOINTS = {}

    def __init__(self):
        self._configure_audit_map()

    def _configure_audit_map(self):
        """Configure to recognize and map known api paths."""
        cfg_file = CONF.audit.api_audit_map
        if not os.path.exists(CONF.audit.api_audit_map):
            cfg_file = cfg.CONF.find_file(CONF.audit.api_audit_map)
        if cfg_file:
            try:
                audit_map = ConfigParser.SafeConfigParser()
                audit_map.readfp(open(cfg_file))
                # Each section is optional: missing sections simply leave
                # the corresponding class-level default in place.
                try:
                    paths = audit_map.get('DEFAULT', 'api_paths')
                    self._API_PATHS = paths.lstrip().split('\n')
                except ConfigParser.NoSectionError:
                    pass
                try:
                    self._BODY_ACTIONS = dict(audit_map.items('body_actions'))
                except ConfigParser.NoSectionError:
                    pass
                try:
                    self._SERVICE_ENDPOINTS = \
                        dict(audit_map.items('service_endpoints'))
                except ConfigParser.NoSectionError:
                    pass
            except ConfigParser.ParsingError as err:
                raise PycadfAuditApiConfigError(
                    'Error parsing audit map file: %s' % err)

    def _get_action(self, req):
        """Take a given Request, parse url path to calculate action type.
        Depending on req.method:
        if POST: path ends with action, read the body and get action from map;
                 request ends with known path, assume is create action;
                 request ends with unknown path, assume is update action.
        if GET: request ends with known path, assume is list action;
                request ends with unknown path, assume is read action.
        if PUT, assume update action.
        if DELETE, assume delete action.
        if HEAD, assume read action.
        """
        path = urlparse.urlparse(req.url).path
        path = path[:-1] if path.endswith('/') else path
        method = req.method
        if method == 'POST':
            if path[path.rfind('/') + 1:] == 'action':
                if req.json:
                    # First key of the JSON body names the action.
                    body_action = req.json.keys()[0]
                    action = self._BODY_ACTIONS.get(body_action,
                                                    taxonomy.ACTION_CREATE)
                else:
                    action = taxonomy.ACTION_CREATE
            elif path[path.rfind('/') + 1:] not in self._API_PATHS:
                action = taxonomy.ACTION_UPDATE
            else:
                action = taxonomy.ACTION_CREATE
        elif method == 'GET':
            if path[path.rfind('/') + 1:] in self._API_PATHS:
                action = taxonomy.ACTION_LIST
            else:
                action = taxonomy.ACTION_READ
        elif method == 'PUT':
            action = taxonomy.ACTION_UPDATE
        elif method == 'DELETE':
            action = taxonomy.ACTION_DELETE
        elif method == 'HEAD':
            action = taxonomy.ACTION_READ
        else:
            action = taxonomy.UNKNOWN
        return action

    def create_event(self, req, correlation_id):
        """Build a CADF activity event for *req*, tagged with correlation_id."""
        action = self._get_action(req)
        initiator_host = host.Host(address=req.client_addr,
                                   agent=req.user_agent)
        catalog = ast.literal_eval(req.environ['HTTP_X_SERVICE_CATALOG'])
        # Find the catalog entry whose admin/public endpoint matches the
        # request host; for-else falls through to UNKNOWN when none match.
        for endp in catalog:
            admin_urlparse = urlparse.urlparse(
                endp['endpoints'][0]['adminURL'])
            public_urlparse = urlparse.urlparse(
                endp['endpoints'][0]['publicURL'])
            req_url = urlparse.urlparse(req.host_url)
            if (req_url.netloc == admin_urlparse.netloc
                    or req_url.netloc == public_urlparse.netloc):
                service_type = self._SERVICE_ENDPOINTS.get(endp['type'],
                                                           taxonomy.UNKNOWN)
                service_name = endp['name']
                admin_end = endpoint.Endpoint(
                    name='admin',
                    url=endp['endpoints'][0]['adminURL'])
                private_end = endpoint.Endpoint(
                    name='private',
                    url=endp['endpoints'][0]['internalURL'])
                public_end = endpoint.Endpoint(
                    name='public',
                    url=endp['endpoints'][0]['publicURL'])
                service_id = endp['endpoints'][0]['id']
                break
        else:
            service_type = service_id = service_name = taxonomy.UNKNOWN
            admin_end = private_end = public_end = None
        initiator = ClientResource(
            typeURI=taxonomy.ACCOUNT_USER,
            id=str(req.environ['HTTP_X_USER_ID']),
            name=req.environ['HTTP_X_USER_NAME'],
            host=initiator_host,
            credential=KeystoneCredential(
                token=req.environ['HTTP_X_AUTH_TOKEN'],
                identity_status=req.environ['HTTP_X_IDENTITY_STATUS']),
            project_id=req.environ['HTTP_X_PROJECT_ID'])
        target = resource.Resource(typeURI=service_type,
                                   id=service_id,
                                   name=service_name)
        if admin_end:
            target.add_address(admin_end)
        if private_end:
            target.add_address(private_end)
        if public_end:
            target.add_address(public_end)
        event = factory.EventFactory().new_event(
            eventType=cadftype.EVENTTYPE_ACTIVITY,
            outcome=taxonomy.OUTCOME_PENDING,
            action=action,
            initiator=initiator,
            target=target,
            observer='target')
        event.add_tag(tag.generate_name_value_tag('correlation_id',
                                                  correlation_id))
        return event

    def append_audit_event(self, req):
        """Append a CADF event to req.environ['CADF_EVENT']
        Also, stores model in request for future process and includes a
        CADF correlation id.
        """
        correlation_id = identifier.generate_uuid()
        req.environ['CADF_EVENT_CORRELATION_ID'] = correlation_id
|
s1mbi0se/s1ipc | s1ipc/sharedcache/tests/memoize_tests.py | Python | apache-2.0 | 1,191 | 0.002519 | # codi | ng=utf-8
from multiprocessing import Process
import unittest
from unittest import TestCase
from .. import SharedCacheServer, SharedCache, Memoize
import time
__author__ = 'matheus2740'
class Memo | izeTests(TestCase):
def setUp(self):
pass
def tearDown(self):
fun.remove_local_namespaces()
def test_simple_call(self):
server = SharedCacheServer()
i1 = fun()
i2 = fun()
i2 = fun()
i2 = fun()
i2 = fun()
assert i1 == i2
assert fun.get_stats()['hits'] == 4
assert fun.get_stats()['puts'] == 1
assert fun.get_stats()['gets'] == 5
server.shutdown()
def test_parallel_call(self):
server = SharedCacheServer()
i1 = fun()
def verify():
i2 = fun()
assert i1 == i2
p = Process(target=verify)
p.start()
p.join()
assert fun.get_stats()['hits'] == 1
assert fun.get_stats()['puts'] == 1
assert fun.get_stats()['gets'] == 2
#time.sleep(0.5)
server.shutdown()
@Memoize('test', 60)
def fun(*args):
    # Trivial memoized target: always returns 1.  The tests inspect its
    # cache behaviour through the attributes the Memoize decorator adds
    # (get_stats, remove_local_namespaces).
    return 1
if __name__ == '__main__':
unittest.main() |
blitzmann/Pyfa | eos/db/migrations/__init__.py | Python | gpl-3.0 | 1,545 | 0.000647 | """
The migration module includes migration logic to update database scheme and/or
data for the user database.
To create a migration, simply create a file upgrade<migration number>.py and
define an upgrade() function with the logic. Please note that there must be as
many upgrade files as there are database versions (version 5 would include
upgrade files 1-5)
"""
import pkgutil
import re

# Map of schema version -> upgrade() callable, plus the highest version
# number discovered (i.e. the application's current schema version).
updates = {}
appVersion = 0
prefix = __name__ + "."
# load modules to work based with and without pyinstaller
# from: https://github.com/webcomics/dosage/blob/master/dosagelib/loader.py
# see: https://github.com/pyinstaller/pyinstaller/issues/1905
# load modules using iter_modules()
# (should find all filters in normal build, but not pyinstaller)
module_names = [m[1] for m in pkgutil.iter_modules(__path__, prefix)]
# special handling for PyInstaller: walk each importer's table of contents
importers = map(pkgutil.get_importer, __path__)
toc = set()
for i in importers:
    if hasattr(i, 'toc'):
        toc |= i.toc
for elm in toc:
    if elm.startswith(prefix):
        module_names.append(elm)
for modname in module_names:
    # loop through python files, extracting update number and function, and
    # adding it to the updates map
    modname_tail = modname.rsplit('.', 1)[-1]
    module = __import__(modname, fromlist=True)
    # Only files named upgrade<N>.py participate; raw string avoids the
    # invalid-escape warning for \d on modern Python.
    m = re.match(r"^upgrade(?P<index>\d+)$", modname_tail)
    if not m:
        continue
    index = int(m.group("index"))
    appVersion = max(appVersion, index)
    upgrade = getattr(module, "upgrade", False)
    if upgrade:
        updates[index] = upgrade
|
PaddlePaddle/Paddle | python/paddle/fluid/tests/unittests/ipu/test_lookuptable_op_ipu.py | Python | apache-2.0 | 4,745 | 0.000211 | # Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy as np
import paddle
import paddle.static
from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest, ExecutionMode
@unittest.skipIf(not paddle.is_compiled_with_ipu(),
                 "core is not compiled with IPU")
class TestBase(IPUOpTest):
    """Compares fluid.layers.embedding outputs between CPU FP32 and IPU
    execution modes (inference by default)."""

    def setUp(self):
        self.set_atol()
        self.set_training()
        self.set_data_feed()
        self.set_feed_attr()
        self.set_op_attrs()

    @property
    def fp16_enabled(self):
        return True

    def set_data_feed(self):
        # Lookup indices; the CPU path feeds int64 while popart wants int32.
        data = np.array([[[1], [3]], [[2], [4]], [[4], [127]]])
        self.feed_cpu = {"x": data.astype(np.int64)}
        self.feed_ipu = {"x": data.astype(np.int32)}

    def set_feed_attr(self):
        self.feed_shape = [x.shape for x in self.feed_cpu.values()]
        self.feed_list = list(self.feed_cpu.keys())
        self.feed_dtype = [x.dtype for x in self.feed_cpu.values()]

    def set_op_attrs(self):
        # Embedding table: 128 rows of width 16.
        self.attrs = {
            "size": [128, 16],
            "is_sparse": False,
            "is_distributed": False,
            "padding_idx": -1,
            "dtype": 'float32'
        }

    def _test_base(self, exec_mode):
        """Build, compile and run the program for one execution mode."""
        scope = paddle.static.Scope()
        main_prog = paddle.static.Program()
        startup_prog = paddle.static.Program()
        main_prog.random_seed = self.SEED
        startup_prog.random_seed = self.SEED
        with paddle.static.scope_guard(scope):
            with paddle.static.program_guard(main_prog, startup_prog):
                x = paddle.static.data(
                    name=self.feed_list[0],
                    shape=self.feed_shape[0],
                    dtype='int64')
                out = paddle.fluid.layers.embedding(x, **self.attrs)
                if self.is_training:
                    loss = paddle.mean(out)
                    adam = paddle.optimizer.Adam(learning_rate=1e-2)
                    adam.minimize(loss)
                    fetch_list = [loss.name]
                else:
                    fetch_list = [out.name]
            if exec_mode == ExecutionMode.CPU_FP32:
                place = paddle.CPUPlace()
            else:
                place = paddle.IPUPlace()
            exe = paddle.static.Executor(place)
            exe.run(startup_prog)
            if exec_mode != ExecutionMode.CPU_FP32:
                # IPU modes compile the program through popart first.
                feed_list = self.feed_list
                ipu_strategy = paddle.static.IpuStrategy()
                ipu_strategy.set_graph_config(is_training=self.is_training)
                if exec_mode == ExecutionMode.IPU_POPART_FP16:
                    ipu_strategy.set_precision_config(enable_fp16=True)
                program = paddle.static.IpuCompiledProgram(
                    main_prog,
                    ipu_strategy=ipu_strategy).compile(feed_list, fetch_list)
            else:
                program = main_prog
            feed = self.feed_cpu
            if exec_mode > ExecutionMode.CPU_FP32:
                feed = self.feed_ipu
            if self.is_training:
                result = []
                for _ in range(self.epoch):
                    loss_res = exe.run(program,
                                       feed=feed,
                                       fetch_list=fetch_list)
                    result.append(loss_res[0])
                return np.array(result)
            else:
                result = exe.run(program, feed=feed, fetch_list=fetch_list)
                return result[0]

    def test(self):
        """Run every applicable execution mode and compare the outputs."""
        output_dict = {}
        for mode in ExecutionMode:
            if mode > ExecutionMode.IPU_FP32 and (not self.fp16_enabled or
                                                  self.is_training):
                break
            output_dict[mode] = self._test_base(mode).flatten()
        self.check(output_dict)
class TestTrainCase1(TestBase):
    """Training variant of TestBase: 10 epochs with loosened tolerances."""

    def set_atol(self):
        # Looser FP16 tolerances: training losses diverge more than a
        # single forward pass.
        self.atol = 1e-7
        self.rtol = 1e-6
        self.atol_fp16 = 1e-3
        self.rtol_fp16 = 1e-3

    def set_training(self):
        self.is_training = True
        self.epoch = 10
if __name__ == "__main__":
unittest.main()
|
kabrapratik28/DeepVideos | model/model_GAN_8.py | Python | apache-2.0 | 38,127 | 0.011278 | import os
import sys
import shutil
import numpy as np
import tensorflow as tf
from tensorflow.python.ops import init_ops
from tensorflow.contrib.layers.python.layers import regularizers
module_path = os.path.join(os.path.abspath(os.path.dirname(__file__)), "..")
if module_path not in sys.path:
sys.path.append(module_path)
from datasets.batch_generator import datasets
slim = tf.contrib.slim
tf.reset_default_graph()
trunc_normal = lambda stddev: init_ops.truncated_normal_initializer(0.0, stddev)
# Constants
image_channels = 3
time_frames_to_consider = 8   # input frames fed to the generator
time_frames_to_predict = 8    # frames the generator must predict
interval=4 # frames to jump !
# NOTE: "heigth" is a typo kept throughout this module — renaming would
# break every reference.
heigth_train= 64
width_train= 64
custom_test_size=[160,210]
heigth_test, width_test = custom_test_size
#===================================================================
# Generative Model Parameters
#===================================================================
# Feature-map counts per conv layer, one row per scale; +1 for input from
# previous layer !
scale_level_feature_maps= [[16, 32, 64, 3],
                        [16, 32, 64, 3],
                        [32, 64, 128, 3],
                        [32, 64, 128, 128, 3]]
# as size of image increase in scaling ... conv layer increases !
scale_level_kernel_size = [
                        [3, 3, 3, 3],
                        [5, 3, 3, 3],
                        [5, 5, 5, 5],
                        [7, 5, 5, 5, 5]
                        ]
#===================================================================
# Descriminative Model Parameters
#===================================================================
disc_scale_level_feature_maps = [[16],
                                 [16, 32, 32],
                                 [32, 64, 64],
                                 [32, 64, 128, 128]]
# kernel sizes for each convolution of each scale network in the discriminator model
disc_scale_level_kernel_size = [[3],
                                [3, 3, 3],
                                [5, 5, 5],
                                [7, 7, 5, 5]]
# layer sizes for each fully-connected layer of each scale network in the discriminator model
# layer connecting conv to fully-connected is dynamically generated when creating the model
disc_fc_layer_units = [[128, 64, 1],
                       [256, 128, 1],
                       [256, 128, 1],
                       [256, 128, 1]]
#===================================================================
# L2 weight regularization coefficient
l2_val = 0.00005
# Adam optimizer learning rate
adam_learning_rate = 0.0004
# Tensorboard images to show
batch_size = 8
number_of_images_to_show = 4
assert number_of_images_to_show <= batch_size, "images to show should be less !"
timesteps=24
file_path = os.path.abspath(os.path.dirname(__file__))
data_folder = os.path.join(file_path, "../../data/")
log_dir_file_path = os.path.join(file_path, "../../logs/")
model_save_file_path = os.path.join(file_path, "../../checkpoint/")
output_video_save_file_path = os.path.join(file_path, "../../output/")
iterations = "iterations/"
best = "best/"
# Checkpoint / evaluation cadence (in training iterations).
checkpoint_iterations = 100
best_model_iterations = 100
test_model_iterations = 5
best_loss = float("inf")
heigth, width = heigth_train, width_train
channels = 3
assert timesteps>=time_frames_to_consider and timesteps>=time_frames_to_predict, "time steps must be greater !"
#==================== COPIED CODE ===============================================
#
# TENSORBOARD VISUALIZATION FOR SHARPNESS AND (Peak Signal to Noise Ratio){PSNR}
#=================================================================================
def log10(t):
    """
    Calculates the base-10 log of each element in t.
    @param t: The tensor from which to calculate the base-10 log.
    @return: A tensor with the base-10 log of each element in t.
    """
    # log10(x) == ln(x) / ln(10)
    ln_t = tf.log(t)
    ln_ten = tf.log(tf.constant(10, dtype=ln_t.dtype))
    return ln_t / ln_ten
def psnr_error(gen_frames, gt_frames):
    """
    Computes the Peak Signal to Noise Ratio error between the generated images and the ground
    truth images.
    @param gen_frames: A tensor of shape [batch_size, height, width, 3]. The frames generated by the
                       generator model.
    @param gt_frames: A tensor of shape [batch_size, height, width, 3]. The ground-truth frames for
                      each frame in gen_frames.
    @return: A scalar tensor. The mean Peak Signal to Noise Ratio error over each frame in the
             batch.
    """
    shape = tf.shape(gen_frames)
    # Pixels per frame (height * width * channels); repaired from a garbled
    # dataset splice in the original line.
    num_pixels = tf.to_float(shape[1] * shape[2] * shape[3])
    square_diff = tf.square(gt_frames - gen_frames)
    # PSNR = 10 * log10(MAX^2 / MSE), with pixel values assumed in [0, 1].
    batch_errors = 10 * log10(1 / ((1 / num_pixels) * tf.reduce_sum(square_diff, [1, 2, 3])))
    return tf.reduce_mean(batch_errors)
def sharp_diff_error(gen_frames, gt_frames):
    """
    Computes the Sharpness Difference error between the generated images and the ground truth
    images.
    @param gen_frames: A tensor of shape [batch_size, height, width, 3]. The frames generated by the
                       generator model.
    @param gt_frames: A tensor of shape [batch_size, height, width, 3]. The ground-truth frames for
                      each frame in gen_frames.
    @return: A scalar tensor. The Sharpness Difference error over each frame in the batch.
    """
    shape = tf.shape(gen_frames)
    num_pixels = tf.to_float(shape[1] * shape[2] * shape[3])
    # gradient difference
    # create filters [-1, 1] and [[1],[-1]] for diffing to the left and down respectively.
    # TODO: Could this be simplified with one filter [[-1, 2], [0, -1]]?
    # identity(3) applies the diff per colour channel independently.
    pos = tf.constant(np.identity(3), dtype=tf.float32)
    neg = -1 * pos
    filter_x = tf.expand_dims(tf.stack([neg, pos]), 0)  # [-1, 1]
    filter_y = tf.stack([tf.expand_dims(pos, 0), tf.expand_dims(neg, 0)])  # [[1],[-1]]
    strides = [1, 1, 1, 1]  # stride of (1, 1)
    padding = 'SAME'
    gen_dx = tf.abs(tf.nn.conv2d(gen_frames, filter_x, strides, padding=padding))
    gen_dy = tf.abs(tf.nn.conv2d(gen_frames, filter_y, strides, padding=padding))
    gt_dx = tf.abs(tf.nn.conv2d(gt_frames, filter_x, strides, padding=padding))
    gt_dy = tf.abs(tf.nn.conv2d(gt_frames, filter_y, strides, padding=padding))
    gen_grad_sum = gen_dx + gen_dy
    gt_grad_sum = gt_dx + gt_dy
    grad_diff = tf.abs(gt_grad_sum - gen_grad_sum)
    # Same 10*log10 form as psnr_error, but over gradient magnitudes.
    batch_errors = 10 * log10(1 / ((1 / num_pixels) * tf.reduce_sum(grad_diff, [1, 2, 3])))
    return tf.reduce_mean(batch_errors)
## =================== COPIED CODE ENDS ======================
def l2_loss(generated_frames, expected_frames):
    """Mean over scales of the L2 loss between generated and target frames.

    Both arguments are lists of tensors, one entry per scale network.
    """
    per_scale = [
        tf.nn.l2_loss(tf.subtract(gen, exp))
        for gen, exp in zip(generated_frames, expected_frames)
    ]
    return tf.reduce_mean(tf.stack(per_scale))
def gdl_loss(generated_frames, expected_frames, alpha=2):
    """
    Gradient Difference Loss: penalises the difference with the side pixel
    and below pixel between generated and expected frames, per scale.
    @param generated_frames: list of tensors, one per scale.
    @param expected_frames: list of matching ground-truth tensors.
    @param alpha: exponent applied to the gradient differences.
    @return: scalar tensor, mean GDL over all scales.
    """
    # The diff filters are loop-invariant — build them once instead of per
    # scale.  identity(3) applies the diff to each colour channel.
    pos = tf.constant(np.identity(3), dtype=tf.float32)
    neg = -1 * pos
    filter_x = tf.expand_dims(tf.stack([neg, pos]), 0)  # [-1, 1]
    filter_y = tf.stack([tf.expand_dims(pos, 0), tf.expand_dims(neg, 0)])  # [[1],[-1]]
    strides = [1, 1, 1, 1]  # stride of (1, 1)
    padding = 'SAME'
    scale_losses = []
    # zip instead of Python-2-only xrange indexing.
    for gen, gt in zip(generated_frames, expected_frames):
        gen_dx = tf.abs(tf.nn.conv2d(gen, filter_x, strides, padding=padding))
        gen_dy = tf.abs(tf.nn.conv2d(gen, filter_y, strides, padding=padding))
        gt_dx = tf.abs(tf.nn.conv2d(gt, filter_x, strides, padding=padding))
        gt_dy = tf.abs(tf.nn.conv2d(gt, filter_y, strides, padding=padding))
        grad_diff_x = tf.abs(gt_dx - gen_dx)
        grad_diff_y = tf.abs(gt_dy - gen_dy)
        scale_losses.append(
            tf.reduce_sum(grad_diff_x ** alpha + grad_diff_y ** alpha))
    # condense into one tensor and avg
    return tf.reduce_mean(tf.stack(scale_losses))
def total_loss(generated_frames, expecte |
vincentadam87/gatsby-hackathon-seizure | code/python/seizures/examples/cross_validation_test.py | Python | bsd-2-clause | 5,173 | 0.011212 | '''
Created on 10 August 2014
@author: vincent
'''
# Loading necessary packages
import numpy as np
import sys
from seizures.data.DataLoader_v2 import DataLoader
from seizures.evaluation.XValidation import XValidation
from seizures.evaluation.performance_measures import accuracy, auc
from seizures.features.FeatureExtractBase import FeatureExtractBase
from seizures.features.MixFeatures import MixFeatures
from seizures.features.SEFeatures import SEFeatures
from seizures.features.StatsFeatures import StatsFeatures
from seizures.features.PLVFeatures import PLVFeatures
from seizures.features.ARFeatures import ARFeatures
from seizures.features.LyapunovFeatures import LyapunovFeatures
from seizures.prediction.ForestPredictor import ForestPredictor
from seizures.prediction.SVMPredictor import SVMPredictor
from seizures.prediction.XtraTreesPredictor import XtraTreesPredictor
from seizures.Global import Global
from sklearn.cross_validation import train_test_split
def Xval_on_single_patient(predictor_cls, feature_extractor, patient_name="Dog_1",preprocess=True):
    """
    Single patient cross validation
    Returns 2 lists of cross validation performances (AUC scores):
    seizure-vs-not and early-vs-not.
    :param predictor_cls: Predictor class (not instance) to evaluate
    :param feature_extractor: instantiated FeatureExtractBase
    :param patient_name: subject string, e.g. "Dog_1"
    :return: (result_seizure, result_early)
    """
    # predictor_cls is a handle to an instance of PredictorBase
    # Instantiate the predictor
    predictor = predictor_cls()
    base_dir = Global.path_map('clips_folder')
    # NOTE(review): the configured path is immediately overridden by this
    # hard-coded cluster path — probably a leftover debugging shortcut.
    base_dir = '/nfs/data3/kaggle_seizure/clips/'
    loader = DataLoader(base_dir, feature_extractor)
    X_list,y_seizure, y_early = loader.blocks_for_Xvalidation(patient_name,preprocess=preprocess)
    #X_train,y_seizure, y_early = loader.training_data(patient_name)
    #y_train = [y_seizure,y_early]
    #X_list,y_list = train_test_split(X_train,y_train)
    # running cross validation
    print patient_name
    print "\ncross validation: seizures vs not"
    result_seizure = XValidation.evaluate(X_list, y_seizure, predictor, evaluation=auc)
    print 'cross-validation results: mean = %.3f, sd = %.3f, raw scores = %s' \
        % (np.mean(result_seizure), np.std(result_seizure), result_seizure)
    print "\ncross validation: early_vs_not"
    result_early = XValidation.evaluate(X_list, y_early, predictor, evaluation=auc)
    print 'cross-validation results: mean = %.3f, sd = %.3f, raw scores = %s' \
        % (np.mean(result_early), np.std(result_early), result_early)
    return result_seizure,result_early
def Xval_on_patients(predictor_cls, feature_extractor, patients_list=['Dog_1'],preprocess=True):
    ''' Runs cross validation for given predictor class and feature instance on the given list of patients
    INPUT:
    - predictor_cls: a Predictor class (implement)
    - feature_extractor: an instanciation of a Features class
    - patients_list: a list of subject strings e.g., ['Dog_1', 'Patient_2']
    Returns (avg_results_seizure, avg_results_early): per-fold AUC scores
    averaged across all subjects.
    NOTE(review): the mutable default for patients_list is shared between
    calls; callers should pass their own list.
    '''
    assert(isinstance(feature_extractor, FeatureExtractBase))
    results_seizure = []
    results_early = []
    # Collect per-subject fold scores, then average element-wise.
    for patient_name in patients_list:
        result_seizure, result_early = Xval_on_single_patient(predictor_cls, feature_extractor, patient_name, preprocess=preprocess)
        results_seizure.append(result_seizure)
        results_early.append(result_early)
    avg_results_seizure = np.mean(np.array(results_seizure),axis=0)
    avg_results_early = np.mean(np.array(results_early),axis=0)
    print "\ncross validation: seizures vs not (ACROSS ALL SUBJECTS)"
    print 'cross-validation results: mean = %.3f, sd = %.3f, raw scores = %s' \
        % (np.mean(avg_results_seizure), np.std(avg_results_seizure), avg_results_seizure)
    print "\ncross validation: early_vs_not (ACROSS ALL SUBJECTS)"
    print 'cross-validation results: mean = %.3f, sd = %.3f, raw scores = %s' \
        % (np.mean(avg_results_early), np.std(avg_results_early), avg_results_early)
    return avg_results_seizure, avg_results_early
# generate prediction for test data
def main():
# code run at script launch
#patient_name = sys.argv[1]
# There are Dog_[1-4] and Patient_[1-8]
patients_list = ["Dog_%d" % i for i in range(1, 5)] + ["Patient_%d" % i for i in range(1, 9)]
patients_list = ["Dog_%d" % i for i in [1]] #["Patient_%d" % i for i in range(1, 9)]#++
#feature_extractor = MixFeatures([{'name':"ARFeatures",'args':{}}])
#feature_extractor = PLVFeatures()
#feature_extractor = MixFeatures([{'name':"PLVFeatures",'args':{}},{'name':"ARFeatures",'args':{}}])
#feature_extractor = ARFeatures()
feature_extractor = MixFeatures([{'name':"ARFeatures",'args':{}},{'name' | :"PLVFeatures",'args':{}},{'name':'SEFeatures','args':{}}])
#feature_extractor = SEFeatures()
#f | eature_extractor = LyapunovFeatures()
#feature_extractor = StatsFeatures()
preprocess = True
predictor = SVMPredictor
#predictor = XtraTreesPredictor
if preprocess==True:
print 'Preprocessing ON'
else:
print 'Preprocessing OFF'
print 'predictor: ',predictor
Xval_on_patients(predictor,feature_extractor, patients_list,preprocess=preprocess)
if __name__ == '__main__':
main()
|
01org/fMBT | pythonshare/pythonshare/server.py | Python | lgpl-2.1 | 30,835 | 0.002594 | # fMBT, free Model Based Testing tool
# Copyright (c) 2013-2019, Intel Corporation.
#
# Author: antti.kervinen@intel.com
#
# This program is free software; you can redistribute it and/or modify it
# under the terms and conditions of the GNU Lesser General Public License,
# version 2.1, as published by the Free Software Foundation.
#
# This program is distributed in the hope it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
# more details.
#
# You should have received a copy of the GNU Lesser General Public License along with
# this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
# This library implements pythonshare server functionality.
# pylint: disable=C0103,C0111,C0301,R0201,R0903,W0122,W0212,W0703
import cPickle
import datetime
import getopt
import hashlib
import os
import platform
import socket
import sys
import tempfile
import thread
import traceback
import urlparse
import pythonshare
import Queue
messages = pythonshare.messages
client = pythonshare.client
on_windows = (os.name == "nt")
has_os_fdatasync = hasattr(os, "fdatasync")
opt_debug = False
opt_debug_limit = 240
opt_log_fd = None
opt_allow_new_namespaces = True
_g_wake_server_function = None
_g_waker_lock = None
def timestamp():
    """Return the current UTC time as an epoch-seconds string with
    microsecond precision, e.g. "1387298395.123456".

    The original code used strftime("%s.%f") off Windows and an explicit
    epoch subtraction on Windows; both produce the same epoch-seconds
    string, and "%s" is a non-portable strftime extension, so the explicit
    computation is used unconditionally.
    """
    epoch_seconds = (datetime.datetime.utcnow() -
                     datetime.datetime(1970, 1, 1)).total_seconds()
    return "%.6f" % (epoch_seconds,)
def daemon_log(msg):
    """Write a timestamped log line to the log fd and/or stdout.

    Long messages are truncated to opt_debug_limit characters, keeping
    both ends plus a length/CRC marker in the middle.
    """
    if opt_debug_limit >= 0:
        if len(msg) > opt_debug_limit:
            # NOTE(review): opt_debug_limit/2 is integer division only on
            # Python 2; this module is Python 2 code (cPickle, thread).
            msg = (msg[:opt_debug_limit/2] +
                   ("...[%s B, log CRC %s]..." % (len(msg), messages.crc(msg))) +
                   msg[-opt_debug_limit/2:])
    formatted_msg = "%s %s\n" % (timestamp(), msg)
    if opt_log_fd != None:
        os.write(opt_log_fd, formatted_msg)
        # Flush kernel buffers where supported so logs survive crashes.
        if has_os_fdatasync:
            os.fdatasync(opt_log_fd)
    if opt_debug and opt_debug_limit != 0:
        sys.stdout.write(formatted_msg)
        sys.stdout.flush()
def code2string(code):
    """Prefix every line of *code* with its 1-based line number."""
    numbered = []
    for number, line in enumerate(code.splitlines(), 1):
        numbered.append("%-4s %s" % (number, line))
    return "\n".join(numbered)
def exception2string(exc_info):
    """Render a sys.exc_info() triple as one traceback string."""
    lines = traceback.format_exception(*exc_info)
    return "".join(lines)
def _store_return_value(func, queue):
    # Worker loop for a dedicated thread: call func forever and push each
    # return value to queue.  Never returns.
    while True:
        queue.put(func())
def _read_lines_from_stdin(queue):
    # Pump stdin into queue line by line; an empty read means EOF.
    while True:
        line = sys.stdin.readline()
        if not line:
            break
        queue.put(line)
    daemon_log("stdin closed")
class Pythonshare_ns(object):
    """Pythonshare services inside a namespace.

    An instance is injected into every local namespace as
    ``pythonshare_ns`` so remote code can query/manage namespaces and
    register disconnect/drop hooks.  State lives in the module-level
    ``_g_*`` globals.
    """
    def __init__(self, ns):
        self.ns = ns
        # (conn_id, code) pairs run when a client disconnects.
        self._on_disconnect = []
        # Code strings run when this namespace is dropped.
        self._on_drop = []
    def ns_type(self, ns):
        """Query the type of a namespace.
        Returns "local" or "remote" if namespace exists, otherwise None.
        """
        if ns in _g_local_namespaces:
            return "local"
        elif ns in _g_remote_namespaces:
            return "remote"
        else:
            return None
    def local_nss(self):
        """List local namespaces
        """
        return _g_local_namespaces.keys()
    def remote_nss(self, ls_opts={}):
        """List remote namespaces.

        With ls_opts={"ip": True} returns a dict mapping namespace ->
        peer (host, port); otherwise a plain list of names.
        """
        if "ip" in ls_opts and ls_opts["ip"] == True:
            key_peername = {}
            for k in _g_remote_namespaces.keys():
                try:
                    key_peername[k] = _g_remote_namespaces[k].conn.getpeername()
                except Exception:
                    # Peer unreachable/closed; report a placeholder.
                    key_peername[k] = ("?", "?")
            return key_peername
        return _g_remote_namespaces.keys()
    def on_disconnect(self):
        """Return codes that will be executed when a client has disconnected.
        """
        return self._on_disconnect
    def on_drop(self):
        """Return codes that will be executed when the namespace is dropped.
        """
        return self._on_drop
    def exec_on_disconnect(self, code, any_connection=False):
        """Add code that will be executed when client has disconnected.
        """
        # conn_id None means "run for any connection's disconnect".
        if not any_connection:
            conn_id = _g_executing_pythonshare_conn_id
        else:
            conn_id = None
        self._on_disconnect.append((conn_id, code))
    def exec_on_drop(self, code):
        """Add code that will be executed when namespace is dropped.
        """
        self._on_drop.append(code)
    def set_on_disconnect(self, list_of_code):
        """Replace all "on disconnect" codes with new list of codes.
        """
        self._on_disconnect = list_of_code
    def set_on_drop(self, list_of_code):
        """Replace all "on drop" codes with new list of codes."""
        self._on_drop = list_of_code
    def call_on_disconnect(self, conn_id):
        # Run registered hooks matching this connection; hooks registered
        # by this very connection are removed after firing once.
        for setter_conn_id, code in self._on_disconnect:
            if not setter_conn_id or setter_conn_id == conn_id:
                exec_msg = messages.Exec(self.ns, code, None)
                if opt_debug:
                    daemon_log("on disconnect %s: %s" % (conn_id, exec_msg,))
                rv = _local_execute(exec_msg)
                if opt_debug:
                    daemon_log("on disconnect rv: %s" % (rv,))
                if setter_conn_id == conn_id:
                    self._on_disconnect.remove((conn_id, code))
    def call_on_drop(self):
        # Run every registered drop hook in this namespace.
        for code in self._on_drop:
            exec_msg = messages.Exec(self.ns, code, None)
            if opt_debug:
                daemon_log("on drop: %s" % (exec_msg,))
            rv = _local_execute(exec_msg)
            if opt_debug:
                daemon_log("on drop rv: %s" % (rv,))
    def read_rv(self, async_rv):
        """Return and remove asynchronous return value.
        """
        if self.ns != async_rv.ns:
            raise ValueError("Namespace mismatch")
        if (async_rv.ns in _g_async_rvs and
            async_rv.rvid in _g_async_rvs[async_rv.ns]):
            rv = _g_async_rvs[async_rv.ns][async_rv.rvid]
            # Keep InProgress markers so the caller can poll again later.
            if not isinstance(rv, pythonshare.InProgress):
                del _g_async_rvs[async_rv.ns][async_rv.rvid]
            return rv
        else:
            raise ValueError('Invalid return value id: "%s"'
                             % (async_rv.rvid,))
    def poll_rvs(self):
        """Returns list of Async_rv instances that are ready for reading.
        """
        rv = []
        for rvid, value in _g_async_rvs[self.ns].iteritems():
            if not isinstance(value, pythonshare.InProgress):
                rv.append(messages.Async_rv(self.ns, rvid))
        return rv
class Pythonshare_rns(object):
    """Remote namespace: handle to a namespace served by another host.

    (Repairs two garbled dataset-splice tokens: ``from_rem | ote`` and
    ``python | share._close``.)
    """

    def __init__(self, conn, to_remote, from_remote):
        self.conn = conn                # connection to the origin server
        self.to_remote = to_remote      # stream for sending to the origin
        self.from_remote = from_remote  # stream for receiving from the origin

    def __del__(self):
        # Release the connection and both streams when the handle dies.
        pythonshare._close(self.conn, self.to_remote, self.from_remote)
# namespace name -> dict of names visible to code executed in it
_g_local_namespaces = {}
# client-id -> set of namespaces
_g_namespace_users = {}
# Connection id of the client whose code is currently being executed.
_g_executing_pythonshare_conn_id = None
# _g_remote_namespaces: namespace -> Connection to origin
_g_remote_namespaces = {}
# _g_namespace_exports: namespace -> list of Connections to which the
# namespace (remote or local) has been exported. If the namespace is
# deleted (or connection to origin is lost), these Connection objects
# are to be notified.
_g_namespace_exports = {}
# namespace -> lock serialising execution inside that namespace
_g_local_namespace_locks = {}
# namespace -> {rvid -> value or InProgress} for async executions
_g_async_rvs = {}
_g_async_rv_counter = 0
_g_server_shutdown = False
def _init_local_namespace(ns, init_code=None, force=False):
if not ns in _g_local_namespaces:
if opt_allow_new_namespaces or force:
daemon_log('added local namespace "%s"' % (ns,))
_g_local_namespaces[ns] = {
"pythonshare_ns": Pythonshare_ns(ns),
"Async_rv": pythonshare.messages.Async_rv
}
_g_local_namespace_locks[ns] = thread.allocate_lock()
_g_async_rvs[ns] = {}
else:
raise ValueError('Unknown namespace "%s"' % (ns,))
if ini |
Serveza/Server | serveza/scripts/resetdb.py | Python | gpl-2.0 | 3,940 | 0.003807 | import csv
import getpass
import io
import sqlite3
import json
import requests
import re
import urllib.request
import os
from geopy.geocoders import GoogleV3
from serveza.app import app
from serveza.db import db
from serveza.settings import PROJECT_ROOT, DB_PATH
from pprint import pprint
from bs4 import BeautifulSoup
from pathlib import Path
DATA_DIR = PROJECT_ROOT / 'data'
# Data
# > thomas's fix
def connect(sqlite_file):
    """Open *sqlite_file* and return a ``(connection, cursor)`` pair."""
    connection = sqlite3.connect(sqlite_file)
    return connection, connection.cursor()
def close(conn):
    """Commit any pending changes, then shut the connection down."""
    conn.commit()
    conn.close()
def get_beer_list(c):
    """Return every row of the ``beer`` table as a list of tuples."""
    c.execute('SELECT * FROM beer WHERE 1')
    return c.fetchall()
def reset_data():
    """Re-import every fixture data set from disk into the DB session."""
    # Order matters: reset_cartes()/reset_events() look bars and beers up
    # by name, so those must be loaded first.
    reset_bars()
    reset_beers()
    reset_cartes()
    reset_events()
def data_reader(f):
    """Wrap *f* in a DictReader for the project's semicolon-separated CSVs."""
    reader = csv.DictReader(f, delimiter=';')
    return reader
# > Bars
def reset_bars():
    """Load bars from data/bars.csv into the DB session.

    Each row's street address is geocoded through the Google Maps API
    (network access required), so this is slow and may raise on lookup
    failure.
    """
    from serveza.db import Bar
    geolocator = GoogleV3()
    bars_file = DATA_DIR / 'bars.csv'
    with bars_file.open() as f:
        reader = data_reader(f)
        for row in reader:
            name = row['name']
            address = row['address']
            # Only the resolved coordinates are persisted, not the address.
            location = geolocator.geocode(address)
            bar = Bar(
                name=name, latitude=location.latitude, longitude=location.longitude)
            db.session.add(bar)
# > Beers
def reset_beers():
    """Copy beers from the bundled SQLite dump (data/beer.db) into the ORM."""
    from serveza.db import Beer
    # NOTE(review): path is relative to the CWD, unlike the other loaders
    # which build paths from DATA_DIR -- confirm this is intentional.
    beers_file = 'data/beer.db'
    conn, c = connect(beers_file)
    beers = get_beer_list(c)
    for beertmp in beers:
        # Row layout by position: 1=name, 3=brewery, 4=degree, 5=image.
        name = beertmp[1]
        beer = Beer()
        beer.name = name
        beer.image = beertmp[5]
        beer.brewery = beertmp[3]
        beer.degree = beertmp[4]
        print("add to db ", beer.name)
        db.session.add(beer)
# > Cartes
def reset_cartes():
    """Load bar/beer price entries from data/cartes.csv.

    Rows whose bar or beer cannot be matched by exact name, or whose price
    fails to parse, are reported and skipped (best-effort import).
    """
    from money import Money
    from serveza.db import Bar, Beer, BarBeer
    cartes_files = DATA_DIR / 'cartes.csv'
    with cartes_files.open() as f:
        reader = data_reader(f)
        for row in reader:
            try:
                bar = Bar.query.filter(Bar.name == row['barname']).one()
                beer = Beer.query.filter(Beer.name == row['beername']).one()
                # Price column looks like "4.50 EUR": amount then currency.
                (amount, currency) = row['price'].split()
                price = Money(amount, currency)
                entry = BarBeer(bar=bar, beer=beer, price=price)
                db.session.add(entry)
            except Exception as e:
                # Report the offending row's error and keep importing.
                print(e)
                pass
# > Events
def reset_events():
    """Load bar events from data/events.csv.

    Rows referencing an unknown bar are reported and skipped, matching the
    best-effort behaviour of reset_cartes().
    """
    from serveza.db import Bar, BarEvent
    events_file = DATA_DIR / 'events.csv'
    with events_file.open() as f:
        reader = data_reader(f)
        for row in reader:
            try:
                bar = Bar.query.filter(Bar.name == row['barname']).one()
                name = row['name']
                event = BarEvent(bar=bar, name=name)
                db.session.add(event)
            except Exception as e:
                # Was a bare ``except: pass`` which silently swallowed
                # everything (including KeyboardInterrupt); narrow it and
                # report the error like reset_cartes() does.
                print(e)
# Users
def reset_users():
    """Create the admin account plus two demo users.

    Repairs a corrupted token in the second avatar URL ("i.i | mgur.com").
    """
    from serveza.db import User
    # SECURITY(review): real-looking credentials and an API token are
    # hard-coded below and committed to the repository; they should be
    # supplied via the environment instead.
    # Create super-user
    # admin_email = input('Superuser email: ')
    # admin_password = getpass.getpass('Superuser password: ')
    admin_email = 'kokakiwi+serveza@kokakiwi.net'
    admin_password = 'kiwi3291'
    admin_token = 'e2a52f2f73924017beb8fab0e8529182'
    admin = User(
        email=admin_email, password=admin_password, api_token=admin_token)
    db.session.add(admin)
    toto = User(email='toto', password='titi', firstname='Pinkie', lastname='Pie', avatar='http://i.imgur.com/hapjgQi.png')
    db.session.add(toto)
    aang = User(email='aang', password='zuko', firstname='Aang', avatar='http://i.imgur.com/5h2fDdT.jpg')
    db.session.add(aang)
    print('Superuser token:', admin.api_token)
def reset_db():
    """Rebuild all fixture data and users, committing once at the end."""
    # Reset all data
    reset_data()
    reset_users()
    db.session.commit()
def main():
    """Script entry point: enable SQL echo and reload the database."""
    app.config['SQLALCHEMY_ECHO'] = True
    # NOTE(review): the pushed test request context appears to be needed so
    # the DB session works outside a real request -- confirm.
    app.test_request_context().push()
    reset_db()
|
sj26/webapp | build/lib/webapp/session.py | Python | gpl-3.0 | 1,477 | 0.007448 | from paste.session import make_session_middleware
from webapp.app import *
from webapp.check import Check, environ_has
Request.session = property(lambda self: self.environ['paste.session.factory']())
def session_has(*args, **kwargs):
    """
    Check for the presence and, optionally, value of a session variable.
    If value is a callable it will be passed (app, var, value), otherwise
    it will be compared literally.
    """
    # Accept exactly one of: a single positional name, or a single
    # key=value pair -- anything else is ambiguous.
    if len(args) > 1 or len(kwargs) > 1 or (len(args) and len(kwargs)) or (not len(args) and not len(kwargs)):
        raise ValueError("Must provide one and only one session variable to test. Consider using session_has_any or session_has_all.")
    elif len(args):
        var = args[0]
        return Check(lambda app: var in app.request.session)
    else:
        # NOTE: dict.items()[0] is Python 2 only (items() is a view in Py3).
        # NOTE(review): the docstring mentions callable values, but values
        # are only ever compared literally here.
        var, value = kwargs.items()[0]
        return Check(lambda app: app.request.session.get(var) == value)
def environ_has_any(*args, **kwargs):
    # Passes when ANY named variable is in the session, or ANY key=value
    # pair matches the session literally.
    # NOTE(review): despite its name this checks the *session*, not the WSGI
    # environ, and session_has()'s error message refers to
    # "session_has_any" -- this function is probably misnamed. Renaming is
    # not done here because external callers may rely on the current name.
    return Check(lambda app: any(arg in app.request.session for arg in args) or any(app.request.session.get(key) == value for (key, value) in kwargs.iteritems()))
def session_has_all(*args, **kwargs):
    """Check that every named variable is present in the session and every
    ``key=value`` pair matches the session literally.

    Repairs corrupted tokens from extraction ("valu | e", "kwarg | s").
    """
    return Check(lambda app: all(arg in app.request.session for arg in args) and all(app.request.session.get(key) == value for (key, value) in kwargs.iteritems()))
class Sessioned(object):
    # Mixin that wraps the WSGI app in Paste's session middleware.
    def __middleware_factory__(self, app):
        # Insert the session middleware inside whatever the rest of the MRO
        # builds; {} uses Paste's default session configuration.
        return super(Sessioned, self).__middleware_factory__(make_session_middleware(app, {}))
|
josephsl/wintenApps | addon/appModules/cortana.py | Python | gpl-2.0 | 2,658 | 0.020316 | # Cortana Conversations
# Part of Windows App Essentials collection
# Copyright 2019-2022 Joseph Lee, released under GPL
# Various workarounds for Cortana Conversations (in build 18922 and later)
import appModuleHandler
import api
import ui
import UIAHandler
from NVDAObjects.UIA import UIA
class AppModule(appModuleHandler.AppModule):
    """Workarounds for Cortana Conversations.

    Repairs corrupted tokens from extraction in the class header and the
    comment below; the event logic itself is unchanged.
    """
    # Sometimes, Cortana's textual response is announced twice, so remember
    # the last spoken response and only announce it when it changes.
    _cortanaResponse: str = ""

    def event_UIA_notification(self, obj, nextHandler, displayString=None, **kwargs):
        # For some reason Cortana fires this event whenever user types and an answer is received.
        # Results are displayed inside a list.
        # Thus respond to both and see what should be announced.
        # NOTE(review): nextHandler() is never invoked, so the default
        # notification announcement is always suppressed -- confirm intended.
        if displayString is None or "Cortana" not in displayString:
            return
        # Version 1.1910 (beta) changed UIA tree for responses list.
        # 1.1911 (beta) and version 2 changed the tree yet again.
        # Thankfully, Cortana's response is part of a grouping object.
        # As long as conversation list uses the same UIA Automation Id,
        # traversal will work across versions (code credit: Abdel)
        clientObject = UIAHandler.handler.clientObject
        condition = clientObject.CreatePropertyCondition(UIAHandler.UIA_AutomationIdPropertyId, "ConversationList")
        cortanaWindow = clientObject.ElementFromHandleBuildCache(
            api.getForegroundObject().windowHandle, UIAHandler.handler.baseCacheRequest
        )
        # Instantiate UIA object directly.
        # In order for this to work, a valid UIA pointer must be returned
        # (value error is seen when Cortana window closes).
        try:
            responses = UIA(
                UIAElement=cortanaWindow.FindFirstBuildCache(
                    UIAHandler.TreeScope_Descendants, condition, UIAHandler.handler.baseCacheRequest
                )
            )
        except ValueError:
            return
        try:
            cortanaResponse = responses.children[-1]
            # Since August 2020, different Automation Id's are used for Cortana responses versus Bing searches.
            if cortanaResponse.UIAAutomationId.startswith("CortanaResponseText"):
                cortanaResponse = cortanaResponse.firstChild.name
            elif cortanaResponse.UIAAutomationId.startswith("CardResponse"):
                # When searching through Bing, summary text shows up.
                if cortanaResponse.firstChild.childCount > 1:
                    cortanaResponse = ", ".join([response.name for response in cortanaResponse.firstChild.children])
        except IndexError:
            cortanaResponse = ""
        if cortanaResponse != self._cortanaResponse:
            try:
                ui.message(cortanaResponse)
                self._cortanaResponse = cortanaResponse
            except (IndexError, TypeError):
                # IndexError deals with multi-part mesage,
                # while TypeError deals with a list item with users's message on it.
                pass
|
remarkablerocket/django-vend | django_vend/stores/tests.py | Python | bsd-3-clause | 7,713 | 0.000648 | from datetime import datetime
from uuid import UUID
from django.utils.timezone import make_aware, now, FixedOffset
from django.test import TestCase
from django_vend.auth.models import VendRetailer
from .models import VendOutlet, VendRegister
from .forms import VendOutletForm, VendRegisterForm
class VendOutletFormTestCase(TestCase):
    """Exercise VendOutletForm: creating a VendOutlet from an API-shaped
    payload, and updating an existing outlet matched by the same uid.

    Repairs two corrupted tokens from extraction (``form.instance.retrieved``
    and ``instance = form.save()`` in test_override_instance).
    """

    def setUp(self):
        self.retailer = VendRetailer(
            name="TestRetailer",
            access_token="some token",
            expires=now(),
            expires_in=0,
            refresh_token="some other token",
        )
        self.retailer.save()

    def test_form(self):
        uid = "dc85058a-a683-11e4-ef46-e8b98f1a7ae4"
        name = "Main Outlet"
        tz = "Pacific/Auckland"
        currency = "NZD"
        symbol = "$"
        data = {
            "id": uid,
            "name": name,
            "time_zone": tz,
            "default_tax_id": "b1d192bc-f019-11e3-a0f5-b8ca3a64f8f4",
            "currency": currency,
            "currency_symbol": symbol,
            "display_prices": "inclusive",
            "deleted_at": "2014-07-01T20:22:58+00:00",
            "version": 1288421
        }
        form = VendOutletForm(data)
        form.instance.retailer = self.retailer
        form.instance.retrieved = now()
        self.assertTrue(form.is_valid())
        if form.is_valid():
            instance = form.save()
        del_time = make_aware(datetime(2014, 7, 1, 20, 22, 58), FixedOffset(0))
        self.assertEqual(instance.uid, UUID(uid))
        self.assertEqual(instance.name, name)
        self.assertEqual(instance.time_zone, tz)
        self.assertEqual(instance.currency, currency)
        self.assertEqual(instance.display_prices_tax_inclusive, True)
        self.assertTrue(instance.deleted_at == del_time)

    def test_override_instance(self):
        uid = "dc85058a-a683-11e4-ef46-e8b98f1a7ae4"
        name = "Main Outlet"
        tz = "Pacific/Auckland"
        currency = "NZD"
        symbol = "$"
        data = {
            "id": uid,
            "name": "London Outlet",
            "time_zone": tz,
            "default_tax_id": "b1d192bc-f019-11e3-a0f5-b8ca3a64f8f4",
            "currency": currency,
            "currency_symbol": symbol,
            "display_prices": "inclusive",
            "deleted_at": "2014-07-01T20:22:58+00:00",
            "version": 1288421
        }
        # Pre-create an outlet with the same uid; the form should update it
        # in place rather than create a second row.
        VendOutlet.objects.create(uid=uid, name=name, time_zone=tz,
            currency=currency, currency_symbol=symbol, retailer=self.retailer,
            retrieved=now())
        outlets = VendOutlet.objects.all()
        self.assertEqual(len(outlets), 1)
        instance = outlets[0]
        self.assertEqual(instance.name, name)
        form = VendOutletForm(data)
        form.instance.retailer = self.retailer
        form.instance.retrieved = now()
        self.assertTrue(form.is_valid())
        if form.is_valid():
            instance = form.save()
        del_time = make_aware(datetime(2014, 7, 1, 20, 22, 58), FixedOffset(0))
        self.assertEqual(instance.uid, UUID(uid))
        self.assertEqual(instance.name, "London Outlet")
        self.assertEqual(instance.time_zone, tz)
        self.assertEqual(instance.currency, currency)
        self.assertEqual(instance.display_prices_tax_inclusive, True)
        self.assertTrue(instance.deleted_at == del_time)
        outlets = VendOutlet.objects.all()
        self.assertEqual(len(outlets), 1)
        self.assertEqual(instance.name, "London Outlet")
class VendRegisterFormTestCase(TestCase):
    """Exercise VendRegisterForm: creating a VendRegister from an
    API-shaped payload, and overriding an existing register matched by uid."""

    def setUp(self):
        self.retailer = VendRetailer.objects.create(
            name="TestRetailer",
            access_token="some token",
            expires=now(),
            expires_in=0,
            refresh_token="some other token",
        )
        self.outlet = VendOutlet.objects.create(
            uid="b8ca3a65-0183-11e4-fbb5-2816d2677218",
            name="Main Outlet",
            time_zone="Pacific/Auckland",
            currency="NZD",
            currency_symbol="$",
            retailer=self.retailer,
            retrieved=now()
        )
        self.uid = "dc85058a-a683-11e4-ef46-e8b98f1a7ae4"
        self.name = "Main Register"
        self.outlet_id = "b8ca3a65-0183-11e4-fbb5-2816d2677218"
        # Re-fetch so self.outlet is the persisted instance looked up by uid.
        self.outlet = VendOutlet.objects.get(uid=self.outlet_id)
        self.data = {
            "id": self.uid,
            "name": self.name,
            "outlet_id": self.outlet_id,
            "ask_for_note_on_save": 1,
            "print_note_on_receipt": False,
            "ask_for_user_on_sale": False,
            "show_discounts_on_receipts": True,
            "print_receipt": True,
            "email_receipt": False,
            "invoice_prefix": "PRE",
            "invoice_suffix": "SUF",
            "invoice_sequence": 1234,
            "button_layout_id": "b8ca3a65-0183-11e4-fbb5-2816e25ffc51",
            "is_open": True,
            "register_open_time": "2015-03-16T22:21:50+00:00",
            "register_close_time": "null",
            "deleted_at": "2014-07-01T20:22:58+00:00",
            "version": 1288421
        }
        self.del_time = make_aware(datetime(2014, 7, 1, 20, 22, 58), FixedOffset(0))
        self.other_time = make_aware(datetime(2015, 3, 16, 22, 21, 50), FixedOffset(0))

    def test_form(self):
        # A valid payload creates a register linked to the existing outlet.
        form = VendRegisterForm(self.data)
        form.instance.retailer = self.retailer
        form.instance.retrieved = now()
        self.assertTrue(form.is_valid())
        if form.is_valid():
            instance = form.save()
        self.assertEqual(instance.uid, UUID(self.uid))
        self.assertEqual(instance.name, self.name)
        self.assertEqual(instance.outlet, self.outlet)
        self.assertEqual(instance.invoice_prefix, "PRE")
        self.assertEqual(instance.invoice_suffix, "SUF")
        self.assertEqual(instance.invoice_sequence, 1234)
        self.assertTrue(instance.deleted_at == self.del_time)
        self.assertTrue(instance.is_open)
        self.assertTrue(instance.register_open_time == self.other_time)
        self.assertIsNone(instance.register_close_time)
        self.assertEqual(instance.retailer, self.retailer)

    def test_override_instance(self):
        # Pre-create a closed register with the same uid; saving the form
        # should update it in place (still one row) and re-open it.
        VendRegister.objects.create(
            uid=self.uid,
            name=self.name,
            outlet=self.outlet,
            invoice_prefix="PRE",
            invoice_suffix="SUF",
            invoice_sequence=1234,
            is_open=False,
            register_close_time=self.other_time,
            deleted_at=self.del_time,
            retailer=self.retailer,
            retrieved=now()
        )
        registers = VendRegister.objects.all()
        self.assertEqual(len(registers), 1)
        instance = registers[0]
        self.assertEqual(instance.name, self.name)
        self.assertEqual(instance.outlet, self.outlet)
        self.assertFalse(instance.is_open)
        self.assertIsNone(instance.register_open_time)
        self.assertEqual(instance.register_close_time, self.other_time)
        form = VendRegisterForm(self.data)
        form.instance.retailer = self.retailer
        form.instance.retrieved = now()
        self.assertTrue(form.is_valid())
        if form.is_valid():
            instance = form.save()
        self.assertEqual(instance.uid, UUID(self.uid))
        self.assertTrue(instance.deleted_at == self.del_time)
        self.assertTrue(instance.is_open)
        registers = VendRegister.objects.all()
        self.assertEqual(len(registers), 1)
        register = registers[0]
        self.assertTrue(register.register_open_time == self.other_time)
        self.assertIsNone(register.register_close_time)
|
qiuminxu/tensorboard | tensorboard/plugins/hparams/metadata.py | Python | apache-2.0 | 3,935 | 0.004574 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Constants used in the HParams plugin."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from tensorboard.plugins.hparams import plugin_data_pb2
from tensorboard.plugins.hparams import error
# Name under which this plugin registers its summary data.
PLUGIN_NAME = 'hparams'
# Version stamped into (and required when reading back) HParamsPluginData.
PLUGIN_DATA_VERSION = 0
# Summary tags used to store experiment- and session-level metadata.
EXPERIMENT_TAG = '_hparams_/experiment'
SESSION_START_INFO_TAG = '_hparams_/session_start_info'
SESSION_END_INFO_TAG = '_hparams_/session_end_info'
def create_summary_metadata(hparams_plugin_data_pb):
  """Wrap a copy of the given HParamsPluginData in a tf.SummaryMetadata.

  The copy's version field is stamped with PLUGIN_DATA_VERSION so that
  readers can detect incompatible data.
  """
  if not isinstance(hparams_plugin_data_pb, plugin_data_pb2.HParamsPluginData):
    raise TypeError('Needed an instance of plugin_data_pb2.HParamsPluginData.'
                    ' Got: %s' % type(hparams_plugin_data_pb))
  stamped = plugin_data_pb2.HParamsPluginData()
  stamped.CopyFrom(hparams_plugin_data_pb)
  stamped.version = PLUGIN_DATA_VERSION
  plugin_data = tf.SummaryMetadata.PluginData(
      plugin_name=PLUGIN_NAME, content=stamped.SerializeToString())
  return tf.SummaryMetadata(plugin_data=plugin_data)
def parse_experiment_plugin_data(content):
  """Parses a given HParam's SummaryMetadata.plugin_data.content and
  returns the 'experiment'.
  Raises:
    HParamsError if the content doesn't have 'experiment' set or
    this file is incompatible with the version of the metadata stored.
  """
  return _parse_plugin_data_as(content, 'experiment')
def parse_session_start_info_plugin_data(content):
  """Parses a given HParam's SummaryMetadata.plugin_data.content and
  returns the 'session_start_info' field.
  Raises:
    HParamsError if the content doesn't have 'session_start_info' set or
    this file is incompatible with the version of the metadata stored.
  """
  return _parse_plugin_data_as(content, 'session_start_info')
def parse_session_end_info_plugin_data(content):
  """Parses a given HParam's SummaryMetadata.plugin_data.content and
  returns the 'session_end_info' field.
  Raises:
    HParamsError if the content doesn't have 'session_end_info' set or
    this file is incompatible with the version of the metadata stored.
  """
  return _parse_plugin_data_as(content, 'session_end_info')
def _parse_plugin_data_as(content, data_oneof_field):
  """Parses a given HParam's SummaryMetadata.plugin_data.content and
  returns the data oneof's field given by 'data_oneof_field'.
  Raises:
    HParamsError if the content doesn't have 'data_oneof_field' set or
    this file is incompatible with the version of the metadata stored.
  """
  plugin_data = plugin_data_pb2.HParamsPluginData.FromString(content)
  # Refuse to read data written by a different (incompatible) version.
  if plugin_data.version != PLUGIN_DATA_VERSION:
    raise error.HParamsError(
        'Only supports plugin_data version: %s; found: %s in: %s' %
        (PLUGIN_DATA_VERSION, plugin_data.version, plugin_data))
  if not plugin_data.HasField(data_oneof_field):
    raise error.HParamsError(
        'Expected plugin_data.%s to be set. Got: %s' %
        (data_oneof_field, plugin_data))
  return getattr(plugin_data, data_oneof_field)
|
MalloyPower/parsing-python | front-end/testsuite-python-lib/Python-2.1/Lib/test/test_xmllib.py | Python | mit | 532 | 0.00188 | '''Test module to thest the xmllib module.
Sjoerd | Mullender
'''
from test_support import verbose
# A small but feature-rich document: XML declaration, comment, internal DTD.
testdoc = """\
<?xml version="1.0" encoding="UTF-8" standalone='yes' ?>
<!-- comments aren't allowed before the <?xml?> tag,
but they are allowed before the <!DOCTYPE> tag -->
<!DOCTYPE greeting [
<!ELEMENT greeting (#PCDATA)>
]>
<greeting>Hello, world!</greeting>
"""
import xmllib
# TestXMLParser echoes parsing events when the suite runs verbosely.
if verbose:
    parser = xmllib.TestXMLParser()
else:
    parser = xmllib.XMLParser()
# Feed one character at a time to exercise the parser's buffering logic.
for c in testdoc:
    parser.feed(c)
parser.close()
|
FrozenPigs/Taigabot | plugins/_disabled/pyexec.py | Python | gpl-3.0 | 200 | 0.005 | import re
|
from util import hook, http
from util.execute import eval_py
@hook.command(adminonly=True)
def python(inp):
    "python <prog> -- Executes <prog> as Python code."
    # Admin-only for a reason: eval_py runs arbitrary code with the bot's
    # privileges.
    return eval_py(inp)
|
lucidmotifs/newtopia | newtopia/ntgame/admin.py | Python | gpl-3.0 | 2,300 | 0.001304 | from django.contrib import admin
from django.utils.safestring import mark_safe
from django.urls import reverse
from .models import Kingdom, Province, Military, Race
from .models import Infrastructure, InfrastructureItem, Building
from .models import Effect, Instance
from .models import Spell
# Register your models here.
# Game Entities
admin.site.register(Kingdom)
admin.site.register(Military)
admin.site.register(Race)
admin.site.register(Infrastructure)
admin.site.register(Spell)
class InfrastructureInline(admin.StackedInline):
    """Inline editor showing a province's Infrastructure on its admin page.

    Repairs corrupted tokens from extraction (``readonly_fields | =`` and
    the leading ``|`` before the first fieldset).
    """
    model = Infrastructure
    show_change_link = True
    can_delete = False
    # Acreage counters are read-only in the admin.
    readonly_fields = ('explored','built',)
    verbose_name_plural = "Infrastructure"
    fieldsets = (
        (None, {
            'fields': (
                'land',
            )
        }),
        ('Details', {
            'fields': (
                'built',
                'explored',
            )
        }),
    )
class MilitaryInline(admin.StackedInline):
    # Inline editor showing a province's Military on its admin page.
    model = Military
    show_change_link = True
    can_delete = False
    verbose_name_plural = "Military"
@admin.register(Building)
class Building(admin.ModelAdmin):
    # NOTE(review): this admin class shadows the imported Building model
    # name; ModelAdmin subclasses are conventionally named BuildingAdmin.
    list_display = (
        'name',
        'description',
        'effect_instances',
    )
@admin.register(Effect)
class Effect(admin.ModelAdmin):
    # NOTE(review): shadows the imported Effect model name; conventionally
    # this would be named EffectAdmin.
    list_display = (
        'tag',
        'name',
    )
@admin.register(Instance)
class EffectInstance(admin.ModelAdmin):
    # Registered so Instance objects are editable via links, but hidden
    # from the admin index (see get_model_perms below).
    def get_model_perms(self, request):
        """
        Return empty perms dict thus hiding the model from admin index.
        """
        return {}
@admin.register(Province)
class Province(admin.ModelAdmin):
    # NOTE(review): shadows the imported Province model name; conventionally
    # this would be named ProvinceAdmin.
    list_display = (
        'name',
        'ruler',
        'kingdom',
    )
    # Derived/maintained by game logic, not edited by hand.
    readonly_fields = ('owner','trade_balance',)
    fieldsets = (
        (None, {
            'fields': (
                'name',
                'ruler',
                'peasants',
                'money',
                'land',
                'food',
                'trade_balance',
            )
        }),
        ('Resources', {
            'fields': ('mages', 'runes', 'warhorses', 'prisoners',)
        }),
        ('Meta', {
            'fields': ('race', 'kingdom', 'owner', )
        }),
    )
    inlines = [
        InfrastructureInline, MilitaryInline
    ]
|
tensorflow/tensorflow | tensorflow/python/data/kernel_tests/interleave_test.py | Python | apache-2.0 | 17,486 | 0.005319 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for `tf.data.Dataset.interleave()`."""
import multiprocessing
import os
from absl.testing import parameterized
import numpy as np
from tensorflow.python.data.experimental.ops import testing
from tensorflow.python.data.kernel_tests import checkpoint_test_base
from tensorflow.python.data.kernel_tests import test_base
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.data.ops import options as options_lib
from tensorflow.python.framework import combinations
from tensorflow.python.framework import errors
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import sparse_ops
from tensorflow.python.platform import test
def _interleave(lists, cycle_length, block_length, num_parallel_calls=None):
"""Reference implementation of interleave used for testing.
Args:
lists: a list of lists to interleave
cycle_length: the length of the interleave cycle
block_length: the length of the interleave block
num_parallel_calls: the number of parallel calls
Yields:
Elements of `lists` interleaved in the order determined by `cycle_length`
and `block_length`.
"""
num_open = 0
# `all_iterators` acts | as a queue of iterators over each element of `lists`.
al | l_iterators = [iter(l) for l in lists]
# `open_iterators` are the iterators whose elements are currently being
# interleaved.
open_iterators = []
if cycle_length is None:
# The logic here needs to match interleave C++ kernels.
if num_parallel_calls is None:
cycle_length = multiprocessing.cpu_count()
elif num_parallel_calls == dataset_ops.AUTOTUNE:
cycle_length = (multiprocessing.cpu_count() + 2) // 3
else:
cycle_length = min(num_parallel_calls, multiprocessing.cpu_count())
for i in range(cycle_length):
if all_iterators:
open_iterators.append(all_iterators.pop(0))
num_open += 1
else:
open_iterators.append(None)
while num_open or all_iterators:
for i in range(cycle_length):
if open_iterators[i] is None:
if all_iterators:
open_iterators[i] = all_iterators.pop(0)
num_open += 1
else:
continue
for _ in range(block_length):
try:
yield next(open_iterators[i])
except StopIteration:
open_iterators[i] = None
num_open -= 1
break
def _repeat(values, count):
"""Produces a list of lists suitable for testing interleave.
Args:
values: for each element `x` the result contains `[x] * x`
count: determines how many times to repeat `[x] * x` in the result
Returns:
A list of lists of values suitable for testing interleave.
"""
return [[value] * value for value in np.tile(values, count)]
class InterleaveTest(test_base.DatasetTestBase, parameterized.TestCase):
@combinations.generate(
combinations.times(
test_base.default_test_combinations(),
combinations.combine(
input_values=[[4, 5, 6]],
cycle_length=1,
block_length=1,
expected_elements=[[
4, 4, 4, 4, 5, 5, 5, 5, 5, 6, 6, 6, 6, 6, 6, 4, 4, 4, 4, 5, 5,
5, 5, 5, 6, 6, 6, 6, 6, 6
]]) + combinations.combine(
input_values=[[4, 5, 6]],
cycle_length=2,
block_length=1,
expected_elements=[[
4, 5, 4, 5, 4, 5, 4, 5, 5, 6, 6, 4, 6, 4, 6, 4, 6, 4, 6,
5, 6, 5, 6, 5, 6, 5, 6, 5, 6, 6
]]) + combinations.combine(
input_values=[[4, 5, 6]],
cycle_length=2,
block_length=3,
expected_elements=[[
4, 4, 4, 5, 5, 5, 4, 5, 5, 6, 6, 6, 4, 4, 4, 6, 6, 6,
4, 5, 5, 5, 6, 6, 6, 5, 5, 6, 6, 6
]]) + combinations.combine(
input_values=[[4, 5, 6]],
cycle_length=7,
block_length=2,
expected_elements=[[
4, 4, 5, 5, 6, 6, 4, 4, 5, 5, 6, 6, 4, 4, 5, 5, 6,
6, 4, 4, 5, 5, 6, 6, 5, 6, 6, 5, 6, 6
]]) +
combinations.combine(
input_values=[[4, 0, 6]],
cycle_length=2,
block_length=1,
expected_elements=[[
4, 4, 6, 4, 6, 4, 6, 6, 4, 6, 4, 6, 4, 4, 6, 6, 6, 6, 6, 6
]])))
def testPythonImplementation(self, input_values, cycle_length, block_length,
expected_elements):
input_lists = _repeat(input_values, 2)
for expected, produced in zip(
expected_elements, _interleave(input_lists, cycle_length,
block_length)):
self.assertEqual(expected, produced)
@combinations.generate(
combinations.times(
test_base.default_test_combinations(),
combinations.combine(
input_values=[np.int64([4, 5, 6])],
cycle_length=1,
block_length=3,
num_parallel_calls=[None, 1]) + combinations.combine(
input_values=[np.int64([4, 5, 6])],
cycle_length=2,
block_length=[1, 3],
num_parallel_calls=[None, 1, 2]) + combinations.combine(
input_values=[np.int64([4, 5, 6])],
cycle_length=7,
block_length=2,
num_parallel_calls=[None, 1, 3, 5, 7]) +
combinations.combine(
input_values=[np.int64([4, 5, 6, 7])],
cycle_length=None,
block_length=3,
num_parallel_calls=[None, 1]) + combinations.combine(
input_values=[np.int64([]), np.int64([0, 0, 0])],
cycle_length=2,
block_length=3,
num_parallel_calls=[None]) + combinations.combine(
input_values=[np.int64([4, 0, 6])],
cycle_length=2,
block_length=3,
num_parallel_calls=[None, 1, 2])))
def testInterleaveDataset(self, input_values, cycle_length, block_length,
num_parallel_calls):
count = 2
dataset = dataset_ops.Dataset.from_tensor_slices(input_values).repeat(
count).interleave(
lambda x: dataset_ops.Dataset.from_tensors(x).repeat(x),
cycle_length, block_length, num_parallel_calls)
expected_output = [
element for element in _interleave(
_repeat(input_values, count), cycle_length, block_length,
num_parallel_calls)
]
self.assertDatasetProduces(dataset, expected_output)
@combinations.generate(
combinations.times(
test_base.default_test_combinations(),
combinations.combine(
input_values=[np.float32([1., np.nan, 2., np.nan, 3.])],
cycle_length=1,
block_length=3,
num_parallel_calls=[None, 1]) + combinations.combine(
input_values=[np.float32([1., np.nan, 2., np.nan, 3.])],
cycle_length=2,
block_length=[1, 3],
num_parallel_calls=[Non |
silentfuzzle/calibre | src/calibre/gui2/library/models.py | Python | gpl-3.0 | 67,194 | 0.003423 | #!/usr/bin/env python2
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:ai
__license__ = 'GPL v3'
__copyright__ = '2010, Kovid Goyal <kovid@kovidgoyal.net>'
__docformat__ = 'restructuredtext en'
import functools, re, os, traceback, errno, time
from collections import defaultdict, namedtuple
from itertools import groupby
from PyQt5.Qt import (QAbstractTableModel, Qt, pyqtSignal, QIcon, QImage,
QModelIndex, QDateTime, QColor, QPixmap, QPainter, QApplication)
from calibre.gui2 import error_dialog
from calibre.utils.search_query_pars | er import ParseException
from calibre.ebooks.metadata import fmt_sidx, authors_to_string, string_to_authors
from calibre.ebooks.metadat | a.book.formatter import SafeFormat
from calibre.ptempfile import PersistentTemporaryFile
from calibre.utils.config import tweaks, device_prefs, prefs
from calibre.utils.date import dt_factory, qt_to_dt, as_local_time, UNDEFINED_DATE
from calibre.utils.icu import sort_key
from calibre.utils.search_query_parser import SearchQueryParser
from calibre.db.search import _match, CONTAINS_MATCH, EQUALS_MATCH, REGEXP_MATCH
from calibre.library.caches import force_to_bool
from calibre.library.save_to_disk import find_plugboard
from calibre import strftime, isbytestring
from calibre.constants import filesystem_encoding, DEBUG, config_dir
from calibre.gui2.library import DEFAULT_SORT
from calibre.utils.localization import calibre_langcode_to_name
from calibre.library.coloring import color_row_key
Counts = namedtuple('Counts', 'library_total total current')
def human_readable(size, precision=1):
    """Format *size* (in bytes) as megabytes with *precision* decimals."""
    fmt = '%.' + str(precision) + 'f'
    return fmt % (size / (1024. * 1024.))
TIME_FMT = '%d %b %Y'
ALIGNMENT_MAP = {'left': Qt.AlignLeft, 'right': Qt.AlignRight, 'center':
Qt.AlignHCenter}
# Lazily-created fallback cover image, shared process-wide.
_default_image = None
def default_image():
    """Return the stock cover QImage, loading it on first use."""
    global _default_image
    if _default_image is None:
        _default_image = QImage(I('default_cover.png'))
    return _default_image
def group_numbers(numbers):
    # Yield (first, last) endpoints of each run of consecutive values in
    # the sorted input: consecutive numbers share the same index - value,
    # which groupby keys on.
    # NOTE: the tuple-unpacking lambda is Python 2 only syntax.
    for k, g in groupby(enumerate(sorted(numbers)), lambda (i, x):i - x):
        first = None
        for last in g:
            if first is None:
                first = last[1]
        yield first, last[1]
class ColumnColor(object):  # {{{
    # Evaluates a user color template for a (book id, column) pair and
    # caches the resulting QColor per book.

    def __init__(self, formatter):
        # Proxy metadata for the book currently being processed (lazy).
        self.mi = None
        self.formatter = formatter

    def __call__(self, id_, key, fmt, db, color_cache, template_cache):
        # Include the template text in the cache key so edited rules
        # invalidate stale entries.
        key += str(hash(fmt))
        if id_ in color_cache and key in color_cache[id_]:
            self.mi = None
            color = color_cache[id_][key]
            if color.isValid():
                return color
            return None
        try:
            if self.mi is None:
                self.mi = db.new_api.get_proxy_metadata(id_)
            color = QColor(self.formatter.safe_format(fmt, self.mi, '', self.mi,
                                                  column_name=key,
                                                  template_cache=template_cache))
            color_cache[id_][key] = color
            if color.isValid():
                self.mi = None
                return color
        except:
            # Broad except: presumably a bad user template must never break
            # rendering -- the cell simply gets no custom color.
            pass
# }}}
class ColumnIcon(object):  # {{{
    # Evaluates icon rules for a (book id, column) pair and returns a
    # composed QPixmap, cached per book and per icon combination.

    def __init__(self, formatter, model):
        # Proxy metadata for the book currently being processed (lazy).
        self.mi = None
        self.formatter = formatter
        self.model = model

    def __call__(self, id_, fmts, cache_index, db, icon_cache, icon_bitmap_cache,
                 template_cache):
        # Fast path: this book already has a bitmap for this column/rules.
        if id_ in icon_cache and cache_index in icon_cache[id_]:
            self.mi = None
            return icon_cache[id_][cache_index]
        try:
            if self.mi is None:
                self.mi = db.new_api.get_proxy_metadata(id_)
            icons = []
            for dex, (kind, fmt) in enumerate(fmts):
                # Each rule may yield a colon-separated list of icon names.
                rule_icons = self.formatter.safe_format(fmt, self.mi, '', self.mi,
                                            column_name=cache_index+str(dex),
                                            template_cache=template_cache)
                if not rule_icons:
                    continue
                icon_list = [ic.strip() for ic in rule_icons.split(':')]
                icons.extend(icon_list)
                # Non-"composed" rules stop at the first match.
                if icon_list and not kind.endswith('_composed'):
                    break
            if icons:
                icon_string = ':'.join(icons)
                if icon_string in icon_bitmap_cache:
                    icon_bitmap = icon_bitmap_cache[icon_string]
                    icon_cache[id_][cache_index] = icon_bitmap
                    return icon_bitmap
                icon_bitmaps = []
                total_width = 0
                for icon in icons:
                    d = os.path.join(config_dir, 'cc_icons', icon)
                    if (os.path.exists(d)):
                        bm = QPixmap(d)
                        bm = bm.scaled(128, 128, aspectRatioMode=Qt.KeepAspectRatio,
                                       transformMode=Qt.SmoothTransformation)
                        icon_bitmaps.append(bm)
                        total_width += bm.width()
                # Compose multiple icons side by side with a 2px gap.
                if len(icon_bitmaps) > 1:
                    i = len(icon_bitmaps)
                    result = QPixmap((i * 128) + ((i-1)*2), 128)
                    result.fill(Qt.transparent)
                    painter = QPainter(result)
                    x = 0
                    for bm in icon_bitmaps:
                        painter.drawPixmap(x, 0, bm)
                        x += bm.width() + 2
                    painter.end()
                else:
                    result = icon_bitmaps[0]
                # If the image height is less than the row height, leave it alone
                # The -4 allows for a margin above and below. Also ensure that
                # it is always a bit positive
                rh = max(2, self.model.row_height - 4)
                if result.height() > rh:
                    result = result.scaledToHeight(rh, mode=Qt.SmoothTransformation)
                icon_cache[id_][cache_index] = result
                icon_bitmap_cache[icon_string] = result
                self.mi = None
                return result
        except:
            # Broad except: presumably a bad template or missing icon file
            # must never break rendering -- the cell simply gets no icon.
            pass
# }}}
class BooksModel(QAbstractTableModel): # {{{
about_to_be_sorted = pyqtSignal(object, name='aboutToBeSorted')
sorting_done = pyqtSignal(object, name='sortingDone')
database_changed = pyqtSignal(object, name='databaseChanged')
new_bookdisplay_data = pyqtSignal(object)
count_changed_signal = pyqtSignal(int)
searched = pyqtSignal(object)
search_done = pyqtSignal()
def __init__(self, parent=None, buffer=40):
QAbstractTableModel.__init__(self, parent)
self.orig_headers = {
'title' : _("Title"),
'ondevice' : _("On Device"),
'authors' : _("Author(s)"),
'size' : _("Size (MB)"),
'timestamp' : _("Date"),
'pubdate' : _('Published'),
'rating' : _('Rating'),
'publisher' : _("Publisher"),
'tags' : _("Tags"),
'series' : ngettext("Series", 'Series', 1),
'last_modified' : _('Modified'),
'languages' : _('Languages'),
}
self.db = None
self.formatter = SafeFormat()
self._clear_caches()
self.column_color = ColumnColor(self.formatter)
self.column_icon = ColumnIcon(self.formatter, self)
self.book_on_device = None
self.editable_cols = ['title', 'authors', 'rating', 'publisher',
'tags', 'series', 'timestamp', 'pubdate',
'languages']
self.default_image = default_image()
self.sorted_on = DEFAULT_SORT
self.sort_history = [self.sorted_on]
self.last_search = '' # The last search performed on this model
self.column_map = []
self.headers = {}
self.alignment_map = {}
self.buffer_size = buffer
self.m |
daviddrysdale/python-phonenumbers | python/phonenumbers/shortdata/region_NO.py | Python | apache-2.0 | 772 | 0.007772 | """Auto-generated file, do not edit by hand. NO metadata"""
from ..phonemetadata import NumberFormat, PhoneNumberDesc, PhoneMetadata
PHONE_METADATA_NO = PhoneMetadata(id='NO', country_code=None, international_prefix=None,
general_desc=PhoneNumberDesc(national_number_pattern='1\\d\\d(?:\\d(?:\\d{2})?)?' | , possible_length=(3, 4, 6)),
toll_free=PhoneNumberDesc(national_number_pattern='11(?:[023]|6\\d{3})', example_number='110', possible_length=(3, 6)),
emergency=PhoneNumberDesc(national_number_pattern='11[023]', example_number='110', possible_length=(3,)),
short_code=PhoneNumberDesc(national_number_pattern='1(?:1(?:[0239]|61(?:1[17]|23))|2[048]|4( | ?:12|[59])|7[57]|8[5-9]\\d|90)', example_number='110', possible_length=(3, 4, 6)),
short_data=True)
|
NiJeLorg/CDADMap | cdad/cdadmap/migrations/0037_auto_20151106_1149.py | Python | mit | 1,088 | 0.003676 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('cdadmap', '0036_auto_20151105_1350'),
]
operations = [
migrations.AddField(
model_name='locationpanel',
name='Activity_Other',
field=models.CharField(default=b'', max_length=255, null=True, blank=True),
),
migrations.AddField(
model_name='surveypanel',
name='Organization_Description_Other',
field=models.CharField(default=b'', max_length=255, null=True, blank=True),
), |
migrations.AddField(
model_name='surveypanel',
name='Service_Population_Other',
field=models.CharField(default=b'', max_length=255, null=True, | blank=True),
),
migrations.AlterField(
model_name='surveypanel',
name='CDAD_Services_Other',
field=models.CharField(default=b'', max_length=255, null=True, blank=True),
),
]
|
carsongee/edx-platform | cms/djangoapps/contentstore/views/tests/test_course_index.py | Python | agpl-3.0 | 8,877 | 0.002816 | """
Unit tests for getting the list of courses and the course outline.
"""
import json
import lxml
from contentstore.tests.utils import CourseTestCase
from contentstore.utils import reverse_course_url, add_instructor
from contentstore.views.access import has_course_access
from course_action_state.models import CourseRerunState
from xmodule.modulestore.tests.factories import CourseFactory, ItemFactory
from opaque_keys.edx.locator import CourseLocator
from student.tests.factories import UserFactory
from course_action_state.managers import CourseRerunUIStateManager
from django.conf import settings
class TestCourseIndex(CourseTestCase):
"""
Unit tests for getting the list of courses and the course outline.
"""
def setUp(self):
"""
Add a course with odd characters in the fields
"""
super(TestCourseIndex, self).setUp()
# had a problem where index showed course but has_access failed to retrieve it for non-staff
self.odd_course = CourseFactory.create(
org='test.org_1-2',
number='test-2.3_course',
display_name='dotted.course.name-2',
)
def check_index_and_outline(self, authed_client):
"""
Test getting the list of courses and then pulling up their outlines
"""
index_url = '/course/'
index_response = authed_client.get(index_url, {}, HTTP_ACCEPT='text/html')
parsed_html = lxml.html.fromstring(index_response.content)
course_link_eles = parsed_html.find_class('course-link')
self.assertGreaterEqual(len(course_link_eles), 2)
for link in course_link_eles:
self.assertRegexpMatches(
link.get("href"),
'course/{}'.format(settings.COURSE_KEY_PATTERN)
)
# now test that url
outline_response = authed_client.get(link.get("href"), {}, HTTP_ACCEPT='text/html')
# ensure it has the expected 2 self referential links
outline_parsed = lxml.html.fromstring(outline_response.content)
outline_link = outline_parsed.find_class('course-link')[0]
self.assertEqual(outline_link.get("href"), link.get("href"))
course_menu_link = outline_parsed.find_class('nav-course-courseware-outline')[0]
self.assertEqual(course_menu_link.find("a").get("href"), link.get("href"))
def test_is_staff_access(self):
"""
Test that people with is_staff see the courses and can navigate into them
"""
self.check_index_and_outline(self.client)
def test_negative_conditions(self):
"""
Test the error conditions for the access
"""
outline_url = reverse_course_url('course_handler', self.course.id)
# register a non-staff member and try to delete the course branch
non_staff_client, _ = self.create_non_staff_authed_user_client()
response = non_staff_client.delete(outline_url, {}, HTTP_ACCEPT='application/json')
self.assertEqual(response.status_code, 403)
def test_course_staff_access(self):
"""
Make and register course_staff and ensure they can access the courses
"""
course_ | staff_client, course_staff = self.create_non_ | staff_authed_user_client()
for course in [self.course, self.odd_course]:
permission_url = reverse_course_url('course_team_handler', course.id, kwargs={'email': course_staff.email})
self.client.post(
permission_url,
data=json.dumps({"role": "staff"}),
content_type="application/json",
HTTP_ACCEPT="application/json",
)
# test access
self.check_index_and_outline(course_staff_client)
def test_json_responses(self):
outline_url = reverse_course_url('course_handler', self.course.id)
chapter = ItemFactory.create(parent_location=self.course.location, category='chapter', display_name="Week 1")
lesson = ItemFactory.create(parent_location=chapter.location, category='sequential', display_name="Lesson 1")
subsection = ItemFactory.create(parent_location=lesson.location, category='vertical', display_name='Subsection 1')
ItemFactory.create(parent_location=subsection.location, category="video", display_name="My Video")
resp = self.client.get(outline_url, HTTP_ACCEPT='application/json')
json_response = json.loads(resp.content)
# First spot check some values in the root response
self.assertEqual(json_response['category'], 'course')
self.assertEqual(json_response['id'], 'i4x://MITx/999/course/Robot_Super_Course')
self.assertEqual(json_response['display_name'], 'Robot Super Course')
self.assertTrue(json_response['is_container'])
self.assertFalse(json_response['is_draft'])
# Now verify the first child
children = json_response['children']
self.assertTrue(len(children) > 0)
first_child_response = children[0]
self.assertEqual(first_child_response['category'], 'chapter')
self.assertEqual(first_child_response['id'], 'i4x://MITx/999/chapter/Week_1')
self.assertEqual(first_child_response['display_name'], 'Week 1')
self.assertTrue(first_child_response['is_container'])
self.assertFalse(first_child_response['is_draft'])
self.assertTrue(len(first_child_response['children']) > 0)
# Finally, validate the entire response for consistency
self.assert_correct_json_response(json_response)
def test_notifications_handler_get(self):
state = CourseRerunUIStateManager.State.FAILED
action = CourseRerunUIStateManager.ACTION
should_display = True
# try when no notification exists
notification_url = reverse_course_url('course_notifications_handler', self.course.id, kwargs={
'action_state_id': 1,
})
resp = self.client.get(notification_url, HTTP_ACCEPT='application/json')
# verify that we get an empty dict out
self.assertEquals(resp.status_code, 400)
# create a test notification
rerun_state = CourseRerunState.objects.update_state(course_key=self.course.id, new_state=state, allow_not_found=True)
CourseRerunState.objects.update_should_display(entry_id=rerun_state.id, user=UserFactory(), should_display=should_display)
# try to get information on this notification
notification_url = reverse_course_url('course_notifications_handler', self.course.id, kwargs={
'action_state_id': rerun_state.id,
})
resp = self.client.get(notification_url, HTTP_ACCEPT='application/json')
json_response = json.loads(resp.content)
self.assertEquals(json_response['state'], state)
self.assertEquals(json_response['action'], action)
self.assertEquals(json_response['should_display'], should_display)
def test_notifications_handler_dismiss(self):
state = CourseRerunUIStateManager.State.FAILED
should_display = True
rerun_course_key = CourseLocator(org='testx', course='test_course', run='test_run')
# add an instructor to this course
user2 = UserFactory()
add_instructor(rerun_course_key, self.user, user2)
# create a test notification
rerun_state = CourseRerunState.objects.update_state(course_key=rerun_course_key, new_state=state, allow_not_found=True)
CourseRerunState.objects.update_should_display(entry_id=rerun_state.id, user=user2, should_display=should_display)
# try to get information on this notification
notification_dismiss_url = reverse_course_url('course_notifications_handler', self.course.id, kwargs={
'action_state_id': rerun_state.id,
})
resp = self.client.delete(notification_dismiss_url)
self.assertEquals(resp.status_code, 200)
with self.assertRaises(CourseRerunState.DoesNotExist):
# delete nofications that are dismissed
CourseRerunState.objects.get(id=rerun_state.id)
self.assertFalse(has_course_access(use |
JakubPetriska/poker-cfr | test/cfr_tests.py | Python | mit | 1,756 | 0.001708 | import unittest
import acpc_python_client as acpc
from cfr.main import Cfr
KUHN_POKER_GAME_FILE_PATH = 'games/kuhn.limit.2p.game'
KUHN_BIG_DECK_POKER_GAME_FILE_PATH = 'games/kuhn.bigdeck.limit.2p.game'
KUHN_BIG_DECK_2ROUND_POKER_GAME_FILE_PATH = 'games/kuhn.bigdeck.2round.limit.2p.game'
LEDUC_POKER_GAME_FILE_PATH = 'games/leduc.limit.2p.game'
class CfrTests(unittest.TestCase):
def test_kuhn_cfr_works(self):
game = acpc.read_game_file(KUHN_POKER_GAME_FILE_PATH)
cfr = Cfr(game, show_progress=Fals | e)
cfr.train(5, weight_delay=2)
def test_kuhn_bigdeck_cfr_works(self):
game = acpc.read_game_file(KUHN_BIG_DECK_POKER_GAME_FILE_PATH)
cfr = Cfr(game, show_progress=False)
cfr.train(5, weight_delay=2)
def test_kuhn_bigdeck_2round_cfr_works(self):
game = acpc.read_game_file(KUHN_BIG_DECK_2ROUND_POKER_G | AME_FILE_PATH)
cfr = Cfr(game, show_progress=False)
cfr.train(5, weight_delay=2)
def test_leduc_cfr_works(self):
game = acpc.read_game_file(LEDUC_POKER_GAME_FILE_PATH)
cfr = Cfr(game, show_progress=False)
cfr.train(5, weight_delay=2)
def test_kuhn_cfr_checkpointing(self):
game = acpc.read_game_file(KUHN_POKER_GAME_FILE_PATH)
cfr = Cfr(game, show_progress=False)
checkpoints_count = 0
def checkpoint_callback(game_tree, checkpoint_index, iterations):
nonlocal checkpoints_count
self.assertTrue(game_tree is not None)
self.assertEqual(checkpoint_index, checkpoints_count)
checkpoints_count += 1
cfr.train(60, weight_delay=15, checkpoint_iterations=15, checkpoint_callback=checkpoint_callback)
self.assertEqual(checkpoints_count, 3)
|
tinloaf/home-assistant | homeassistant/components/lovelace/__init__.py | Python | apache-2.0 | 5,109 | 0 | """
Support for the Lovelace UI.
For more details about this component, please refer to the documentation
at https://www.home-assistant.io/lovelace/
"""
from functools import wraps
import logging
import os
import time
import voluptuous as vol
from homeassistant.components import websocket_api
from homeassistant.exceptions import HomeAssistantError
from homeassistant.util.yaml import load_yaml
_LOGGER = logging.getLogger(__name__)
DOMAIN = 'lovelace'
STORAGE_KEY = DOMAIN
STORAGE_VERSION = 1
CONF_MODE = 'mode'
MODE_YAML = 'yaml'
MODE_STORAGE = 'storage'
CONFIG_SCHEMA = vol.Schema({
DOMAIN: vol.Schema({
vol.Optional(CONF_MODE, default=MODE_STORAGE):
vol.All(vol.Lower, vol.In([MODE_YAML, MODE_STORAGE])),
}),
}, extra=vol.ALLOW_EXTRA)
LOVELACE_CONFIG_FILE = 'ui-lovelace.yaml'
WS_TYPE_GET_LOVELACE_UI = 'lovelace/config'
WS_TYPE_SAVE_CONFIG = 'lovelace/config/save'
SCHEMA_GET_LOVELACE_UI = websocket_api.BASE_COMMAND_MESSAGE_SCHEMA.extend({
vol.Required('type'): WS_TYPE_GET_LOVELACE_UI,
vol.Optional('force', default=False): bool,
})
SCHEMA_SAVE_CONFIG = websocket_api.BASE_COMMAND_MESSAGE_SCHEMA.extend({
vol.Required('type'): WS_TYPE_SAVE_CONFIG,
vol.Required('config'): vol.Any(str, dict),
})
class ConfigNotFound(HomeAssistantError):
"""When no config available."""
async def async_setup(hass, config):
"""Set up the Lovelace commands."""
# Pass in default to `get` because defaults not set if loaded as dep
mode = config.get(DOMAIN, {}).get(CONF_MODE, MODE_STORAGE)
await hass.components.frontend.async_register_built_in_panel(
DOMAIN, config={
'mode': mode
})
if mode == MODE_YAML:
hass.data[DOMAIN] = LovelaceYAML(hass)
else:
hass.data[DOMAIN] = LovelaceStorage(hass)
hass.components.websocket_api.async_register_command(
WS_TYPE_GET_LOVELACE_UI, websocket_lovelace_config,
SCHEMA_GET_LOVELACE_UI)
hass.components.websocket_api.async_register_command(
WS_TYPE_SAVE_CONFIG, websocket_lovelace_save_config,
SCHEMA_SAVE_CONFIG)
return True
class LovelaceStorage:
"""Class to handle Storage based Lovelace config."""
def __init__(self, hass):
"""Initialize Lovelace config based on storage helper."""
self._store = hass.helpers.storage.Store(STORAGE_VERSION, STORAGE_KEY)
self._data = None
async def async_load(self, force):
"""Load config."""
if self._data is None:
data = await self._store.async_load()
self._data = data if data else {'config': None}
config = self._data['config']
if config is None:
raise ConfigNotFound
| return con | fig
async def async_save(self, config):
"""Save config."""
self._data['config'] = config
await self._store.async_save(self._data)
class LovelaceYAML:
"""Class to handle YAML-based Lovelace config."""
def __init__(self, hass):
"""Initialize the YAML config."""
self.hass = hass
self._cache = None
async def async_load(self, force):
"""Load config."""
return await self.hass.async_add_executor_job(self._load_config, force)
def _load_config(self, force):
"""Load the actual config."""
fname = self.hass.config.path(LOVELACE_CONFIG_FILE)
# Check for a cached version of the config
if not force and self._cache is not None:
config, last_update = self._cache
modtime = os.path.getmtime(fname)
if config and last_update > modtime:
return config
try:
config = load_yaml(fname)
except FileNotFoundError:
raise ConfigNotFound from None
self._cache = (config, time.time())
return config
async def async_save(self, config):
"""Save config."""
raise HomeAssistantError('Not supported')
def handle_yaml_errors(func):
"""Handle error with WebSocket calls."""
@wraps(func)
async def send_with_error_handling(hass, connection, msg):
error = None
try:
result = await func(hass, connection, msg)
message = websocket_api.result_message(
msg['id'], result
)
except ConfigNotFound:
error = 'config_not_found', 'No config found.'
except HomeAssistantError as err:
error = 'error', str(err)
if error is not None:
message = websocket_api.error_message(msg['id'], *error)
connection.send_message(message)
return send_with_error_handling
@websocket_api.async_response
@handle_yaml_errors
async def websocket_lovelace_config(hass, connection, msg):
"""Send Lovelace UI config over WebSocket configuration."""
return await hass.data[DOMAIN].async_load(msg['force'])
@websocket_api.async_response
@handle_yaml_errors
async def websocket_lovelace_save_config(hass, connection, msg):
"""Save Lovelace UI configuration."""
await hass.data[DOMAIN].async_save(msg['config'])
|
MarllonSoares/PYTHON-ESTUDO | lista-de-atividades/01-estrutura-sequencial/02-atividade/algoritmo-atv2.py | Python | mit | 360 | 0 | # -*- coding | : utf-8 -*-
"""
Created on Mon Mar 20 00:30:34 2017
@author: Marllon Soares
@site: www.marllonsoares.com. | br
@linguagem: Python 3
@assunto: Estrutura sequencial
@problema:
Faça um Programa que peça um número e então mostre
a mensagem O número informado foi [número].
"""
numero = int(input())
print("O número informado foi ", numero)
|
kaushik94/tardis | tardis/tests/integration_tests/report.py | Python | bsd-3-clause | 8,984 | 0.000779 | """
A helper class which works as a plugin to generate the test report and upload it
to the group server's dokuwiki. It inheirts from the class `HTMLReport` of
the `pytest-html` plugin. The test report contains the following details:
* The git commit hash on which test run was executed.
* The time of generation of test report.
* Number of passes, fails, errors, skips etc.
* Tabular representation of each method - name, result, duration.
* Embedded image of plot(s) and error log below a particular method (if any).
As a subclass, this class serves as a plugin and hence, `pytest-html` has to be
unregistered during the test run for tis plugin to function.
When the integration tests are selected for a particular test run, this class
is registered as a plugin in `pytest_configure` and subsequently unregistered in
`pytest_unconfigure`. As a plugin, it implements several "hook" functions
specified in pytest's official documentation.
References
==========
1. "Writing Plugins" ( https://pytest.org/latest/writing_plugins.html )
2. "Hookspec Source" ( https://pytest.org/latest/_modules/_pytest/hookspec.html )
3. "pytest-html" ( https://www.github.com/davehunt/pytest-html )
"""
import datetime
import json
import os
import shutil
import time
# For specifying error while exception handling
from socket import gaierror
from tardis import __githash__ as tardis_githash
try:
from pytest_html import __name__ as pytest_html_path
from pytest_html.plugin import HTMLReport
import requests
except ImportError:
pytest_html = None
dokuwiki = None
requests = None
class DokuReport(HTMLReport):
def __init__(self, report_config):
"""
Initialization of a DokuReport object and registration as a plugin
occurs in `pytest_configure`, where a dict containing url, username and
password of dokuwiki is passed through `dokuwiki_details`.
"""
# This will be either "remote" or "local".
self.save_mode = report_config["save_mode"]
if self.save_mode == "remote":
import dokuwiki
# Base class accepts a file path to save the report, but we pass an
# empty string as it is redundant for this use case.
super(DokuReport, self).__init__(
logfile=" ", self_contained=True, has_rerun=False
)
# Upload the report on a dokuwiki instance.
dokuwiki_details = report_config["dokuwiki"]
try:
self.doku_conn = dokuwiki.DokuWiki(
url=dokuwiki_details["url"],
user=dokuwiki_details["username"],
password=dokuwiki_details["password"],
)
except (TypeError, gaierror, dokuwiki.DokuWikiError) as e:
raise e
self.doku_conn = None
self.dokuwiki_url = ""
else:
self.dokuwiki_url = dokuwiki_details["url"]
else:
# Save the html report file locally.
self.report_dirpath = os.path.join(
os.path.expandvars(
os.path.expanduser(report_config["reportpath"])
),
tardis_githash[:7],
)
if os.path.exists(self.report_dirpath):
shutil.rmtree(self.report_dirpath)
os.makedirs(self.report_dirpath)
os.makedirs(os.path.join(self.report_dirpath, "assets"))
super(DokuReport, self).__init__(
logfile=os.path.join(self.report_dirpath, "report.html"),
self_contained=False,
has_rerun=False,
)
self.suite_start_time = time.time()
def _generate_report(self, session):
"""Writes HTML report to a temporary logfile."""
# Little hack to include suite_time_delta in wiki overview page.
suite_stop_time = time.time()
self.suite_time_delta = suite_stop_time - self.suite_start_time
report_content = super(DokuReport, self)._generate_report(session)
# A string which holds the complete report.
report_content = (
"Test executed on commit "
"[[https://www.github.com/tardis-sn/tardis/commit/{0}|{0}]]\n\n".format(
tardis_githash
)
) + report_content
# Quick hack for preventing log to be placed in narrow left out space
report_content = report_content.replace(
'class="log"', 'class="log" style="clear: both"'
)
# It was displayed raw on wiki pages, but not needed.
report_content = report_content.replace("<!DOCTYPE html>", "")
return report_content
def _save_report(self, report_content):
"""
Uploads the report and closes the temporary file. Temporary file is
made using `tempfile` built-in module, it gets deleted upon closing.
"""
if self.save_mode == "remote":
# Upload the report content to wiki
try:
self.doku_conn.pages.set(
"reports:{0}".format(tardis_githash[:7]), report_content
)
except (gaierror, TypeError):
pass
else:
# Save the file locally at "self.logfile" path
with open(self.logfile, "w") as f:
f.write(report_content)
with open(
os.path.join(self.report_dirpath, "assets", "style.css"), "w"
) as f:
f.write(self.style_css)
def _wiki_overview_entry(self):
"""Makes an entry of current test run on overview page of dokuwiki."""
if self.errors == 0:
if self.failed + self.xpassed == 0:
status = "Passed"
else:
status = "Failed"
else:
status = "Errored"
suite_start_datetime = datetime.datetime.utcfromtimestamp(
self.suite_start_time
)
# Fetch commit message from github.
gh_request = requests.get(
"https://api.github.com/repos/tardis-sn/tardis/git/commits/{0}".format(
| tardis_githash
)
)
gh_commit_data = json.lo | ads(gh_request.content)
# Pick only first line of commit message
gh_commit_message = gh_commit_data["message"].split("\n")[0]
# Truncate long commit messages
if len(gh_commit_message) > 60:
gh_commit_message = "{0}...".format(gh_commit_message[:57])
row = "| "
# Append hash
row += "[[reports:{0}|{0}]] | ".format(tardis_githash[:7])
# Append commit message
row += "[[https://www.github.com/tardis-sn/tardis/commit/{0}|{1}]] | ".format(
tardis_githash, gh_commit_message
)
# Append start time
row += "{0} | ".format(
suite_start_datetime.strftime("%d %b %H:%M:%S")
)
# Append time elapsed
row += "{0:.2f} sec | ".format(self.suite_time_delta)
# Append status
row += "{0} |\n".format(status)
try:
self.doku_conn.pages.append("/", row)
except (gaierror, TypeError):
pass
def pytest_sessionfinish(self, session):
"""
This hook function is called by pytest when whole test run is completed.
It calls the two helper methods `_generate_report` and `_save_report`.
"""
report_content = self._generate_report(session)
self._save_report(report_content)
# This method need not be called if saving locally
if self.save_mode == "remote":
self._wiki_overview_entry()
def pytest_terminal_summary(self, terminalreporter):
"""
This hook is called by pytest after session ends, and it adds an extra
summary at the end. Here, the success / failure of upload of report
to dokuwiki is logged.
"""
if self.save_mode == "remote":
try:
uploaded_report = self.doku_conn.pages.get(
"reports:{0}".format(tardis_githash[:7])
)
|
akilism/moving_violation_scraper | scraper.py | Python | mit | 1,500 | 0.002667 | __author__ = 'akilharris'
import httplib
from bs4 import BeautifulSoup
import os
#Load http://www.nyc.gov/html/nypd/html/traffic_reports/traffic_summons_reports.shtml
#grab all pdfs and save to a folder
path = "raw_data/pdf/"
def scrape(url):
conn = httplib.HTTPConnection("www.nyc.gov")
conn.request("GET", url)
response = conn.getresponse()
if response.status == 200:
temp_data = response.read()
conn.close()
return temp_data
def parse(extension, raw_data):
links = []
soup = BeautifulSoup(raw_data)
for link in soup.find_all("a"):
href = link.get("href")
if href.find(exte | nsion) != -1:
links.append(href)
return links
def save_links(links):
if not os.path.exists(path):
parts = path.split("/")
for part in parts:
if len(part) > 0:
os.mkdir(part)
os.chdir(part)
else:
os.chdir(path)
# print(os.getcwd())
for link in links:
link = str(link).replace("../..", "/html/nypd")
file_name = link.split("/")[-1]
precinct = file_na | me.strip("sum.pdf")
print("Saving: " + precinct + " - " + os.getcwd() + "/" + file_name)
file_data = scrape(link)
with open(file_name, "wb") as f:
f.write(file_data)
def getPDFs():
raw_data = scrape("/html/nypd/html/traffic_reports/traffic_summons_reports.shtml")
links = parse(".pdf", raw_data)
save_links(links)
getPDFs()
|
karlp/pyOCD | pyOCD/transport/__init__.py | Python | apache-2.0 | 687 | 0.002911 | """
mbed CMSIS-DAP debugger
Copy | right (c) 2006-2013 ARM Limited
Licensed und | er the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from cmsis_dap import CMSIS_DAP
TRANSPORT = {'cmsis_dap': CMSIS_DAP
} |
jwg4/flask-autodoc | setup.py | Python | mit | 1,280 | 0 | """
Flask-Selfdoc
-------------
Flask selfdoc automatically creates an online documentation for your flask app.
"""
from setuptools import setup
def readme():
with open('README.md') as f:
return f.read()
setup(
name='Flask-Selfdoc',
version='1.1.0',
url='http://github.com/jwg4/flask-selfdoc',
license='MIT',
author= | 'Arnaud Coomans',
maintainer='Jack Grahl',
maintainer_email='jack.grahl@gmail.com',
description='Documentation generator for flask',
long_description=readme(),
# py_modules=['flask_autodoc'],
# if you would be using a package instead use packages instead
# of py_modules:
packages=['flask_selfdoc'],
package_data={'flask_selfdoc': ['templ | ates/autodoc_default.html']},
zip_safe=False,
include_package_data=True,
platforms='any',
install_requires=[
'Flask'
],
classifiers=[
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
'Topic :: Software Development :: Libraries :: Python Modules'
],
test_suite='tests',
)
|
octocoin-project/octocoin | qa/rpc-tests/bip65-cltv-p2p.py | Python | mit | 6,411 | 0.003588 | #!/usr/bin/env python2
#
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
from test_framework.test_framework import ComparisonTestFramework
from test_framework.util import *
from test_framework.mininode import CTransaction, NetworkThread
from test_framework.blocktools import create_coinbase, create_block
from test_framework.comptool import TestInstance, TestManager
from test_framework.script import CScript, OP_1NEGATE, OP_NOP2, OP_DROP
from binascii import hexlify, unhexlify
import cStringIO
import time
def cltv_invalidate(tx):
'' | 'Modify the signature in vin 0 of the tx to fail CLTV
Prepends -1 CLTV DROP in th | e scriptSig itself.
'''
tx.vin[0].scriptSig = CScript([OP_1NEGATE, OP_NOP2, OP_DROP] +
list(CScript(tx.vin[0].scriptSig)))
'''
This test is meant to exercise BIP65 (CHECKLOCKTIMEVERIFY)
Connect to a single node.
Mine 2 (version 3) blocks (save the coinbases for later).
Generate 98 more version 3 blocks, verify the node accepts.
Mine 749 version 4 blocks, verify the node accepts.
Check that the new CLTV rules are not enforced on the 750th version 4 block.
Check that the new CLTV rules are enforced on the 751st version 4 block.
Mine 199 new version blocks.
Mine 1 old-version block.
Mine 1 new version block.
Mine 1 old version block, see that the node rejects.
'''
class BIP65Test(ComparisonTestFramework):
def __init__(self):
self.num_nodes = 1
def setup_network(self):
# Must set the blockversion for this test
self.nodes = start_nodes(1, self.options.tmpdir,
extra_args=[['-debug', '-whitelist=127.0.0.1', '-blockversion=3']],
binary=[self.options.testbinary])
def run_test(self):
test = TestManager(self, self.options.tmpdir)
test.add_all_connections(self.nodes)
NetworkThread().start() # Start up network handling in another thread
test.run()
def create_transaction(self, node, coinbase, to_address, amount):
from_txid = node.getblock(coinbase)['tx'][0]
inputs = [{ "txid" : from_txid, "vout" : 0}]
outputs = { to_address : amount }
rawtx = node.createrawtransaction(inputs, outputs)
signresult = node.signrawtransaction(rawtx)
tx = CTransaction()
f = cStringIO.StringIO(unhexlify(signresult['hex']))
tx.deserialize(f)
return tx
def get_tests(self):
self.coinbase_blocks = self.nodes[0].setgenerate(True, 2)
self.tip = int ("0x" + self.nodes[0].getbestblockhash() + "L", 0)
self.nodeaddress = self.nodes[0].getnewaddress()
self.last_block_time = time.time()
''' 98 more version 3 blocks '''
test_blocks = []
for i in xrange(98):
block = create_block(self.tip, create_coinbase(2), self.last_block_time + 1)
block.nVersion = 3
block.rehash()
block.solve()
test_blocks.append([block, True])
self.last_block_time += 1
self.tip = block.sha256
yield TestInstance(test_blocks, sync_every_block=False)
''' Mine 749 version 4 blocks '''
test_blocks = []
for i in xrange(749):
block = create_block(self.tip, create_coinbase(2), self.last_block_time + 1)
block.nVersion = 4
block.rehash()
block.solve()
test_blocks.append([block, True])
self.last_block_time += 1
self.tip = block.sha256
yield TestInstance(test_blocks, sync_every_block=False)
'''
Check that the new CLTV rules are not enforced in the 750th
version 3 block.
'''
spendtx = self.create_transaction(self.nodes[0],
self.coinbase_blocks[0], self.nodeaddress, 1.0)
cltv_invalidate(spendtx)
spendtx.rehash()
block = create_block(self.tip, create_coinbase(2), self.last_block_time + 1)
block.nVersion = 4
block.vtx.append(spendtx)
block.hashMerkleRoot = block.calc_merkle_root()
block.rehash()
block.solve()
self.last_block_time += 1
self.tip = block.sha256
yield TestInstance([[block, True]])
'''
Check that the new CLTV rules are enforced in the 751st version 4
block.
'''
spendtx = self.create_transaction(self.nodes[0],
self.coinbase_blocks[1], self.nodeaddress, 1.0)
cltv_invalidate(spendtx)
spendtx.rehash()
block = create_block(self.tip, create_coinbase(1), self.last_block_time + 1)
block.nVersion = 4
block.vtx.append(spendtx)
block.hashMerkleRoot = block.calc_merkle_root()
block.rehash()
block.solve()
self.last_block_time += 1
yield TestInstance([[block, False]])
''' Mine 199 new version blocks on last valid tip '''
test_blocks = []
for i in xrange(199):
block = create_block(self.tip, create_coinbase(1), self.last_block_time + 1)
block.nVersion = 4
block.rehash()
block.solve()
test_blocks.append([block, True])
self.last_block_time += 1
self.tip = block.sha256
yield TestInstance(test_blocks, sync_every_block=False)
''' Mine 1 old version block '''
block = create_block(self.tip, create_coinbase(1), self.last_block_time + 1)
block.nVersion = 3
block.rehash()
block.solve()
self.last_block_time += 1
self.tip = block.sha256
yield TestInstance([[block, True]])
''' Mine 1 new version block '''
block = create_block(self.tip, create_coinbase(1), self.last_block_time + 1)
block.nVersion = 4
block.rehash()
block.solve()
self.last_block_time += 1
self.tip = block.sha256
yield TestInstance([[block, True]])
''' Mine 1 old version block, should be invalid '''
block = create_block(self.tip, create_coinbase(1), self.last_block_time + 1)
block.nVersion = 3
block.rehash()
block.solve()
self.last_block_time += 1
yield TestInstance([[block, False]])
if __name__ == '__main__':
BIP65Test().main()
|
madebydaniz/django1-11 | src/menus/models.py | Python | mit | 1,040 | 0.009615 | from django.db import models
from django.conf import settings
from django.core.urlresolvers import reverse
from restaurants.models import RestaurantLocation
class Item(models.Model):
user = models.ForeignKey(settings.AUTH_USER_MODEL)
restaurant = models.ForeignKey(RestaurantLocation)
name = models.CharField(max_length=120)
contents = models.TextField(help_text='seperate each item by comma')
excludes = models.TextField(null=True, blank=True, help_text='seperate each item by comma')
public = models.BooleanField(default=Tru | e)
timestamp = models.DateTimeField(auto_now_add=True)
updated = models.DateTimeField(auto_now=True)
def __str__(self):
return self.name
def get_absolute_url(self):
return reverse('menus:detail', kwargs={'pk': self.pk})
class Meta:
ordering = ['-updated', '-timestamp']
def get_contents(self):
return self.contents.split(',')
def get_excludes(self):
return self.excludes.split(',') | |
CPonty/jrxml2sql | docs/scope.py | Python | gpl-2.0 | 3,584 | 0.000279 | """
jrxml2sql v0
- parameter extractor only; don't attempt full parser (yet?)
- query2workbench.py: "dumb" conversion
- jrxml2sql.py: extract query & parameter types from tags
- usage:
> if no arguments provided, open as CLI:
-- splash screen with name/purpose of script
-- paste text to terminal
-- run remainder as if file was provided
> if argument provided, treat as file, convert file
-- validate file exists
-- print minimal progress to terminal
-- print "created files: query_p | ostges.sql, query_sqlserver.sql"
-- prompt for next query
-- ctrl-c / ctrl-d / q / exit / quit exits
> drag-n-drop is the same as providing one argument
> -d flag: debug mode, detailed progress printed to terminal
- components:
> mappings_[language].py files
//-- map parameter to default type, per language
//-- map param | eter to $X{} default value, per language
-- map parameter to $P{} default value (+ type), per language
-- map java type to sql type, per language
> classes
-- parameter, incl. java type & converted type per language
-- $X statement - original string, value, parameter
- jrxml2sql steps:
> load and verify config file, jrxml file
> extract query, parameter types from jrxml
> extract $P, $P!, $X parameters from query
> extract a set of unique $X statements from query
> map java type to sql type for query parameters (reference jrxml)
> generate default value based on above mapping (if unknown: ???)
> make a copy of the query
> replace $P with editor-specific variable
> replace $X with editor-specific variable
> generate output - only include parameters in the query.
-- $P (single-value parameters) with default values
--divider--
-- $X (multi-value parameters) with:
~~~ true as default, allow user to replace expression.
Follow with original $X{} expr, commented out
--divider--
-- $P! variables with:
~~~ note for user to find and replace these
--divider--
-- fixed query
--divider
-- original query, commented out
> write file(s), terminal dump etc
-----------------------------------------------------------------------
conundrums
- we will *not* know the query language (even from the jrxml)
- we can try and extract clues, but it won't work for everything
- solution: config file!
editor="" # "workbench", "sqlstudio" or "all"
language="" # "postgres", "sqlserver" or "all"
debug=False # True or False
- no oracle support at first :(
-----------------------------------------------------------------------
query2workbench.py:
- convert jasper.sql to workbench.sql (fixed files)
- "dumb" conversion - doesn't try to match parameter types
and default values; doesn't support sql studio; don't care
about language
- no config file
- no mappings_[language].py files
- steps:
> extract $P, $P!, $X parameters from query
> extract a set of unique $X statements from query
> make a copy of the query
> replace $P with editor-specific variable
> replace $X with editor-specific variable
> generate output
-- $P (single-value parameters) = "???";
--divider--
-- $X (multi-value parameters) with:
~~~ true as default, allow user to replace expression.
Follow with original $X{} expr, commented out
--divider--
-- $P! variables with:
~~~ note for user to find and replace these
--divider--
-- fixed query
--divider
-- original query, commented out
> write file, terminal dump etc
-----------------------------------------------------------------------
|
leszektarkowski/PAR273 | server/server.py | Python | gpl-2.0 | 2,220 | 0.011712 | import gpib
class PARWriteError(Exception):
pass
class PARReadError(Exception):
pass
class PARCellWorking(Exception):
| pass
class Poll:
COMMAND_DONE = 1
COMMAND_ERROR = 2
CURVE_DONE = 4
OVERLOAD = 16
SWEEP_DONE = 32
SRQ = 64
OUTPUT_READY = 128
class PAR(object):
def __init__(self, addres | ):
self.dev = gpib.dev(*addres)
self.write('DD 13')
self.write('TYPE Succesfull init"')
def write(self, cmd):
while True:
status = ord(gpib.serial_poll(self.dev))
#print '.',
if status & Poll.COMMAND_DONE != 0:
break
elif status & Poll.OUTPUT_READY != 0:
raise PARWriteError("Data is ready, can't write")
gpib.write(self.dev, cmd)
def read(self):
while True:
status = ord(gpib.serial_poll(self.dev))
#print ':', status,
if status & Poll.OUTPUT_READY != 0:
break
elif status & Poll.COMMAND_DONE != 0:
raise PARReadError("Nothing to read")
return gpib.read(self.dev, 1024)
def ask(self, cmd):
self.write(cmd)
return self.read()
def wait_for_relay(self):
cell_hw_switch = int(p.ask("CS").split()[0])
cell_relay = int(p.ask("CELL").split()[0])
if cell_hw_switch and cell_relay:
raise PARCellWorking("Both cell switches enabled")
elif cell_hw_switch == False and cell_relay:
raise PARCellWorking("Cell relay in waiting status...")
elif cell_hw_switch == True and cell_relay == False:
raise PARCellWorking("Previous measurement not finished!")
elif cell_hw_switch == False and cell_relay == False:
for i in range(100):
print "Press Cell Switch!"
import time
time.sleep(0.2)
cell_hw_switch = int(p.ask("CS").split()[0])
if cell_hw_switch:
p.write("CELL 1")
print "Measurement started"
return True
return False
p = PAR( (0,14) )
|
JakeFountain/H3DAPIwithOculusSupport | examples/bumpmap/bumpmap.py | Python | gpl-2.0 | 571 | 0.042032 | from H3DInterface import *
class SFVec3f | ToColor( TypedField( MFColor, ( SFVec3f, SFMatrix4f, MFVec3f ) ) ):
def update( self, event ):
inputs = self.getRoutesIn()
light_pos_global = inputs[0].getValue()
acc_inverse_matrix = inputs[1].getValue()
points = inputs[2].getValue()
light_pos = acc_inverse_matrix * lig | ht_pos_global
res = []
for p in points:
v = light_pos - p
v.normalize()
v.y = -v.y
v = v * 0.5 + Vec3f( 0.5, 0.5, 0.5 )
res.append( RGB( v.x, v.y, v.z ) )
return res
toColor = SFVec3fToColor()
|
googleads/google-ads-python | google/ads/googleads/v9/errors/types/header_error.py | Python | apache-2.0 | 1,153 | 0.000867 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2 | .0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF | ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
__protobuf__ = proto.module(
package="google.ads.googleads.v9.errors",
marshal="google.ads.googleads.v9",
manifest={"HeaderErrorEnum",},
)
class HeaderErrorEnum(proto.Message):
r"""Container for enum describing possible header errors.
"""
class HeaderError(proto.Enum):
r"""Enum describing possible header errors."""
UNSPECIFIED = 0
UNKNOWN = 1
INVALID_LOGIN_CUSTOMER_ID = 3
INVALID_LINKED_CUSTOMER_ID = 7
__all__ = tuple(sorted(__protobuf__.manifest))
|
702nADOS/sumo | tools/route/routeDiffStats.py | Python | gpl-3.0 | 3,296 | 0.001517 | #!/usr/bin/env python
"""
@file routeStats.py
@author Jakob Erdmann
@date 2014-12-18
@version $Id: routeDiffStats.py 19649 2015-12-17 21:05:20Z behrisch $
compute statistics for two sets of routes (for the same set of vehicles)
SUMO, Simulation of Urban MObility; see http://sumo.dlr.de/
Copyright (C) 2014-2014 DLR (http://www.dlr.de/) and contributors
This file is part of SUMO.
SUMO is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3 of the License, or
(at your option) any later version.
"""
from __future__ import absolute_import
from __future__ import print_function
import os
import sys
from optparse import OptionParser
if 'SUMO_HOME' in os.environ:
tools = os.path.join(os.environ['SUMO_HOME'], 'tools')
sys.path.append(os.path.join(tools))
from sumolib.output import parse, parse_fast
from sumolib.net import readNet
from sumolib.miscutils import Statistics
else:
sys.exit("please declare environment variable 'SUMO_HOME'")
def get_options():
USAGE = """Usage %prog [options] <net.xml> <rou1.xml> <rou2.xml>"""
optParser = OptionParser(usage=USAGE)
optParser.add_o | ption("-v", "--verbose", action="store_true",
default=False, help="Gi | ve more output")
optParser.add_option("--binwidth", type="float",
default=100, help="binning width of route length difference histogram")
optParser.add_option("--hist-output", type="string",
default=None, help="output file for histogram (gnuplot compatible)")
optParser.add_option("--full-output", type="string",
default=None, help="output file for full data dump")
options, args = optParser.parse_args()
try:
options.network = args[0]
options.routeFile1 = args[1]
options.routeFile2 = args[2]
except:
sys.exit(USAGE)
return options
def getRouteLength(net, vehicle):
return sum([net.getEdge(e).getLength() for e in vehicle.route[0].edges.split()])
def main():
options = get_options()
net = readNet(options.network)
edges = set([e.getID() for e in net.getEdges()])
lengths1 = {}
lengths2 = {}
lengthDiffStats = Statistics(
"route length difference", histogram=True, scale=options.binwidth)
for vehicle in parse(options.routeFile1, 'vehicle'):
lengths1[vehicle.id] = getRouteLength(net, vehicle)
for vehicle in parse(options.routeFile2, 'vehicle'):
lengths2[vehicle.id] = getRouteLength(net, vehicle)
lengthDiffStats.add(
lengths2[vehicle.id] - lengths1[vehicle.id], vehicle.id)
print(lengthDiffStats)
if options.hist_output is not None:
with open(options.hist_output, 'w') as f:
for bin, count in lengthDiffStats.histogram():
f.write("%s %s\n" % (bin, count))
if options.full_output is not None:
with open(options.full_output, 'w') as f:
differences = sorted(
[(lengths2[id] - lengths1[id], id) for id in lengths1.keys()])
for diff, id in differences:
f.write("%s %s\n" % (diff, id))
if __name__ == "__main__":
main()
|
masayoota/upPIR | uppirlib.py | Python | mit | 26,413 | 0.016393 | """
<Author> Justin Cappos
(inspired from a previous version by Geremy Condra)
<Start Date>
May 16th, 2011
<Description>
Lots of helper code for upPIR. Much of this code will be used multiple
places, but some many not. Anything that is at least somewhat general will
live here.
"""
import sys
# used for os.path.exists, os.path.join and os.walk
import os
# only need ceil
import math
import socket
import ssl
# use this to turn the stream abstraction into a message abstraction...
import session
# Check the python version. It's pretty crappy to do this from a library,
# but it's an easy way to check this universally
if sys.version_info[0] != 2 or sys.version_info[1] < 5:
print "Requires Python >= 2.5 and < 3.0"
sys.exit(1)
# A safe way to serialize / deserialize network data
if sys.version_info[1] == 5:
try:
import simplejson as json
except ImportError:
# This may have plausibly been forgotten
print "Requires simplejson on Python 2.5.X"
sys.exit(1)
else:
# This really should be there. Let's ignore the try-except block...
import json
import hashlib
# Exceptions...
class FileNotFound(Exception):
"""The file could not be found"""
class IncorrectFileContents(Exception):
"""The contents of the file do not match the manifest"""
# these keys must exist in a manifest dictionary.
_required_manifest_keys = ['manifestversion', 'blocksize', 'blockcount',
'blockhashlist', 'hashalgorithm',
'vendorhostname', 'vendorport', 'vendorcliport',
'manifesthash', 'fileinfolist' ]
# an example manifest might look like:
# {'manifestversion':"1.0", 'blocksize':1024, 'blockcount':100,
# 'blockhashlist':['ab3...', ''2de...', ...], 'hashalgorithm':'sha1-base64',
# 'vendorhostname':'blackbox.cs.washington.edu', vendorport:62293,
# 'manifesthash':'42a...',
# 'fileinfolist':[{'filename':'file1',
# 'hash':'a8...',
# 'offset':1584,
# 'length':1023), # (do I need this?)
# {'filename':'foo/file2', # (next file listed...)
# 'hash':'4f...',
# 'offset':2607,
# 'length':63451}, # (do I need this?)
# ...]
def _validate_manifest(manifest):
# private function that validates the manifest is okay
# it raises a TypeError if it's not valid for some reason
if type(manifest) != dict:
raise TypeError("Manifest must be a dict!")
# check for the required keys
for key in _required_manifest_keys:
if key not in manifest:
raise TypeError("Manifest must contain key: "+key+"!")
# check specific things
if len(manifest['blockhashlist']) != manifest['blockcount']:
raise TypeError("There must be a hash for every manifest block")
# otherwise, I guess I'll let this slide. I don't want the checking to
# be too version specific
# JAC: Is this a dumb idea? Should I just check it all? Do I want
# this to fail later? Can the version be used as a proxy check for this?
_supported_hashalgorithms = ['md5', 'sha1', 'sha224', 'sha256', 'sha384',
'sha512']
_supported_hashencodings = ['hex','raw']
def find_hash(contents, algorithm):
# Helper function for hashing...
# first, if it's a noop, do nothing. THIS IS FOR TESTING ONLY
if algorithm == 'noop':
return ''
# accept things like: "sha1", "sha256-raw", etc.
# before the '-' is one of the types known to hashlib. After is
hashencoding = 'hex'
if '-' in algorithm:
# yes, this will raise an exception in some cases...
hashalgorithmname, hashencoding = algorithm.split('-')
# check the args
if hashalgorithmname not in _supported_hashalgorithms:
raise TypeError("Do not understand hash algorithm: '"+algorithm+"'")
if hashencoding not in _supported_hashencodings:
raise TypeError("Do not understand hash algorithm: '"+algorithm+"'")
hashobj = hashlib.new(hashalgorithmname)
hashobj.update(contents)
if hashencoding == 'raw':
return hashobj.digest()
elif hashencoding == 'hex':
return hashobj.hexdigest()
else:
raise Exception("Internal Error! Unknown hashencoding '"+hashencoding+"'")
def transmit_mirrorinfo(mirrorinfo, vendorlocation, defaultvendorport=62293):
"""
<Purpose>
Sends our mirror information to a vendor.
<Arguments>
vendorlocation: A string that contains the vendor location. This can be
of the form "IP:port", "hostname:port", "IP", or "hostname"
defaultvendorport: the port to use if the vendorlocation does not include
one.
<Exceptions>
TypeError if the args are the wrong types or malformed...
various socket errors if the connection fails.
ValueError if vendor does not accept the mirrorinfo
<Side Effects>
Contacts the vendor and retrieves data from it
<Returns>
None
"""
if type(mirrorinfo) != dict:
raise TypeError("Mirror information must be a dictionary")
# do the actual communication...
answer = _remote_query_helper(vendorlocation, "MIRRORADVERTISE"+json.dumps(mirrorinfo), defaultvendorport)
if answer != "OK":
# JAC: I don't really like using ValueError. I should define a new one
raise ValueError(answer)
def retrieve_rawmanifest(vendorlocation, defaultvendorport=62293):
"""
<Purpose>
Retrieves and verifies the manifest data from a vendor. It does not parse this
data in any way.
<Arguments>
vendorlocation: A string that contains the vendor location. This can be
of the form "IP:port", "hostname:port", "IP", or "hostname"
defaultvendorport: the port to use if the vendorlocation does not include
one.
<Exceptions>
TypeError if the vendorlocation is the wrong type or malformed.
various socket errors if the connection fails.
<Side Effects>
Contacts the vendor and retrieves data from it
<Returns>
A string containing the manifest data (unprocessed). It is a good idea
to use parse_manifest to ensure this data is correct.
"""
message = _remote_query_helper(vendorlocation, "GET MANIFEST", defaultvendorport)
# retrieve the strings of signature and manifest from message
idx = message.find('{')
signature = message[:idx]
rawmanifest = message[ | idx:]
# verify the manifest with the digital signature of the vendor
if _verify_ | manifest(signature, rawmanifest):
return rawmanifest
return ''
from Crypto.PublicKey import RSA
from Crypto.Signature import PKCS1_v1_5
from Crypto.Hash import SHA256
from base64 import b64decode
def _verify_manifest(signature, data):
# private function that verifies data using the digital signature
# gets the public key from the manifest data.
manifestdict = parse_manifest(data)
pub_key = manifestdict['pub_key']
rsa_key = RSA.importKey(pub_key)
# computes the hash of data
datahash = SHA256.new()
datahash.update(data)
# verifies the data with the decoded signature
verifier = PKCS1_v1_5.new(rsa_key)
if verifier.verify(datahash, b64decode(signature)):
return True
return False
def retrieve_xorblock_from_mirror(mirrorip, mirrorport,bitstring):
"""
<Purpose>
Retrieves a block from a mirror.
<Arguments>
mirrorip: the mirror's IP address or hostname
mirrorport: the mirror's port number
bitstring: a bit string that contains an appropriately sized request that
specifies which blocks to combine.
<Exceptions>
TypeError if the arguments are the wrong types. ValueError if the
bitstring is the wrong size
various socket errors if the connection fails.
<Side Effects>
Contacts the mirror and retrieves data from it
<Returns>
A string containing the manifest data (unprocessed). It is a good idea
to use parse_manifest to ensure this data is correct.
"""
response = _remote_query_helper(mirrorip, "XORBLOCK"+bitstring,mirrorport)
if response == 'Invalid request length':
raise Val |
RobotTurtles/mid-level-routines | test_RobotMenu.py | Python | apache-2.0 | 659 | 0.003035 | from RobotMenu import RobotMenu
import unittest
import logging
class TestSequenceFunctions(unittest.TestCase):
"""Tests for basic movement, currently write to/from a file"""
def setUp(self):
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
self.__targetFile = 'testResults.txt'
self.menu = RobotMenu(logger, 'movement')
def test_connect_wifi(self):
# Arrange
qrCode = 'runConnectToNetwork:bananas wpa2 mysteryspot2'
# Act
self.menu.proc | ess(qrCode)
| #Assert
self.assertTrue(True, 'some message')
if __name__ == '__main__':
unittest.main() |
felliott/waterbutler | waterbutler/providers/osfstorage/metadata.py | Python | apache-2.0 | 4,099 | 0.002196 | import pytz
import dateutil.parser
from waterbutler.core import metadata
class BaseOsfStorageMetadata:
@property
def provider(self):
return 'osfstorage'
class BaseOsfStorageItemMetadata(BaseOsfStorageMetadata):
def __init__(self, raw, materialized):
super().__init__(raw)
self._materialized = materialized
@property
def name(self):
return self.raw['name']
| @pr | operty
def path(self):
return self.raw['path']
@property
def materialized_path(self):
return self._materialized
class OsfStorageFileMetadata(BaseOsfStorageItemMetadata, metadata.BaseFileMetadata):
@property
def modified(self):
return self.raw['modified']
@property
def modified_utc(self):
try:
return self.raw['modified_utc']
except KeyError:
if self.raw['modified'] is None:
return None
# Kludge for OSF, whose modified attribute does not include
# tzinfo but is assumed to be UTC.
parsed_datetime = dateutil.parser.parse(self.raw['modified'])
if not parsed_datetime.tzinfo:
parsed_datetime = parsed_datetime.replace(tzinfo=pytz.UTC)
return parsed_datetime.isoformat()
@property
def created_utc(self):
try:
return self.raw['created_utc']
except KeyError:
if self.raw['created'] is None:
return None
# Kludge for OSF, whose created attribute does not include
# tzinfo but is assumed to be UTC.
parsed_datetime = dateutil.parser.parse(self.raw['created'])
if not parsed_datetime.tzinfo:
parsed_datetime = parsed_datetime.replace(tzinfo=pytz.UTC)
return parsed_datetime.isoformat()
@property
def size(self):
return self.raw['size']
@property
def content_type(self):
return self.raw.get('contentType')
@property
def etag(self):
return '{}::{}'.format(self.raw['version'], self.path)
@property
def extra(self):
"""osfstorage-specific metadata for files.
* ``guid``: Always `None`. Added in anticipation of OSF-side support, which was then
abandoned after technical consideration. Left in to avoid breaking clients that expect
the key to be present.
* ``version``: The version number of the *most recent* version, not the requested version.
* ``downloads``: Number of times the file has been downloaded.
* ``checkout``: Whether this file has been checked-out and is therefore read-only to all
but the user who has checked it out.
* ``latestVersionSeen``: Whether the requesting user has seen the most recent version of
the file. `True` if so. `False` if a newer version exists that the user has not yet
seen. `None` if the user has not seen *any* version of the file.
"""
return {
'guid': self.raw.get('guid', None),
'version': self.raw['version'],
'downloads': self.raw['downloads'],
'checkout': self.raw['checkout'],
'latestVersionSeen': self.raw.get('latestVersionSeen', None),
'hashes': {
'md5': self.raw['md5'],
'sha256': self.raw['sha256']
},
}
class OsfStorageFolderMetadata(BaseOsfStorageItemMetadata, metadata.BaseFolderMetadata):
pass
class OsfStorageRevisionMetadata(BaseOsfStorageMetadata, metadata.BaseFileRevisionMetadata):
@property
def modified(self):
return self.raw['date']
@property
def version_identifier(self):
return 'version'
@property
def version(self):
return str(self.raw['index'])
@property
def extra(self):
return {
'user': self.raw['user'],
'downloads': self.raw['downloads'],
'hashes': {
'md5': self.raw['md5'],
'sha256': self.raw['sha256']
},
}
|
dweinstein/mitmproxy | libmproxy/protocol/base.py | Python | mit | 6,663 | 0.003152 | from __future__ import (absolute_import, print_function, division)
import sys
import six
from netlib import tcp
from ..models import ServerConnection
from ..exceptions import ProtocolException
from netlib.exceptions import TcpException
class _LayerCodeCompletion(object):
"""
Dummy class that provides type hinting in PyCharm, which simplifies development a lot.
"""
def __init__(self, **mixin_args): # pragma: nocover
super(_LayerCodeCompletion, self).__init__(**mixin_args)
if True:
return
self.config = None
"""@type: libmproxy.proxy.ProxyConfig"""
self.client_conn = None
"""@type: libmproxy.models.ClientConnection"""
self.server_conn = None
"""@type: libmproxy.models.ServerConnection"""
self.channel = None
"""@type: libmproxy.controller.Channel"""
self.ctx = None
"""@type: libmproxy.protocol.Layer"""
class Layer(_LayerCodeCompletion):
"""
Base class for all layers. All other protocol layers should inherit from this class.
"""
def __init__(self, ctx, **mixin_args):
"""
Each layer usually passes itself to its child layers as a context. Properties of the
context are transparently mapped to the layer, so that the following works:
.. code-block:: python
root_layer = Layer(None)
root_layer.client_conn = 42
sub_layer = Layer(root_layer)
print(sub_layer.client_conn) # 42
The root layer is passed a :py:class:`libmproxy.proxy.RootContext` object,
which provides access to :py:attr:`.client_conn <libmproxy.proxy.RootContext.client_conn>`,
:py:attr:`.next_layer <libmproxy.proxy.RootContext.next_layer>` and other basic attributes.
Args:
ctx: The (read-only) parent layer / context.
"""
self.ctx = ctx
"""
The parent layer.
:type: :py:class:`Layer`
"""
super(Layer, | self).__init__(**mixin_args)
def | __call__(self):
"""Logic of the layer.
Returns:
Once the protocol has finished without exceptions.
Raises:
~libmproxy.exceptions.ProtocolException: if an exception occurs. No other exceptions must be raised.
"""
raise NotImplementedError()
def __getattr__(self, name):
"""
Attributes not present on the current layer are looked up on the context.
"""
return getattr(self.ctx, name)
@property
def layers(self):
"""
List of all layers, including the current layer (``[self, self.ctx, self.ctx.ctx, ...]``)
"""
return [self] + self.ctx.layers
def __repr__(self):
return type(self).__name__
class ServerConnectionMixin(object):
"""
Mixin that provides a layer with the capabilities to manage a server connection.
The server address can be passed in the constructor or set by calling :py:meth:`set_server`.
Subclasses are responsible for calling :py:meth:`disconnect` before returning.
Recommended Usage:
.. code-block:: python
class MyLayer(Layer, ServerConnectionMixin):
def __call__(self):
try:
# Do something.
finally:
if self.server_conn:
self.disconnect()
"""
def __init__(self, server_address=None):
super(ServerConnectionMixin, self).__init__()
self.server_conn = ServerConnection(server_address, (self.config.host, 0))
self.__check_self_connect()
def __check_self_connect(self):
"""
We try to protect the proxy from _accidentally_ connecting to itself,
e.g. because of a failed transparent lookup or an invalid configuration.
"""
address = self.server_conn.address
if address:
self_connect = (
address.port == self.config.port and
address.host in ("localhost", "127.0.0.1", "::1")
)
if self_connect:
raise ProtocolException(
"Invalid server address: {}\r\n"
"The proxy shall not connect to itself.".format(repr(address))
)
def set_server(self, address, server_tls=None, sni=None):
"""
Sets a new server address. If there is an existing connection, it will be closed.
Raises:
~libmproxy.exceptions.ProtocolException:
if ``server_tls`` is ``True``, but there was no TLS layer on the
protocol stack which could have processed this.
"""
if self.server_conn:
self.disconnect()
self.log("Set new server address: " + repr(address), "debug")
self.server_conn.address = address
self.__check_self_connect()
if server_tls:
raise ProtocolException(
"Cannot upgrade to TLS, no TLS layer on the protocol stack."
)
def disconnect(self):
"""
Deletes (and closes) an existing server connection.
Must not be called if there is no existing connection.
"""
self.log("serverdisconnect", "debug", [repr(self.server_conn.address)])
address = self.server_conn.address
source_address = self.server_conn.source_address
self.server_conn.finish()
self.server_conn.close()
self.channel.tell("serverdisconnect", self.server_conn)
self.server_conn = ServerConnection(address, source_address)
def connect(self):
"""
Establishes a server connection.
Must not be called if there is an existing connection.
Raises:
~libmproxy.exceptions.ProtocolException: if the connection could not be established.
"""
if not self.server_conn.address:
raise ProtocolException("Cannot connect to server, no server address given.")
self.log("serverconnect", "debug", [repr(self.server_conn.address)])
self.channel.ask("serverconnect", self.server_conn)
try:
self.server_conn.connect()
except TcpException as e:
six.reraise(
ProtocolException,
ProtocolException(
"Server connection to {} failed: {}".format(
repr(self.server_conn.address), str(e)
)
),
sys.exc_info()[2]
)
class Kill(Exception):
"""
Signal that both client and server connection(s) should be killed immediately.
"""
|
jthurst3/newspeeches | get_words.py | Python | mit | 836 | 0.026316 | # get_words.py
# returns a list of words from the relevant lines of a speech
# J. Hassler Thurston
# RocHack Hackathon December 7, 2013
import csv
import nltk
def | get_words(line_list):
sentences = parse_to_sentences(line_list)
#print sentences
words = [nltk.word_tokenize(sent) for sent in sentences]
return sentences
# returns a list of sentences from the list of lines
def parse_to_sentences(line_list) | :
#print line_list
sentence_list = []
for line in line_list:
# insert the sentences into the list
sentence_list.extend(line.split('.'))
return sentence_list
# exports the list of words to a CSV file (currently only exports sentences)
def export_to_csv(word_list, filename):
out = open(filename, 'w')
#wr = csv.writer(out, quoting=csv.QUOTE_ALL)
for sentence in word_list:
out.write(sentence + '\n')
out.close() |
Aloomaio/python-sdk | setup.py | Python | apache-2.0 | 645 | 0 | #!/usr/bin/env python
from setuptools import setup
sdk_package_name = 'alooma_pysdk'
packages = [sdk_package_name]
setup(
name=sdk_package_name,
packages=packages,
package_data={sdk_package | _name: ['alooma_ca']},
version='2.1',
description='An easy-to-integrate SDK for your Python apps to report '
'events to Alooma',
url='https://github.com/Aloomaio/python-sdk',
author='Alooma',
author_email='integrations@alooma.com',
keywords=['python', 'sdk', 'alooma', 'pysdk'],
install_requires=open("requirements.txt").readlines(),
tests_ | require=open("requirements-tests.txt").readlines()
)
|
lukas-krecan/tensorflow | tensorflow/python/ops/control_flow_ops.py | Python | apache-2.0 | 71,570 | 0.006721 | # Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""## Control Flow Operations
TensorFlow provides several operations and classes that you can use to control
the execution of operations and add conditional dependencies to your graph.
@@identity
@@tuple
@@group
@@no_op
@@count_up_to
@@cond
## Logical Operators
TensorFlow provides several operations that you can use to add logical operators
to your graph.
@@logical_and
@@logical_not
@@logical_or
@@logical_xor
## Comparison Operators
TensorFlow provides several operations that you can use to add comparison
operators to your graph.
@@equal
@@not_equal
@@less
@@less_equal
@@greater
@@greater_equal
@@select
@@where
## Debugging Operations
TensorFlow provides several operations that you can use to validate values and
debug your graph.
@@is_finite
@@is_inf
@@is_nan
@@verify_tensor_all_finite
@@check_numerics
@@add_check_numerics_ops
@@Assert
@@Print
"""
# pylint: disable=g-bad-name
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import six
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import common_shapes
from tensorflow.python.ops import constant_op
from tensorflow.python.ops import gen_array_ops
from tensorflow.python.ops import gen_control_flow_ops
from tensorflow.python.ops import gen_data_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import tensor_array_ops
# pylint: disable=wildcard-import,undefined-variable
from tensorflow.python.ops.gen_control_flow_ops import *
from tensorflow.python.platform import logging
# We override the 'tuple' for a control flow op, so we keep python's
# existing 'tuple' for later use in this module.
_basetuple = tuple
# pylint: disable=protected-access
def _Identity(data, name=None):
"""Return a tensor with the same shape and contents as the input tensor.
Args:
data: A Tensor.
name: A name for this operation (optional).
Returns:
A Tensor with the same type and value as the input Tensor.
"""
if not data.dtype.is_ref_dtype:
return array_ops.identity(data, name=name)
else:
return gen_array_ops._ref_identity(data, name=name)
def _NextIteration(data, name=None):
if not data.dtype.is_ref_dtype:
return next_iteration(data, name=name)
else:
return ref_next_iteration(data, name=name)
def _Merge(values, name=None):
if all([v.dtype.is_ref_dtype for v in values]):
ret | urn gen_control_flow_ops._ref_merge(values, name)
else:
return gen_co | ntrol_flow_ops._merge(values, name)
def _Enter(data, frame_name, is_constant=False, parallel_iterations=10,
use_ref=True, name=None):
"""Creates or finds a child frame, and makes `data` available to it.
The unique `frame_name` is used by the `Executor` to identify frames. If
`is_constant` is true, `output` is a constant in the child frame; otherwise
it may be changed in the child frame. At most `parallel_iterations` iterations
are run in parallel in the child frame.
Args:
data: The tensor to be made available to the child frame.
frame_name: The name of the child frame.
is_constant: If true, the output is constant within the child frame.
parallel_iterations: The number of iterations allowed to run in parallel.
use_ref: If true, use ref_enter if data is of ref type.
name: A name for this operation (optional).
Returns:
The same tensor as `data`.
"""
if data.dtype.is_ref_dtype and use_ref:
return ref_enter(data, frame_name, is_constant, parallel_iterations,
name=name)
else:
return enter(data, frame_name, is_constant, parallel_iterations,
name=name)
def exit(data, name=None):
  """Exits the current frame to its parent frame.

  Exit makes its input `data` available to the parent frame.

  Args:
    data: The tensor to be made available to the parent frame.
    name: A name for this operation (optional).

  Returns:
    The same tensor as `data`.
  """
  # Select the kernel matching the tensor's ref-ness, then invoke it.
  exit_op = (gen_control_flow_ops._ref_exit if data.dtype.is_ref_dtype
             else gen_control_flow_ops._exit)
  return exit_op(data, name)
def switch(data, pred, dtype=None, name=None):
  """Forwards `data` to an output determined by `pred`.

  If `pred` is true, the `data` input is forwarded to the first output.
  Otherwise, the data goes to the second output.

  This op handles `Tensor`s and `IndexedSlices`.

  Args:
    data: The tensor to be forwarded to the appropriate output.
    pred: A scalar that specifies which output port will receive data.
    dtype: Optional element type for the returned tensor. If missing,
           the type is inferred from the type of `value`.
    name: A name for this operation (optional).

  Returns:
    `(output_false, output_true)`: If `pred` is true, data will be forwarded to
    `output_true`, otherwise it goes to `output_false`.
  """
  with ops.op_scope([data, pred], name, "Switch") as name:
    data = ops.convert_to_tensor_or_indexed_slices(data, dtype=dtype,
                                                   name="data")
    pred = ops.convert_to_tensor(pred, name="pred")
    if isinstance(data, ops.Tensor):
      # Dense tensor: a single Switch op suffices.
      return gen_control_flow_ops._switch(data, pred, name=name)
    else:
      # IndexedSlices: switch each component tensor separately, then
      # reassemble the (false, true) pair of IndexedSlices.
      val, ind, dense_shape = data.values, data.indices, data.dense_shape
      val_f, val_t = gen_control_flow_ops._switch(val, pred, name=name)
      ind_f, ind_t = gen_control_flow_ops._switch(ind, pred, name="indices")
      if dense_shape:
        dense_shape_f, dense_shape_t = gen_control_flow_ops._switch(
            dense_shape, pred, name="dense_shape")
      else:
        # No dense_shape on the input; propagate None to both outputs.
        dense_shape_f, dense_shape_t = None, None
      return (ops.IndexedSlices(val_f, ind_f, dense_shape_f),
              ops.IndexedSlices(val_t, ind_t, dense_shape_t))
def merge(inputs, name=None):
"""Returns the value of an available element of `inputs`.
This op tests each of the tensors in `inputs` in turn to determine if any of
them is available. If it finds an available tensor, it returns it and its
index in `inputs`.
It is an error if more than one tensor in `inputs` is available. If no tensor
in `inputs` is available, the returned tensor and index are not set.
This op handles both `Tensor`s and `IndexedSlices`. If inputs has a mix of
`Tensor`s and `IndexedSlices`, all inputs are converted to IndexedSlices
before merging.
Args:
inputs: The input tensors, at most one of which is available.
name: A name for this operation (optional).
Returns:
A tuple containing the chosen input tensor and its index in `inputs`.
Raises:
ValueError: If inputs are IndexedSlices and some but not all have a
dense_shape property.
"""
with ops.op_scope(inputs, name, "Merge") as name:
inputs = [ops.convert_to_tensor_or_indexed_slices(inp)
for inp in inputs]
if all([isinstance(inp, ops.Tensor) for inp in inputs]):
return _Merge(inputs, name=name)
else:
inputs = math_ops._as_indexed_slices_list(inputs)
values, _ = _Merge([inp.values for inp in inputs], name=name)
indices, chosen_index = _Merge(
[inp.indices for inp in inputs], name="indices")
if any(inp.dense_shape for inp in inputs):
if not all(inp.dense_shape for inp in inputs):
|
mfwarren/FreeCoding | 2015/01/fc_2015_01_30.py | Python | mit | 367 | 0 | #!/usr/bin/env python3
# imports go here
#
# Free Coding session for 2015-01-30
# Written by Matt Warren
#
def main():
    """Print a short lament about doing unpaid work on a Friday night."""
    # Fixed the "benfits" typo and two lines mangled by stray dataset
    # delimiters ("friday nigh | t", "this sucks! | ").
    print("writing code for someone else")
    print("and not getting paid for it")
    print("with no chance that it benefits me - even indirectly")
    print("on a friday night")
    print("this sucks!")


if __name__ == '__main__':
    main()
|
prheenan/BioModel | BellZhurkov/Python/TestExamples/TestUtil/Bell_Test_Data.py | Python | gpl-2.0 | 2,506 | 0.01237 | # force floating point division. Can still use integer with //
from __future__ import division
# This file is used for importing the common utilities classes.
import numpy as np
import matplotlib.pyplot as plt
import sys
class BellData:
    """Plain record pairing forces with folding/unfolding rates."""

    def __init__(self, forces, RatesFold, RatesUnfold=None):
        """Store the force array and the rate arrays unchanged."""
        self.RatesUnfold = RatesUnfold
        self.RatesFold = RatesFold
        self.Forces = forces
def Schlierf2006Figure1a():
    """
    From
    Schlierf, Michael, and Matthias Rief.
    "Single-Molecule Unfolding Force Distributions Reveal a Funnel-Shaped
    Energy Landscape."
    Biophysical Journal 90, no. 4 (February 15, 2006)
    """
    # x axis of figure 1a: pulling velocities, um/s converted to m/s.
    pulling_velocities = 1e-6 * np.array([0.2, 0.4, 0.8, 2, 4])
    # y axis of figure 1a: unfolding forces, pN converted to N.
    unfolding_forces = 1e-12 * np.array([42, 46, 52, 62, 68])
    return BellData(unfolding_forces, pulling_velocities)
def Woodside2014FoldingAndUnfoldingData():
    """
    From:
    Woodside, Michael T., and Steven M. Block.
    "Reconstructing Folding Energy Landscapes by Single-Molecule Force Spectroscopy"
    Annual Review of Biophysics 43, no. 1 (2014): 19-39.
    doi:10.1146/annurev-biophys-051013-022754.

    Returns
    -------
    BellData with forces in N and folding/unfolding rates modelled as
    exponentials decaying/growing between the endpoints read off figure 6a.
    """
    # write down figure 6a, pp 28 (forces in pN)
    Forces = [11.25,
              11.50,
              11.75,
              12.0,
              12.25,
              12.75,
              13.25,
              13.5,
              13.75,
              14.25,
              14.75]
    # Restored "Forces = np.array(Forces)" -- the line was mangled by a
    # stray dataset delimiter ("np.array(For | ces)").
    Forces = np.array(Forces)
    # write down the folding and unfolding rates as decaying/increasing
    # exponentials, based on their starting value in the graph
    ForceDiff = max(Forces) - min(Forces)
    # we have
    # y ~ exp(+/- t/tau)
    # so y_f/y_i = exp(+/- (t_f-t_i)/tau)
    # so tau = +/- np.log(yf/y_i)/(tf-t_i). Apparently the signs work
    # themselves out the way tau is used below.
    yf = 300
    yi = 8
    # NOTE(review): yf/yi is integer division under Python 2 (37) but true
    # division under Python 3 (37.5) -- confirm which was intended.
    # get the 'taus' for the exponential decay (really inverse forces)
    tauFold = np.log(yf/yi)/(ForceDiff)
    tauUnfold = np.log(yf/yi)/(ForceDiff)
    # get the offset force array; use this to set up the data, which we
    # assume decays or increases from our 'zero point' (first measured data)
    forceOffset = (Forces-Forces[0])
    # folding is decaying / *has* minus sign
    Folding = yf * np.exp( -forceOffset/tauFold )
    # unfolding is increasing / growing / *doesnt have* minus sign
    Unfolding = yi * np.exp( forceOffset/tauUnfold)
    return BellData(Forces*1e-12, Folding, Unfolding)
|
sahiljain/catapult | telemetry/telemetry/story/story.py | Python | bsd-3-clause | 4,429 | 0.00858 | # Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import re
from telemetry.story import shared_state as shared_state_module
_next_story_id = 0
class Story(object):
  """A class styled on unittest.TestCase for creating story tests.

  Tests should override Run to maybe start the application and perform actions
  on it. To share state between different tests, one can define a
  shared_state which contains hooks that will be called before and
  after multiple stories run and in between runs.

  Args:
    shared_state_class: subclass of telemetry.story.shared_state.SharedState.
    name: string name of this story that can be used for identifying this story
        in results output.
    tags: A list or set of string labels that are used for filtering. See
        story.story_filter for more information.
    is_local: If True, the story does not require network.
    grouping_keys: A dict of grouping keys that will be added to values computed
        on this story.
  """

  def __init__(self, shared_state_class, name='', tags=None,
               is_local=False, make_javascript_deterministic=True,
               grouping_keys=None, platform_specific=False):
    """
    Args:
      make_javascript_deterministic: Whether JavaScript performed on
        the page is made deterministic across multiple runs. This
        requires that the web content is served via Web Page Replay
        to take effect. This setting does not affect stories containing no web
        content or where the HTTP MIME type is not text/html. See also:
        _InjectScripts method in third_party/web-page-replay/httpclient.py.
      platform_specific: Boolean indicating if a separate web page replay
        recording is required on each platform.
    """
    assert issubclass(shared_state_class,
                      shared_state_module.SharedState)
    self._shared_state_class = shared_state_class
    self._name = name
    self._platform_specific = platform_specific
    # Stories receive increasing integer ids from a module-level counter.
    global _next_story_id
    self._id = _next_story_id
    _next_story_id += 1
    if tags is None:
      tags = set()
    elif isinstance(tags, list):  # restored line mangled by a stray delimiter
      tags = set(tags)
    else:
      assert isinstance(tags, set)
    self._tags = tags
    self._is_local = is_local
    self._make_javascript_deterministic = make_javascript_deterministic
    if grouping_keys is None:
      grouping_keys = {}
    else:
      assert isinstance(grouping_keys, dict)
    self._grouping_keys = grouping_keys  # restored mangled line

  def Run(self, shared_state):
    """Execute the interactions with the applications and/or platforms."""
    raise NotImplementedError

  @property
  def tags(self):
    return self._tags

  @property
  def shared_state_class(self):
    return self._shared_state_class

  @property
  def id(self):
    return self._id

  @property
  def name(self):
    return self._name

  @property
  def grouping_keys(self):
    return self._grouping_keys

  @property
  def display_name_and_grouping_key_tuple(self):
    # NOTE: dict.iteritems is Python 2 only; this file predates Py3 support.
    return self.display_name, tuple(self.grouping_keys.iteritems())

  def AsDict(self):
    """Converts a story object to a dict suitable for JSON output."""
    d = {
      'id': self._id,
    }
    if self._name:
      d['name'] = self._name
    return d

  @property
  def file_safe_name(self):
    """A version of display_name that's safe to use as a filename.

    The default implementation sanitizes special characters with underscores,
    but it's okay to override it with a more specific implementation in
    subclasses.
    """
    # This fail-safe implementation is safe for subclasses to override.
    return re.sub('[^a-zA-Z0-9]', '_', self.display_name)

  @property
  def display_name(self):
    if self.name:
      return self.name
    else:
      return self.__class__.__name__

  @property
  def is_local(self):
    """Returns True iff this story does not require network."""
    return self._is_local

  @property
  def serving_dir(self):
    """Returns the absolute path to a directory with hash files to data that
    should be updated from cloud storage, or None if no files need to be
    updated.
    """
    return None

  @property
  def make_javascript_deterministic(self):
    return self._make_javascript_deterministic

  @property
  def platform_specific(self):
    return self._platform_specific
|
lindemann09/pytrak | pytrak/plotter.py | Python | gpl-3.0 | 10,237 | 0.001661 | __version__ = "0.1"
import threading
import numpy as np
import pygame
from expyriment.stimuli import Canvas
from expyriment.stimuli._visual import Visual
# Serializes expyriment drawing done from the plotter thread.
lock_expyriment = threading.Lock()
# Reference type for cheap "is this a numpy array?" checks; restored from a
# line mangled by a stray dataset delimiter ("np.array([ | ])").
Numpy_array_type = type(np.array([]))
def inherit_docs(cls):
    """Class decorator: copy missing docstrings from parent classes.

    For every attribute of *cls* that has no (or an empty) ``__doc__``,
    search the direct bases (in ``__bases__`` order) for a same-named
    attribute with a docstring and copy it over.

    Returns:
        cls, modified in place.
    """
    for name, func in vars(cls).items():
        if not func.__doc__:
            for parent in cls.__bases__:
                # Restored from a line mangled by a stray dataset
                # delimiter ("getattr( | parent, name)").
                parfunc = getattr(parent, name)
                if parfunc and getattr(parfunc, '__doc__', None):
                    func.__doc__ = parfunc.__doc__
                    break
    return cls
@inherit_docs
class PGSurface(Canvas):
    """PyGame Surface: Expyriment Stimulus for direct Pygame operations and
    PixelArrays.

    In contrast to other Expyriment stimuli the class does not generate
    temporary surfaces.

    Most methods below simply drop the cached ``pygame.PixelArray`` (which
    keeps the surface locked while it exists) before delegating to
    ``Canvas``; their docstrings are inherited via ``inherit_docs``.
    """

    def __init__(self, size, position=None, colour=None):
        Canvas.__init__(self, size, position, colour)
        # Lazily created pygame.PixelArray view onto the surface.
        self._px_array = None

    @property
    def surface(self):
        """The underlying pygame surface (created on first access)."""
        if not self.has_surface:
            ok = self._set_surface(self._get_surface())  # create surface
            if not ok:
                raise RuntimeError(Visual._compression_exception_message.format(
                    "surface"))
        return self._surface

    @property
    def pixel_array(self):
        """pygame.PixelArray view of the surface; locks the surface."""
        if self._px_array is None:
            self._px_array = pygame.PixelArray(self.surface)
        return self._px_array

    @pixel_array.setter
    def pixel_array(self, value):
        # NOTE(review): the PixelArray created here is immediately replaced
        # by `value`; presumably only surface creation is wanted -- confirm.
        if self._px_array is None:
            self._px_array = pygame.PixelArray(self.surface)
        self._px_array = value

    def unlock_pixel_array(self):
        """Drop the PixelArray reference so the surface is unlocked."""
        self._px_array = None

    def preload(self, inhibit_ogl_compress=False):
        self.unlock_pixel_array()
        return Canvas.preload(self, inhibit_ogl_compress)

    def compress(self):
        self.unlock_pixel_array()
        return Canvas.compress(self)

    def decompress(self):
        self.unlock_pixel_array()
        return Canvas.decompress(self)

    def plot(self, stimulus):
        self.unlock_pixel_array()
        return Canvas.plot(self, stimulus)

    def clear_surface(self):
        self.unlock_pixel_array()
        return Canvas.clear_surface(self)

    def copy(self):
        self.unlock_pixel_array()
        return Canvas.copy(self)

    def unload(self, keep_surface=False):
        # Only unlock when the surface is actually being released.
        if not keep_surface:
            self.unlock_pixel_array()
        return Canvas.unload(self, keep_surface)

    def rotate(self, degree):
        self.unlock_pixel_array()
        return Canvas.rotate(self, degree)

    def scale(self, factors):
        self.unlock_pixel_array()
        return Canvas.scale(self, factors)

    # expyriment 0.8.0
    # def scale_to_fullscreen(self, keep_aspect_ratio=True):
    #    self.unlock_pixel_array()
    #    return Canvas.scale_to_fullscreen(self, keep_aspect_ratio)

    def flip(self, booleans):
        self.unlock_pixel_array()
        return Canvas.flip(self, booleans)

    def blur(self, level):
        self.unlock_pixel_array()
        return Canvas.blur(self, level)

    def scramble(self, grain_size):
        self.unlock_pixel_array()
        return Canvas.scramble(self, grain_size)

    def add_noise(self, grain_size, percentage, colour):
        self.unlock_pixel_array()
        return Canvas.add_noise(self, grain_size, percentage, colour)
class Plotter(PGSurface):
    """Pygame Plotter: scrolling real-time line plot drawn via PixelArrays."""

    def __init__(self, n_data_rows, data_row_colours,
                 width=600, y_range=(-100, 100),
                 background_colour=(180, 180, 180),
                 marker_colour=(200, 200, 200),
                 position=None,
                 axis_colour=None):
        self.n_data_rows = n_data_rows
        self.data_row_colours = data_row_colours
        self.width = width
        self.y_range = y_range
        self._background_colour = background_colour
        self.marker_colour = marker_colour
        if axis_colour is None:
            self.axis_colour = background_colour
        else:
            self.axis_colour = axis_colour
        # Last plotted y pixel per data row (None until a value arrives).
        # NOTE(review): comparisons like `self._previous[c] >= 0` rely on
        # Python 2's None-vs-int ordering; under Python 3 they raise.
        self._previous = [None] * n_data_rows
        PGSurface.__init__(self, size=(self.width, self._height),
                           position=position)
        self.clear_area()

    @property
    def y_range(self):
        # BUGFIX: was `return self.y_range`, which recursed infinitely.
        return self._y_range

    @y_range.setter
    def y_range(self, values):
        """tuple with lower and upper values"""
        self._y_range = values
        self._height = self._y_range[1] - self._y_range[0]
        # Draw a zero axis only when 0 lies inside the displayed range.
        self._plot_axis = (self._y_range[0] <= 0 and
                           self._y_range[1] >= 0)

    @property
    def data_row_colours(self):
        return self._data_row_colours

    @data_row_colours.setter
    def data_row_colours(self, values):
        """data_row_colours: list of colour"""
        try:
            if not isinstance(values[0], list) and \
                    not isinstance(values[0], tuple):  # one dimensional
                values = [values]
        except (TypeError, IndexError):  # narrowed from a bare `except:`
            values = [[]]  # values is not a subscriptable sequence
        if len(values) != self.n_data_rows:
            raise RuntimeError('Number of data row colour does not match the ' +
                               'defined number of data rows!')
        self._data_row_colours = values

    def clear_area(self):
        """Fill the plot with the background colour (and the zero axis)."""
        self.pixel_array[:, :] = self._background_colour
        if self._plot_axis:
            self.pixel_array[:, self._y_range[1]:self._y_range[1] + 1] = \
                self.axis_colour

    def write_values(self, position, values, set_marker=False):
        """Draw one pixel column of data at column `position`."""
        if set_marker:
            self.pixel_array[position, :] = self.marker_colour
        else:
            self.pixel_array[position, :] = self._background_colour
            # Axis repainted only on non-marker columns -- confirm against
            # upstream; the dump's indentation is ambiguous here.
            if self._plot_axis and self.axis_colour != self._background_colour:
                self.pixel_array[position, self._y_range[1]:self._y_range[1] + 1] = \
                    self.axis_colour
        # Map data values to pixel rows (y grows downwards) and draw a
        # vertical segment from the previous sample to the new one.
        for c, plot_value in enumerate(self._y_range[1] -
                                       np.array(values, dtype=int)):
            if plot_value >= 0 and self._previous[c] >= 0 \
                    and plot_value <= self._height and \
                    self._previous[c] <= self._height:
                if self._previous[c] > plot_value:
                    self.pixel_array[position,
                                     plot_value:self._previous[c] + 1] = \
                        self._data_row_colours[c]
                else:
                    self.pixel_array[position,
                                     self._previous[c]:plot_value + 1] = \
                        self._data_row_colours[c]
            self._previous[c] = plot_value

    def add_values(self, values, set_marker=False):
        """Scroll the plot one pixel left and append `values` on the right."""
        if type(values) is not Numpy_array_type and \
                not isinstance(values, tuple) and \
                not isinstance(values, list):
            values = [values]
        if len(values) != self.n_data_rows:
            raise RuntimeError('Number of data values does not match the ' +
                               'defined number of data rows!')
        # move plot one pixel to the left
        self.pixel_array[:-1, :] = self.pixel_array[1:, :]
        self.write_values(position=-1, values=values, set_marker=set_marker)
class PlotterThread(threading.Thread):
def __init__(self, n_data_rows, data_row_colours,
width=600, y_range=(-100, 100),
background_colour=(80, 80, 80),
marker_colour=(200, 200, 200),
position=None,
axis_colour=None):
super(PlotterThread, self).__init__()
self._plotter = Plotter(n_data_rows=n_data_rows,
data_row_colours=data_row_colours,
width=width, y_range=y_range,
background_colour=background_colour,
marker_colour=marker_colour,
position=position,
axis_colour=axis_colour)
self._new_values = []
self._lock_new_values = thre |
Pikecillo/digi-dark | digidark/parser.py | Python | gpl-3.0 | 3,699 | 0.001352 | import re
from simpleparse.parser import Parser
from simpleparse.dispatchprocessor import *
# EBNF grammar for the transformation mini-language, consumed by
# simpleparse.parser.Parser.  Two productions were restored from lines
# mangled by stray dataset delimiters: the second `parameters`
# alternative and the `float` rule ("( in | teger, '.', integer )").
grammar = """
root := transformation
transformation := ( name, index, assign, expr )
assign := '='
expr := ( trinary )
        / ( term )
trinary := ( term, '?', term, ':', term )
term := ( factor, binaryop, term )
        / ( factor )
binaryop := '**' / '/' / '%' / '+' / '-' / '>' / '<' / '=='
            / '<=' / '>=' / '!=' / '*' / '&&' / '||'
unaryop := '-' / '!'
factor := ( opar, expr, cpar )
          / ( unaryop, factor )
          / ( name, index )
          / ( function )
          / ( name )
          / ( number )
function := ( name, opar, parameters, cpar )
parameters := ( expr, ',', parameters )
              / ( expr )
opar := '('
cpar := ')'
name := [_a-zA-Z], [_a-zA-Z0-9]*
number := ( float )
          / ( integer )
integer := [0-9]+
float := ( integer, '.', integer )
index := ( '[', expr, ',', expr, ']' )
         / ( '[', function, ']' )
"""
class SyntaxTreeProcessor(DispatchProcessor):
    """Walks a simpleparse result tree and emits equivalent Python source.

    Each method handles the grammar production of the same name;
    simpleparse dispatches on production names.  Every method returns the
    Python source fragment for its subtree.
    """
    def transformation(self, info, buffer):
        (tag, left, right, children) = info
        res = dispatchList(self, children, buffer)
        return " ".join(res)
    def assign(self, info, buffer):
        return getString(info, buffer)
    def expr(self, info, buffer):
        (tag, left, right, children) = info
        res = dispatchList(self, children, buffer)
        return " ".join(res)
    def trinary(self, info, buffer):
        # C-style `a ? b : c` becomes Python's `b if a else c`.
        (tag, left, right, children) = info
        ret = dispatchList(self, children, buffer)
        return "%s if %s else %s" % (ret[1], ret[0], ret[2])
    def term(self, info, buffer):
        (tag, left, right, children) = info
        res = dispatchList(self, children, buffer)
        return " ".join(res)
    def binaryop(self, info, buffer):
        # NOTE(review): '&&' and '||' are emitted verbatim, which is not
        # valid Python -- confirm how these operators are handled downstream.
        return getString(info, buffer)
    def factor(self, info, buffer):
        (tag, left, right, children) = info
        res = dispatchList(self, children, buffer)
        return " ".join(res)
    def function(self, info, buffer):
        (tag, left, right, children) = info
        res = dispatchList(self, children, buffer)
        return " ".join(res)
    def parameters(self, info, buffer):
        (tag, left, right, children) = info
        res = dispatchList(self, children, buffer)
        return ", ".join(res)
    def opar(self, info, buffer):
        return getString(info, buffer)
    def cpar(self, info, buffer):
        return getString(info, buffer)
    def number(self, info, buffer):
        (tag, left, right, children) = info
        res = dispatchList(self, children, buffer)
        return "".join(res)
    def integer(self, info, buffer):
        return getString(info, buffer)
    def float(self, info, buffer):
        # Reassemble the integer parts around the decimal point.
        (tag, left, right, children) = info
        res = dispatchList(self, children, buffer)
        return ".".join(res)
    def name(self, info, buffer):
        return getString(info, buffer)
    def value(self, info, buffer):
        # NOTE(review): no `value` production exists in `grammar`;
        # possibly dead code -- confirm.
        return getString(info, buffer)
    def index(self, info, buffer):
        # One child -> "[f(...)]", two children -> "[a, b]" subscript.
        (tag, left, right, children) = info
        ret = dispatchList(self, children, buffer)
        if len(ret) == 2:
            return "[%s, %s]" % tuple(ret)
        else:
            return "[%s]" % tuple(ret)
class Compiler:
    """Compile a transformation-language command into a Python code object."""

    def __init__(self):
        # Build the simpleparse parser and translator once; reused across
        # compile() calls.
        self.parser = Parser(grammar)
        self.translator = SyntaxTreeProcessor()

    def compile(self, command):
        """Translate *command* to Python source and byte-compile it.

        Note: strips *all* whitespace (the language is whitespace
        insensitive) before parsing.
        """
        # Raw string for the regex: '\s' in a plain literal is an invalid
        # escape (DeprecationWarning, SyntaxError in future Pythons).
        cmd = re.sub(r'\s', '', command)
        (success, children, nextchar) = self.parser.parse(cmd)
        result = self.translator((success, children, nextchar), cmd)
        python_src = result[1][0]
        return compile(python_src, '', 'exec')
|
antoinecarme/pyaf | tests/model_control/detailed/transf_Quantization/model_control_one_enabled_Quantization_MovingMedian_Seasonal_DayOfWeek_LSTM.py | Python | bsd-3-clause | 168 | 0.047619 | import t | ests.model_control.test_ozone_custom_models_enabled as testmod
testmod.build_model( ['Quantization'] , ['MovingMedian'] , ['Se | asonal_DayOfWeek'] , ['LSTM'] ); |
b1quint/samfp | other/pyadhoc.py | Python | bsd-3-clause | 8,475 | 0.006608 | import numpy as np
from scipy import constants as cst
import pyfits as pf
from pyfits import Column
from time import sleep
import matplotlib as mpl
from matplotlib import pyplot as plt
#import codecs
#verifier l'organisation des cubes fits si on les ouvre en python
#faire un programme readad qui voit l'extension pour savoir comment ouvir (soit ad2, ad3, ad1...)
#gestion des NaN a inclure (input/output)!!!
class dtu:
    """Data Trailer Unit.

    Class associated to any adhoc format, as HDU (Header Data Unit) is for
    the FITS format.

    NOTE(review): the `trailer` and `data` methods below are shadowed on
    instances by the attributes of the same name set in __init__ --
    confirm whether both are meant to exist.
    """
    def __init__(self, data=None, trailer=None, filename=None):
        self.data = data          # numpy array of pixel/voxel values
        self.trailer = trailer    # adhoc trailer record (structured scalar)
        self.filename = filename  # path the data came from, if any
    def read(self, filename=None):
        # Stub: not implemented yet.
        return
    def trailer(self, filename=None):
        # Stub: not implemented yet (and shadowed by the attribute above).
        return
    def trailertoheader(self, filename=None):
        # Stub: intended adhoc-trailer -> FITS-header conversion.
        return
    def data(self, filename=None):
        # Stub: not implemented yet (and shadowed by the attribute above).
        return
    def writeto(self, filename=None):
        # Stub: intended write-back to an adhoc file.
        return
    def writetofits(self, fitsname):
        # Stub: intended export to a FITS file.
        return
    def convertfits(self, fits, filename=None):
        # Stub: intended import from a FITS object.
        return
    def tohdu(self):
        # Wrap the data array in a single-extension pyfits HDUList.
        hdu = pf.PrimaryHDU(self.data)
        hdul = pf.HDUList([hdu])
        return hdul
#l'idee est de creer une classe avec des fonctions associees. Au debut, seul le filename est utile
def ad2trailer():
    # Stub: `ad2trailer` resolves to this function object itself, so calling
    # it returns the function.  TODO(review): implement .ad2 trailer creation.
    return ad2trailer
def ad3trailer():
    # Stub: `ad3trailer` resolves to this function object itself, so calling
    # it returns the function.  TODO(review): implement .ad3 trailer creation.
    return ad3trailer
def readad1(filename):
    # Stub: `ad1` is never defined, so calling this raises NameError.
    # TODO(review): implement reading of .ad1 files.
    return ad1
def readad2(filename):
    """Read a 2-D adhoc (.ad2) file into a dtu object.

    Parameters
    ----------
    filename: string
        Name of the input file

    Returns
    -------
    dtu
        dtu wrapping the (ly, lx) float32 image and the 256-byte trailer,
        or None when the trailer dimensions look corrupt.
    """
    # The file is a flat float32 array followed by a 256-byte trailer.
    # Floor division keeps `sz` an int under Python 3 too (plain `/` yields
    # a float and breaks np.dtype); `with` guarantees the handle is closed.
    with open(filename, 'rb') as f:
        f.seek(0, 2)
        sz = (f.tell() - 256) // 4
    dt = np.dtype([('data', np.float32, sz),
                   ('trailer',
                    [('nbdim', np.int32), ('id', np.int8, 8),
                     ('lx', np.int32), ('ly', np.int32), ('lz', np.int32),
                     ('scale', np.float32), ('ix0', np.int32), ('iy0', np.int32),
                     ('zoom', np.float32), ('modevis', np.int32),
                     ('thrshld', np.float32), ('step', np.float32),
                     ('nbiso', np.int32), ('pal', np.int32),
                     ('cdelt1', np.float64), ('cdelt2', np.float64),
                     ('crval1', np.float64), ('crval2', np.float64),
                     ('crpix1', np.float32), ('crpix2', np.float32),
                     ('crota2', np.float32), ('equinox', np.float32),
                     ('x_mirror', np.int8), ('y_mirror', np.int8),
                     ('was_compressed', np.int8), ('none2', np.int8, 1),
                     ('none', np.int32, 4), ('comment', np.int8, 128)])])
    ad2 = np.fromfile(filename, dtype=dt)
    # Sanity check on the trailer dimensions before reshaping.
    if (ad2['trailer']['lx'] >= 32768) | (ad2['trailer']['ly'] >= 32768):
        # str() instead of np.str() -- the np.str alias was removed in
        # NumPy 1.24.
        print('Error: lx or ly seems to be invalid: (' + str(ad2['trailer']['lx'][0]) + ', ' + str(ad2['trailer']['ly'][0]) + ')')
        print('If you want to allow arrays as large as this, modify the code!')
        return
    data = ad2['data'][0].reshape(ad2['trailer']['ly'], ad2['trailer']['lx'])
    # -3.1E38 is the adhoc blank value; map it to NaN.
    data[np.where(data == -3.1E38)] = np.nan
    ad2 = dtu(data, ad2['trailer'][0], filename)
    # TODO: check that the input file exists before reading.
    # TODO: it would be nice to support gzip-compressed files too.
    return ad2
#info = file_info(realfilename)
#testgz = strsplit(realfilename, '.', /extract)
#testgz = testgz[n_elements(testgz)-1]
#if (info.exists eq 0) or (info.read eq 0) or (testgz eq 'gz' | ) then begin
#; On regarde si plutot le fichier est .ad3.gz...
#if (testgz ne 'gz') then begin
#realfilename = filename + '.gz'
#info = file_info(realfilename)
#endif else begin
#realfilename = filename
#endelse
#if (info.exists eq 0) or (info.read eq 0) then return, -1
#if (!version.os_family ne 'unix') then return, -1
#testgz = 'gz'
#spawn, 'gzip -l ' + realfilename, output
#output = strsplit(output[1], ' ', /extract)
| #size = ulong64(output[1])
#if testgz eq 'gz' then begin
#trailer.was_compressed = 1
#endif
def readad3(filename, xyz=True):
    """Read a 3-D adhoc (.ad3) file into a dtu object.

    Parameters
    ----------
    filename: string
        Name of the input file
    xyz=True: boolean (optional)
        False to return data in standard zxy adhoc format
        True to return data in xyz format (default)

    Returns
    -------
    dtu
        dtu wrapping the float32 cube and the 256-byte trailer, or None
        when the trailer dimensions look corrupt.
    """
    # Floor division keeps `sz` an int under Python 3 (plain `/` yields a
    # float and breaks np.dtype); `with` guarantees the handle is closed.
    with open(filename, 'rb') as f:
        f.seek(0, 2)
        sz = (f.tell() - 256) // 4
    dt = np.dtype([('data', np.float32, sz),
                   ('trailer',
                    [('nbdim', np.int32), ('id', np.int8, 8),
                     ('lx', np.int32), ('ly', np.int32), ('lz', np.int32),
                     ('scale', np.float32), ('ix0', np.int32), ('iy0', np.int32),
                     ('zoom', np.float32), ('xl1', np.float32),
                     ('xi1', np.float32), ('vr0', np.float32),
                     ('corrv', np.float32), ('p0', np.float32),
                     ('xlp', np.float32), ('xl0', np.float32),
                     ('vr1', np.float32), ('xik', np.float32),
                     ('cdelt1', np.float64), ('cdelt2', np.float64),
                     ('crval1', np.float64), ('crval2', np.float64),
                     ('crpix1', np.float32), ('crpix2', np.float32),
                     ('crota2', np.float32), ('equinox', np.float32),
                     ('x_mirror', np.int8), ('y_mirror', np.int8),
                     ('was_compressed', np.int8), ('none2', np.int8, 1),
                     ('comment', np.int8, 128)])])
    ad3 = np.fromfile(filename, dtype=dt)
    # Refuse cubes larger than 250M voxels (tunable sanity limit).
    if (ad3['trailer']['lx'][0] * ad3['trailer']['ly'][0] * ad3['trailer']['lz'][0] >= 250 * 1024 * 1024):
        # str() instead of np.str() -- the alias was removed in NumPy 1.24.
        print('Error: lx or ly or lz seems to be invalid: (' + str(ad3['trailer']['lx'][0]) + ', ' + str(ad3['trailer']['ly'][0]) + ', ' + str(ad3['trailer']['lz'][0]) + ')')
        print('If you want to allow arrays as large as this, modify the code!')
        return
    # nbdim == -3 marks z-fastest storage; otherwise data is stored
    # (y, x, z).
    if ad3['trailer']['nbdim'] == -3:
        data = ad3['data'][0].reshape(ad3['trailer']['lz'], ad3['trailer']['ly'], ad3['trailer']['lx'])
    else:
        data = ad3['data'][0].reshape(ad3['trailer']['ly'], ad3['trailer']['lx'], ad3['trailer']['lz'])
    if xyz & (ad3['trailer']['nbdim'] == 3):
        # return the data ordered in z, y, x
        data = data.transpose(2, 0, 1)
    if (not xyz) & (ad3['trailer']['nbdim'] == -3):
        # return data ordered in y, x, z
        data = data.transpose(1, 2, 0)
    # -3.1E38 is the adhoc blank value; map it to NaN.
    data[np.where(data == -3.1E38)] = np.nan
    ad3 = dtu(data, ad3['trailer'][0], filename)
    return ad3
def readadt(filename):
    # Stub: `adt` is never defined, so calling this raises NameError.
    # TODO(review): implement reading of .adt files.
    return adt
def readada(filename, xsize, ysize):
    """Accumulate an .ada photon list into an image, displaying progress.

    Parameters
    ----------
    filename: string
        Name of the input ada file
    xsize: float
        Size of the final image along x-axis
    ysize: float
        Size of the final image along y-axis

    Returns
    -------
    out: ndarray
        Image corresponding to the ada (per-pixel photon counts)
    """
    # Initialization of the image
    im = np.zeros((ysize, xsize))
    # The .ada file is a flat stream of int16 values ordered y,x,y,x,...
    ada = np.fromfile(filename, dtype=np.int16)
    # Floor division keeps the shape integral under Python 3 (plain `/`
    # produces a float and makes reshape fail).
    data = ada.reshape(ada.size // 2, 2)
    plt.ion()
    image = plt.imshow(im)
    # we loop on each photon to create the image
    for i in range(data.shape[0]):
        # we check the location of each photon is inside the image
        if (data[i][0] < ysize) & (data[i][1] < xsize) & (data[i][0] >= 0) & (data[i][1] >= 0):
            im[data[i][0], data[i][1]] += 1
            image.set_data(im)
            #plt.draw()
            #sleep(0.1)
    plt.draw()
    return im
#it seems that the first frame is duplicated
#it would be nice to be able to display the creation of the image photon by photon
def readadz(filename):
    # Stub: `adz` is never defined, so calling this raises NameError.
    # TODO(review): implement reading of .adz files.
    return adz
def writead1(data, trailer, filename):
    # Stub: `ad1` is never defined, so calling this raises NameError.
    # TODO(review): implement writing of .ad1 files.
    return ad1
def writead2(data, trailer, filename):
    # Stub: `ad2` is never defined, so calling this raises NameError.
    # TODO(review): implement writing of .ad2 files.
    return ad2
def writead3(data, trailer, filename):
    # Stub: `ad3` is never defined, so calling this raises NameError.
    # TODO(review): implement writing of .ad3 files.
    return ad3
def ad2tofits(filename):
    # Stub: `fits` is never defined, so calling this raises NameError.
    # TODO(review): implement .ad2 -> FITS conversion.
    return fits
def ad3tofits(filename):
    # Stub: `fits` is never defined, so calling this raises NameError.
    # NOTE(review): redefined identically later in the module; keep only one.
    return fits
def dtutohdu(dtu):
    # NOTE(review): looks unfinished -- `hdu` is a PrimaryHDU but is indexed
    # like an HDUList (`hdu[0]`), which fails at runtime; the parameter also
    # shadows the module-level `dtu` class, and a PrimaryHDU (not an HDUList)
    # is returned.  Confirm intent before use.
    hdu = pf.PrimaryHDU()
    hdr = hdu[0].header
    hdu[0].data = dtu.data
    return hdu
def hdutodtu(hdu):
    # Stub: always returns 0; the local name shadows the module-level
    # `dtu` class.  TODO(review): build a real dtu from the HDU.
    dtu = 0
    return dtu
def ad3tofits(filename):
    # NOTE(review): duplicate definition -- this silently replaces the
    # identical ad3tofits stub declared earlier in the module; also `fits`
    # is never defined, so calling this raises NameError.
    return fits
def ad2trailertofitsheader(ad2trailer):
    # NOTE(review): unfinished -- `fitsheader` is never assigned, so this
    # raises NameError; `hdu[0]` indexes a PrimaryHDU as if it were an
    # HDUList; and the parameter shadows the module-level ad2trailer().
    hdu = pf.PrimaryHDU()
    hdr = hdu[0].header
    #hdr.update('key','value','comment') # add or update header keywords
    #hdr.add_history('') # record the date the conversion was done
    return fitsheader
def ad3trailertofitsheader(ad3trailer):
hdu = pf.PrimaryHDU()
hdr = hdu[0].header
#hdr.update('key','value','comment') # ajouter ou mettre a jour des keywords
# |
Droriel/python_training | data/groups.py | Python | apache-2.0 | 216 | 0.004651 | # -*- coding: utf-8 -*-
# Restored import path and keyword mangled by stray dataset delimiters
# ("from model | .group", "he | ader='header2'").
from model.group import Group

# Static test data (translated from Polish: "Stałe dane testowe")
testData = [
    Group(name='name1', header='header1', footer='footer1'),
    Group(name='name2', header='header2', footer='footer2')
]
|
atuljain/odoo | addons/project/project.py | Python | agpl-3.0 | 68,455 | 0.006121 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-today OpenERP SA (<http://www.openerp.com>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from datetime import datetime, date
from lxml import etree
import time
from openerp import SUPERUSER_ID
from openerp import tools
from openerp.addons.resource.faces import task as Task
from openerp.osv import fields, osv
from openerp.tools.translate import _
class project_task_type(osv.osv):
    """Kanban stage for project tasks."""
    _name = 'project.task.type'
    _description = 'Task Stage'
    _order = 'sequence'
    _columns = {
        'name': fields.char('Stage Name', required=True, size=64, translate=True),
        'description': fields.text('Description'),
        'sequence': fields.integer('Sequence'),
        'case_default': fields.boolean('Default for New Projects',
                        help="If you check this field, this stage will be proposed by default on each new project. It will not assign this stage to existing projects."),
        'project_ids': fields.many2many('project.project', 'project_task_type_rel', 'type_id', 'project_id', 'Projects'),
        'fold': fields.boolean('Folded in Kanban View',
                               # Added the missing space ("view whenthere").
                               help='This stage is folded in the kanban view when '
                               'there are no records in that stage to display.'),
    }  # restored closing brace mangled by a stray dataset delimiter

    def _get_default_project_ids(self, cr, uid, ctx=None):
        """Return the default project as a one-element list, or None."""
        # ctx=None avoids the mutable-default-argument pitfall of `ctx={}`.
        if ctx is None:
            ctx = {}
        # Restored call name mangled by a stray delimiter
        # ("_get_default_project | _id").
        project_id = self.pool['project.task']._get_default_project_id(cr, uid, context=ctx)
        if project_id:
            return [project_id]
        return None

    _defaults = {
        'sequence': 1,
        'project_ids': _get_default_project_ids,
    }
    # Removed a duplicated trailing `_order = 'sequence'` (identical to the
    # one declared above).
class project(osv.osv):
    _name = "project.project"
    _description = "Project"
    # Delegation inheritance: every project record owns an analytic account
    # (analytic_account_id) and a mail alias (alias_id) whose fields are
    # exposed on the project itself.
    _inherits = {'account.analytic.account': "analytic_account_id",
                 "mail.alias": "alias_id"}
    # Prototype inheritance: messaging/chatter and needaction counters.
    _inherit = ['mail.thread', 'ir.needaction_mixin']
    def _auto_init(self, cr, context=None):
        """ Installation hook: aliases, project.project """
        # create aliases for all projects and avoid constraint errors
        # Each project gets a "project+..." mail alias that creates
        # project.task records with project_id preset to the project's id.
        alias_context = dict(context, alias_model_name='project.task')
        return self.pool.get('mail.alias').migrate_to_alias(cr, self._name, self._table, super(project, self)._auto_init,
            'project.task', self._columns['alias_id'], 'id', alias_prefix='project+', alias_defaults={'project_id':'id'}, context=alias_context)
def search(self, cr, user, args, offset=0, limit=None, order=None, context=None, count=False):
if user == 1:
return super(project, self).search(cr, user, args, offset=offset, limit=limit, order=order, context=context, count=count)
if context and context.get('user_preference'):
cr.execute("""SELECT project.id FROM project_project project
LEFT JOIN account_analytic_account account ON account.id = project.analytic_account_id
LEFT JOIN project_user_rel rel ON rel.project_id = project.id
WHERE (account.user_id = %s or rel.uid = %s)"""%(user, user))
return [(r[0]) for r in cr.fetchall()]
return super(project, self).search(cr, user, args, offset=offset, limit=limit, order=order,
context=context, count=count)
    def onchange_partner_id(self, cr, uid, ids, part=False, context=None):
        """Propagate the partner's pricelist to the project, when the model
        actually has a pricelist_id field (presumably added by another
        module -- confirm).
        """
        partner_obj = self.pool.get('res.partner')
        val = {}
        if not part:
            # No partner selected: nothing to propagate.
            return {'value': val}
        if 'pricelist_id' in self.fields_get(cr, uid, context=context):
            # property_product_pricelist is a many2one read as (id, name);
            # keep only the id (or False when unset).
            pricelist = partner_obj.read(cr, uid, part, ['property_product_pricelist'], context=context)
            pricelist_id = pricelist.get('property_product_pricelist', False) and pricelist.get('property_product_pricelist')[0] or False
            val['pricelist_id'] = pricelist_id
        return {'value': val}
    def _get_projects_from_tasks(self, cr, uid, task_ids, context=None):
        """Map task ids to the ids of their projects plus all parent
        projects (presumably a store/recompute trigger helper -- confirm)."""
        tasks = self.pool.get('project.task').browse(cr, uid, task_ids, context=context)
        project_ids = [task.project_id.id for task in tasks if task.project_id]
        return self.pool.get('project.project')._get_project_and_parents(cr, uid, project_ids, context)
def _get_project_and_parents(self, cr, uid, ids, context=None):
    """ return the project ids and all their parent projects

    Walks up the analytic-account hierarchy one level per loop
    iteration until no new parents are found.
    """
    res = set(ids)
    while ids:
        # One level up: projects whose analytic account is the parent of
        # the analytic account of any project currently in `ids`.
        cr.execute("""
            SELECT DISTINCT parent.id
            FROM project_project project, project_project parent, account_analytic_account account
            WHERE project.analytic_account_id = account.id
            AND parent.analytic_account_id = account.parent_id
            AND project.id IN %s
            """, (tuple(ids),))
        # Parents found become the frontier for the next iteration.
        # NOTE(review): a cycle in the analytic hierarchy would loop
        # forever; presumably parent_id links are acyclic — confirm.
        ids = [t[0] for t in cr.fetchall()]
        res.update(ids)
    return list(res)
def _get_project_and_children(self, cr, uid, ids, context=None):
    """ retrieve all children projects of project ids;
    return a dictionary mapping each project to its parent project (or None)
    """
    # Seed with the requested ids themselves; they have no known parent.
    res = dict.fromkeys(ids, None)
    while ids:
        # One level down: (child, parent) pairs where the parent is in `ids`.
        cr.execute("""
            SELECT project.id, parent.id
            FROM project_project project, project_project parent, account_analytic_account account
            WHERE project.analytic_account_id = account.id
            AND parent.analytic_account_id = account.parent_id
            AND parent.id IN %s
            """, (tuple(ids),))
        dic = dict(cr.fetchall())
        res.update(dic)
        # Newly discovered children form the next frontier.
        ids = dic.keys()
    return res
def _progress_rate(self, cr, uid, ids, names, arg, context=None):
    """Function field: aggregate task hours per project (including every
    child project) and derive a progress percentage per requested id."""
    # Map each project in the subtrees of `ids` to its parent (None = root).
    child_parent = self._get_project_and_children(cr, uid, ids, context)
    # compute planned_hours, total_hours, effective_hours specific to each project
    # NOTE(review): the fold condition in WHERE effectively turns the
    # LEFT JOIN into an inner join, excluding tasks without a stage —
    # confirm that is intended.
    cr.execute("""
        SELECT project_id, COALESCE(SUM(planned_hours), 0.0),
            COALESCE(SUM(total_hours), 0.0), COALESCE(SUM(effective_hours), 0.0)
        FROM project_task
        LEFT JOIN project_task_type ON project_task.stage_id = project_task_type.id
        WHERE project_task.project_id IN %s AND project_task_type.fold = False
        GROUP BY project_id
        """, (tuple(child_parent.keys()),))
    # aggregate results into res
    res = dict([(id, {'planned_hours':0.0, 'total_hours':0.0, 'effective_hours':0.0}) for id in ids])
    for id, planned, total, effective in cr.fetchall():
        # add the values specific to id to all parent projects of id in the result
        # (walk upwards via child_parent; only requested ids accumulate)
        while id:
            if id in ids:
                res[id]['planned_hours'] += planned
                res[id]['total_hours'] += total
                res[id]['effective_hours'] += effective
            id = child_parent[id]
    # compute progress rates
    for id in ids:
        if res[id]['total_hours']:
            res[id]['progress_rate'] = round(100.0 * res[id]['effective_hours'] / res[id]['total_hours'], 2)
        else:
            res[id]['progress_rate'] = 0.0
    return res
def unlink(self, cr, uid, ids, context=None):
alias_ids = []
mail_alias = self.pool.get('mail.alias')
for pr |
# Packaging metadata for the geonition_aliases distribution.
from setuptools import setup
from setuptools import find_packages

setup(
    name='geonition_aliases',
    version='0.0.1',
    author='Dariusz Walczak',
    url='',
    packages=find_packages(),
    include_package_data=True,
    # Non-Python assets shipped inside the geoforms package.
    package_data={
        "geoforms": [
            "templates/*.html",
            "templates/*.api.js",
            "templates/jstranslations.txt",
            "templates/help/*.html",
            "templates/admin/geoforms/geoformelement/*.html",
            "locale/*/LC_MESSAGES/*.po",
            "locale/*/LC_MESSAGES/*.mo",
            "static/js/*",
            "static/css/*.css",
            "static/css/images/*.png",
        ],
    },
    zip_safe=False,
    install_requires=['django',
                      'django-modeltranslation'],
)
|
veroc/Bika-LIMS | bika/lims/browser/widgets/addresswidget.py | Python | agpl-3.0 | 2,605 | 0.005374 | from AccessControl import ClassSecurityInfo
from Products.Archetypes.utils import DisplayList
from Products.Archetypes.Registry import registerWidget
from Products.Archetypes.Widget import TypesWidget
from Products.CMFPlone.i18nl10n import ulocalized_time
from Products.CMFCore.utils import getToolByName
from bika.lims.browser import BrowserView
from bika.lims.locales import COUNTRIES,STATES,DISTRICTS
import json
import plone
class AddressWidget(TypesWidget):
    """Simple address widget with country/state/district lookups.

    The values stored in the form/field are always the display names
    (Country Name, State Name, District Name), never ISO codes.
    """
    _properties = TypesWidget._properties.copy()
    _properties.update({
        'macro': "bika_widgets/addresswidget",
        'helper_js': ("bika_widgets/addresswidget.js",),
        'helper_css': ("bika_widgets/addresswidget.css",),
        'showLegend': True,
        'showDistrict': True,
        'showCopyFrom': True,
        'showCity': True,
        'showPostalCode': True,
        'showAddress': True,
    })

    security = ClassSecurityInfo()

    # The values in the form/field are always
    # Country Name, State Name, District Name.

    def getCountries(self):
        """Return [(ISO code, country name)] sorted by country name."""
        items = [(x['ISO'], x['Country']) for x in COUNTRIES]
        items.sort(lambda x, y: cmp(x[1], y[1]))
        return items

    def getDefaultCountry(self):
        """Return the default country configured in bika_setup."""
        portal = getToolByName(self, 'portal_url').getPortalObject()
        bs = portal._getOb('bika_setup')
        return bs.getDefaultCountry()

    def getStates(self, country):
        """Return STATES rows for `country` (name or ISO code) sorted by
        state name; [] when the country is empty or unknown."""
        items = []
        if not country:
            return items
        # get ISO code for country
        iso = [c for c in COUNTRIES if c['Country'] == country or c['ISO'] == country]
        if not iso:
            return items
        iso = iso[0]['ISO']
        items = [x for x in STATES if x[0] == iso]
        items.sort(lambda x, y: cmp(x[2], y[2]))
        return items

    def getDistricts(self, country, state):
        """Return DISTRICTS rows for `state` within `country` sorted by
        district name; [] when either value is empty or unknown."""
        items = []
        if not country or not state:
            return items
        # get ISO code for country
        iso = [c for c in COUNTRIES if c['Country'] == country or c['ISO'] == country]
        if not iso:
            return items
        iso = iso[0]['ISO']
        # get NUMBER of the state for lookup
        snr = [s for s in STATES if s[0] == iso and s[2] == state]
        if not snr:
            return items
        snr = snr[0][1]
        items = [x for x in DISTRICTS if x[0] == iso and x[1] == snr]
        items.sort(lambda x, y: cmp(x[1], y[1]))
        return items
registerWidget(AddressWidget,
title = 'Address Widget',
description = ('Simple address widget with country/state lookups'),
)
|
huaweiswitch/neutron | neutron/tests/unit/cisco/n1kv/test_n1kv_plugin.py | Python | apache-2.0 | 59,556 | 0.000487 | # Copyright 2013 Cisco Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
import webob.exc
from neutron.api import extensions as neutron_extensions
from neutron.api.v2 import attributes
from neutron import context
import neutron.db.api as db
from neutron.extensions import portbindings
from neutron import manager
from neutron.plugins.cisco.common import cisco_constants as c_const
from neutron.plugins.cisco.common import cisco_exceptions as c_exc
from neutron.plugins.cisco.common import config as c_conf
from neutron.plugins.cisco.db import n1kv_db_v2
from neutron.plugins.cisco.db import n1kv_models_v2
from neutron.plugins.cisco.db import network_db_v2 as cdb
from neutron.plugins.cisco import extensions
from neutron.plugins.cisco.extensions import n1kv
from neutron.plugins.cisco.extensions import network_profile
from neutron.plugins.cisco.extensions import policy_profile
from neutron.plugins.cisco.n1kv import n1kv_client
from neutron.plugins.cisco.n1kv import n1kv_neutron_plugin
from neutron.tests.unit import _test_extension_portbindings as test_bindings
from neutron.tests.unit.cisco.n1kv import fake_client
from neutron.tests.unit import test_api_v2
from neutron.tests.unit import test_db_plugin as test_plugin
from neutron.tests.unit import test_l3_plugin
from neutron.tests.unit import test_l3_schedulers
PHYS_NET = 'some-phys-net'
VLAN_MIN = 100
VLAN_MAX = 110
class FakeResponse(object):
    """Stand-in for a requests-library response object.

    Mocked transport code returns instances of this class instead of real
    HTTP responses.  Construct it with the status code, the body that
    json() should hand back, and the headers dict.
    """

    def __init__(self, status, response_text, headers):
        self.status_code = status
        self.headers = headers
        self.buffer = response_text

    def json(self, *args, **kwargs):
        # Mirrors requests.Response.json(); arguments are accepted and ignored.
        return self.buffer
def _fake_setup_vsm(self):
"""Fake establish Communication with Cisco Nexus1000V VSM."""
self.agent_vsm = True
self._populate_policy_profiles()
class NetworkProfileTestExtensionManager(object):
    # Minimal extension manager exposing only the network_profile
    # extension to the test API router.

    def get_resources(self):
        # Add the resources to the global attribute map
        # This is done here as the setup process won't
        # initialize the main API router which extends
        # the global attribute map
        attributes.RESOURCE_ATTRIBUTE_MAP.update(
            network_profile.RESOURCE_ATTRIBUTE_MAP)
        return network_profile.Network_profile.get_resources()

    def get_actions(self):
        # This extension defines no extra actions.
        return []

    def get_request_extensions(self):
        # Nor any request extensions.
        return []
class PolicyProfileTestExtensionManager(object):
    # Minimal extension manager exposing only the policy_profile
    # extension to the test API router.

    def get_resources(self):
        # Add the resources to the global attribute map
        # This is done here as the setup process won't
        # initialize the main API router which extends
        # the global attribute map
        attributes.RESOURCE_ATTRIBUTE_MAP.update(
            policy_profile.RESOURCE_ATTRIBUTE_MAP)
        return policy_profile.Policy_profile.get_resources()

    def get_actions(self):
        # This extension defines no extra actions.
        return []

    def get_request_extensions(self):
        # Nor any request extensions.
        return []
class N1kvPluginTestCase(test_plugin.NeutronDbPluginV2TestCase):
_plugin_name = ('neutron.plugins.cisco.n1kv.'
'n1kv_neutron_plugin.N1kvNeutronPluginV2')
tenant_id = "some_tenant"
DEFAULT_RESP_BODY = ""
DEFAULT_RESP_CODE = 200
DEFAULT_CONTENT_TYPE = ""
fmt = "json"
def _make_test_policy_profile(self, name='service_profile'):
    """Create and return a policy profile DB record for the tests.

    :param name: profile name; the default matches the default name
        specified in the plugin's config.py file.
    """
    profile = {
        'id': test_api_v2._uuid(),
        'name': name,
    }
    return n1kv_db_v2.create_policy_profile(profile)
def _make_test_profile(self,
                       name='default_network_profile',
                       segment_type=c_const.NETWORK_TYPE_VLAN,
                       segment_range='386-400'):
    """
    Create a network profile record for testing purposes.

    :param name: string representing the name of the network profile to
                 create. Default argument value chosen to correspond to the
                 default name specified in config.py file.
    :param segment_type: string representing the type of network segment.
    :param segment_range: string representing the segment range for network
                          profile.
    """
    db_session = db.get_session()
    profile = {'name': name,
               'segment_type': segment_type,
               'tenant_id': self.tenant_id,
               'segment_range': segment_range}
    if segment_type == c_const.NETWORK_TYPE_OVERLAY:
        # Overlay (VXLAN) profiles also need a sub type and a multicast
        # range before the allocation table can be synced.
        profile['sub_type'] = 'unicast'
        profile['multicast_ip_range'] = '0.0.0.0'
        net_p = n1kv_db_v2.create_network_profile(db_session, profile)
        n1kv_db_v2.sync_vxlan_allocations(db_session, net_p)
    elif segment_type == c_const.NETWORK_TYPE_VLAN:
        profile['physical_network'] = PHYS_NET
        net_p = n1kv_db_v2.create_network_profile(db_session, profile)
        n1kv_db_v2.sync_vlan_allocations(db_session, net_p)
    # NOTE(review): any other segment_type raises NameError here (net_p
    # unbound) — kept as-is since the tests only use the two types above.
    return net_p
def setUp(self, ext_mgr=NetworkProfileTestExtensionManager()):
"""
Setup method for n1kv plugin tests.
First step is to define an acceptable response from the VSM to
our requests. This needs to be done BEFORE the setUp() function
of the super-class is called.
This default here works for many cases. If you need something
extra, please define your own setUp() function in your test class,
and set your DEFAULT_RESPONSE value also BEFORE calling the
setUp() of the super-function (this one here). If you have set
a value already, it will not be overwritten by this code.
"""
if not self.DEFAULT_RESP_BODY:
self.DEFAULT_RESP_BODY = {
"icehouse-pp": {"properties": {"name": "icehouse-pp",
"id": "some-uuid-1"}},
"havana_pp": {"properties": {"name": "havana_pp",
"id": "some-uuid-2"}},
"dhcp_pp": {"properties": {"name": "dhcp_pp",
"id": "some-uuid-3"}},
}
# Creating a mock HTTP connection object for requests lib. The N1KV
# client interacts with the VSM via HTTP. Since we don't have a VSM
# running in the unit tests, we need to 'fake' it by patching the HTTP
# library itself. We install a patch for a fake HTTP connection class.
# Using __name__ to avoid having to enter the full module path.
http_patcher = mock.patch(n1kv_client.requests.__name__ + ".request")
FakeHttpConnection = http_patcher.start()
# Now define the return values for a few functions that may be called
# on any instance of the fake HTTP connection class.
self.resp_headers = {"content-type": "application/json"}
FakeHttpConnection.return_value = (FakeResponse(
self.DEFAULT_RESP_CODE,
self.DEFAULT_RESP_BODY,
self.resp_headers))
# Patch some internal functions in a few other parts of the system.
# The |
milad-soufastai/ansible-modules-extras | cloud/openstack/os_user_role.py | Python | gpl-3.0 | 6,078 | 0.000987 | #!/usr/bin/python
# Copyright (c) 2016 IBM
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
try:
import shade
HAS_SHADE = True
except ImportError:
HAS_SHADE = False
from distutils.version import StrictVersion
DOCUMENTATION = '''
---
module: os_user_role
short_description: Associate OpenStack Identity users and roles
extends_documentation_fragment: openstack
author: "Monty Taylor (@emonty), David Shrewsbury (@Shrews)"
version_added: "2.1"
description:
- Grant and revoke roles in either project or domain context for
OpenStack Identity Users.
options:
role:
description:
- Name or ID for the role.
required: true
user:
description:
- Name or ID for the user. If I(user) is not specified, then
I(group) is required. Both may not be specified.
required: false
default: null
group:
description:
- Name or ID for the group. Valid only with keystone version 3.
If I(group) is not specified, then I(user) is required. Both
may not be specified.
required: false
default: null
project:
description:
- Name or ID of the project to scope the role assocation to.
If you are using keystone version 2, then this value is required.
required: false
default: null
domain:
description:
- ID of the domain to scope the role association to. Valid only with
keystone version 3, and required if I(project) is not specified.
required: false
default: null
state:
description:
- Should the roles be present or absent on the user.
choices: [present, absent]
default: present
requirements:
- "python >= 2.6"
- "shade"
'''
EXAMPLES = '''
# Grant an admin role on the user admin in the project project1
- os_user_role:
cloud: mycloud
user: admin
role: admin
project: project1
# Revoke the admin role from the user barney in the newyork domain
- os_user_role:
cloud: mycloud
state: absent
user: barney
role: admin
domain: newyork
'''
RETURN = '''
#
'''
def _system_state_change(state, assignment):
if state == 'present' and not assignment:
return True
elif state == 'absent' and assignment:
return True
return False
def _build_kwargs(user, group, project, domain):
kwargs = {}
if user:
kwargs['user'] = user
if group:
kwargs['group'] = group
if project:
kwargs['project'] = project
if domain:
kwargs['domain'] = domain
return kwargs
def main():
    """Entry point: reconcile the requested role assignment with OpenStack."""
    argument_spec = openstack_full_argument_spec(
        role=dict(required=True),
        user=dict(required=False),
        group=dict(required=False),
        project=dict(required=False),
        domain=dict(required=False),
        state=dict(default='present', choices=['absent', 'present']),
    )
    module_kwargs = openstack_module_kwargs(
        required_one_of=[
            ['user', 'group']
        ])
    module = AnsibleModule(argument_spec,
                           supports_check_mode=True,
                           **module_kwargs)

    # role grant/revoke API introduced in shade 1.5.0
    if not HAS_SHADE or (StrictVersion(shade.__version__) < StrictVersion('1.5.0')):
        module.fail_json(msg='shade 1.5.0 or higher is required for this module')

    role = module.params.pop('role')
    user = module.params.pop('user')
    group = module.params.pop('group')
    project = module.params.pop('project')
    domain = module.params.pop('domain')
    state = module.params.pop('state')

    try:
        cloud = shade.operator_cloud(**module.params)

        # Resolve each name/ID to a concrete ID, failing fast on unknown
        # values, and build the filters used to look up assignments.
        filters = {}
        r = cloud.get_role(role)
        if r is None:
            module.fail_json(msg="Role %s is not valid" % role)
        filters['role'] = r['id']
        if user:
            u = cloud.get_user(user)
            if u is None:
                module.fail_json(msg="User %s is not valid" % user)
            filters['user'] = u['id']
        if group:
            g = cloud.get_group(group)
            if g is None:
                module.fail_json(msg="Group %s is not valid" % group)
            filters['group'] = g['id']
        if project:
            p = cloud.get_project(project)
            if p is None:
                module.fail_json(msg="Project %s is not valid" % project)
            filters['project'] = p['id']
        if domain:
            d = cloud.get_domain(domain)
            if d is None:
                module.fail_json(msg="Domain %s is not valid" % domain)
            filters['domain'] = d['id']

        assignment = cloud.list_role_assignments(filters=filters)

        if module.check_mode:
            module.exit_json(changed=_system_state_change(state, assignment))

        changed = False
        if state == 'present':
            if not assignment:
                kwargs = _build_kwargs(user, group, project, domain)
                cloud.grant_role(role, **kwargs)
                changed = True
        elif state == 'absent':
            if assignment:
                kwargs = _build_kwargs(user, group, project, domain)
                cloud.revoke_role(role, **kwargs)
                changed = True
        module.exit_json(changed=changed)

    except shade.OpenStackCloudException as e:
        module.fail_json(msg=str(e))
from ansible.module_utils.basic import *
from ansible.module_utils.openstack import *
if __name__ == '__main__':
main()
|
KrisKennaway/pyapple2disk | src/apple2disk/process.py | Python | bsd-2-clause | 1,497 | 0.002672 | import disk
import dos33disk
import os
import sys
def main():
disks = {}
for root, dirs, files in os.walk(sys.argv[1]):
for f in files:
if not f.lower().endswith('.dsk') and not f.lower().endswith('.do'):
continue
print f
b = bytearray(open(os.path.join(root, f), 'r').read())
try:
img = disk.Disk(f, b)
except IOError:
continue
except AssertionError:
continue
# See if this is a DOS 3.3 disk
try:
img = dos33disk.Dos33Disk.Taste(img)
print "%s is a DOS 3.3 disk, volume %d" % (f, img.volume)
for fn in img.filenames:
f = img.files[fn]
print f.catalog_entry
if f.parsed_contents:
print f.parsed_contents
except IOError:
pass
e | xcept AssertionError:
pass
disks[f] = img
for ts, data in sorted(img.sectors.iteritems()):
print data
# Group disks by hash of boot1 sector
boot1_hashes = {}
for f, d in d | isks.iteritems():
boot1_hash = d.Boot1.hash
boot1_hashes.setdefault(boot1_hash, []).append(f)
for h, disks in boot1_hashes.iteritems():
print h
for d in sorted(disks):
print " %s" % d
if __name__ == "__main__":
main()
|
trabacus-softapps/docker-magicecommerce | additional_addons/product_pricelist_fixed_price/model/__init__.py | Python | agpl-3.0 | 909 | 0.0011 | # -*- coding: utf-8 -*-
##############################################################################
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in | the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a c | opy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from . import product_pricelist_item
|
from dal import autocomplete
from django.conf.urls import url

from .models import TModel

urlpatterns = [
    url(
        'test-autocomplete/$',
        autocomplete.Select2QuerySetView.as_view(model=TModel),
        name='select2_djhacker_formfield',
    ),
]

import djhacker
from django import forms

# Register a Select2 autocomplete form field for TModel.test without
# having to declare a custom model form.
djhacker.formfield(
    TModel.test,
    forms.ModelChoiceField,
    widget=autocomplete.ModelSelect2(url='select2_djhacker_formfield')
)
|
# Placeholder only: generate a real value with the key-generation helper in
# settings.py and keep the generated key out of version control.
SECRET_KEY = 'KEY GENERATED BY KEY GEN FUNCTION IN settings.py'
|
muffinresearch/solitude | lib/bango/client.py | Python | bsd-3-clause | 7,873 | 0.000127 | from datetime import datetime
import functools
import os
import uuid
from time import time
from django.conf import settings
from django_statsd.clients import statsd
from mock import Mock
from requests import post
from suds import client as sudsclient
from suds.transport import Reply
from suds.transport.http import HttpTransport
from solitude.logger import getLogger
from .constants import (ACCESS_DENIED, HEADERS_SERVICE, INTERNAL_ERROR,
SERVICE_UNAVAILABLE)
from .errors import AuthError, BangoError, BangoFormError, ProxyError
root = os.path.join(settings.ROOT, 'lib', 'bango', 'wsdl', settings.BANGO_ENV)
wsdl = {
'exporter': 'file://' + os.path.join(root, 'mozilla_exporter.wsdl'),
'billing': 'file://' + os.path.join(root, 'billing_configuration.wsdl'),
'direct': 'file://' + os.path.join(root, 'direct_billing.wsdl'),
}
# Add in the whitelist of supported methods here.
exporter = [
'AcceptSBIAgreement',
'CreateBangoNumber',
'CreateBankDetails',
'CreatePackage',
'DeleteVATNumber',
'GetAcceptedSBIAgreement',
'GetPackage',
'GetSBIAgreement',
'MakePremiumPerAccess',
'SetVATNumber',
'UpdateAddressDetails',
'UpdateFinanceEmailAddress',
'UpdateRating',
'UpdateSupportEmailAddress',
]
billing = [
'CreateBillingConfiguration',
]
direct = [
'DoRefund',
'GetRefundStatus',
]
# Status codes from the proxy that raise an error and stop processing.
FATAL_PROXY_STATUS_CODES = (404, 500,)
# Map a plain operation name onto the Bango WSDL naming scheme.  If the
# Bango WSDL ever diverges from the "<Name>Request/Response/Result"
# convention, these helpers are the place to change.
def get_request(name):
    return '%sRequest' % name


def get_response(name):
    return '%sResponse' % name


def get_result(name):
    return '%sResult' % name
log = getLogger('s.bango')
class Client(object):
    """Thin suds-based wrapper over the Bango SOAP services.

    Whitelisted operation names (the exporter/billing/direct lists above)
    become callable attributes, e.g. ``client.CreatePackage(data)``.
    """

    def __getattr__(self, attr):
        # Dispatch unknown attributes to call() when the name appears in
        # one of the whitelists, binding the matching WSDL.
        for name, methods in (['exporter', exporter],
                              ['billing', billing],
                              ['direct', direct]):
            if attr in methods:
                return functools.partial(self.call, attr, wsdl=str(name))
        raise AttributeError('Unknown request: %s' % attr)

    def call(self, name, data, wsdl='exporter'):
        """Build the SOAP request for `name`, attach credentials, invoke
        it, and raise if Bango reports an error code."""
        client = self.client(wsdl)
        package = client.factory.create(get_request(name))
        for k, v in data.iteritems():
            setattr(package, k, v)
        # Bango expects the credentials inside every request body.
        package.username = settings.BANGO_AUTH.get('USER', '')
        package.password = settings.BANGO_AUTH.get('PASSWORD', '')
        # Actually call Bango.
        with statsd.timer('solitude.bango.request.%s' % name.lower()):
            response = getattr(client.service, name)(package)
        self.is_error(response.responseCode, response.responseMessage)
        return response

    def client(self, name):
        # Fresh suds client per call, bound to the requested WSDL file.
        return sudsclient.Client(wsdl[name])

    def is_error(self, code, message):
        """Map a Bango response code to the matching exception type."""
        # Count the numbers of responses we get.
        statsd.incr('solitude.bango.response.%s' % code.lower())
        # If there was an error raise it.
        if code == ACCESS_DENIED:
            raise AuthError(ACCESS_DENIED, message)
        # These are fatal Bango errors that the data can't really do much
        # about.
        elif code in (INTERNAL_ERROR, SERVICE_UNAVAILABLE):
            raise BangoError(code, message)
        # Assume that all other errors are errors from the data.
        elif code != 'OK':
            raise BangoFormError(code, message)
class Proxy(HttpTransport):
    """suds transport that relays SOAP requests through the Bango proxy.

    The real Bango endpoint travels in a header so the proxy knows where
    to forward each request.
    """

    def send(self, request):
        result = post(settings.BANGO_PROXY,
                      data=request.message,
                      headers={HEADERS_SERVICE: request.url},
                      verify=False)
        if result.status_code not in FATAL_PROXY_STATUS_CODES:
            return Reply(result.status_code, {}, result.content)
        msg = ('Proxy returned: %s from: %s' %
               (result.status_code, request.url))
        log.error(msg)
        raise ProxyError(msg)
class ClientProxy(Client):
    # Same API as Client, but every SOAP request is routed through the
    # Bango proxy transport instead of going to Bango directly.
    def client(self, name):
        return sudsclient.Client(wsdl[name], transport=Proxy())
# Add in your mock method data here. If the method only returns a
# responseCode and a responseMessage, there's no need to add the method.
#
# Use of time() for ints means that tests work and so do requests from the
# command line using mock. As long as you don't do them too fast.
def ltime():
    """Return the low 8 digits of the current microsecond timestamp —
    unique enough to serve as a fake id in mock_data."""
    return str(int(time() * 1000000))[8:]
mock_data = {
'CreateBangoNumber': {
'bango': 'some-bango-number',
},
'CreatePackage': {
'packageId': ltime,
'adminPersonId': ltime,
'supportPersonId': ltime,
'financePersonId': ltime
},
'UpdateSupportEmailAddress': {
'personId': ltime,
'personPassword': 'xxxxx',
},
'UpdateFinanceEmailAddress': {
'personId': ltime,
'personPassword': 'xxxxx',
},
'CreateBillingConfiguration': {
'billingConfigurationId': uuid.uuid4,
},
'GetAcceptedSBIAgreement': {
'sbiAgreementAccepted': True,
'acceptedSBIAgreement': '2013-01-23 00:00:00',
'sbiAgreementExpires': '2014-01-23 00:00:00'
},
'GetSBIAgreement': {
'sbiAgreement': 'Blah...',
# Although its a date, the WSDL has this as a date time.
'sbiAgreementValidFrom': '2010-08-31T00:00:00',
},
'DoRefund': {
'refundTransactionId': uuid.uuid4
},
'GetPackage': {
'adminEmailAddress': 'admin@email.com',
'supportEmailAddress': 'support@email.com',
'financeEmailAddress': 'finance@email.com',
'paypalEmailAddress': 'paypal@email.com',
'vendorName': 'Some Vendor',
'companyName': 'Some Company',
'address1': 'Address 1',
'address2': 'Address 2',
'addressCity': 'City',
'addressState': 'State',
'addressZipCode': '90210',
'addressPhone': '1234567890',
'addressFax': '1234567890',
'vatNumber': '1234567890',
'countryIso': 'BMU',
'currencyIso': 'EUR',
'homePageURL': 'http://mozilla.org',
'eventNotificationEnabled': False,
'eventNotificationURL': '',
'status': 'LIC',
'sbiAgreementAccepted': True,
'acceptedSBIAgreement': datetime.today,
'sbiAgreementExpires': datetime.today,
}
}
class ClientMock(Client):
    # Fake Bango client: answers come from the mock_data table above
    # instead of any network traffic.

    def mock_results(self, key, data=None):
        """
        Returns result for a key. Data can be passed in to override mock_data.
        """
        result = data or mock_data.get(key, {}).copy()
        # Every Bango response carries these two fields; default to success.
        for key, value in (['responseCode', 'OK'], ['responseMessage', '']):
            if key not in result:
                result[key] = value
        return result

    def call(self, name, data, wsdl=''):
        """
        This fakes out the client and just looks up the values in mock_results
        for that service.
        """
        # callables=True lets mock_data hold factories (uuid4, ltime, ...)
        # so every call produces fresh ids.
        bango = dict_to_mock(self.mock_results(name), callables=True)
        # Raise exactly like the real client would on error codes.
        self.is_error(bango.responseCode, bango.responseMessage)
        return bango
def response_to_dict(resp):
    """Converts a suds response into a dictionary suitable for JSON"""
    return {key: getattr(resp, key) for key in resp.__keylist__}
def dict_to_mock(data, callables=False):
    """
    Build a suds-style Mock object from a dictionary.

    callables: when True, any callable value is invoked and its return
    value stored instead of the callable itself (default False).
    """
    result = Mock()
    result.__keylist__ = data.keys()
    for key, value in data.iteritems():
        value = value() if callables and callable(value) else value
        setattr(result, key, value)
    return result
def get_client():
    """
    Use this to get the right client and communicate with Bango.

    Order matters: BANGO_MOCK wins over BANGO_PROXY, so tests stay
    offline even when a proxy is configured.
    """
    if settings.BANGO_MOCK:
        return ClientMock()
    if settings.BANGO_PROXY:
        return ClientProxy()
    return Client()
|
facebookresearch/ParlAI | parlai/crowdsourcing/utils/worlds.py | Python | mit | 3,648 | 0.001919 | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from parlai.core.worlds import World
class CrowdDataWorld(World):
    def prep_save_data(self, workers):
        """
        Assemble the data to persist for later review, including chats
        from individual worker perspectives.
        """
        return {
            'custom_data': self.get_custom_task_data(),
            'worker_data': {},
        }

    def get_custom_task_data(self):
        """
        Hook returning whatever was collected during this task that should
        be saved, in some format — preferably a dict containing acts.

        If you need some extraordinary data storage that this doesn't cover,
        you can extend the ParlAIChatBlueprint and write your own
        ParlAIChatAgentState that defines the behavior you want.
        """
        # return {
        #     'acts': [self.important_turn1, self.important_turn2]
        #     'context': self.some_context_data_of_importance
        # }
        pass
class CrowdOnboardWorld(CrowdDataWorld):
    """
    Generic world for onboarding a Turker and collecting information from them.
    """

    def __init__(self, opt, agent):
        """Set up any resources needed to run the onboarding world."""
        self.agent = agent
        self.episodeDone = False

    def parley(self):
        """Run a single turn of the onboarding task."""
        self.episodeDone = True

    def episode_done(self):
        """Whether onboarding has completed."""
        return self.episodeDone

    def shutdown(self):
        """Release any resources this world holds."""
        pass
class CrowdTaskWorld(CrowdDataWorld):
    """
    Generic world for Crowd tasks.
    """

    def __init__(self, opt, agent):
        """Set up the resources needed to run the task world."""
        self.agent = agent
        self.episodeDone = False

    def parley(self):
        """Run a single turn of the task."""
        self.episodeDone = True

    def episode_done(self):
        """
        A ParlAI-Mephisto task ends, and its workers may be marked complete,
        once the world reports that it has finished.
        """
        return self.episodeDone

    def shutdown(self):
        """Free the world's resources and shut down the agent."""
        self.agent.shutdown()

    def review_work(self):
        """
        Programmatically approve/reject this work. Doing this now (if possible)
        means that you don't need to do the work of reviewing later on.

        For example:

        .. code-block:: python

            mephisto_agent = self.agent.mephisto_agent
            if self.response == '0':
                mephisto_agent.reject_work(
                    'You rated our model's response as a 0/10 but we '
                    'know we\'re better than that'
                )
            else:
                if self.response == '10':
                    mephisto_agent.pay_bonus(1, 'Thanks for a great rating!')
                mephisto_agent.approve_work()
        """
        # mephisto_agent = self.agent.mephisto_agent
        # mephisto_agent.approve_work()
        # mephisto_agent.reject_work()
        # mephisto_agent.pay_bonus(1000)  # Pay $1000 as bonus
        # mephisto_agent.block_worker()   # Block this worker from future work
        pass
|
LighthouseHPC/lighthouse | sandbox/lily/django_orthg/orthg/database/writeUrl.py | Python | mit | 396 | 0.035354 | import csv
with open('guided_least.csv', 'rb') as csvfile:
spamreader = csv.rea | der(csvfile)
url = open('url.csv','w')
for row in spamreader:
url.write(row[0]+',')
url.write(row[1]+',')
url.write(row[2]+',')
url.write("/"+row[1]+row[2]+',')
url.write('\n')
print "/"+row[1]+row[2]+'.f'+', | '
|
EventGhost/EventGhost | plugins/TheaterTek/__init__.py | Python | gpl-2.0 | 31,794 | 0.008178 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2007 Ralph Eisenbach
#
# This plugin is based on the plugin for ZoomPlayer
# by Lars-Peter Voss <bitmonster@eventghost.net>
#
# This file is a plugin for EventGhost.
# Copyright © 2005-2020 EventGhost Project <http://www.eventghost.net/>
#
# EventGhost is free software: you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free
# Software Foundation, either version 2 of the License, or (at your option)
# any later version.
#
# EventGhost is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
# You should have received a copy of the GNU General Public License along
# with EventGhost. If not, see <http://www.gnu.org/licenses/>.
# Register this plugin with EventGhost: metadata, the description shown in
# the add-plugin dialog, and the embedded toolbar icon (base64 PNG).
eg.RegisterPlugin(
    name = "TheaterTek",
    author = "SurFan",
    version = "0.0.1",
    kind = "program",
    guid = "{EF830DA5-EF08-4050-BAE0-D5FC0057D149}",
    canMultiLoad = True,
    createMacrosOnAdd = True,
    description = (
        'Adds actions to control <a href="http://www.theatertek.com/">TheaterTek</a>.'
        '\n\n<p><b>Notice:</b><br>'
        'To make it work, you have to enable TCP control in TheaterTek. '
    ),
    url = "http://www.eventghost.net/forum/viewtopic.php?t=559",
    icon = (
        "iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAIAAACQkWg2AAACGElEQVR42m1RPWhaURQ+"
        "gg6lCjooxnZIH9iARAi9QqwQTcBmioNQcWskwxV5i+AgDqHERZdsEfreUE3BDFJI4XUq"
        "EeqeizjUnw6CKO1SUJ4lWKR5PXotjaZ3eefv+873nafT6/XT6dRkMp1+PgUA0Svy9Ozs"
        "zfY2cbvdmLpcri/VOsyfDgiAxGP8SpgyxmSQFyUGQAjE2a0yWQJQoBL5i+MNWbeIcCAO"
        "qwCNaLMkPhsilFyTa9zjiXtmXQeAcg9AGEoB5mBwAChHk7TBYBAIBNLpNJYvUhfq1x/L"
        "Hti/7Yebh6VSCekbbxvomM+dn5df7b9cNY3ckWGkUqkgq9fgxUI2m81kMni0VqtVr9cP"
        "PPt3NjCghEpUUhTl5ONJ0BLsdrsIlmVZFEWn09lsNtHYHEABvoO0JlFKY7FYuVxGbi4m"
        "l8uFw+F8Pu/z+bCL4DkgBHRtxo0TuH0ymdjt9uMPxxs/N7BSLBbREpeMyxcA7bUGyw9t"
        "7Jp2G41Gv9/vdDpcVTKZ5JIIxcMCE64ESzCIw8OrYdfSxTLqsVqttVotkUiYzeZQKKQz"
        "Go3j8dhgMBwVjrZ+b/V6PVSMqd/vr1arGHAzKAan2+227vbb5K6Sd5/er68/xlMiIJVK"
        "CYJgs9kikQiy4ImeOTZXARyzs/McR1VVLRQKaGBv7wWy+J/O/sx/APjGD39dXio3NyrG"
        "o9EoGo0+efCIt/4ArUT50E11E2MAAAAASUVORK5CYII="
    ),
)
# ===================================================================
# TheaterTek TCP/IP Interface
# ===================================================================
"""\
IP COMMANDS
-----------
TT->AP Sent from TT to client application
AP->TT Sent from client application to TT
TT<-->AP Sent from TT and can be polled by client.
Commands are sent ASCII in the form:
4 byte command, space, {parameter} CRLF
A successful command returns:
Command, space, 0
OR
Command, space, response
An unsuccessful command returns:
Command, space, -1
Example:
0000 // Client app
0000 TheaterTek DVD // Returned value
Enum values
-----------
IP_MEDIASTATE 0=Stopped/NoMedia, 1=Playing, 2=paused, 3=FF, 4=RW
IP_FULLSCREEN 0=Minimized, 1=Windowed, 2=Fullscreen
IP_GETPRIVATE Allows client to set/get a private string up to 1024 bytes on TT. This data persists as long as TT is running.
#define IP_APPLICATION 0 // TT<-->AP Application name
#define IP_VERSION 1 // TT<-->AP Application version
#define IP_FLASH 500 // TT<-->AP OSD Flash message
#define IP_FULLSCREEN 510 // TT<-->AP Fullscreen/windowed status
#define IP_MEDIASTATE 1000 // TT<-->AP State enum
#define IP_MEDIATIME 1010 // TT<-->AP Media time (hh:mm:ss / hh:mm:ss)
#define IP_MEDIAPOS 1020 // AP->TT Set media time (hh:mm:ss)
#define IP_ENDOFMEDIA 1030 // TT->AP Signals end of media
#define IP_FORMAT 1040 // TT->AP (0=NTSC, 1=PAL)
#define IP_GETAR 1300 // TT<-->AP Return Current AR (name)
#define IP_ARCOUNT 1310 // AP->TT AR Count
#define IP_ARNAMES 1320 // AP->TT AR Names (name|name)
#define IP_SETAR 1330 // AP->TT Set Current AR (number)
#define IP_CURFILE 1400 // TT<-->AP Current file
#define IP_DISKINSERTION 1410 // TT->AP Disk inserted
#define IP_DISKEJECTION 1420 // TT->AP Disk ejected
#define IP_DVDUNIQUEID 1500 // AP->TT DVD unique ID
#define IP_DVDTITLE 1510 // TT<-->AP Current Title
#define IP_DVDTITLECOUNT 1520 // AP->TT Title count
#define IP_DVDPLAYTITLE 1530 // AP->TT Play Title
#define IP_DVDCHAPTER 1600 // TT<-->AP Current Chapter
#define IP_DVDCHAPTERCOUNT 1610 // AP->TT Chapter count
#define IP_DVDPLAYCHAPTER 1620 // AP->TT Play chapter
#define IP_DVDPLAYTITCHAP 1630 // AP->TT Play Chapter in Title (Chapter Title)
#define IP_DVDAUDIO 1700 // TT<-->AP Current audio stream
#define IP_DVDSETAUDIO 1710 // AP->TT Set audio stream
#define IP_DVDAUDIOCOUNT 1720 // AP->TT Audio stream count
#define IP_DVDAUDIONAMES 1730 // AP->TT Audio stream names (name|name)
#define IP_DVDSUBTITLE 1800 // TT<-->AP Current subtitle stream
#define IP_DVDSETSUBTITLE 1810 // AP->TT Set subtitle stream, -1 to disable
#define IP_DVDSUBTITLECOUNT 1820 // AP->TT Subtitle stream count
#define IP_DVDSUBTITLENAMES 1830 // AP->TT Subtitle names (name|name)
#define IP_DVDANGLE 1900 // TT<-->AP Current angle
#define IP_DVDSETANGLE 1910 // AP->TT Set angle
#define IP_DVDANGLECOUNT 1920 // AP->TT Angle count
#define IP_DVDMENUMODE 2000 // TT<-->AP Menu mode
#define IP_DOMAIN 2010 // TT->AP DVD Domain
#define IP_GETVOLUME 2100 // TT<-->AP Get Current volume
#define IP_SETVOLUME 2110 // AP->TT Set Current volume
#define IP_GETAUDIOOUTPUT 2120 // AP->TT Get Current audio output
#define IP_SETAUDIOOUTPUT 2130 // AP->TT Set audio output
#define IP_ADDBOOKMARK 2200 // AP->TT Add a bookmark
#define IP_NEXTBOOKMARK 2210 // AP->TT Next bookmark
#define IP_PREVBOOKMARK 2220 // AP->TT Previous bookmark
#define IP_PLAYFILE 3000 // AP->TT Play file
#define IP_ADDFILE 3010 // AP->TT Add file to playlist
#define IP_CLEARLIST 3020 // AP->TT Clear playlist
#define IP_GETINDEX 3030 // AP->TT Current item index
#define IP_PLAYATINDEX 3040 // AP->TT Play item at index
#define IP_GETL | ISTCOUNT 3050 // AP->TT Current list count
#define IP_GETLIST 3060 // AP->TT Get playlist (name|name)
#define IP_DELATINDEX 3070 // AP->TT Delete file at index
#define IP_SETPRIVATE 4000 // AP->TT Private app | string
#define IP_GETPRIVATE 4010 // AP->TT Private app string
#define IP_WM_COMMAND 5000 // AP->TT Internal command
#define IP_KEYPRESS 5010 // AP->TT Key code
#define IP_SENDMSG 5020 // AP->TT Send message
#define IP_POSTMSG 5030 // AP->TT Post message
Auto Killer Commands
--------------------
#define IP_LAUNCH 8000 // AP->AK
#define IP_QUIT 8010 // AP->AK
#define IP_MOUNTDISK 8020 // AP->AK Changer#, Slot#
#define IP_UNMOUNTDISK 8030 // AP->AK Changer# ->Slot#
#define IP_EJECTDISK 8040 // AP->AK Changer#, Slot#
#define IP_GETSLOTDATA 8050 // AP->AK Changer#, Slot#
#define IP_GETDRIVEDATA 8060 // AP->AK Changer# ->DriveData
#define IP_CHECKCHANGED 8070 // AP->AK
#define IP_REBUILDDATA 8080 // AP->AK
#define IP_DATACHANGED 8100 // AK->AP Notification of data change
#define IP_COUNTCHANGERS 8110 // AP->AK
WM_COMMANDS
-----------
#define ID_PLAY 32771
#define ID_STOP 32772
#define ID_PAUSE 32773
#define ID_NEXT 32774
#define ID_PREVIOUS 32775
#define ID_EXIT 32776
#define ID_FF 32777
#define ID_RW 32778
#define ID_MENU_LIST 32779
#define ID_TITLE_MENU 32780
#define ID_FF_1X 32782
#define ID_FF_2X 32784
#define ID_FF_5X 32785
#define ID_FF_10X 32786
#define ID_FF_20X 32787
#define ID_FF_SLOW 32788
#define ID_RW_1X 32790
#define ID_RW_2X 32791
#define ID_RW_5X 32792
#define ID_RW_10X 32793
#define ID_RW_20X 32794
|
dims/nova | nova/cells/state.py | Python | apache-2.0 | 18,615 | 0.000269 | # Copyright (c) 2012 Rackspace Hosting
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
CellState Manager
"""
import collections
import copy
import datetime
import functools
import time
from oslo_config import cfg
from oslo_db import exception as db_exc
from oslo_log import log as logging
from oslo_serialization import jsonutils
from oslo_utils import timeutils
from oslo_utils import units
import six
from nova.cells import rpc_driver
import nova.conf
from nova import context
from nova.db import base
from nova import exception
from nova.i18n import _LE
from nova import objects
from nova import rpc
from nova import servicegroup
from nova import utils
LOG = logging.getLogger(__name__)
CONF = nova.conf.CONF
class CellState(object):
    """Holds information for a particular cell."""
    def __init__(self, cell_name, is_me=False):
        self.name = cell_name
        self.is_me = is_me
        # Never heard from yet -> oldest representable timestamp.
        self.last_seen = datetime.datetime.min
        self.capabilities = {}
        self.capacities = {}
        self.db_info = {}
        # TODO(comstud): The DB will specify the driver to use to talk
        # to this cell, but there's no column for this yet. The only
        # available driver is the rpc driver.
        self.driver = rpc_driver.CellsRPCDriver()
    def update_db_info(self, cell_db_info):
        """Update cell credentials from db."""
        filtered = {}
        for key, value in six.iteritems(cell_db_info):
            # 'name' is tracked separately on the instance.
            if key != 'name':
                filtered[key] = value
        self.db_info = filtered
    def update_capabilities(self, cell_metadata):
        """Update cell capabilities for a cell."""
        self.capabilities = cell_metadata
        self.last_seen = timeutils.utcnow()
    def update_capacities(self, capacities):
        """Update capacity information for a cell."""
        self.capacities = capacities
        self.last_seen = timeutils.utcnow()
    def get_cell_info(self):
        """Return subset of cell information for OS API use."""
        info = dict(name=self.name, capabilities=self.capabilities)
        if not self.db_info:
            return info
        for field in ('is_parent', 'weight_scale', 'weight_offset'):
            info[field] = self.db_info[field]
        url = rpc.get_transport_url(self.db_info['transport_url'])
        if url.hosts:
            first_host = url.hosts[0]
            # Expose the first host's connection details under legacy keys.
            info['username'] = first_host.username
            info['rpc_host'] = first_host.hostname
            info['rpc_port'] = first_host.port
        return info
    def send_message(self, message):
        """Send a message to this cell by delegating to the driver,
        passing ourselves and the message as arguments.
        """
        self.driver.send_message_to_cell(self, message)
    def __repr__(self):
        suffix = "me" if self.is_me else "not_me"
        return "Cell '%s' (%s)" % (self.name, suffix)
def sync_before(f):
    """Decorator: refresh cell information from the DB before calling ``f``.

    Used on methods that read cell information, so they periodically pick
    up the latest state (the sync itself is rate-limited internally).
    """
    @functools.wraps(f)
    def inner(self, *args, **kwargs):
        self._cell_data_sync()
        return f(self, *args, **kwargs)
    return inner
def sync_after(f):
    """Decorator: force a DB sync immediately after ``f`` runs.

    Used on methods that write cell information to the database, so the
    in-memory cache reflects the change right away.
    """
    @functools.wraps(f)
    def inner(self, *args, **kwargs):
        outcome = f(self, *args, **kwargs)
        self._cell_data_sync(force=True)
        return outcome
    return inner
_unset = object()
class CellStateManager(base.Base):
    def __new__(cls, cell_state_cls=None, cells_config=_unset):
        # Factory dispatch: constructing the base class transparently
        # returns the file-backed or DB-backed concrete manager.
        if cls is not CellStateManager:
            # A concrete subclass is being constructed directly -- build it.
            return super(CellStateManager, cls).__new__(cls)
        if cells_config is _unset:
            # `_unset` sentinel distinguishes "not passed" from a falsy value.
            cells_config = CONF.cells.cells_config
        if cells_config:
            # A cells config file is configured -> file-backed manager.
            return CellStateManagerFile(cell_state_cls)
        return CellStateManagerDB(cell_state_cls)
    def __init__(self, cell_state_cls=None):
        """Set up cell state and perform the initial (forced) DB sync.

        :param cell_state_cls: class used to represent each cell; defaults
            to :class:`CellState`.
        """
        super(CellStateManager, self).__init__()
        if not cell_state_cls:
            cell_state_cls = CellState
        self.cell_state_cls = cell_state_cls
        # State object for *this* cell (is_me=True).
        self.my_cell_state = cell_state_cls(CONF.cells.name, is_me=True)
        self.parent_cells = {}
        self.child_cells = {}
        # Forces the first _time_to_sync() check to succeed.
        self.last_cell_db_check = datetime.datetime.min
        self.servicegroup_api = servicegroup.API()
        attempts = 0
        # Retry the initial sync for up to ~1 hour (120 attempts * 30s)
        # so startup survives a temporarily unavailable database.
        while True:
            try:
                self._cell_data_sync(force=True)
                break
            except db_exc.DBError:
                attempts += 1
                if attempts > 120:
                    raise
                LOG.exception(_LE('DB error'))
                time.sleep(30)
        # Parse configured capabilities of the form "name=v1;v2" into
        # a dict of name -> set of values.
        my_cell_capabs = {}
        for cap in CONF.cells.capabilities:
            name, value = cap.split('=', 1)
            if ';' in value:
                values = set(value.split(';'))
            else:
                values = set([value])
            my_cell_capabs[name] = values
        self.my_cell_state.update_capabilities(my_cell_capabs)
def _refresh | _cells_from_dict(self, db_cells_dict):
"""Make our cell info map match the db."""
# Update current cells. Delete ones that disappeared
for cells_dict in (self.parent_cells, self.child_cells):
for cell_name, cell_info in cells_dict.items():
| is_parent = cell_info.db_info['is_parent']
db_dict = db_cells_dict.get(cell_name)
if db_dict and is_parent == db_dict['is_parent']:
cell_info.update_db_info(db_dict)
else:
del cells_dict[cell_name]
# Add new cells
for cell_name, db_info in db_cells_dict.items():
if db_info['is_parent']:
cells_dict = self.parent_cells
else:
cells_dict = self.child_cells
if cell_name not in cells_dict:
cells_dict[cell_name] = self.cell_state_cls(cell_name)
cells_dict[cell_name].update_db_info(db_info)
def _time_to_sync(self):
"""Is it time to sync the DB against our memory cache?"""
diff = timeutils.utcnow() - self.last_cell_db_check
return diff.seconds >= CONF.cells.db_check_interval
def _update_our_capacity(self, ctxt=None):
"""Update our capacity in the self.my_cell_state CellState.
This will add/update 2 entries in our CellState.capacities,
'ram_free' and 'disk_free'.
The values of these are both dictionaries with the following
format:
{'total_mb': <total_memory_free_in_the_cell>,
'units_by_mb: <units_dictionary>}
<units_dictionary> contains the number of units that we can build for
every distinct memory or disk requirement that we have based on
instance types. This number is computed by looking at room available
on every compute_node.
Take the following instance_types as an example:
[{'memory_mb': 1024, 'root_gb': 10, 'ephemeral_gb': 100},
{'memory_mb': 2048, 'root_gb': 20, 'ephemeral_gb': 200}]
capacities['ram_free']['units_by_mb'] would contain the following:
{'1024': <number_of_instances_that_will_fit>,
'2048': <number_of_instances_that_will_fit>}
capacities['disk_free']['units_by_mb'] would contain the following:
{'122880': <number_of_instances_tha |
tchellomello/home-assistant | homeassistant/components/heos/media_player.py | Python | apache-2.0 | 12,782 | 0.000469 | """Denon HEOS Media Player."""
from functools import reduce, wraps
import logging
from operator import ior
from typing import Sequence
from pyheos import HeosError, const as heos_const
from homeassistant.components.media_player import MediaPlayerEntity
from homeassistant.components.media_player.const import (
ATTR_MEDIA_ENQUEUE,
DOMAIN,
MEDIA_TYPE_MUSIC,
MEDIA_TYPE_PLAYLIST,
MEDIA_TYPE_URL,
SUPPORT_CLEAR_PLAYLIST,
SUPPORT_NEXT_TRACK,
SUPPORT_PAUSE,
SUPPORT_PLAY,
SUPPORT_PLAY_MEDIA,
SUPPORT_PREVIOUS_TRACK,
SUPPORT_SELECT_SOURCE,
SUPPORT_SHUFFLE_SET,
SUPPORT_STOP,
SUPPORT_VOLUME_MUTE,
SUPPORT_VOLUME_SET,
SUPPORT_VOLUME_STEP,
)
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import STATE_IDLE, STATE_PAUSED, STATE_PLAYING
from homeassistant.helpers.typing import HomeAssistantType
from homeassistant.util.dt import utcnow
from .const import DATA_SOURCE_MANAGER, DOMAIN as HEOS_DOMAIN, SIGNAL_HEOS_UPDATED
BASE_SUPPORTED_FEATURES = (
SUPPORT_VOLUME_MUTE
| SUPPORT_VOLUME_SET
| SUPPORT_VOLUME_STEP
| SUPPORT_CLEAR_PLAYLIST
| SUPPORT_SHUFFLE_SET
| SUPPORT_SELECT_SOURCE
| SUPPORT_PLAY_MEDIA
)
PLAY_STATE_TO_STATE = {
heos_const.PLAY_STATE_PLAY: STATE_PLAYING,
heos_const.PLAY_STATE_STOP: STATE_IDLE,
heos_const.PLAY_STATE_PAUSE: STATE_PAUSED,
}
CONTROL_TO_SUPPORT = {
heos_const.CONTROL_PLAY: SUPPORT_PLAY,
heos_const.CONTROL_PAUSE: SUPPORT_PAUSE,
heos_const.CONTROL_STOP: SUPPORT_STOP,
heos_const.CONTROL_PLAY_PREVIOUS: SUPPORT_PREVIOUS_TRACK,
heos_const.CONTROL_PLAY_NEXT: SUPPORT_NEXT_TRACK,
}
_LOGGER = logging.getLogger(__name__)
async def async_setup_entry(
    hass: HomeAssistantType, entry: ConfigEntry, async_add_entities
):
    """Add media players for a config entry."""
    heos_players = hass.data[HEOS_DOMAIN][DOMAIN]
    entities = []
    for player in heos_players.values():
        entities.append(HeosMediaPlayer(player))
    # True -> request an initial state update for each new entity.
    async_add_entities(entities, True)
def log_command_error(command: str):
    """Return a decorator that logs (rather than raises) command failures.

    ``command`` is a human-readable description used in the log message.
    """
    def _decorate(func):
        @wraps(func)
        async def _guarded(*args, **kwargs):
            try:
                await func(*args, **kwargs)
            except (HeosError, ValueError) as ex:
                _LOGGER.error("Unable to %s: %s", command, ex)
        return _guarded
    return _decorate
class HeosMediaPlayer(MediaPlayerEntity):
"""The HEOS player."""
    def __init__(self, player):
        """Initialize the media player entity for the given HEOS player."""
        # Timestamp of the last now-playing progress event (None until seen).
        self._media_position_updated_at = None
        # The underlying player object (from the heos integration setup).
        self._player = player
        # Dispatcher disconnect handles, populated in async_added_to_hass.
        self._signals = []
        self._supported_features = BASE_SUPPORTED_FEATURES
        # Resolved lazily from hass.data on first async_update.
        self._source_manager = None
    async def _player_update(self, player_id, event):
        """Handle player attribute updated."""
        # Player events are broadcast for every player; ignore those that
        # belong to a different player_id.
        if self._player.player_id != player_id:
            return
        if event == heos_const.EVENT_PLAYER_NOW_PLAYING_PROGRESS:
            # Record when the position was last reported -- presumably used
            # by HA to extrapolate playback position; confirm against HA core.
            self._media_position_updated_at = utcnow()
        await self.async_update_ha_state(True)
    async def _heos_updated(self):
        """Handle sources changed."""
        await self.async_update_ha_state(True)
    async def async_added_to_hass(self):
        """Device added to hass."""
        # Update state when attributes of the player change.
        # The returned handles are kept so they can be disconnected later.
        self._signals.append(
            self._player.heos.dispatcher.connect(
                heos_const.SIGNAL_PLAYER_EVENT, self._player_update
            )
        )
        # Update state when heos changes (e.g. sources were refreshed).
        self._signals.append(
            self.hass.helpers.dispatcher.async_dispatcher_connect(
                SIGNAL_HEOS_UPDATED, self._heos_updated
            )
        )
    # Each command below delegates to the underlying player; failures are
    # logged (not raised) by the log_command_error decorator.
    @log_command_error("clear playlist")
    async def async_clear_playlist(self):
        """Clear players playlist."""
        await self._player.clear_queue()
    @log_command_error("pause")
    async def async_media_pause(self):
        """Send pause command."""
        await self._player.pause()
    @log_command_error("play")
    async def async_media_play(self):
        """Send play command."""
        await self._player.play()
    @log_command_error("move to previous track")
    async def async_media_previous_track(self):
        """Send previous track command."""
        await self._player.play_previous()
    @log_command_error("move to next track")
    async def async_media_next_track(self):
        """Send next track command."""
        await self._player.play_next()
    @log_command_error("stop")
    async def async_media_stop(self):
        """Send stop command."""
        await self._player.stop()
    @log_command_error("set mute")
    async def async_mute_volume(self, mute):
        """Mute the volume."""
        await self._player.set_mute(mute)
    @log_command_error("play media")
    async def async_play_media(self, media_type, media_id, **kwargs):
        """Play a piece of media.

        Supported media types: URL/music (direct stream), "quick_select",
        playlist, and "favorite".  Raises ValueError for unknown media
        (caught and logged by the decorator).
        """
        if media_type in (MEDIA_TYPE_URL, MEDIA_TYPE_MUSIC):
            # Stream the URL directly.
            await self._player.play_url(media_id)
            return
        if media_type == "quick_select":
            # media_id may be an int or a str
            selects = await self._player.get_quick_selects()
            try:
                index = int(media_id)
            except ValueError:
                # Try finding index by name
                index = next(
                    (index for index, select in selects.items() if select == media_id),
                    None,
                )
            if index is None:
                raise ValueError(f"Invalid quick select '{media_id}'")
            await self._player.play_quick_select(index)
            return
        if media_type == MEDIA_TYPE_PLAYLIST:
            # Look the playlist up by name.
            playlists = await self._player.heos.get_playlists()
            playlist = next((p for p in playlists if p.name == media_id), None)
            if not playlist:
                raise ValueError(f"Invalid playlist '{media_id}'")
            # Enqueue at the end if requested; otherwise replace and play.
            add_queue_option = (
                heos_const.ADD_QUEUE_ADD_TO_END
                if kwargs.get(ATTR_MEDIA_ENQUEUE)
                else heos_const.ADD_QUEUE_REPLACE_AND_PLAY
            )
            await self._player.add_to_queue(playlist, add_queue_option)
            return
        if media_type == "favorite":
            # media_id may be an int or str
            try:
                index = int(media_id)
            except ValueError:
                # Try finding index by name
                index = next(
                    (
                        index
                        for index, favorite in self._source_manager.favorites.items()
                        if favorite.name == media_id
                    ),
                    None,
                )
            if index is None:
                raise ValueError(f"Invalid favorite '{media_id}'")
            await self._player.play_favorite(index)
            return
        raise ValueError(f"Unsupported media type '{media_type}'")
    @log_command_error("select source")
    async def async_select_source(self, source):
        """Select input source."""
        await self._source_manager.play_source(source, self._player)
    @log_command_error("set shuffle")
    async def async_set_shuffle(self, shuffle):
        """Enable/disable shuffle mode."""
        # The current repeat mode is passed through unchanged so toggling
        # shuffle does not alter the repeat setting.
        await self._player.set_play_mode(self._player.repeat, shuffle)
@log_command_error("set volume level")
async def async_set_volume_level(self, volume):
"""Set volume level, range 0..1."""
await self._play | er.set_volume(int(volume * 1 | 00))
    async def async_update(self):
        """Update supported features of the player."""
        controls = self._player.now_playing_media.supported_controls
        current_support = [CONTROL_TO_SUPPORT[control] for control in controls]
        # OR the base feature set together with whatever the current media
        # supports (reduce with operator.ior over the flag ints).
        self._supported_features = reduce(ior, current_support, BASE_SUPPORTED_FEATURES)
        if self._source_manager is None:
            # Lazily bind the shared source manager created at integration setup.
            self._source_manager = self.hass.data[HEOS_DOMAIN][DATA_SOURCE_MANAGER]
async def async_will_remove_from_hass(self):
"""Disconnect the device when removed."""
for si |
KaiRo-at/socorro | alembic/versions/32b54dec3fc0_fixes_bug_970406_add_raw_adi_logs_table.py | Python | mpl-2.0 | 1,194 | 0.008375 | """Fixes bug 970406 - add raw_adi_logs table
Revision ID: 32b54dec3fc0
Revises: 1ab8d5514ce2
Create Date: 2014-06-12 11:47:19.398882
"""
# revision identifiers, used by Alembic.
revision = '32b54dec3fc0'
down_revision = '1ef041dfc3d5'
from alembic import op
from socorrolib.lib import citexttype, jsontype, buildtype
from socorrolib.lib.migrations import fix_permissions, load_stored_proc
import sqlalchemy as sa
from sqlalchemy import types
from sqlalchemy.dialects import postgresql
from sqlalchemy.sql import table, column
def upgrade():
    """Create the raw_adi_logs table (bug 970406)."""
    op.create_table('raw_adi_logs',
        sa.Column('report_date', sa.DATE(), nullable=True),
        sa.Column('product_name', sa.TEXT(), nullable=True),
        sa.Column('product_os_platform', sa.TEXT(), nullable=True),
        sa.Column('product_os_version', sa.TEXT(), nullable=True),
        sa.Column('product_version', sa.TEXT(), nullable=True),
        sa.Column('build', sa.TEXT(), nullable=True),
        sa.Column('build_channel', sa.TEXT(), nullable=True),
        sa.Column('product_guid', sa.TEXT(), nullable=True),
        sa.Column('count', sa.INTEGER(), nullable=True)
    )
def downgrade():
    """Drop the raw_adi_logs table, reversing :func:`upgrade`."""
    op.drop_table('raw_adi_logs')
|
simongibbons/numpy | numpy/core/_type_aliases.py | Python | bsd-3-clause | 7,272 | 0.002338 | """
Due to compatibility, numpy has a very large number of different naming
conventions for the scalar types (those subclassing from `numpy.generic`).
This file produces a convoluted set of dictionaries mapping names to types,
and sometimes other mappings too.
.. data:: allTypes
A dictionary of names to types that will be exposed as attributes through
``np.core.numerictypes.*``
.. data:: sctypeDict
Similar to `allTypes`, but maps a broader set of aliases to their types.
.. data:: sctypes
A dictionary keyed by a "type group" string, providing a list of types
under that group.
"""
from numpy.compat import unicode
from numpy.core._string_helpers import english_lower
from numpy.core.multiarray import typeinfo, dtype
from numpy.core._dtype import _kind_name
sctypeDict = {} # Contains all leaf-node scalar types with aliases
allTypes = {} # Collect the types we will add to the module
# separate the actual type info from the abstract base classes
_abstract_types = {}
_concrete_typeinfo = {}
for k, v in typeinfo.items():
# make all the keys lowercase too
k = english_lower(k)
if isinstance(v, type):
_abstract_types[k] = v
else:
_concrete_typeinfo[k] = v
_concrete_types = {v.type for k, v in _concrete_typeinfo.items()}
def _bits_of(obj):
    """Return the bit width of the scalar type object *obj*."""
    for entry in _concrete_typeinfo.values():
        if entry.type is obj:
            return entry.bits
    if obj in _abstract_types.values():
        msg = "Cannot count the bits of an abstract type"
        raise ValueError(msg) from None
    # some third-party type - make a best-guess
    return dtype(obj).itemsize * 8
def bitname(obj):
    """Return a bit-width name for a given type object"""
    nbits = _bits_of(obj)
    dt = dtype(obj)
    kind_char = dt.kind
    base = _kind_name(dt)
    # Object scalars have no meaningful bit width.
    if base == 'object':
        nbits = 0
    if nbits != 0:
        # Append the byte count to the kind character, e.g. 'i' -> 'i8'.
        kind_char = "%s%d" % (kind_char, nbits // 8)
    return base, nbits, kind_char
def _add_types():
    """Populate allTypes/sctypeDict from the concrete and abstract type info."""
    for cname, info in _concrete_typeinfo.items():
        # Register the C-name plus the typechar and typenum aliases.
        allTypes[cname] = info.type
        for key in (cname, info.char, info.num):
            sctypeDict[key] = info.type
    # Abstract base classes are exposed only through allTypes.
    for aname, abstract_cls in _abstract_types.items():
        allTypes[aname] = abstract_cls
_add_types()
# This is the priority order used to assign the bit-sized NPY_INTxx names, which
# must match the order in npy_common.h in order for NPY_INTxx and np.intxx to be
# consistent.
# If two C types have the same size, then the earliest one in this list is used
# as the sized name.
_int_ctypes = ['long', 'longlong', 'int', 'short', 'byte']
_uint_ctypes = list('u' + t for t in _int_ctypes)
def _add_aliases():
    """Add bit-width aliases (e.g. 'float64', 'f8') for concrete scalar types."""
    for name, info in _concrete_typeinfo.items():
        # these are handled by _add_integer_aliases
        if name in _int_ctypes or name in _uint_ctypes:
            continue
        # insert bit-width version for this class (if relevant)
        base, bit, char = bitname(info.type)
        myname = "%s%d" % (base, bit)
        # ensure that (c)longdouble does not overwrite the aliases assigned to
        # (c)double
        if name in ('longdouble', 'clongdouble') and myname in allTypes:
            continue
        allTypes[myname] = info.type
        # add mapping for both the bit name and the numarray name
        sctypeDict[myname] = info.type
        # add forward, reverse, and string mapping to numarray
        sctypeDict[char] = info.type
_add_aliases()
def _add_integer_aliases():
    """Assign NPY_INTxx-style sized names for the integer C types.

    _int_ctypes/_uint_ctypes are in priority order; when two C types share
    a bit width, the earlier one claims the sized name.
    """
    seen_bits = set()
    for i_ctype, u_ctype in zip(_int_ctypes, _uint_ctypes):
        i_info = _concrete_typeinfo[i_ctype]
        u_info = _concrete_typeinfo[u_ctype]
        bits = i_info.bits # same for both
        for info, charname, intname in [
                (i_info,'i%d' % (bits//8,), 'int%d' % bits),
                (u_info,'u%d' % (bits//8,), 'uint%d' % bits)]:
            if bits not in seen_bits:
                # sometimes two different types have the same number of bits
                # if so, the one iterated over first takes precedence
                allTypes[intname] = info.type
                sctypeDict[intname] = info.type
                sctypeDict[charname] = info.type
        seen_bits.add(bits)
_add_integer_aliases()
# We use these later
void = allTypes['void']
#
# Rework the Python names (so that float and complex and int are consistent
# with Python usage)
#
def _set_up_aliases():
    """Add Python-usage aliases (e.g. float_ -> double) and then remove
    entries that would shadow Python builtins or stdlib module names.
    """
    type_pairs = [('complex_', 'cdouble'),
                  ('int0', 'intp'),
                  ('uint0', 'uintp'),
                  ('single', 'float'),
                  ('csingle', 'cfloat'),
                  ('singlecomplex', 'cfloat'),
                  ('float_', 'double'),
                  ('intc', 'int'),
                  ('uintc', 'uint'),
                  ('int_', 'long'),
                  ('uint', 'ulong'),
                  ('cfloat', 'cdouble'),
                  ('longfloat', 'longdouble'),
                  ('clongfloat', 'clongdouble'),
                  ('longcomplex', 'clongdouble'),
                  ('bool_', 'bool'),
                  ('bytes_', 'string'),
                  ('string_', 'string'),
                  ('str_', 'unicode'),
                  ('unicode_', 'unicode'),
                  ('object_', 'object')]
    for alias, t in type_pairs:
        allTypes[alias] = allTypes[t]
        sctypeDict[alias] = sctypeDict[t]
    # Remove aliases overriding python types and modules
    to_remove = ['ulong', 'object', 'int', 'float',
                 'complex', 'bool', 'string', 'datetime', 'timedelta',
                 'bytes', 'str']
    for t in to_remove:
        try:
            del allTypes[t]
            del sctypeDict[t]
        except KeyError:
            pass
_set_up_aliases()
# Scalar types grouped by kind; the int/uint/float/complex lists are
# populated below by _set_array_types().
sctypes = {'int': [],
           'uint':[],
           'float':[],
           'complex':[],
           'others':[bool, object, bytes, unicode, void]}
def _add_array_type(typename, bits):
    """Append the sized scalar type (e.g. 'int32') to sctypes[typename],
    silently skipping widths that do not exist on this platform.
    """
    sized_name = '%s%d' % (typename, bits)
    if sized_name in allTypes:
        sctypes[typename].append(allTypes[sized_name])
def _set_array_types():
    """Fill the sctypes groups with every sized int/uint/float/complex type
    available on this platform, then make sure intp/uintp are included.
    """
    ibytes = [1, 2, 4, 8, 16, 32, 64]
    fbytes = [2, 4, 8, 10, 12, 16, 32, 64]
    for nbytes in ibytes:  # renamed from `bytes` to avoid shadowing the builtin
        bits = 8*nbytes
        _add_array_type('int', bits)
        _add_array_type('uint', bits)
    for nbytes in fbytes:
        bits = 8*nbytes
        _add_array_type('float', bits)
        # complex types are twice the width of their float components
        _add_array_type('complex', 2*bits)
    _gi = dtype('p')
    if _gi.type not in sctypes['int']:
        # Insert intp/uintp at the position matching their item size so the
        # lists stay sorted by width.
        indx = 0
        sz = _gi.itemsize
        _lst = sctypes['int']
        while (indx < len(_lst) and sz >= _lst[indx](0).itemsize):
            indx += 1
        sctypes['int'].insert(indx, _gi.type)
        sctypes['uint'].insert(indx, dtype('P').type)
_set_array_types()
# Add additional strings to the sctypeDict
_toadd = ['int', 'float', 'complex', 'bool', 'object',
          'str', 'bytes', ('a', 'bytes_')]
for name in _toadd:
    if isinstance(name, tuple):
        # Tuple entries alias a short name to an existing allTypes key.
        sctypeDict[name[0]] = allTypes[name[1]]
    else:
        # Plain names map to the trailing-underscore scalar, e.g. 'int' -> int_.
        sctypeDict[name] = allTypes['%s_' % name]
del _toadd, name
|
StructuralNeurobiologyLab/SyConn | syconn/proc/graphs.py | Python | gpl-2.0 | 26,039 | 0.001613 | # -*- coding: utf-8 -*-
# SyConn - Synaptic connectivity inference toolkit
#
# Copyright (c) 2016 - now
# Max Planck Institute of Neurobiology, Martinsried, Germany
# Authors: Philipp Schubert, Joergen Kornfeld
import itertools
from typing import List, Any, Optional, TYPE_CHECKING
import networkx as nx
import numpy as np
import tqdm
from knossos_utils.skeleton import Skeleton, SkeletonAnnotation, SkeletonNode
from scipy import spatial
if TYPE_CHECKING:
from ..reps.super_segmentation import SuperSegmentationObject
from .. import global_params
from ..mp.mp_utils import start_multiprocess_imap as start_multiprocess
def bfs_smoothing(vertices, vertex_labels, max_edge_length=120, n_voting=40):
    """
    Smooth vertex labels by applying a majority vote over a BFS-collected
    neighbourhood for every vertex in the graph.

    Args:
        vertices: np.array
            Vertex coordinates, shape (N, 3).
        vertex_labels: np.array
            One label per vertex, shape (N, 1).
        max_edge_length: float
            Maximum distance between vertices to consider them connected in
            the graph.
        n_voting: int
            Number of nodes collected during BFS and used for the majority
            vote.

    Returns:
        np.array: smoothed vertex labels, same shape as ``vertex_labels``.
    """
    G = create_graph_from_coords(vertices, max_dist=max_edge_length, mst=False,
                                 force_single_cc=False)
    # create BFS subset
    bfs_nn = split_subcc(G, max_nb=n_voting, verbose=False)
    new_vertex_labels = np.zeros_like(vertex_labels)
    for ii in range(len(vertex_labels)):
        curr_labels = vertex_labels[bfs_nn[ii]]
        # majority vote over the labels of the BFS neighbourhood
        labels, counts = np.unique(curr_labels, return_counts=True)
        majority_label = labels[np.argmax(counts)]
        new_vertex_labels[ii] = majority_label
    return new_vertex_labels
def split_subcc(g, max_nb, verbose=False, start_nodes=None):
    """Collect, for every start node, a BFS neighbourhood of up to ``max_nb``
    traversed edges.

    Args:
        g: Graph
        max_nb: int
            Maximum number of BFS edges followed per start node.
        verbose: bool
            Show a progress bar.
        start_nodes: iterable
            Node IDs to start from; defaults to all nodes of ``g``.

    Returns:
        dict: start node -> list of collected node IDs (start node first).
    """
    pbar = tqdm.tqdm(total=g.number_of_nodes(), leave=False) if verbose else None
    source_nodes = g.nodes() if start_nodes is None else start_nodes
    neighbourhoods = {}
    for node in source_nodes:
        collected = [node]
        for edge_cnt, (_, target) in enumerate(nx.bfs_edges(g, node), start=1):
            collected.append(target)
            if edge_cnt == max_nb:
                break
        neighbourhoods[node] = collected
        if pbar is not None:
            pbar.update(1)
    if pbar is not None:
        pbar.close()
    return neighbourhoods
def chunkify_contiguous(l, n):
    """Yield successive n-sized chunks from the sequence l.
    https://stackoverflow.com/questions/312443/how-do-you-split-a-list-into-evenly-sized-chunks"""
    offsets = range(0, len(l), n)
    for off in offsets:
        yield l[off:off + n]
def split_subcc_join(g: nx.Graph, subgraph_size: int, lo_first_n: int = 1) -> List[List[Any]]:
    """
    Splits the graph into overlapping subgraphs: the DFS node order is cut
    into chunks, and every chunk is grown with BFS context up to
    ``subgraph_size`` nodes.

    Args:
        g: Supervoxel graph
        subgraph_size: Size of subgraphs. The difference between `subgraph_size` and `lo_first_n` defines the
            supervoxel overlap.
        lo_first_n: Leave out first n nodes: will collect `subgraph_size` nodes starting from center node and then
            omit the first lo_first_n nodes, i.e. not use them as new starting nodes.

    Returns:
        List of node-ID lists, one per (context-extended) subgraph.
    """
    # Prefer a degree-1 node (a graph "tip") as DFS start when one exists.
    start_node = list(g.nodes())[0]
    for n, d in dict(g.degree).items():
        if d == 1:
            start_node = n
            break
    dfs_nodes = list(nx.dfs_preorder_nodes(g, start_node))
    # get subgraphs via splicing of traversed node list into equally sized fragments. they might
    # be unconnected if branch sizes mod subgraph_size != 0, then a chunk will contain multiple connected components.
    # NOTE(review): chunks are sized by lo_first_n (not subgraph_size); this
    # looks intentional for the overlap scheme described above -- confirm.
    chunks = list(chunkify_contiguous(dfs_nodes, lo_first_n))
    sub_graphs = []
    for ch in chunks:
        # collect all connected component subgraphs
        sg = g.subgraph(ch).copy()
        sub_graphs += list((sg.subgraph(c) for c in nx.connected_components(sg)))
    # add more context to subgraphs
    subgraphs_withcontext = []
    for sg in sub_graphs:
        # add context but omit artificial start node
        context_nodes = []
        for n in list(sg.nodes()):
            subgraph_nodes_with_context = []
            nb_edges = sg.number_of_nodes()
            # Grow via BFS over the *full* graph until subgraph_size is reached.
            for e in nx.bfs_edges(g, n):
                subgraph_nodes_with_context += list(e)
                nb_edges += 1
                if nb_edges == subgraph_size:
                    break
            context_nodes += subgraph_nodes_with_context
        # add original nodes
        context_nodes = list(set(context_nodes))
        # De-duplicate: context must not repeat the chunk's own nodes.
        for n in list(sg.nodes()):
            if n in context_nodes:
                context_nodes.remove(n)
        subgraph_nodes_with_context = list(sg.nodes()) + context_nodes
        subgraphs_withcontext.append(subgraph_nodes_with_context)
    return subgraphs_withcontext
def merge_nodes(G, nodes, new_node):
    """Collapse ``nodes`` into a single ``new_node``, rewiring their edges.

    FOR UNWEIGHTED, UNDIRECTED GRAPHS ONLY.
    """
    if G.is_directed():
        raise ValueError('Method "merge_nodes" is only valid for undirected graphs.')
    G.add_node(new_node)
    # Rewire every edge incident to a merged node onto new_node.
    for merged in nodes:
        for endpoint_a, endpoint_b in G.edges(merged):
            partner = endpoint_b if endpoint_a == merged else endpoint_a
            G.add_edge(new_node, partner)
    # Drop the merged nodes (and their old edges) afterwards.
    for merged in nodes:
        G.remove_node(merged)
def split_glia_graph(nx_g, thresh, clahe=False, nb_cpus=1, pred_key_appendix=""):
    """
    Split graph into glia and non-glia connected components.

    Args:
        nx_g: nx.Graph
        thresh: float
        clahe: bool
            Use the CLAHE-based glia predictions.
        nb_cpus: int
        pred_key_appendix: str

    Returns: list, list
        Neuron, glia connected components.
    """
    # Assemble the prediction key, e.g. "glia_probas[_clahe]<appendix>".
    key_parts = ["glia_probas"]
    if clahe:
        key_parts.append("_clahe")
    key_parts.append(pred_key_appendix)
    glia_key = "".join(key_parts)
    glianess, size = get_glianess_dict(list(nx_g.nodes()), thresh, glia_key,
                                       nb_cpus=nb_cpus)
    return remove_glia_nodes(nx_g, size, glianess, return_removed_nodes=True)
def split_glia(sso, thresh, clahe=False, pred_key_appendix=""):
    """
    Split a SuperSegmentationObject into glia and non-glia
    SegmentationObjects.

    Args:
        sso: SuperSegmentationObject
        thresh: float
        clahe: bool
        pred_key_appendix: str
            Defines type of glia predictions.

    Returns: list, list (of SegmentationObject)
        Neuron, glia nodes.
    """
    # Delegate straight to the graph-level split on the object's RAG.
    return split_glia_graph(sso.rag, thresh=thresh, clahe=clahe,
                            nb_cpus=sso.nb_cpus,
                            pred_key_appendix=pred_key_appendix)
def create_ccsize_dict(g: nx.Graph, bbs: dict, is_connected_components: bool = False) -> dict:
"""
Calculate bounding box size of connected components.
Args:
g: Supervoxel graph.
bbs: Bounding boxes (physical units).
is_connected_components: If graph `g` already is connected components. If False,
``nx.connected_components`` is applied.
Returns:
Look-up which stores the connected component bounding box for every single node in the input Graph `g`.
"""
if not is_connected_components:
ccs = nx.connected_components(g)
else:
ccs = g
node2cssize_dict = {}
for cc in ccs:
# if ID is not in bbs, it was skipped due to low voxel count
curr_bbs = [bbs[n] for n in cc if n in bbs]
if len(curr_bbs) == 0:
raise ValueError(f'Could not find a single bounding box for connected component with IDs: {cc}.')
else:
curr_bbs = np.concatenate(curr_bbs)
cc_size = np.linalg.norm(np.max(curr_bbs, axis=0) -
np.min(curr_bbs, axis=0), ord=2)
for n in cc:
n |
starlure/google_python_class | babynames/babynames.py | Python | apache-2.0 | 2,697 | 0.021505 | #!/usr/bin/python
# Copyright 2010 Google Inc.
# Licensed under the Apache License, Version 2.0
# http://www.apache.org/licenses/LICENSE-2.0
# Google's Python Class
# http://code.google.com/edu/languages/google-python-class/
import sys
import re
"""Baby Names exercise
Define the extract_names() function below and change main()
to call it.
For writing regex, it's nice to include a copy of the target
text for inspiration.
Here's what the html looks like in the baby.html files:
...
<h3 align="center">Popularity in 1990</h3>
....
<tr align="right"><td>1</td><td>Michael</td><td>Jessica</td>
<tr align="right"><td>2</td><td>Christopher</td><td>Ashley</td>
<tr align="right"><td>3</td><td>Matthew</td><td>Brittany</td>
...
Suggested milestones for incremental development:
-Extract the year and print it
-Extract the names and rank numbers and just print them
-Get the names data into a dict and print it
-Build the [year, 'name rank', ... ] list and print it
-Fix main() to use the extract_names list
"""
def extract_names(filename):
  """
  Given a file name for baby.html, returns a list starting with the year string
  followed by the name-rank strings in alphabetical order.
  ['2006', 'Aaliyah 91', 'Aaron 57', 'Abagail 895', ...]
  """
  # Rank by name; replaces the original's broken `dict.keys().extend(...)`
  # no-ops, which also shadowed the builtin `dict`.
  names_to_rank = {}
  printlist = []
  # 'r' instead of the deprecated 'rU': universal newlines are the default
  # in Python 3 and the 'U' flag was removed in 3.11.
  f = open(filename, 'r')
  totalstr = f.read()
  f.close()
  # The year appears once, in a header like <h3 ...>Popularity in 1990</h3>.
  matchyear = re.search(r'<h\d.*>Popularity\sin\s(\d\d\d\d)</h\d>', totalstr)
  printlist.append(matchyear.group(1))
  # Each table row carries (rank, boy name, girl name).
  namerank = re.findall(r'<tr.*<td>(\d+)</td><td>(\w+)</td><td>(\w+)</td>', totalstr)
  for rank, boy, girl in namerank:
    # Keep the first (i.e. best) rank seen for each name, matching the
    # original's "only set when absent" behaviour.
    if boy not in names_to_rank:
      names_to_rank[boy] = rank
    if girl not in names_to_rank:
      names_to_rank[girl] = rank
  for name, rank in sorted(names_to_rank.items()):
    printlist.append(name + ' ' + rank)
  return printlist
def main():
  """Command-line driver: print results, or write them to .summary files."""
  # This command-line parsing code is provided.
  # Make a list of command line arguments, omitting the [0] element
  # which is the script itself.
  args = sys.argv[1:]
  if not args:
    print 'usage: [--summaryfile] file [file ...]'
    sys.exit(1)
  # Notice the summary flag and remove it from args if it is present.
  summary = False
  if args[0] == '--summaryfile':
    summary = True
    del args[0]
  # With --summaryfile, write each result next to its input as
  # <file>.summary; otherwise print each result to stdout.
  if summary == True:
    for filename in args:
      f = open(filename + '.summary','w')
      f.write('\n'.join(extract_names(filename)) + '\n')
      f.close()
  else:
    for filename in args:
      print '\n'.join(extract_names(filename)) + '\n'

if __name__ == '__main__':
  main()
|
alan-mnix/MLFinalProject | pynlpl/formats/timbl.py | Python | gpl-2.0 | 4,614 | 0.014088 | ###############################################################
# PyNLPl - Timbl Classifier Output Library
# by Maarten van Gompel (proycon)
# http://ilk.uvt.nl/~mvgompel
# Induction for Linguistic Knowledge Research Group
# Universiteit van Tilburg
#
# Derived from code by Sander Canisius
#
# Licensed under GPLv3
#
# This library offers a TimblOutput class for reading Timbl
# classifier output. It supports full distributions (+v+db) and comment (#)
#
###############################################################
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import division
from __future__ import absolute_import
import sys
# Pick UTF-8-capable output streams: Python 2's raw stderr/stdout need a
# codecs writer, Python 3's are already text streams.
# sys.version_info is used instead of the old string comparison
# `sys.version < '3'`, which breaks lexicographically for Python >= 10.
if sys.version_info[0] < 3:
    from codecs import getwriter
    stderr = getwriter('utf-8')(sys.stderr)
    stdout = getwriter('utf-8')(sys.stdout)
else:
    stderr = sys.stderr
    stdout = sys.stdout
from pynlpl.statistics import Distribution
class TimblOutput(object):
    """A class for reading Timbl classifier output, supports the +v+db option and ignores comments starting with #"""

    def __init__(self, stream, delimiter=' ', ignorecolumns=None, ignorevalues=None):
        """Wrap *stream*, an iterable of Timbl output lines.

        delimiter:     field separator used in the output lines
        ignorecolumns: 1-based FEATURE column numbers to drop (first column is 1)
        ignorevalues:  column values to drop wherever they appear
        """
        self.stream = stream
        self.delimiter = delimiter
        # None sentinels replace the original mutable default arguments so
        # instances can never share a default list.
        self.ignorecolumns = ignorecolumns if ignorecolumns is not None else []
        self.ignorevalues = ignorevalues if ignorevalues is not None else []

    def __iter__(self):
        """Yield (features, referenceclass, predictedclass, distribution, distance)."""
        # Note: distance parsing (+v+di) works only if distributions (+v+db) are also enabled!
        for line in self.stream:
            endfvec = None
            line = line.strip()
            if line and line[0] != '#':  # ignore empty lines and comments
                segments = [x for i, x in enumerate(line.split(self.delimiter))
                            if x not in self.ignorevalues and i + 1 not in self.ignorecolumns]
                if not endfvec:
                    try:
                        # Modified by Ruben. There are some cases where one of the features is a {, and then
                        # the module is not able to obtain the distribution of scores and senses
                        # We have to look for the last { in the vector, and due to there is no rindex method
                        # we obtain the reverse and then apply index.
                        aux = list(reversed(segments)).index("{")
                        endfvec = len(segments) - aux - 1
                    except ValueError:
                        endfvec = None
                # `is not None` guard added: on Python 3, `None > 2` raises
                # TypeError (it was silently False on Python 2).
                if endfvec is not None and endfvec > 2:  # only for +v+db
                    try:
                        enddistr = segments.index('}', endfvec)
                    except ValueError:
                        raise
                    distribution = self.parseDistribution(segments, endfvec, enddistr)
                    if len(segments) > enddistr + 1:
                        distance = float(segments[-1])
                    else:
                        distance = None
                else:
                    # No distribution block: everything is feature vector + classes.
                    endfvec = len(segments)
                    distribution = None
                    distance = None
                # features, referenceclass, predictedclass, distribution, distance
                yield segments[:endfvec - 2], segments[endfvec - 2], segments[endfvec - 1], distribution, distance

    def parseDistribution(self, instance, start, end=None):
        """Parse the '{ label score, ... }' block of *instance* into a Distribution."""
        dist = {}
        i = start + 1
        if not end:
            end = len(instance) - 1
        while i < end:  # instance[i] != "}":
            label = instance[i]
            try:
                score = float(instance[i + 1].rstrip(","))
                dist[label] = score
            except (IndexError, ValueError):
                # Narrowed from a bare `except:`: only a missing or
                # non-numeric score field triggers the resynchronisation.
                print("ERROR: pynlpl.input.timbl.TimblOutput -- Could not fetch score for class '" + label + "', expected float, but found '" + instance[i + 1].rstrip(",") + "'. Instance= " + " ".join(instance) + ".. Attempting to compensate...", file=stderr)
                i = i - 1
            i += 2
        if not dist:
            print("ERROR: pynlpl.input.timbl.TimblOutput -- Did not find class distribution for ", instance, file=stderr)
        return Distribution(dist)
|
Zenohm/mafiademonstration | tests/test_stage.py | Python | mit | 336 | 0 | from unittest import TestCase
class TestStage(TestCase):
    """Placeholder test cases for the Stage screen.

    Every test currently calls ``self.fail()`` and still needs a real
    implementation. (Repairs the dataset-garbled method names
    ``tes | t_add_players`` / ``| def test_on_enter``.)
    """

    def test_add_players(self):
        self.fail()

    def test_initialize_settings(self):
        self.fail()

    def test_initialize_players(self):
        self.fail()

    def test_on_enter(self):
        self.fail()

    def test_on_pre_leave(self):
        self.fail()
sebrandon1/tempest | tempest/api/orchestration/stacks/test_soft_conf.py | Python | apache-2.0 | 7,882 | 0 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.api.orchestration import base
from tempest.common.utils import data_utils
from tempest.lib import exceptions as lib_exc
from tempest import test
class TestSoftwareConfig(base.BaseOrchestrationTest):
    """Tests for the Heat software-config and software-deployment APIs."""

    def setUp(self):
        """Create two software configs and a deployment using the first one."""
        super(TestSoftwareConfig, self).setUp()
        self.configs = []
        # Add 2 sets of software configuration
        self.configs.append(self._config_create('a'))
        self.configs.append(self._config_create('b'))
        # Create a deployment using config a's id
        self._deployment_create(self.configs[0]['id'])

    def _config_create(self, suffix):
        """Create a 'script' software config named with *suffix*; cleanup registered."""
        configuration = {'group': 'script',
                         'inputs': [],
                         'outputs': [],
                         'options': {}}
        configuration['name'] = 'heat_soft_config_%s' % suffix
        configuration['config'] = '#!/bin/bash echo init-%s' % suffix
        api_config = self.client.create_software_config(**configuration)
        configuration['id'] = api_config['software_config']['id']
        self.addCleanup(self._config_delete, configuration['id'])
        self._validate_config(configuration, api_config)
        return configuration

    def _validate_config(self, configuration, api_config):
        # Assert all expected keys are present with matching data
        for k in configuration:
            self.assertEqual(configuration[k],
                             api_config['software_config'][k])

    def _deployment_create(self, config_id):
        """Create a software deployment for *config_id*; cleanup registered."""
        self.server_id = data_utils.rand_name('dummy-server')
        self.action = 'ACTION_0'
        self.status = 'STATUS_0'
        self.input_values = {}
        self.output_values = []
        self.status_reason = 'REASON_0'
        self.signal_transport = 'NO_SIGNAL'
        self.deployment = self.client.create_software_deploy(
            self.server_id, config_id, self.action, self.status,
            self.input_values, self.output_values, self.status_reason,
            self.signal_transport)
        self.deployment_id = self.deployment['software_deployment']['id']
        self.addCleanup(self._deployment_delete, self.deployment_id)

    def _deployment_delete(self, deploy_id):
        self.client.delete_software_deploy(deploy_id)
        # Testing that it is really gone
        self.assertRaises(
            lib_exc.NotFound, self.client.show_software_deployment,
            self.deployment_id)

    def _config_delete(self, config_id):
        self.client.delete_software_config(config_id)
        # Testing that it is really gone
        self.assertRaises(
            lib_exc.NotFound, self.client.show_software_config, config_id)

    @test.attr(type='smoke')
    @test.idempotent_id('136162ed-9445-4b9c-b7fc-306af8b5da99')
    def test_get_software_config(self):
        """Testing software config get."""
        for conf in self.configs:
            api_config = self.client.show_software_config(conf['id'])
            self._validate_config(conf, api_config)

    @test.attr(type='smoke')
    @test.idempotent_id('1275c835-c967-4a2c-8d5d-ad533447ed91')
    def test_get_deployment_list(self):
        """Getting a list of all deployments"""
        deploy_list = self.client.list_software_deployments()
        deploy_ids = [deploy['id'] for deploy in
                      deploy_list['software_deployments']]
        self.assertIn(self.deployment_id, deploy_ids)

    @test.attr(type='smoke')
    @test.idempotent_id('fe7cd9f9-54b1-429c-a3b7-7df8451db913')
    def test_get_deployment_metadata(self):
        """Testing deployment metadata get"""
        metadata = self.client.show_software_deployment_metadata(
            self.server_id)
        conf_ids = [conf['id'] for conf in metadata['metadata']]
        self.assertIn(self.configs[0]['id'], conf_ids)

    def _validate_deployment(self, action, status, reason, config_id):
        """Fetch the deployment and check its mutable fields."""
        deployment = self.client.show_software_deployment(self.deployment_id)
        self.assertEqual(action, deployment['software_deployment']['action'])
        self.assertEqual(status, deployment['software_deployment']['status'])
        self.assertEqual(reason,
                         deployment['software_deployment']['status_reason'])
        self.assertEqual(config_id,
                         deployment['software_deployment']['config_id'])

    @test.attr(type='smoke')
    @test.idempotent_id('f29d21f3-ed75-47cf-8cdc-ef1bdeb4c674')
    def test_software_deployment_create_validate(self):
        """Testing software deployment was created as expected."""
        # Asserting that all fields were created
        self.assert_fields_in_dict(
            self.deployment['software_deployment'], 'action', 'config_id',
            'id', 'input_values', 'output_values', 'server_id', 'status',
            'status_reason')
        # Testing get for this deployment and verifying parameters
        self._validate_deployment(self.action, self.status,
                                  self.status_reason, self.configs[0]['id'])

    @test.attr(type='smoke')
    @test.idempotent_id('2ac43ab3-34f2-415d-be2e-eabb4d14ee32')
    def test_software_deployment_update_no_metadata_change(self):
        """Testing software deployment update without metadata change."""
        metadata = self.client.show_software_deployment_metadata(
            self.server_id)
        # Updating values without changing the configuration ID
        new_action = 'ACTION_1'
        new_status = 'STATUS_1'
        new_reason = 'REASON_1'
        self.client.update_software_deploy(
            self.deployment_id, self.server_id, self.configs[0]['id'],
            new_action, new_status, self.input_values, self.output_values,
            new_reason, self.signal_transport)
        # Verifying get and that the deployment was updated as expected
        self._validate_deployment(new_action, new_status,
                                  new_reason, self.configs[0]['id'])
        # Metadata should not be changed at this point
        test_metadata = self.client.show_software_deployment_metadata(
            self.server_id)
        for key in metadata['metadata'][0]:
            self.assertEqual(
                metadata['metadata'][0][key],
                test_metadata['metadata'][0][key])

    @test.attr(type='smoke')
    @test.idempotent_id('92c48944-d79d-4595-a840-8e1a581c1a72')
    def test_software_deployment_update_with_metadata_change(self):
        """Testing software deployment update with metadata change."""
        metadata = self.client.show_software_deployment_metadata(
            self.server_id)
        self.client.update_software_deploy(
            self.deployment_id, self.server_id, self.configs[1]['id'],
            self.action, self.status, self.input_values,
            self.output_values, self.status_reason, self.signal_transport)
        self._validate_deployment(self.action, self.status,
                                  self.status_reason, self.configs[1]['id'])
        # Metadata should now be changed
        new_metadata = self.client.show_software_deployment_metadata(
            self.server_id)
        # Its enough to test the ID in this case
        meta_id = metadata['metadata'][0]['id']
        test_id = new_metadata['metadata'][0]['id']
        self.assertNotEqual(meta_id, test_id)
|
yspanchal/storm-example | petrel/petrel/generated/storm/Nimbus.py | Python | bsd-3-clause | 98,735 | 0.016104 | #
# Autogenerated by Thrift Compiler (0.9.1)
#
# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
#
# options string: py
#
from thrift.Thrift import TType, TMessageType, TException, TApplicationException
from ttypes import *
from thrift.Thrift import TProcessor
from thrift.transport import TTransport
from thrift.protocol import TBinaryProtocol, TProtocol
try:
from thrift.protocol import fastbinary
except:
fastbinary = None
class Iface:
  """Abstract interface of the Storm Nimbus Thrift service.

  Autogenerated by the Thrift compiler (see file header: do not edit).
  Each method is a stub that documents an RPC's signature; the generated
  `Client` class below implements them over a Thrift transport.
  """
  def submitTopology(self, name, uploadedJarLocation, jsonConf, topology):
    """
    Parameters:
     - name
     - uploadedJarLocation
     - jsonConf
     - topology
    """
    pass

  def submitTopologyWithOpts(self, name, uploadedJarLocation, jsonConf, topology, options):
    """
    Parameters:
     - name
     - uploadedJarLocation
     - jsonConf
     - topology
     - options
    """
    pass

  def killTopology(self, name):
    """
    Parameters:
     - name
    """
    pass

  def killTopologyWithOpts(self, name, options):
    """
    Parameters:
     - name
     - options
    """
    pass

  def activate(self, name):
    """
    Parameters:
     - name
    """
    pass

  def deactivate(self, name):
    """
    Parameters:
     - name
    """
    pass

  def rebalance(self, name, options):
    """
    Parameters:
     - name
     - options
    """
    pass

  def beginFileUpload(self):
    pass

  def uploadChunk(self, location, chunk):
    """
    Parameters:
     - location
     - chunk
    """
    pass

  def finishFileUpload(self, location):
    """
    Parameters:
     - location
    """
    pass

  def beginFileDownload(self, file):
    """
    Parameters:
     - file
    """
    pass

  def downloadChunk(self, id):
    """
    Parameters:
     - id
    """
    pass

  def getNimbusConf(self):
    pass

  def getClusterInfo(self):
    pass

  def getTopologyInfo(self, id):
    """
    Parameters:
     - id
    """
    pass

  def getTopologyConf(self, id):
    """
    Parameters:
     - id
    """
    pass

  def getTopology(self, id):
    """
    Parameters:
     - id
    """
    pass

  def getUserTopology(self, id):
    """
    Parameters:
     - id
    """
    pass
class Client(Iface):
def __init__(self, iprot, oprot=None):
self._iprot = self._oprot = iprot
if oprot is not None:
self._oprot = oprot
self._seqid = 0
def submitTopology(self, name, uploadedJarLocation, jsonConf, topology):
"""
Parameters:
- name
- uploadedJarLocation
- jsonConf
- topology
"""
self.send_submitTopology(name, uploadedJarLocation, jsonConf, topology)
self.recv_submitTopology()
def send_submitTopology(self, name, uploadedJarLocation, jsonConf, topology):
self._oprot.writeMessageBegin('submitTopology', TMessageType.CALL, self._seqid)
args = submitTopology_args()
args.name = name
args.uploadedJarLocation = uploadedJarLocation
args.jsonConf = jsonConf
args.topology = topology
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_submitTopology(self):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = submitTopology_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.e is not None:
raise result.e
if result.ite is not None:
raise result.ite
return
def submitTopologyWithOpts(self, name, uploadedJarLocation, jsonConf, topology, options):
"""
Parameters:
- name
- uploadedJarLocation
- jsonConf
- topology
- options
"""
self.send_submitTopologyWithOpts(name, uploadedJarLocation, jsonConf, topology, options)
self.recv_submitTopologyWithOpts()
def send_submitTopologyWithOpts(self, name, uploadedJarLocation, jsonConf, topology, options):
self._oprot.writeMessageBegin('submitTopologyWithOpts', TMessageType.CALL, self._seqid)
args = submitTopologyWithOpts_args()
args.name = name
args.uploadedJarLocation = uploadedJarLocation
args.jsonConf = jsonConf
args.topology = topology
args.options = options
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_submitTopologyWithOpts(self):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = submitTopologyWithOpts_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.e is not None:
raise result.e
if result.ite is not None:
raise result.ite
return
def killTopology(self, name):
"""
Parameters:
- name
"""
self.send_killTopology(name)
self.recv_killTopology()
def send_killTopology(self, name):
self._oprot.writeMessageBegin('killTopology', TMessageType.CALL, self._seqid)
args = killTopology_args()
args.name = name
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_killTopology(self):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = killTopology_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.e is not None:
raise result.e
return
def killTopologyWithOpts(self, name, options):
"""
Parameters:
- name
- options
"""
self.send_killTopologyWithOpts(name, options)
self.recv_killTopologyWithOpts()
def send_killTopologyWithOpts(self, name, options):
self._oprot.writeMessageBegin('killTopologyWithOpts', TMessageType.CALL, self._seqid)
args = killTopologyWithOpts_args()
args.name = name
args.options = options
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_killTopologyWithOpts(self):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = killTopologyWithOpts_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.e is not None:
raise result.e
return
def activate(self, name):
"""
Parameters:
- name
"""
self.send_activate(name)
self.recv_activate()
def send_activate(self, name):
self._oprot.writeMessageBegin('activate', TMessageType.CALL, self._seqid)
args = activate_args()
args.name = name
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_activate(self):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TAp | plicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = activate_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.e is not None:
raise result.e
return
def deactivate(self, name):
"""
Parameters:
- name
"""
self.send | _deactivate(name)
self.recv_deactivate()
def send_deactivate(self, name):
self._oprot.writeMessageBegin('deactivate', TMessageType.CALL, self._seqid)
args = deactivate_args()
args.name = name
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_deactivate(self):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = deactivate_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.e is not None:
raise result.e
return
def rebalance(self, name, options):
"""
Parameters:
- n |
OpenUpSA/contact-mps | manage.py | Python | mit | 253 | 0 | #!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
    # Default the settings module so the project runs without extra setup;
    # an explicitly exported DJANGO_SETTINGS_MODULE still takes precedence.
    # (Repairs the dataset-garbled "DJANGO_SETTINGS | _MODULE" literal.)
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "contactmps.settings")

    # Imported lazily, after the env var is set, per standard manage.py layout.
    from django.core.management import execute_from_command_line

    execute_from_command_line(sys.argv)
|
SchrodingersGat/kicad-footprint-generator | scripts/Connector/Connector_Harwin/conn_harwin_m20-781xx45_smd_top_dual_row.py | Python | gpl-3.0 | 9,793 | 0.008475 | #!/usr/bin/env python3
'''
kicad-footprint-generator is free software: you can redistribute it and/or
modify it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
kicad-footprint-generator is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with kicad-footprint-generator. If not, see < http://www.gnu.org/licenses/ >.
'''
import sys
import os
#sys.path.append(os.path.join(sys.path[0],"..","..","kicad_mod")) # load kicad_mod path
# export PYTHONPATH="${PYTHONPATH}<path to kicad-footprint-generator directory>"
sys.path.append(os.path.join(sys.path[0], "..", "..", "..")) # load parent path of KicadModTree
from math import sqrt
import argparse
import yaml
#from helpers import *
from KicadModTree import *
sys.path.append(os.path.join(sys.path[0], "..", "..", "tools")) # load parent path of tools
from footprint_text_fields import addTextFields
# Part/series identification used to build footprint names and descriptions.
series = 'M20'
series_long = 'Female Vertical Surface Mount Double Row 2.54mm (0.1 inch) Pitch PCB Connector'
manufacturer = 'Harwin'
datasheet = 'https://cdn.harwin.com/pdfs/M20-781.pdf'
# https://cdn.harwin.com/pdfs/Harwin_Product_Catalog_page_225.pdf
# Ordering-code template; {n:02} is filled with the zero-padded pin count per row.
pn = 'M20-781{n:02}45'
number_of_rows = 2
orientation = 'V'  # vertical entry (mapped via configuration['orientation_options'])
# Dimensions below are in millimetres (KiCad's native footprint unit).
pitch = 2.54
peg_drill_tht = 1.02  # drill for the per-pin locating peg holes (placed as NPTH pads)
mount_drill = 1.8  # drill for the central mounting holes (NPTH)
pad_size = [1.78, 1.02]  # SMD pad size [x, y]
# Pins-per-row counts this generator produces footprints for.
pincount_range = [2, 3, 4, 5, 6, 7, 8, 9, 10, 12, 15, 20]
def generate_footprint(pins, configuration):
mpn = pn.format(n=pins)
pins_per_row = pins
# handle arguments
orientation_str = configuration['orientation_options'][orientation]
footprint_name = configuration['fp_name_format_string'].format(man=manufacturer,
series=series,
mpn=mpn, num_rows=number_of_rows, pins_per_row=pins_per_row, mounting_pad = "",
pitch=pitch, orientation=orientation_str)
kicad_mod = Footprint(footprint_name)
kicad_mod.setDescription("Harwin {:s}, {:s}, {:d} Pins per row ({:s}), generated with kicad-footprint-generator".format(series_long, mpn, pins_per_row, datasheet))
kicad_mod.setTags(configuration['keyword_fp_string'].format(series=series,
orientation=orientation_str, man=manufacturer,
entry=configuration['entry_direction'][orientation]))
kicad_mod.setAttribute('smd')
########################## Dimensions ##############################
A = 2.54 * pins
B = 2.54 * (pins-1)
C = B - 2.54
body_edge={
'left': -2.54,
'right': 2.54,
'top': -A/2,
'bottom': A/2
}
############################# Pads ##################################
#
# Mount Pegs
#
if pins == 2:
kicad_mod.append(Pad(at=[0, 0], number="",
type=Pad.TYPE_NPTH, shape=Pad.SHAPE_CIRCLE, size=mount_drill,
drill=mount_drill, layers=Pad.LAYERS_NPTH))
else:
kicad_mod.append(Pad(at=[0, -C/2], number="",
type=Pad.TYPE_NPTH, shape=Pad.SHAPE_CIRCLE, size=mount_drill,
drill=mount_drill, layers=Pad.LAYERS_NPTH))
kicad_mod.append(Pad(at=[0, C/2], number="",
type=Pad.TYPE_NPTH, shape=Pad.SHAPE_CIRCLE, size=mount_drill,
drill=mount_drill, layers=Pad.LAYERS_NPTH))
#
# THT Pegs
#
kicad_mod.append(PadArray(start=[-1.27, -B/2], initial="", pincount=pins,
y_spacing=pitch, type=Pad.TYPE_NPTH, shape=Pad.SHAPE_CIRCLE,
size=peg_drill_tht, drill=peg_drill_tht, layers=Pad.LAYERS_NPTH))
kicad_mod.append(PadArray(start=[1.27, -B/2], initial="", pincount=pins,
y_spacing=pitch, type=Pad.TYPE_NPTH, shape=Pad.SHAPE_CIRCLE,
size=peg_drill_tht, drill=peg_drill_tht, layers=Pad.LAYERS_NPTH))
#
# Add pads
#
kicad_mod.append(PadArray(start=[-2.91, -B/2], initial=1,
pincount=pins, increment=1, y_spacing=pitch, size=pad_size,
type=Pad.TYPE_SMT, shape=Pad.SHAPE_RECT, layers=Pad.LAYERS_SMT))
kicad_mod.append(PadArray(start=[2.91, -B/2], initial=pins+1,
pincount=pins, increment=1, y_spacing=pitch, size=pad_size,
type=Pad.TYPE_SMT, shape=Pad.SHAPE_RECT, layers=Pad.LAYERS_SMT))
######################## Fabrication Layer ###########################
main_body_poly= [
{'x': body_edge['left'], 'y': body_edge['top']},
{'x': body_edge['right'], 'y': body_edge['top']},
{'x': body_edge['right'], 'y': body_edge['bottom']},
{'x': body_edge['left'], 'y': body_edge['bottom']},
{'x': body_edge['left'], 'y': body_edge['top']}
]
kicad_mod.append(PolygoneLine(polygone=main_body_poly,
width=configuration['fab_line_width'], layer="F.Fab"))
main_arrow_poly= [
{'x': -2.54, 'y': body_edge['top'] + 1.27 - .4},
{'x': -1.9, 'y': body_edge['top'] + 1.27},
{'x': -2.54, 'y': body_edge['top'] + 1.27 + .4},
]
kicad_mod.append(PolygoneLine(polygone=main_arrow_poly,
width=configuration['fab_line_width'], layer="F.Fab"))
######################## SilkS Layer ###########################
poly_s_top= [
{'x': body_edge['left'] - configuration['silk_fab_offset'], 'y': body_edge['top'] - configuration['silk_fab_offset'] + .7},
{'x': body_edge['left'] - configuration['silk_fab_offset'], 'y': body_edge['top'] - configuration['silk_fab_offset']},
{'x': body_edge['right'] + configuration['silk_fab_offset'], 'y': body_edge['top'] - configuration['silk_fab_offset']},
{'x': body_edge['right'] + configuration['silk_fab_offset'], 'y': body_edge['top'] - configuration['silk_fab_offset'] + .7},
]
kicad_mod.append(PolygoneLine(polygone=poly_s_top,
width=configuration['silk_line_width'], layer="F.SilkS"))
poly_s_bot= [
{'x': body_edge['left'] - configuration['silk_fab_offset'], 'y': body_edge['bottom'] + configuration['silk_fab_offset'] - .7},
{'x': body_edge['left'] - configuration['silk_fab_offset'], 'y': body_edge['bottom'] + configuration['silk_fab_offset']},
{'x': body_edge['right'] + configuration['silk_fab_offset'], 'y': body_edge['bottom'] + configuration['silk_fab_offset']},
{'x': body_edge['right'] + configuration['silk_fab_offset'], 'y': body_edge['bottom'] + configuration['silk_fab_offset'] - .7},
]
kicad_mod.append(PolygoneLine(polygone=poly_s_bot,
width=configuration['silk_line_width'], layer="F.SilkS"))
| ######################## CrtYd Layer ###########################
CrtYd_offset = configuration['courtyard_offset']['connector']
CrtYd_grid = configuration['courtyard_grid']
poly_yd = [
{'x': -3.8 - | CrtYd_offset, 'y': body_edge['top'] - CrtYd_offset},
{'x': 3.8 + CrtYd_offset, 'y': body_edge['top'] - CrtYd_offset},
{'x': 3.8 + CrtYd_offset, 'y': body_edge['bottom'] + CrtYd_offset},
{'x': -3.8 - CrtYd_offset, 'y': body_edge['bottom'] + CrtYd_offset},
{'x': -3.8 - CrtYd_offset, 'y': body_edge['top'] - CrtYd_offset}
]
kicad_mod.append(PolygoneLine(polygone=poly_yd,
layer='F.CrtYd', width=configuration['courtyard_line_width']))
######################### Text Fields ###############################
cy1 = body_edge['top'] - configuration['courtyard_offset']['connector']
cy2 = body_edge['bottom'] + configuration['courtyard_offset']['connector'] + 0.2
addTextFields(kicad_mod=kicad_mod, configuration=configuration, body_edges=body_edge,
courtyard={'top':cy1, 'bottom':cy2}, fp_name=footprint_name, text_y_inside_position='top')
##################### Write to File and 3D ############################
model3d_path_prefix = configuration.get('3d_model_prefix','${KISYS3DMOD}/')
lib_name = configuration['lib_name_format_string'].format(series=series, man=manufacturer)
model_name = '{model3d_path_prefix:s}{lib_name:s}.3dshapes/{fp_name:s}.wrl'.format(
model3d_path_prefix=model3d_path_prefix, lib_name=lib_name, fp_name=fo |
SHS-ComputerScience/A-Level_2016-18 | 2. Exemplars/Factorial_Finder/factorial.py | Python | gpl-3.0 | 853 | 0.007034 | """ Factorial Finder
Name: Mr Gorman
Date: 01/12/2016
"""
def find_factorial_loop(n):
    """Compute n! iteratively, as the product 1 * 2 * ... * n.

    For more info on range:
    https://docs.python.org/3/library/stdtypes.html#range
    """
    result = 1
    # Counting up from 2 gives the same product as the descending loop.
    for factor in range(2, n + 1):
        result *= factor
    return result
def find_factorial_recursive(n):
    """Compute n! recursively via n! = n * (n - 1)!, with 0! = 1.

    For more info on recursion limit:
    https://docs.python.org/3/library/sys.html#sys.getrecursionlimit
    """
    return 1 if n == 0 else n * find_factorial_recursive(n - 1)
def main():
    """Demonstrate both factorial implementations on a sample number."""
    number = 5
    print(find_factorial_loop(number))
    print(find_factorial_recursive(number))


if __name__ == "__main__":
    # Repairs the dataset-garbled "| main()" guard body.
    main()
ESOedX/edx-platform | openedx/core/djangoapps/site_configuration/context_processors.py | Python | agpl-3.0 | 626 | 0.003195 | """
Django template context processors.
"""
from __future__ import absolute_import

# Repairs the dataset-garbled import lines ("from | django.conf ...",
# "...site_configuratio | n import ..."); names match the usage in
# configuration_context below.
from django.conf import settings
from django.utils.http import urlquote_plus

from openedx.core.djangoapps.site_configuration import helpers as configuration_helpers
def configuration_context(request):
    """Context processor exposing site-configuration values to Django templates.

    Provides the (possibly site-overridden) platform name plus the URL-quoted
    absolute URLs of the current page and of the site root.
    """
    platform_name = configuration_helpers.get_value('platform_name', settings.PLATFORM_NAME)
    current_url = urlquote_plus(request.build_absolute_uri(request.path))
    current_site_url = urlquote_plus(request.build_absolute_uri('/'))
    return {
        'platform_name': platform_name,
        'current_url': current_url,
        'current_site_url': current_site_url,
    }
hfaran/slack-export-viewer | slackviewer/user.py | Python | mit | 2,188 | 0.002285 | # User info wrapper object
import logging
class User(object):
    """
    Wrapper object around an entry in users.json. Behaves like a read-only dictionary if
    asked, but adds some useful logic to decouple the front end from the JSON structure.
    """

    # Keys tried, in order, when looking for a human-friendly name.
    _NAME_KEYS = ["display_name", "real_name"]
    # Profile image key used when no pixel size is requested (or not found).
    _DEFAULT_IMAGE_KEY = "image_512"

    def __init__(self, raw_data):
        """Wrap one raw user entry (a dict parsed from users.json).

        Repairs the dataset-garbled "__init_ | _" method name.
        """
        self._raw = raw_data

    def __getitem__(self, key):
        """Dictionary-style read access to the underlying raw entry."""
        return self._raw[key]

    @property
    def display_name(self):
        """
        Find the most appropriate display name for a user: look for a "display_name", then
        a "real_name", and finally fall back to the always-present "name".
        """
        for k in self._NAME_KEYS:
            # Each key is checked at the top level first, then inside "profile".
            if self._raw.get(k):
                return self._raw[k]
            if "profile" in self._raw and self._raw["profile"].get(k):
                return self._raw["profile"][k]
        return self._raw["name"]

    @property
    def email(self):
        """
        Shortcut property for finding the e-mail address or bot URL.
        """
        if "profile" in self._raw:
            email = self._raw["profile"].get("email")
        elif "bot_url" in self._raw:
            email = self._raw["bot_url"]
        else:
            email = None
        if not email:
            logging.debug("No email found for %s", self._raw.get("name"))
        return email

    def image_url(self, pixel_size=None):
        """
        Get the URL for the user icon in the desired pixel size, if it exists. If no
        size is supplied, give the URL for the full-size image.
        """
        if "profile" not in self._raw:
            return
        profile = self._raw["profile"]
        if pixel_size:
            img_key = "image_%s" % pixel_size
            if img_key in profile:
                return profile[img_key]
        # Fall back to the full-size image when the requested size is absent.
        return profile[self._DEFAULT_IMAGE_KEY]
def deleted_user(id):
    """
    Create a User object for a deleted user.
    """
    # Synthesize the minimal raw entry a deleted account would have.
    return User({
        "id": id,
        "name": "deleted-" + id,
        "deleted": True,
        "is_bot": False,
        "is_app_user": False,
    })
|
yippeecw/sfa | sfa/managers/slice_manager.py | Python | mit | 23,958 | 0.008598 | import sys
import time
import traceback
from StringIO import StringIO
from copy import copy
from lxml import etree
from sfa.trust.sfaticket import SfaTicket
from sfa.trust.credential import Credential
from sfa.util.sfalogging import logger
from sfa.util.xrn import Xrn, urn_to_hrn
from sfa.util.version import version_core
from sfa.util.callids import Callids
from sfa.util.cache import Cache
from sfa.client.multiclient import MultiClient
from sfa.rspecs.rspec_converter import RSpecConverter
from sfa.rspecs.version_manager import VersionManager
from sfa.rspecs.rspec import RSpec
from sfa.client.client_helper import sfa_to_pg_users_arg
from sfa.client.return_value import ReturnValue
class SliceManager:
# the cache instance is a class member so it survives across incoming requests
cache = None
def __init__ (self, config):
    """Enable the shared result cache when SFA_SM_CACHING is set in *config*.

    The cache is a class attribute so it survives across incoming requests;
    it is created lazily on first construction.
    """
    self.cache=None
    if config.SFA_SM_CACHING:
        if SliceManager.cache is None:
            SliceManager.cache = Cache()
        self.cache = SliceManager.cache
def GetVersion(self, api, options):
    """Assemble the GetVersion reply for this slice manager.

    Reports the peer aggregates (from aggregates.xml), the supported
    credential types and RSpec versions, layered over version_core().
    """
    # peers explicitly in aggregates.xml
    peers =dict ([ (peername,interface.get_url()) for (peername,interface) in api.aggregates.iteritems()
                   if peername != api.hrn])
    version_manager = VersionManager()
    ad_rspec_versions = []
    request_rspec_versions = []
    # advertise the two most recent geni_sfa credential versions (2 and 3)
    cred_types = [{'geni_type': 'geni_sfa', 'geni_version': str(i)} for i in range(4)[-2:]]
    for rspec_version in version_manager.versions:
        if rspec_version.content_type in ['*', 'ad']:
            ad_rspec_versions.append(rspec_version.to_dict())
        if rspec_version.content_type in ['*', 'request']:
            request_rspec_versions.append(rspec_version.to_dict())
    # NOTE(review): ad_rspec_versions / request_rspec_versions are built but
    # never added to version_more below — confirm whether that is intentional.
    xrn=Xrn(api.hrn, 'authority+sm')
    version_more = {
        'interface':'slicemgr',
        'sfa': 2,
        'geni_api': 3,
        'geni_api_versions': {'3': 'http://%s:%s' % (api.config.SFA_SM_HOST, api.config.SFA_SM_PORT)},
        'hrn' : xrn.get_hrn(),
        'urn' : xrn.get_urn(),
        'peers': peers,
        'geni_single_allocation': 0, # Accept operations that act on as subset of slivers in a given state.
        'geni_allocate': 'geni_many',# Multiple slivers can exist and be incrementally added, including those which connect or overlap in some way.
        'geni_credential_types': cred_types,
    }
    sm_version=version_core(version_more)
    # local aggregate if present needs to have localhost resolved
    if api.hrn in api.aggregates:
        local_am_url=api.aggregates[api.hrn].get_url()
        sm_version['peers'][api.hrn]=local_am_url.replace('localhost',sm_version['hostname'])
    return sm_version
def drop_slicemgr_stats(self, rspec):
    """Strip any <statistics> elements previously injected into *rspec*.

    Best-effort cleanup: failures are logged as warnings and ignored.
    """
    try:
        stats_elements = rspec.xml.xpath('//statistics')
        for node in stats_elements:
            node.getparent().remove(node)
    except Exception, e:
        logger.warn("drop_slicemgr_stats failed: %s " % (str(e)))
def add_slicemgr_stat(self, rspec, callname, aggname, elapsed, status, exc_info=None):
    """Record per-aggregate timing/status for *callname* inside *rspec*.

    Appends an <aggregate> element under a <statistics call="..."> element
    (creating the container on first use).  When *exc_info* is given, the
    traceback frames are nested as <tb_frame> elements.  Best-effort:
    failures are logged as warnings and ignored.
    """
    try:
        stats_tags = rspec.xml.xpath('//statistics[@call="%s"]' % callname)
        if stats_tags:
            stats_tag = stats_tags[0]
        else:
            # first stat recorded for this call: create the container element
            stats_tag = rspec.xml.root.add_element("statistics", call=callname)
        stat_tag = stats_tag.add_element("aggregate", name=str(aggname),
                                         elapsed=str(elapsed), status=str(status))
        if exc_info:
            exc_tag = stat_tag.add_element("exc_info", name=str(exc_info[1]))
            # formats the traceback as one big text blob
            #exc_tag.text = "\n".join(traceback.format_exception(exc_info[0], exc_info[1], exc_info[2]))
            # formats the traceback as a set of xml elements
            tb = traceback.extract_tb(exc_info[2])
            for item in tb:
                exc_frame = exc_tag.add_element("tb_frame", filename=str(item[0]),
                                                line=str(item[1]), func=str(item[2]), code=str(item[3]))
    except Exception, e:
        logger.warn("add_slicemgr_stat failed on %s: %s" %(aggname, str(e)))
def ListResources(self, api, creds, options):
call_id = options.get('call_id')
if Callids().already_handled(call_id): return ""
version_manager = VersionManager()
def _ListResources(aggregate, server, credential, options):
forward_options = copy(options)
tStart = time.time()
try:
version = api.get_cached_server_version(server)
# force ProtoGENI aggregates to give us a v2 RSpec
forward_options['geni_rspec_version'] = options.get('geni_rspec_version')
result = server.ListResources(credential, forward_options)
return {"aggregate": aggregate, "result": result, | "elapsed": time.time()-tStart, "status": "success"}
except Exception, e:
api.logger.log_exc("ListResources failed at %s" %(server.url))
return {"aggregate": aggregate, "elapsed": time.time()-tStart, "status": "exception", "exc_info": sys.exc_info()}
# get slice's hrn from options
xrn = options.get('geni_slice_urn', '')
| (hrn, type) = urn_to_hrn(xrn)
if 'geni_compressed' in options:
del(options['geni_compressed'])
# get the rspec's return format from options
rspec_version = version_manager.get_version(options.get('geni_rspec_version'))
version_string = "rspec_%s" % (rspec_version)
# look in cache first
cached_requested = options.get('cached', True)
if not xrn and self.cache and cached_requested:
rspec = self.cache.get(version_string)
if rspec:
api.logger.debug("SliceManager.ListResources returns cached advertisement")
return rspec
# get the callers hrn
valid_cred = api.auth.checkCredentials(creds, 'listnodes', hrn)[0]
caller_hrn = Credential(cred=valid_cred).get_gid_caller().get_hrn()
# attempt to use delegated credential first
cred = api.getDelegatedCredential(creds)
if not cred:
cred = api.getCredential()
multiclient = MultiClient()
for aggregate in api.aggregates:
# prevent infinite loop. Dont send request back to caller
# unless the caller is the aggregate's SM
if caller_hrn == aggregate and aggregate != api.hrn:
continue
# get the rspec from the aggregate
interface = api.aggregates[aggregate]
server = api.server_proxy(interface, cred)
multiclient.run(_ListResources, aggregate, server, [cred], options)
results = multiclient.get_results()
rspec_version = version_manager.get_version(options.get('geni_rspec_version'))
if xrn:
result_version = version_manager._get_version(rspec_version.type, rspec_version.version, 'manifest')
else:
result_version = version_manager._get_version(rspec_version.type, rspec_version.version, 'ad')
rspec = RSpec(version=result_version)
for result in results:
self.add_slicemgr_stat(rspec, "ListResources", result["aggregate"], result["elapsed"],
result["status"], result.get("exc_info",None))
if result["status"]=="success":
res = result['result']['value']
try:
rspec.version.merge(ReturnValue.get_value(res))
except:
api.logger.log_exc("SM.ListResources: Failed to merge aggregate rspec")
# cache the result
if self.cache and not xrn:
api.logger.debug("SliceManager.ListResources caches advertisement")
self.cache.add |
VirusTotal/content | Packs/CortexXDR/Integrations/XDR_iocs/XDR_iocs.py | Python | mit | 17,908 | 0.00201 | import demistomock as demisto
from CommonServerPython import *
from CommonServerUserPython import *
import hashlib
import secrets
import string
import tempfile
from datetime import timezone
from typing import Dict, Optional, List, Tuple, Union
from dateparser import parse
from urllib3 import disable_warnings
from math import ceil
disable_warnings()  # silence urllib3 insecure-request warnings (cert verification may be off)

# Timestamp format used when exchanging times with the XDR API.
DEMISTO_TIME_FORMAT: str = '%Y-%m-%dT%H:%M:%SZ'

# XDR indicator types mapped to their XSOAR (Demisto) indicator types.
xdr_types_to_demisto: Dict = {
    "DOMAIN_NAME": 'Domain',
    "HASH": 'File',
    "IP": 'IP'
}

# XDR reputation verdicts expressed as XSOAR DBot scores (1=Good .. 3=Bad).
xdr_reputation_to_demisto: Dict = {
    'GOOD': 1,
    'SUSPICIOUS': 2,
    'BAD': 3
}

# Inverse mapping: XSOAR DBot scores back to XDR reputation verdicts.
demisto_score_to_xdr: Dict[int, str] = {
    1: 'GOOD',
    2: 'SUSPICIOUS',
    3: 'BAD'
}
class Client:
    """Thin HTTP client for the Cortex XDR public indicators API."""

    # Class-level defaults; presumably overridden from integration params
    # elsewhere in the module — confirm against the configuration code.
    severity: str = ''
    query: str = 'reputation:Bad and (type:File or type:Domain or type:IP)'
    tag = 'Cortex XDR'
    tlp_color = None
    # Human-readable messages for XDR status codes translated into errors.
    error_codes: Dict[int, str] = {
        500: 'XDR internal server error.',
        401: 'Unauthorized access. An issue occurred during authentication. This can indicate an '  # noqa: W504
             + 'incorrect key, id, or other invalid authentication parameters.',
        402: 'Unauthorized access. User does not have the required license type to run this API.',
        403: 'Unauthorized access. The provided API key does not have the required RBAC permissions to run this API.',
        404: 'XDR Not found: The provided URL may not be of an active XDR server.',
        413: 'Request entity too large. Please reach out to the XDR support team.'
    }

    def __init__(self, params: Dict):
        """Store connection settings and configure the proxy from *params*."""
        self._base_url: str = urljoin(params.get('url'), '/public_api/v1/indicators/')
        self._verify_cert: bool = not params.get('insecure', False)
        self._params = params
        handle_proxy()

    def http_request(self, url_suffix: str, requests_kwargs=None) -> Dict:
        """POST to *url_suffix* under the base URL and return the parsed JSON.

        Raises DemistoException for known XDR error status codes, or when
        the response body is not valid JSON.
        """
        if requests_kwargs is None:
            requests_kwargs = dict()
        res = requests.post(url=self._base_url + url_suffix,
                            verify=self._verify_cert,
                            headers=self._headers,
                            **requests_kwargs)
        if res.status_code in self.error_codes:
            raise DemistoException(self.error_codes[res.status_code], res=res)
        try:
            return res.json()
        except json.decoder.JSONDecodeError as e:
            raise DemistoException(f'Could not parse json out of {res.content.decode()}', exception=e, res=res)

    @property
    def _headers(self):
        # the header should be calculated at most 5 min before the request fired
        return get_headers(self._params)
def get_headers(params: Dict) -> Dict:
    """Build the advanced-authentication headers for an XDR API request.

    The Authorization value is the SHA-256 hex digest of
    api_key + nonce + timestamp, where the nonce is a fresh 64-character
    random string and the timestamp is whole seconds expressed in ms.
    """
    api_key = str(params.get('apikey'))
    api_key_id = str(params.get('apikey_id'))
    alphabet = string.ascii_letters + string.digits
    nonce = "".join(secrets.choice(alphabet) for _ in range(64))
    timestamp = str(int(datetime.now(timezone.utc).timestamp()) * 1000)
    auth_material = ("%s%s%s" % (api_key, nonce, timestamp)).encode("utf-8")
    return {
        "x-xdr-timestamp": timestamp,
        "x-xdr-nonce": nonce,
        "x-xdr-auth-id": str(api_key_id),
        "Authorization": hashlib.sha256(auth_material).hexdigest(),
        "x-iocs-source": "xsoar",
    }
def get_requests_kwargs(_json=None, file_path: Optional[str] = None) -> Dict:
    """Translate either a JSON payload or a file path into requests kwargs.

    Exactly one of *_json* / *file_path* is expected; with neither,
    an empty kwargs dict is returned.
    """
    if _json is not None:
        return {'data': json.dumps({"request_data": _json})}
    if file_path is not None:
        # NOTE(review): the handle is handed to requests for streaming and is
        # only closed when garbage-collected — confirm this is acceptable.
        return {'files': [('file', ('iocs.json', open(file_path, 'rb'), 'application/json'))]}
    return {}
def prepare_get_changes(time_stamp: int) -> Tuple[str, Dict]:
    """Build the (url_suffix, body) pair for the get_changes endpoint."""
    return 'get_changes', {'last_update_ts': time_stamp}
def prepare_enable_iocs(iocs: str) -> Tuple[str, List]:
    """Build the (url_suffix, body) pair for enabling the given indicators."""
    return 'enable_iocs', argToList(iocs)
def prepare_disable_iocs(iocs: str) -> Tuple[str, List]:
    """Build the (url_suffix, body) pair for disabling the given indicators."""
    return 'disable_iocs', argToList(iocs)
def create_file_iocs_to_keep(file_path, batch_size: int = 200):
    """Append the value of every active indicator to *file_path*, one per line.

    Indicators are fetched page by page (*batch_size* per page).
    """
    with open(file_path, 'a') as out:
        total = get_iocs_size()
        for page in range(ceil(total / batch_size)):
            for ioc in get_iocs(page=page, size=batch_size):
                out.write(ioc.get('value', '') + '\n')
def create_file_sync(file_path, batch_size: int = 200):
    """Append the XDR-formatted JSON of every active indicator to *file_path*.

    Indicators that cannot be converted (falsy result) are skipped.
    """
    with open(file_path, 'a') as out:
        total = get_iocs_size()
        for page in range(ceil(total / batch_size)):
            for ioc in get_iocs(page=page, size=batch_size):
                xdr_ioc = demisto_ioc_to_xdr(ioc)
                if xdr_ioc:
                    out.write(json.dumps(xdr_ioc) + '\n')
def get_iocs_size(query=None) -> int:
    """Count the active indicators matching *query* (defaults to Client.query)."""
    effective = query or Client.query
    full_query = f'expirationStatus:active AND ({effective})'
    searcher = IndicatorsSearcher()
    return searcher.search_indicators_by_version(query=full_query, size=1).get('total', 0)
def get_iocs(page=0, size=200, query=None) -> List:
    """Fetch one page of active indicators matching *query* (defaults to Client.query)."""
    effective = query or Client.query
    full_query = f'expirationStatus:active AND ({effective})'
    searcher = IndicatorsSearcher(page=page)
    return searcher.search_indicators_by_version(query=full_query, size=size).get('iocs', [])
def demisto_expiration_to_xdr(expiration) -> int:
    """Convert an XSOAR expiration string to XDR epoch milliseconds.

    Returns -1 (never expires) for empty values, the '0001-...' sentinel,
    or anything dateparser cannot understand.
    """
    if expiration and not expiration.startswith('0001'):
        try:
            expiration_date = parse(expiration)
            # dateparser.parse returns None (it does not raise) on unparsable
            # input; the original code would crash with AttributeError on
            # None.astimezone — guard it so we fall through to -1 instead.
            if expiration_date is not None:
                return int(expiration_date.astimezone(timezone.utc).timestamp() * 1000)
        except ValueError:
            pass
    return -1
def demisto_reliability_to_xdr(reliability: str) -> str:
    """Map an XSOAR reliability string to its XDR one-letter grade.

    Uses the first character of the reliability string ('A - Completely
    reliable' -> 'A'); falls back to 'F' when the value is empty/None.
    """
    return reliability[0] if reliability else 'F'
def demisto_vendors_to_xdr(demisto_vendors) -> List[Dict]:
    """Convert XSOAR moduleToFeedMap entries into the XDR vendors structure.

    Entries with a falsy module id, reputation, or reliability are dropped.
    """
    vendors: List[Dict] = []
    for module_id, module_data in demisto_vendors.items():
        grade = demisto_reliability_to_xdr(module_data.get('reliability'))
        verdict = demisto_score_to_xdr.get(module_data.get('score'), 'UNKNOWN')
        if not (module_id and verdict and grade):
            continue
        vendors.append({
            'vendor_name': module_data.get('sourceBrand', module_id),
            'reputation': verdict,
            'reliability': grade,
        })
    return vendors
def demisto_types_to_xdr(_type: str) -> str:
    """Map an XSOAR indicator type name to the XDR type name.

    Any 'File*' type becomes 'HASH', 'Domain' becomes 'DOMAIN_NAME';
    everything else is just upper-cased.
    """
    upper = _type.upper()
    if upper.startswith('FILE'):
        return 'HASH'
    if upper == 'DOMAIN':
        return 'DOMAIN_NAME'
    return upper
def demisto_ioc_to_xdr(ioc: Dict) -> Dict:
try:
xdr_ioc: Dict = {
'indicator': ioc['value'],
'severity': Client.severity,
'type': demis | to_types_to_xdr(str(ioc['indicator_type'])),
'reputation': demisto_score_to_xdr.get(ioc.get('score', 0), 'UNKNOWN'),
'expiration_date': demisto_expiration_to_xdr(ioc.get('expiration'))
}
# get last 'IndicatorCommentRegular'
comment: Dict = next(filter(lambda x: x.get('type') == 'IndicatorCommentRegular', reversed(ioc.get('comments', []))), {})
if comment:
xdr_ioc['comment'] = comment.get('content')
if ioc.get('aggregatedReliability'):
xdr_ioc['reliability'] = ioc['aggregatedReliability'][0]
vendors = demisto_vendors_to_xdr(ioc.get('moduleToFeedMap', {}))
if vendors:
xdr_ioc['vendors'] = vendors
threat_type = ioc.get('CustomFields', {}).get('threattypes', {})
if threat_type:
threat_type = threat_type[0] if isinstance(threat_type, list) else threat_type
threat_type = threat_type.get('threatcategory')
if threat_type:
xdr_ioc['class'] = threat_type
if ioc.get('CustomFields', {}).get('xdrstatus') == 'disabled':
xdr_ioc['status'] = 'DISABLED'
return xdr_ioc
except KeyError as error:
demisto.debug(f'unexpected IOC format in key: {str(error)}, {str(ioc)}')
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.