| repo_name (string, 5-100 chars) | path (string, 4-231 chars) | language (string, 1 class) | license (string, 15 classes) | size (int64, 6-947k) | score (float64, 0-0.34) | prefix (string, 0-8.16k chars) | middle (string, 3-512 chars) | suffix (string, 0-8.17k chars) |
|---|---|---|---|---|---|---|---|---|
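The columns above describe a fill-in-the-middle (FIM) split of Python source files: each row carries repository metadata plus a prefix, the middle span to be predicted, and the surrounding suffix. A minimal loading sketch follows; the parquet file name and the FIM sentinel tokens are assumptions for illustration, not part of this table.

import pandas as pd

# Hypothetical export name; substitute whatever file actually holds this table.
df = pd.read_parquet("python_fim_samples.parquet")

row = df.iloc[0]
full_source = row["prefix"] + row["middle"] + row["suffix"]  # reassembled file text
print(row["repo_name"], row["path"], row["license"], len(full_source))

# One common way to format a row for FIM-style training; the sentinel tokens
# below are model-specific placeholders, not defined by this dataset.
fim_example = "<fim_prefix>%s<fim_suffix>%s<fim_middle>%s" % (
    row["prefix"], row["suffix"], row["middle"])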
MangoMangoDevelopment/neptune | lib/ros_comm-1.12.0/utilities/roswtf/src/roswtf/graph.py | Python | bsd-3-clause | 14,968 | 0.008485 |
# Software License Agreement (BSD License)
#
# Copyright (c) 2009, Willow Garage, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Willow Garage, Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# Revision $Id$
from __future__ import print_function
from __future__ import with_statement
import os
import itertools
import socket
import sys
import time
try:
from xmlrpc.client import ServerProxy
except ImportError:
from xmlrpclib import ServerProxy
import rospkg.environment
import rosgraph
import rosgraph.rosenv
import rosgraph.network
import rosnode
import rosservice
from roswtf.context import WtfException
from roswtf.environment import paths, is_executable
from roswtf.model import WtfWarning, WtfError
from roswtf.rules import warning_rule, error_rule
def _businfo(ctx, node, bus_info):
# [[connectionId1, destinationId1, direction1, transport1, ...]... ]
edges = []
for info in bus_info:
#connection_id = info[0]
dest_id = info[1]
if dest_id.startswith('http://'):
if dest_id in ctx.uri_node_map:
dest_id = ctx.uri_node_map[dest_id]
else:
dest_id = 'unknown (%s)'%dest_id
direction = info[2]
#transport = info[3]
topic = info[4]
if len(info) > 5:
connected = info[5]
else:
connected = True #backwards compatibility
if connected:
if direction == 'i':
edges.append((topic, dest_id, node))
elif direction == 'o':
edges.append((topic, node, dest_id))
elif direction == 'b':
print("cannot handle bidirectional edges", file=sys.stderr)
else:
raise Exception()
return edges
def unexpected_edges(ctx):
if not ctx.system_state or not ctx.nodes:
return
unexpected = set(ctx.actual_edges) - set(ctx.expected_edges)
return ["%s->%s (%s)"%(p, s, t) for (t, p, s) in unexpected]
def missing_edges(ctx):
if not ctx.system_state or not ctx.nodes:
return
missing = set(ctx.expected_edges) - set(ctx.actual_edges)
return ["%s->%s (%s)"%(p, s, t) for (t, p, s) in missing]
def ping_check(ctx):
if not ctx.system_state or not ctx.nodes:
return
_, unpinged = rosnode.rosnode_ping_all()
return unpinged
def simtime_check(ctx):
if ctx.use_sim_time:
master = rosgraph.Master('/roswtf')
try:
pubtopics = master.getPublishedTopics('/')
except rosgraph.MasterException:
ctx.errors.append(WtfError("Cannot talk to ROS master"))
raise WtfException("roswtf lost connection to the ROS Master at %s"%rosgraph.rosenv.get_master_uri())
for topic, _ in pubtopics:
if topic in ['/time', '/clock']:
return
return True
## contact each service and make sure it returns a header
def probe_all_services(ctx):
master = rosgraph.Master('/roswtf')
errors = []
for service_name in ctx.services:
try:
service_uri = master.lookupService(service_name)
except:
ctx.errors.append(WtfError("cannot contact ROS Master at %s"%rosgraph.rosenv.get_master_uri()))
raise WtfException("roswtf lost connection to the ROS Master at %s"%rosgraph.rosenv.get_master_uri())
try:
headers = rosservice.get_service_headers(service_name, service_uri)
if not headers:
errors.append("service [%s] did not return service headers"%service_name)
except rosgraph.network.ROSHandshakeException as e:
errors.append("service [%s] appears to be malfunctioning"%service_name)
except Exception as e:
errors.append("service [%s] appears to be malfunctioning: %s"%(service_name, e))
return errors
def unconnected_subscriptions(ctx):
ret = ''
whitelist = ['/reset_time']
if ctx.use_sim_time:
for sub, l in ctx.unconnected_subscriptions.items():
l = [t for t in l if t not in whitelist]
if l:
ret += ' * %s:\n'%sub
ret += ''.join([" * %s\n"%t for t in l])
else:
for sub, l in ctx.unconnected_subscriptions.items():
l = [t for t in l if t not in ['/time', '/clock']]
if l:
ret += ' * %s:\n'%sub
ret += ''.join([" * %s\n"%t for t in l])
return ret
graph_warnings = [
(unconnected_subscriptions, "The following node subscriptions are unconnected:\n"),
(unexpected_edges, "The following nodes are unexpectedly connected:"),
]
graph_errors = [
    (simtime_check, "/use_sim_time is set but no publisher of /clock is present"),
(ping_check, "Could not contact the following nodes:"),
(missing_edges, "The following nodes should be connected but aren't:"),
(probe_all_services, "Errors connecting to the following services:"),
]
def topic_timestamp_drift(ctx, t):
#TODO: get msg_class, if msg_class has header, receive a message
# and compare its time to ros time
if 0:
rospy.Subscriber(t, msg_class)
#TODO: these are mainly future enhancements. It's unclear to me whether or not this will be
#useful as most of the generic rules are capable of targeting these problems as well.
#The only rule that in particular seems useful is the timestamp drift. It may be too
#expensive otherwise to run, though it would be interesting to attempt to receive a
#message from every single topic.
#TODO: parameter audit?
service_errors = [
]
service_warnings = [
]
topic_errors = [
(topic_timestamp_drift, "Timestamp drift:")
]
topic_warnings = [
]
node_errors = [
]
node_warnings = [
|
]
## cache sim_time calculation so that multiple rules can use it
def _compute_sim_time(ctx):
param_server = rosgraph.Master('/roswtf')
ctx.use_sim_time = False
try:
val = simtime = param_server.getParam('/use_sim_time')
if val:
ctx.use_sim_time = True
except:
pass
def _compute_system_state(ctx):
socket.setdefaulttimeout(3.0)
master = rosgraph.Master('/roswtf')
# store system state
try:
val = master.getSystemState()
except rosgraph.MasterException:
return
ctx.system_state = val
pubs, subs, srvs = val
# compute list of topics and services
topics = []
for t, _ in itertools.chain(pubs, subs):
topics.append(t)
services = []
service_providers = []
for s, l in srvs:
services.append(s)
service_providers.extend(l)
ctx.topics = topics
ctx.services = services
    ctx.service_providers = service_providers
|
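As a side note on the roswtf sample above: edges are plain (topic, publisher, subscriber) tuples, and unexpected_edges / missing_edges are just set differences. A standalone sketch follows; the node and topic names are made up for illustration.

# Illustrative only; these node/topic names are not from any real graph.
expected_edges = {("/scan", "/laser_driver", "/obstacle_detector"),
                  ("/cmd_vel", "/planner", "/base_controller")}
actual_edges = {("/scan", "/laser_driver", "/obstacle_detector"),
                ("/tf", "/robot_state_publisher", "/planner")}

unexpected = actual_edges - expected_edges
missing = expected_edges - actual_edges
print(["%s->%s (%s)" % (p, s, t) for (t, p, s) in unexpected])
print(["%s->%s (%s)" % (p, s, t) for (t, p, s) in missing])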
The-OpenROAD-Project/OpenROAD | src/odb/test/unitTestsPython/TestWireCodec.py | Python | bsd-3-clause | 5,470 | 0.005484 |
import opendbpy as odb
import helper
import odbUnitTest
class TestWireCodec(odbUnitTest.TestCase):
    #This function is called before each of the test cases defined below
    def setUp(self):
        self.db, self.tech, self.m1, self.m2, self.m3, self.v12, self.v23 = helper.createMultiLayerDB()
self.chip = odb.dbChip_create(self.db)
self.block = odb.dbBlock_create(self.chip, "chip")
self.net = odb.dbNet_create(self.block, "net")
self.wire = odb.dbWire_create(self.net)
self.pathsEnums = ["PATH", "JUNCTION", "SHORT", "VWIRE", "POINT", "POINT_EXT", "VIA", "TECH_VIA", "RECT", "ITERM", "BTERM", "RULE", "END_DECODE"]
#this function is called after each of the test cases
def tearDown(self):
self.db.destroy(self.db)
def test_decoder(self):
encoder = odb.dbWireEncoder()
encoder.begin(self.wire)
encoder.newPath(self.m1, "ROUTED")
encoder.addPoint(2000, 2000)
j1 = encoder.addPoint(10000, 2000)
encoder.addPoint(18000, 2000)
encoder.newPath(j1)
encoder.addTechVia(self.v12)
j2 = encoder.addPoint(10000, 10000)
encoder.addPoint(10000, 18000)
encoder.newPath(j2)
j3 = encoder.addTechVia(self.v12)
encoder.addPoint(23000, 10000, 4000)
encoder.newPath(j3)
encoder.addPoint(3000, 10000)
encoder.addTechVia(self.v12)
encoder.addTechVia(self.v23)
encoder.addPoint(3000, 10000, 4000)
encoder.addPoint(3000, 18000, 6000)
encoder.end()
decoder = odb.dbWireDecoder()
decoder.begin(self.wire)
# Encoding started with a path
nextOp = decoder.next()
assert nextOp == odb.dbWireDecoder.PATH
# Check first point
nextOp = decoder.next()
assert nextOp == odb.dbWireDecoder.POINT
point = decoder.getPoint()
assert point == [2000, 2000]
# Check second point
nextOp = decoder.next()
assert nextOp == odb.dbWireDecoder.POINT
point = decoder.getPoint()
assert point == [10000, 2000]
# Check third point
nextOp = decoder.next()
assert nextOp == odb.dbWireDecoder.POINT
point = decoder.getPoint()
assert point == [18000, 2000]
# Check first junction id
nextOp = decoder.next()
assert nextOp == odb.dbWireDecoder.JUNCTION
jid = decoder.getJunctionValue()
assert jid == j1
# Check junction point
nextOp = decoder.next()
assert nextOp == odb.dbWireDecoder.POINT
point = decoder.getPoint()
assert point == [10000, 2000]
# Check tech via
nextOp = decoder.next()
assert nextOp == odb.dbWireDecoder.TECH_VIA
tchVia = decoder.getTechVia()
assert tchVia.getName() == self.v12.getName()
# Check next point
nextOp = decoder.next()
assert nextOp == odb.dbWireDecoder.POINT
point = decoder.getPoint()
assert point == [10000, 10000]
# Check next point
nextOp = decoder.next()
assert nextOp == odb.dbWireDecoder.POINT
point = decoder.getPoint()
assert point == [10000, 18000]
# Check second junction id
nextOp = decoder.next()
assert nextOp == odb.dbWireDecoder.JUNCTION
jid = decoder.getJunctionValue()
assert jid == j2
# Check junction point
nextOp = decoder.next()
assert nextOp == odb.dbWireDecoder.POINT
point = decoder.getPoint()
assert point == [10000, 10000]
# Check tech via
nextOp = decoder.next()
assert nextOp == odb.dbWireDecoder.TECH_VIA
tchVia = decoder.getTechVia()
assert tchVia.getName() == self.v12.getName()
# Check next point
nextOp = decoder.next()
assert nextOp == odb.dbWireDecoder.POINT_EXT
point = decoder.getPoint_ext()
assert point == [23000, 10000, 4000]
# Check third junction id
nextOp = decoder.next()
assert nextOp == odb.dbWireDecoder.JUNCTION
jid = decoder.getJunctionValue()
assert jid == j3
# Check junction point
nextOp = decoder.next()
assert nextOp == odb.dbWireDecoder.POINT
point = decoder.getPoint()
assert point == [10000, 10000]
# Check next point
nextOp = decoder.next()
assert nextOp == odb.dbWireDecoder.POINT
point = decoder.getPoint()
assert point == [3000, 10000]
# Check tech via
nextOp = decoder.next()
assert nextOp == odb.dbWireDecoder.TECH_VIA
tchVia = decoder.getTechVia()
assert tchVia.getName() == self.v12.getName()
# Check tech via
nextOp = decoder.next()
assert nextOp == odb.dbWireDecoder.TECH_VIA
tchVia = decoder.getTechVia()
assert tchVia.getName() == self.v23.getName()
# Check next point
nextOp = decoder.next()
assert nextOp == odb.dbWireDecoder.POINT_EXT
point = decoder.getPoint_ext()
assert point == [3000, 10000, 4000]
# Check next point
nextOp = decoder.next()
assert nextOp == odb.dbWireDecoder.POINT_EXT
point = decoder.getPoint_ext()
assert point == [3000, 18000, 6000]
if __name__=='__main__':
odbUnitTest.mainParallel(TestWireCodec)
|
TE-ToshiakiTanaka/bantorra.old | script/testcase_base.py | Python | mit | 1,751 | 0.001142 |
import os
import sys
import argparse
import ConfigParser
import testcase_service
from bantorra.util import define
from bantorra.util.log import LOG as L
class TestCase_Base(testcase_service.TestCaseUnit):
config = {}
"""
TestCase_Base.
- Parse Command Line Argument.
- Create Service's Instance.
- Read Config File and get value.
"""
def __init__(self, *args, **kwargs):
super(TestCase_Base, self).__init__(*args, **kwargs)
self.parse()
self.get_config()
self.service_check()
self.get_service()
@classmethod
def set(cls, name, value):
cls.config[name] = value
@classmethod
def get(cls, name):
return cls.config[name]
def parse(self):
"""
|
Parse Command Line Arguments.
"""
return None
@classmethod
    def get_service(cls):
"""
        Get the services used by this test case.
        The services in use are defined in the wifi branch.
"""
cls.core = cls.service["core"].get()
cls.picture = cls.service["picture"].get()
@classmethod
def get_config(cls, conf=""):
"""
Get Config File.
:arg string conf: config file path.
"""
cls.config = {}
if conf == "":
conf = os.path.join(define.APP_SCRIPT, "config.ini")
try:
config = ConfigParser.ConfigParser()
config.read(conf)
for section in config.sections():
for option in config.options(section):
cls.config["%s.%s" % (section, option)] = config.get(section, option)
except Exception as e:
L.warning('error: could not read config file: %s' % e)
|
kylon/pacman-fakeroot | test/pacman/tests/sync701.py | Python | gpl-2.0 | 504 | 0 |
self.description = "incoming package replaces symlink with directory (order
|
1)"
lp = pmpkg("pkg1")
lp.files = ["usr/lib/foo",
"lib -> usr/lib"]
self.addpkg2db("local", lp)
p1 = pmpkg("pkg1", "1.0-2")
p1.files = ["usr/lib/foo"]
self.addpkg2db("sync", p1)
p2 = pmpkg("pkg2")
p2.files = ["lib/bar"]
self.addpkg2db("sync", p2)
self.args = "-S pkg1 pkg2"
self.addrule("PACMAN_RETCODE=0")
self.addrule("PKG_VERSION=pkg1|1.0-2")
self.addrule("PKG_EXIST=pkg2")
self.addrule("FILE_TYPE=lib|dir")
|
pniedzielski/fb-hackathon-2013-11-21 | src/py/level3.py | Python | agpl-3.0 | 1,260 | 0.001587 |
# Learn Python -- level 2 logic
# Copyright (C) 2013 Cornell FB Hackathon Team.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
player1._position = [5, 6]
_world.grid[player1._position[1]][player1._position[0]] = player1
player2._position = [14, 6]
_world.grid[player2._position[1]][player2._position[0]] = player2
items = [Item("bread", [0, 2]),
Item("mushroom", [11, 9]),
Item("scepter", [5, 4]),
Item("banana", [17, 3]),
Item("bread", [10, 1]),
Item("sword", [8, 9])]
def at_end():
if (player1.inventory.count() != 3 or
player2.inventory.count() != 3):
raise Exception("Failure")
|
jokey2k/sentry | src/sentry/testutils/helpers/auth_header.py | Python | bsd-3-clause | 431 | 0 |
from __future__ import absolute_import
__all__ = ('get_auth_header',)
def get_auth_header(client, api_key=None, secret_key=None):
header = [
('sentry_client', client),
('sentry_version', '5'),
]
if api_key:
header.append(('sentry_key', api_key))
if secret_key:
header.append(('sentry_secret', secret_key))
    return 'Sentry %s' % ', '.join('%s=%s' % (k, v) for k, v in header)
|
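A quick usage sketch for get_auth_header above; the client string and key values are placeholders.

# Assumes get_auth_header from the snippet above is in scope.
header = get_auth_header('raven-python/5.0', api_key='public', secret_key='secret')
print(header)
# Sentry sentry_client=raven-python/5.0, sentry_version=5, sentry_key=public, sentry_secret=secret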
therewillbecode/ichnaea | ichnaea/api/locate/query.py | Python | apache-2.0 | 10,806 | 0 |
"""Code representing a query."""
import six
from ichnaea.api.locate.constants import (
DataAccuracy,
MIN_WIFIS_IN_QUERY,
)
from ichnaea.api.locate.schema import (
CellAreaLookup,
CellLookup,
FallbackLookup,
WifiLookup,
)
try:
from collections import OrderedDict
except ImportError: # pragma: no cover
from ordereddict import OrderedDict
if six.PY2: # pragma: no cover
from ipaddr import IPAddress as ip_address # NOQA
else: # pragma: no cover
from ipaddress import ip_address
METRIC_MAPPING = {
0: 'none',
1: 'one',
2: 'many',
}
class Query(object):
_country = None
_fallback = None
_geoip = None
_ip = None
def __init__(self, fallback=None, ip=None, cell=None, wifi=None,
api_key=None, api_type=None, session=None,
http_session=None, geoip_db=None, stats_client=None):
"""
A class representing a concrete query.
:param fallback: A dictionary of fallback options.
:type fallback: dict
:param ip: An IP address, e.g. 127.0.0.1.
:type ip: str
:param cell: A list of cell query dicts.
:type cell: list
:param wifi: A list of wifi query dicts.
:type wifi: list
:param api_key: An ApiKey instance for the current query.
:type api_key: :class:`ichnaea.models.api.ApiKey`
:param api_type: The type of query API, for example `locate`.
:type api_type: str
:param session: An open database session.
:param http_session: An open HTTP/S session.
        :param geoip_db: A geoip database.
:type geoip_db: :class:`~ichnaea.geoip.GeoIPWrapper`
:param stats_client: A stats client.
:type stats_client: :class:`~ichnaea.log.StatsClient`
"""
self.geoip_db = geoip_db
self.http_session = http_session
self.session = session
self.stats_client = stats_client
self.fallback = fallback
self.ip = ip
self.cell = cell
        self.wifi = wifi
self.api_key = api_key
if api_type not in (None, 'country', 'locate'):
raise ValueError('Invalid api_type.')
self.api_type = api_type
@property
def fallback(self):
"""
A validated
:class:`~ichnaea.api.locate.schema.FallbackLookup` instance.
"""
return self._fallback
@fallback.setter
def fallback(self, values):
if not values:
values = {}
valid = FallbackLookup.create(**values)
if valid is None: # pragma: no cover
valid = FallbackLookup.create()
self._fallback = valid
@property
def country(self):
"""
The two letter country code of origin for this query.
Can return None, if no country could be determined.
"""
return self._country
@property
def geoip(self):
"""
A GeoIP database entry for the originating IP address.
Can return None if no database match could be found.
"""
return self._geoip
@property
def ip(self):
"""The validated IP address."""
return self._ip
@ip.setter
def ip(self, value):
if not value:
value = None
try:
valid = str(ip_address(value))
except ValueError:
valid = None
self._ip = valid
if valid:
country = None
geoip = None
if self.geoip_db:
geoip = self.geoip_db.geoip_lookup(valid)
if geoip:
country = geoip.get('country_code')
if country:
country = country.upper()
self._geoip = geoip
self._country = country
@property
def cell(self):
"""
The validated list of
:class:`~ichnaea.api.locate.schema.CellLookup` instances.
If the same cell network is supplied multiple times, this chooses only
the best entry for each unique network.
"""
return self._cell
@property
def cell_area(self):
"""
The validated list of
:class:`~ichnaea.api.locate.schema.CellAreaLookup` instances.
If the same cell area is supplied multiple times, this chooses only
the best entry for each unique area.
"""
if self.fallback.lacf:
return self._cell_area
return []
@cell.setter
def cell(self, values):
if not values:
values = []
values = list(values)
self._cell_unvalidated = values
filtered_areas = OrderedDict()
filtered_cells = OrderedDict()
for value in values:
valid_area = CellAreaLookup.create(**value)
if valid_area:
existing = filtered_areas.get(valid_area.hashkey())
if existing is not None and existing.better(valid_area):
pass
else:
filtered_areas[valid_area.hashkey()] = valid_area
valid_cell = CellLookup.create(**value)
if valid_cell:
existing = filtered_cells.get(valid_cell.hashkey())
if existing is not None and existing.better(valid_cell):
pass
else:
filtered_cells[valid_cell.hashkey()] = valid_cell
self._cell_area = list(filtered_areas.values())
self._cell = list(filtered_cells.values())
@property
def wifi(self):
"""
The validated list of
:class:`~ichnaea.api.locate.schema.WifiLookup` instances.
If the same Wifi network is supplied multiple times, this chooses only
the best entry for each unique network.
If fewer than :data:`~ichnaea.api.locate.constants.MIN_WIFIS_IN_QUERY`
unique valid Wifi networks are found, returns an empty list.
"""
return self._wifi
@wifi.setter
def wifi(self, values):
if not values:
values = []
values = list(values)
self._wifi_unvalidated = values
filtered = OrderedDict()
for value in values:
valid_wifi = WifiLookup.create(**value)
if valid_wifi:
existing = filtered.get(valid_wifi.mac)
if existing is not None and existing.better(valid_wifi):
pass
else:
filtered[valid_wifi.mac] = valid_wifi
if len(filtered) < MIN_WIFIS_IN_QUERY:
filtered = {}
self._wifi = list(filtered.values())
@property
def expected_accuracy(self):
accuracies = [DataAccuracy.none]
if self.wifi:
if self.api_type == 'country':
accuracies.append(DataAccuracy.none)
else:
accuracies.append(DataAccuracy.high)
if self.cell:
if self.api_type == 'country':
accuracies.append(DataAccuracy.low)
else:
accuracies.append(DataAccuracy.medium)
if ((self.cell_area and self.fallback.lacf) or
(self.ip and self.fallback.ipf)):
accuracies.append(DataAccuracy.low)
# return the best possible (smallest) accuracy
return min(accuracies)
def result_status(self, result):
"""
Returns either hit or miss, depending on whether the result
matched the expected query accuracy.
"""
if result.data_accuracy <= self.expected_accuracy:
# equal or better / smaller accuracy
return 'hit'
return 'miss'
def internal_query(self):
"""Returns a dictionary of this query in our internal format."""
result = {}
if self.cell:
result['cell'] = []
for cell in self.cell:
cell_data = {}
for field in cell._fields:
cell_data[field] = getattr(cell, field)
result['cell'].append(cell_data)
if self.wifi:
result['wifi'] = []
for wif
|
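The cell and wifi setters in the Query sample above share one pattern: key each lookup by its network identity and keep only the "better" entry per key. A framework-free sketch of that pattern follows, with plain dicts and a signal-strength comparison standing in for the schema lookup objects and their better() method.

from collections import OrderedDict

observations = [
    {"mac": "aa:bb:cc:dd:ee:01", "signal": -70},
    {"mac": "aa:bb:cc:dd:ee:01", "signal": -55},  # stronger reading, same network
    {"mac": "aa:bb:cc:dd:ee:02", "signal": -80},
]

def better(a, b):
    # Stand-in for Lookup.better(); here "better" just means a stronger signal.
    return a["signal"] > b["signal"]

filtered = OrderedDict()
for obs in observations:
    existing = filtered.get(obs["mac"])
    if existing is not None and better(existing, obs):
        continue
    filtered[obs["mac"]] = obs

print(list(filtered.values()))  # one entry per unique MAC, the strongest each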
solashirai/edx-platform | lms/djangoapps/teams/serializers.py | Python | agpl-3.0 | 7,981 | 0.00213 |
"""Defines serializers used by the Team API."""
from copy import deepcopy
from django.contrib.auth.models import User
from django.db.models import Count
from django.conf import settings
from django_countries import countries
from rest_framework import serializers
from openedx.core.lib.api.serializers import CollapsedReferenceSerializer
from openedx.core.lib.api.fields import ExpandableField
from openedx.core.djangoapps.user_api.accounts.serializers import UserReadOnlySerializer
from lms.djangoapps.teams.models import CourseTeam, CourseTeamMembership
class CountryField(serializers.Field):
"""
Field to serialize a country code.
"""
COUNTRY_CODES = dict(countries).keys()
def to_representation(self, obj):
"""
Represent the country as a 2-character unicode identifier.
"""
return unicode(obj)
def to_internal_value(self, data):
"""
Check that the code is a valid country code.
We leave the data in its original format so that the Django model's
CountryField can convert it to the internal representation used
by the django-countries library.
"""
if data and data not in self.COUNTRY_CODES:
raise serializers.ValidationError(
u"{code} is not a valid country code".format(code=data)
)
return data
class UserMembershipSerializer(serializers.ModelSerializer):
"""Serializes CourseTeamMemberships with only user and date_joined
Used for listing team members.
"""
profile_configuration = deepcopy(settings.ACCOUNT_VISIBILITY_CONFIGURATION)
profile_configuration['shareable_fields'].append('url')
profile_configuration['public_fields'].append('url')
user = ExpandableField(
collapsed_serializer=CollapsedReferenceSerializer(
model_class=User,
id_source='username',
view_name='accounts_api',
read_only=True,
),
expanded_serializer=UserReadOnlySerializer(configuration=profile_configuration),
)
class Meta(object):
model = CourseTeamMembership
fields = ("user", "date_joined", "last_activity_at")
read_only_fields = ("date_joined", "last_activity_at")
class CourseTeamSerializer(serializers.ModelSerializer):
"""Serializes a CourseTeam with membership information."""
id = serializers.CharField(source='team_id', read_only=True) # pylint: disable=invalid-name
membership = UserMembershipSerializer(many=True, read_only=True)
country = CountryField()
class Meta(object):
model = CourseTeam
fields = (
"id",
"discussion_topic_id",
"name",
"course_id",
"topic_id",
"date_created",
"description",
"country",
"language",
"last_activity_at",
"membership",
)
read_only_fields = ("course_id", "date_created", "discussion_topic_id", "last_activity_at")
class CourseTeamCreationSerializer(serializers.ModelSerializer):
"""Deserializes a CourseTeam for creation."""
country = CountryField(required=False)
class Meta(object):
model = CourseTeam
fields = (
"name",
"course_id",
"description",
"topic_id",
"country",
"language",
)
def create(self, validated_data):
team = CourseTeam.create(
name=validated_data.get("name", ''),
course_id=validated_data.get("course_id"),
description=validated_data.get("description", ''),
topic_id=validated_data.get("topic_id", ''),
country=validated_data.get("country", ''),
language=validated_data.get("language", ''),
)
team.save()
return team
class CourseTeamSerializerWithoutMembership(CourseTeamSerializer):
"""The same as the `CourseTeamSerializer`, but elides the membership field.
Intended to be used as a sub-serializer for serializing team
memberships, since the membership field is redundant in that case.
"""
def __init__(self, *args, **kwargs):
super(CourseTeamSerializerWithoutMembership, self).__init__(*args, **kwargs)
del self.fields['membership']
class MembershipSerializer(serializers.ModelSerializer):
"""Serializes CourseTeamMemberships with information about both teams and users."""
profile_configuration = deepcopy(settings.ACCOUNT_VISIBILITY_CONFIGURATION)
profile_configuration['shareable_fields'].append('url')
profile_configuration['public_fields'].append('url')
user = ExpandableField(
collapsed_serializer=CollapsedReferenceSerializer(
model_class=User,
id_source='username',
view_name='accounts_api',
read_only=True,
),
expanded_serializer=UserReadOnlySerializer(configuration=profile_configuration)
)
team = ExpandableField(
collapsed_serializer=CollapsedReferenceSerializer(
model_class=CourseTeam,
id_source='team_id',
view_name='teams_detail',
read_only=True,
),
expanded_serializer=CourseTeamSerializerWithoutMembership(read_only=True),
)
class Meta(object):
model = CourseTeamMembership
fields = ("user", "team", "date_joined", "last_activity_at")
read_only_fields = ("date_joined", "last_activity_at")
class BaseTopicSerializer(serializers.Serializer):
"""Serializes a topic without team_count."""
description = serializers.CharField()
name = serializers.CharField()
id = serializers.CharField() # pylint: disable=invalid-name
class TopicSerializer(BaseTopicSerializer):
"""
Adds team_count to the basic topic serializer, checking if team_count
    is already present in the topic data, and if not, querying the CourseTeam
model to get the count. Requires that `context` is provided with a valid course_id
in order to filter teams within the course.
"""
team_count = serializers.SerializerMethodField()
def get_team_count(self, topic):
"""Get the number of teams associated with this topic"""
# If team_count is already present (possible if topic data was pre-processed for sorting), return it.
        if 'team_count' in topic:
return topic['team_count']
else:
return CourseTeam.objects.filter(course_id=self.context['course_id'], topic_id=topic['id']).count()
class BulkTeamCountTopicListSerializer(serializers.ListSerializer): # pylint: disable=abstract-method
"""
List serializer for efficiently serializing a set of topics.
"""
def to_representation(self, obj):
"""Adds team_count to each topic. """
data = super(BulkTeamCountTopicListSerializer, self).to_representation(obj)
add_team_count(data, self.context["course_id"])
return data
class BulkTeamCountTopicSerializer(BaseTopicSerializer): # pylint: disable=abstract-method
"""
Serializes a set of topics, adding the team_count field to each topic as a bulk operation.
Requires that `context` is provided with a valid course_id in order to filter teams within the course.
"""
class Meta(object):
list_serializer_class = BulkTeamCountTopicListSerializer
def add_team_count(topics, course_id):
"""
Helper method to add team_count for a list of topics.
This allows for a more efficient single query.
"""
topic_ids = [topic['id'] for topic in topics]
teams_per_topic = CourseTeam.objects.filter(
course_id=course_id,
topic_id__in=topic_ids
).values('topic_id').annotate(team_count=Count('topic_id'))
topics_to_team_count = {d['topic_id']: d['team_count'] for d in teams_per_topic}
for topic in topics:
topic['team_count'] = topics_to_team_count.get(topic['id'], 0)
|
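add_team_count in the sample above does its counting in a single aggregate query and then annotates each topic dict, defaulting to zero. A framework-free sketch of that annotation step follows, with literal rows standing in for the annotated queryset.

# The rows below play the role of the .values('topic_id').annotate(...) result.
teams_per_topic = [
    {"topic_id": "algorithms", "team_count": 3},
    {"topic_id": "databases", "team_count": 1},
]
topics = [
    {"id": "algorithms", "name": "Algorithms"},
    {"id": "databases", "name": "Databases"},
    {"id": "compilers", "name": "Compilers"},  # no teams yet
]

topics_to_team_count = {d["topic_id"]: d["team_count"] for d in teams_per_topic}
for topic in topics:
    topic["team_count"] = topics_to_team_count.get(topic["id"], 0)

print(topics)  # "compilers" gets team_count 0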
lablup/sorna-repl | python-tensorflow/test_run.py | Python | lgpl-3.0 | 2,665 | 0.001876 |
import argparse
from getpass import getpass
import json
import sys
import textwrap
import zmq
import colorama
from colorama import Fore
def execute(code):
ctx = zmq.Context.instance()
ctx.setsockopt(zmq.LINGER, 50)
repl_in = ctx.socket(zmq.PUSH)
repl_in.connect('tcp://127.0.0.1:2000')
    repl_out = ctx.socket(zmq.PULL)
repl_out.connect('tcp://127.0.0.1:2001')
with repl_in, repl_out:
msg = (b'xcode1', code.encode('utf8'))
repl_in.send_multipart(msg)
while True:
data = repl_out.recv_multipart()
msg_type = data[0].decode('ascii')
msg_data = data[1].decode('utf8')
if msg_type == 'finished':
print('--- finished ---')
break
elif msg_type == 'stdout':
print(msg_data, end='')
sys.stdout.flush()
elif msg_type == 'stderr':
print(Fore.RED + msg_data + Fore.RESET, end='', file=sys.stderr)
sys.stderr.flush()
elif msg_type == 'waiting-input':
opts = json.loads(msg_data)
if opts['is_password']:
t = getpass(prompt='')
else:
t = input()
repl_in.send_multipart([b'input', t.encode('utf8')])
else:
print('--- other msg ---')
print(msg_type)
print(msg_data)
sources = {
'interleaving': '''
import sys
print('asdf', end='', file=sys.stderr)
print('qwer', end='', file=sys.stdout)
print('zxcv', file=sys.stderr)
''',
'long_running': '''
import time
for i in range(10):
time.sleep(1)
print(i)
''',
'user_input': '''
import hashlib
import getpass
print('Please type your name.')
name = input('>> ')
print('Hello, {0}'.format(name))
print('Please type your password.')
pw = getpass.getpass()
m = hashlib.sha256()
m.update(pw.encode('utf8'))
print('Your password hash is {0}'.format(m.hexdigest()))
''',
'early_exception': '''a = wrong-+****syntax''',
'runtime_error': '''
def x():
raise RuntimeError('asdf')
def s():
x()
if __name__ == '__main__':
s()
''',
'tensorflow': '''
import tensorflow as tf
print('TensorFlow version:', tf.__version__)
print(tf.test.is_gpu_available())
print('ok')'''
}
def main():
parser = argparse.ArgumentParser()
parser.add_argument('program_name')
    args = parser.parse_args()
src = sources[args.program_name]
print('Test code:')
print(textwrap.indent(src, ' '))
print()
print('Execution log:')
execute(src)
if __name__ == '__main__':
colorama.init()
main()
|
Gorbeh/sf | src/WindowManager.py | Python | agpl-3.0 | 16,966 | 0.0234 |
# This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# $Id: WindowManager.py 30 2009-04-20 05:16:40Z patryn $
#*****************************************************************************
# -*- coding: utf-8 -*-
import PyQt4
import sys
from PyQt4 import QtGui, QtCore
from Ui_MainWindow import Ui_MainWindow
from Ui_TableWindow import Ui_TableWindow
from Ui_PortDialog import Ui_PortDialog
from Ui_ParamDialog import Ui_ParamDialog
from Ui_AboutWindow import Ui_AboutDialog
from Ui_SimSettingsWindow import Ui_SimSettingsWindow
from CreateOutput import *
from Common import *
ROW_ADD, ROW_EDIT = range(2)
#--------------------------------------------------------------------
class WindowManagerClass:
def __init__(self, MainWindow):
self.MainWindow = MainWindow
self.Settings = DataHolderClass()
self.WindowIndex = MAIN_WINDOW
self.Ui = None
Screen = QtGui.QDesktopWidget().screenGeometry()
Size = self.MainWindow.geometry()
self.MainWindow.move((Screen.width() - Size.width()) / 2, (Screen.height() - Size.height()) / 2)
def ShowAbout(self):
AboutDialog = QtGui.QDialog()
Ui = Ui_AboutDialog()
Ui.setupUi(AboutDialog)
AboutDialog.exec_()
def ShowAboutQt(self):
QtGui.QMessageBox.aboutQt(None)
#---------------------- Main Window -------------------
def CreateMainWindow(self):
self.Ui.setupUi(self.MainWindow)
self.Ui.lineEditName.setText(self.Settings.sfunctionName)
self.Ui.comboBoxSampleTime.addItems(SampleTimesList)
self.Ui.comboBoxSampleTime.setEditText(self.Settings.sfunctionSampleTime)
self.Ui.comboBoxOffsetTime.addItems(OffsetTimesList)
self.Ui.comboBoxOffsetTime.setEditText(self.Settings.sfunctionOffsetTime)
self.Ui.comboBoxCont.addItems(StatesList)
self.Ui.comboBoxCont.setEditText(self.Settings.sfunctionContStateNum)
self.Ui.comboBoxDisc.addItems(StatesList)
self.Ui.comboBoxDisc.setEditText(self.Settings.sfunctionDiscStateNum)
QtCore.QObject.connect(self.Ui.pushButtonNext, QtCore.SIGNAL('clicked()'), self.NextWindow)
def ProcessMainWindowValues(self):
self.Settings.sfunctionName = self.Ui.lineEditName.text()
self.Settings.sfunctionSampleTime = self.Ui.comboBoxSampleTime.currentText()
self.Settings.sfunctionOffsetTime = self.Ui.comboBoxOffsetTime.currentText()
self.Settings.sfunctionContStateNum = self.Ui.comboBoxCont.currentText()
self.Settings.sfunctionDiscStateNum = self.Ui.comboBoxDisc.currentText()
if not IsValidName(self.Settings.sfunctionName):
QtGui.QMessageBox.critical(self.MainWindow, "Error", "Invalid sfunction Name", QtGui.QMessageBox.Ok, QtGui.QMessageBox.Ok)
return False
if (not IsValidNumber(self.Settings.sfunctionSampleTime)) and (self.Settings.sfunctionSampleTime not in SampleTimesList):
QtGui.QMessageBox.critical(self.MainWindow, "Error", "Invalid Sample Time", QtGui.QMessageBox.Ok, QtGui.QMessageBox.Ok)
return False
if (not IsValidNumber(self.Settings.sfunctionOffsetTime)) and (self.Settings.sfunctionOffsetTime not in OffsetTimesList):
QtGui.QMessageBox.critical(self.MainWindow, "Error", "Invalid Offset Time", QtGui.QMessageBox.Ok, QtGui.QMessageBox.Ok)
return False
if (not IsValidNumber(self.Settings.sfunctionContStateNum)) and (self.Settings.sfunctionContStateNum not in StatesList):
QtGui.QMessageBox.critical(self.MainWindow, "Error", "Invalid number of Continuous States", QtGui.QMessageBox.Ok, QtGui.QMessageBox.Ok)
return False
if (not IsValidNumber(self.Settings.sfunctionDiscStateNum)) and (self.Settings.sfunctionDiscStateNum not in StatesList):
QtGui.QMessageBox.critical(self.MainWindow, "Error", "Invalid number of Discrete States", QtGui.QMessageBox.Ok, QtGui.QMessageBox.Ok)
return False
return True
#---------------------- Ports, Parameters and PWork Windows -------------------
def ReadTableRow(self, CurrentRow):
ValuesList = []
if(self.WindowIndex == PORTS_WINDOW):
ValuesList.append(self.Ui.tableWidgetTable.item(CurrentRow, PORT_NAME_COL).text())
ValuesList.append(self.Ui.tableWidgetTable.item(CurrentRow, PORT_DIR_COL).text())
ValuesList.append(self.Ui.tableWidgetTable.item(CurrentRow, PORT_TYPE_COL).text())
ValuesList.append(self.Ui.tableWidgetTable.item(CurrentRow, PORT_WIDTH_COL).text())
ValuesList.append(self.Ui.tableWidgetTable.item(CurrentRow, PORT_COUNT_COL).text())
if(self.WindowIndex == PARAMS_WINDOW):
ValuesList.append(self.Ui.tableWidgetTable.item(CurrentRow, PARAM_NAME_COL).text())
ValuesList.append(self.Ui.tableWidgetTable.item(CurrentRow, PARAM_TYPE_COL).text())
ValuesList.append(self.Ui.tableWidgetTable.item(CurrentRow, PARAM_MDLINIT).text())
ValuesList.append(self.Ui.tableWidgetTable.item(CurrentRow, PARAM_MDLSTART).text())
ValuesList.append(self.Ui.tableWidgetTable.item(CurrentRow, PARAM_MDLOUTPUTS).text())
return ValuesList
def ShowInputDialog(self):
InputDialog = QtGui.QDialog()
if(self.WindowIndex == PORTS_WINDOW):
self.InputUi = Ui_PortDialog()
self.InputUi.setupUi(InputDialog)
self.InputUi.comboBoxDirection.addItems(PortDirectonList)
self.InputUi.comboBoxType.addItems(PortTypeList)
if (self.TableAction == ROW_EDIT):
CurrentRow = self.Ui.tableWidgetTable.currentRow()
ValuesList = self.ReadTableRow(CurrentRow)
self.InputUi.lineEditPortName.setText(ValuesList[PORT_NAME_COL])
self.InputUi.comboBoxDirection.setCurrentIndex(PortDirectonList.index(ValuesList[PORT_DIR_COL]))
self.InputUi.comboBoxType.setCurrentIndex(PortTypeList.index(ValuesList[PORT_TYPE_COL]))
self.InputUi.spinBoxWidth.setValue(int(ValuesList[PORT_WIDTH_COL]))
self.InputUi.spinBoxCount.setValue(int(ValuesList[PORT_COUNT_COL]))
elif (self.WindowIndex == PARAMS_WINDOW):
self.InputUi = Ui_ParamDialog()
self.InputUi.setupUi(InputDialog)
self.InputUi.comboBoxType.addItems(ParamTypeList)
            if (self.TableAction == ROW_EDIT):
CurrentRow = self.Ui.tableWidgetTable.currentRow()
ValuesList = self.ReadTableRow(CurrentRow)
self.InputUi.lineEditParamName.setText(ValuesList[PARAM_NAME_COL])
                self.InputUi.comboBoxType.setCurrentIndex(ParamTypeList.index(ValuesList[PARAM_TYPE_COL]))
if (ValuesList[PARAM_MDLINIT] == "Yes"):
self.InputUi.checkBoxmdlInitializeSizes.setCheckState(QtCore.Qt.Checked)
else:
self.InputUi.checkBoxmdlInitializeSizes.setCheckState(QtCore.Qt.Unchecked)
if (ValuesList[PARAM_MDLSTART] == "Yes"):
self.InputUi.checkBoxmdlStart.setCheckState(QtCore.Qt.Checked)
else:
self.InputUi.checkBoxmdlStart.setCheckState(QtCore.Qt.Unchecked)
if (ValuesList[PARAM_MDLOUTPUTS] == "Yes"):
self.InputUi.checkBoxmdlOutputs.setCheckState(QtCore.Qt.Checked)
else:
self.InputUi.checkBoxmdlOutputs.setCheckState(QtCore.Qt.Unchecked)
QtCore.QObject.connect(self.InputUi.buttonBox, QtCore.SIGNAL('accepted()'), self.ItemAdded)
InputDialog.exec_()
def TableWindowAddItem(self):
self.TableAction = ROW_ADD
self.ShowInputDialog()
def TableWindowEditItem(self):
if (self.Ui.tableWidgetTable.currentRow() == -1):
QtGui.QMessageBox.critical(self.MainWindow, "Error", "Select a row to edit", QtGui.QMessageBox.Ok, QtGui.QMessageBox.Ok)
return
self.TableAction = ROW_EDIT
self.ShowInputDialog()
def TableWindowRemoveItem(self):
if (self.Ui.tableWidgetTable.currentRow() == -1):
QtGui.QMessageBox.critical(self.MainWindow, "Error", "Select a row to remove", QtGui.QMessageBox.Ok, QtGui.QMessageBox.Ok)
return
CurrentRow = self.Ui.tableWidgetTable.currentRow()
self.Ui.tableWidgetTable.removeRow(CurrentRow)
self.Ui.tableWidgetTable.setCurrentCell(Cur
|
ferrine/gelato | gelato/tests/conftest.py | Python | mit | 130 | 0 |
import numpy as np
import gelato
import pytest
@pytest.fixture()
def seeded():
gelato.set_tt_rng(42)
    np.random.seed(42)
|
frontendphil/analyzr | parsr/checkers.py | Python | mit | 15,240 | 0.001247 |
import subprocess
import json
import math
import re
import lizard
from jinja2 import Environment, FileSystemLoader
from xml.dom import minidom
from decimal import Decimal
from analyzr.settings import CONFIG_PATH, PROJECT_PATH, LAMBDA
XML_ILLEGAL = u'([\u0000-\u0008\u000b-\u000c\u000e-\u001f\ufffe-\uffff])|([%s-%s][^%s-%s])|([^%s-%s][%s-%s])|([%s-%s]$)|(^[%s-%s])'
RE_XML_ILLEGAL = XML_ILLEGAL % (
unichr(0xd800),
unichr(0xdbff),
unichr(0xdc00),
unichr(0xdfff),
unichr(0xd800),
unichr(0xdbff),
unichr(0xdc00),
unichr(0xdfff),
unichr(0xd800),
unichr(0xdbff),
unichr(0xdc00),
unichr(0xdfff)
)
class CheckerException(Exception):
def __init__(self, checker, cmd, stdout="", stderr=""):
self.checker = checker
self.cmd = cmd
self.stdout = stdout
self.stderr = stderr
super(CheckerException, self).__init__()
def __str__(self):
value = "STDOUT:\n%s\n\nSTDERR:\n%s" % (self.stdout, self.stderr)
return "%s raised an error while running command:\n\n%s\n\n%s" % (
self.checker,
" ".join(self.cmd),
value
)
def __unicode__(self):
return self.__str__()
def __repr__(self):
return self.__unicode__()
class Checker(object):
def __init__(self, config_path, result_path):
self.measures = {}
self.env = Environment(loader=FileSystemLoader(CONFIG_PATH))
self.config_path = config_path
self.result_path = result_path
self.files = []
def __str__(self):
return self.__unicode__()
def includes(self, filename):
for f in self.files:
if f.endswith(filename):
return True
return False
def get_decimal(self, value):
return Decimal("%s" % round(float(value), 2))
def execute(self, cmd):
# close_fds must be true as python would otherwise reuse created
# file handles. this would cause a serious memory leak.
        # btw: the file handles are created because we pipe stdout and
# stderr to them.
proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=True)
stdout, stderr = proc.communicate()
if not proc.returncode == 0:
raise CheckerException(self, cmd, stdout=stdout, stderr=stderr)
return stdout
def stub(self):
return {
"cyclomatic_complexity": 0,
"halstead_volume": 0,
"halstead_difficulty": 0,
"fan_in": 0,
"fan_out": 0,
"sloc_absolute": 0,
"sloc": 0
}
def set(self, filename, key, value):
if not filename in self.measures:
self.measures[filename] = self.stub()
self.measures[filename][key] = self.get_decimal(value)
def get_value_in_range(self, value, low, high):
high = high * 1.0
low = low * 1.0
if value <= low:
return 3.0
if value >= high:
return 0.0
return 3.0 - 3.0 * (value / high)
def squale(self, marks):
sum_marks = math.fsum([math.pow(LAMBDA, -1.0 * mark) for mark in marks])
return -1.0 * math.log(sum_marks / (1.0 * len(marks)), LAMBDA)
def get_hv_mark(self, value):
return self.get_value_in_range(value, 20, 1000)
def get_hd_mark(self, value):
return self.get_value_in_range(value, 10, 50)
def get_cc_mark(self, value):
if value <= 2:
return 3.0
if value >= 20:
return 0.0
return math.pow(2, (7 - value) / 3.5)
def get_sloc_mark(self, value):
if value <= 37:
return 3.0
if value >= 162:
return 0.0
return math.pow(2, (70 - value) / 21.0)
def get_fan_in_mark(self, value):
if value <= 19:
return 3.0
if value >= 60:
return 0.0
return math.pow(2, (30 - value) / 7.0)
def get_fan_out_mark(self, value):
if value <= 6:
return 3.0
if value >= 19:
return 0.0
return math.pow(2, (10 - value) / 2.0)
def configure(self, files, revision, connector):
raise NotImplementedError
def run(self):
raise NotImplementedError
def parse(self, connector):
raise NotImplementedError
class JHawk(Checker):
# determines how many files are analyzed at once
# this is important as for revisions with a lot of files the
# generated report might not fit into main memory or can't
# be parsed.
FILE_BATCH_SIZE = 50
def __init__(self, config_path, result_path):
super(JHawk, self).__init__(config_path, result_path)
self.name = "jhawk"
self.files = []
self.configurations = []
self.results = []
def config_file(self, revision, part):
return "%s/%s_%d.xml" % (self.config_path, revision.identifier, part)
def result_file(self, revision, part):
return "%s/%s_%d" % (self.result_path, revision.identifier, part)
def configure(self, files, revision, connector):
for f in files:
self.files.append(f.full_path())
self.measures = {}
self.configurations = []
self.results = []
        template = self.env.get_template("%s.xml" % self.name)
file_count = len(files)
chunks = int(math.ceil(file_count / self.FILE_BATCH_SIZE))
if not file_count % self.FILE_BATCH_SIZE == 0:
chunks = chunks + 1
for i in range(chunks):
start = i * self.FILE_BATCH_SIZE
end = min((i + 1) * self.FILE_BATCH_SIZE, file_count)
chunk = files[start:end]
filename = self.config_file(revision, i)
result_file = self.result_file(revision, i)
options = {
"checker": self.name,
"project_path": PROJECT_PATH,
"base_path": connector.get_repo_path(),
"target": result_file,
"filepattern": "|".join([".*/%s" % f.name for f in chunk])
}
with open(filename, "wb") as f:
f.write(template.render(options))
self.configurations.append(filename)
self.results.append(result_file)
self.revision = revision
def run(self):
for configuration in self.configurations:
cmd = [
"ant",
"-lib", "%s/lib/%s/JHawkCommandLine.jar" % (PROJECT_PATH, self.name),
"-f", configuration
]
self.execute(cmd)
# Don't allow multiple runs with the same configuration
self.configurations = []
return True
def get_metrics(self, parent):
for node in parent.childNodes:
if node.localName == "Metrics":
return node
def get_node_value(self, parent, node_name):
for node in parent.childNodes:
if node.localName == node_name:
return node.firstChild.nodeValue
def get_number(self, parent, node_name):
return float(self.get_node_value(parent, node_name))
def get_name(self, parent):
return self.get_node_value(parent, "Name")
def get_sloc_squale(self, methods):
marks = []
for method in methods:
metrics = self.get_metrics(method)
marks.append(self.get_sloc_mark(self.get_number(metrics, "loc")))
return self.squale(marks)
def get_hv_squale(self, methods):
marks = []
for method in methods:
metrics = self.get_metrics(method)
marks.append(self.get_hv_mark(self.get_number(metrics, "halsteadVolume")))
return self.squale(marks)
def add_halstead_metrics(self, filename, methods):
marks = []
for method in methods:
metrics = self.get_metrics(method)
volume = self.get_number(metrics, "halsteadVolume")
effort = self.get_number(metrics, "halsteadEffort")
difficulty = effort / volume
marks.append(self.get_
|
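For the analyzr checkers above, Checker.squale aggregates per-method marks with a Squale-style weighted mean. A standalone worked example follows; LAMBDA comes from analyzr.settings in the real code, so the value 9.0 here is only an assumption for illustration.

import math

LAMBDA = 9.0  # assumed value; the real setting lives in analyzr.settings

def squale(marks):
    # Same aggregation as Checker.squale above.
    sum_marks = math.fsum([math.pow(LAMBDA, -1.0 * mark) for mark in marks])
    return -1.0 * math.log(sum_marks / (1.0 * len(marks)), LAMBDA)

# Uniform marks come back unchanged, while a single bad method drags the
# aggregate well below the plain average.
print(squale([3.0, 3.0, 3.0]))  # 3.0
print(squale([3.0, 3.0, 0.0]))  # roughly 0.5, far below the mean of 2.0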
ryepdx/scale_proxy_server | header_decorators.py | Python | agpl-3.0 | 376 | 0.005319 |
def json_headers(f):
    def wrapped(*args, **kwargs):
resp = f(*args, **kwargs)
resp.headers['Content-Type'] = 'application/json'
return resp
return wrapped
def max_age_headers(f):
def wrapped(*args, **kwargs):
resp = f(*args, **kwargs)
        resp.headers['Access-Control-Max-Age'] = 9999999
return resp
return wrapped
|
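A minimal usage sketch for the two decorators above; the Response class here is a stand-in for whatever framework response object actually exposes a headers mapping.

# Assumes json_headers and max_age_headers from the snippet above are in scope.
class Response(object):
    def __init__(self, body):
        self.body = body
        self.headers = {}

@json_headers
@max_age_headers
def ping():
    return Response('{"ok": true}')

resp = ping()
print(resp.headers)  # Content-Type and Access-Control-Max-Age both set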
pombredanne/django-custard | custard/tests/test.py | Python | mit | 17,657 | 0.003908 |
from __future__ import unicode_literals
from datetime import date, time, datetime
import django
from django.core.exceptions import ValidationError, ObjectDoesNotExist
from django.db.models import Q
from django.contrib.contenttypes.models import ContentType
from django.test import TestCase, Client
from django.test.client import RequestFactory
from django.test.utils import override_settings
from custard.conf import (CUSTOM_TYPE_TEXT, CUSTOM_TYPE_INTEGER,
CUSTOM_TYPE_BOOLEAN, CUSTOM_TYPE_FLOAT,
CUSTOM_TYPE_DATE, CUSTOM_TYPE_DATETIME,
CUSTOM_TYPE_TIME, settings)
from custard.builder import CustomFieldsBuilder
from custard.utils import import_class
from .models import (SimpleModelWithManager, SimpleModelWithoutManager,
CustomFieldsModel, CustomValuesModel, builder,
SimpleModelUnique, CustomFieldsUniqueModel, CustomValuesUniqueModel, builder_unique)
#==============================================================================
class SimpleModelWithManagerForm(builder.create_modelform()):
class Meta:
model = SimpleModelWithManager
fields = '__all__'
#class ExampleAdmin(admin.ModelAdmin):
# form = ExampleForm
# search_fields = ('name',)
#
# def get_search_results(self, request, queryset, search_term):
# queryset, use_distinct = super(ExampleAdmin, self).get_search_results(request, queryset, search_term)
# queryset |= self.model.objects.search(search_term)
# return queryset, use_distinct
#
# admin.site.register(Example, ExampleAdmin)
#==============================================================================
class CustomModelsTestCase(TestCase):
def setUp(self):
self.factory = RequestFactory()
self.simple_with_manager_ct = ContentType.objects.get_for_model(SimpleModelWithManager)
self.simple_without_manager_ct = ContentType.objects.get_for_model(SimpleModelWithoutManager)
self.simple_unique = ContentType.objects.get_for_model(SimpleModelUnique)
self.cf = CustomFieldsModel.objects.create(content_type=self.simple_with_manager_ct,
name='text_field',
label="Text field",
data_type=CUSTOM_TYPE_TEXT)
self.cf.save()
self.cf2 = CustomFieldsModel.objects.create(content_type=self.simple_with_manager_ct,
name='another_text_field',
label="Text field 2",
data_type=CUSTOM_TYPE_TEXT,
required=True,
searchable=False)
self.cf2.clean()
self.cf2.save()
self.cf3 = CustomFieldsModel.objects.create(content_type=self.simple_with_manager_ct,
name='int_field', label="Integer field",
data_type=CUSTOM_TYPE_INTEGER)
self.cf3.save()
self.cf4 = CustomFieldsModel.objects.create(content_type=self.simple_with_manager_ct,
name='boolean_field', label="Boolean field",
data_type=CUSTOM_TYPE_BOOLEAN)
self.cf4.save()
self.cf5 = CustomFieldsModel.objects.create(content_type=self.simple_with_manager_ct,
name='float_field', label="Float field",
data_type=CUSTOM_TYPE_FLOAT)
self.cf5.save()
self.cf6 = CustomFieldsModel.objects.create(content_type=self.simple_with_manager_ct,
name='date_field', label="Date field",
data_type=CUSTOM_TYPE_DATE)
self.cf6.save()
self.cf7 = CustomFieldsModel.objects.create(content_type=self.simple_with_manager_ct,
name='datetime_field', label="Datetime field",
data_type=CUSTOM_TYPE_DATETIME)
self.cf7.save()
self.cf8 = CustomFieldsModel.objects.create(content_type=self.simple_with_manager_ct,
name='time_field', label="Time field",
data_type=CUSTOM_TYPE_TIME)
self.cf8.save()
self.obj = SimpleModelWithManager.objects.create(name='old test')
self.obj.save()
def tearDown(self):
CustomFieldsModel.objects.all().delete()
def test_import_class(self):
self.assertEqual(import_class('custard.builder.CustomFieldsBuilder'), CustomFieldsBuilder)
def test_model_repr(self):
self.assertEqual(repr(self.cf), "<CustomFieldsModel: text_field>")
val = CustomValuesModel.objects.create(custom_field=self.cf,
object_id=self.obj.pk,
value="abcdefg")
val.save()
self.assertEqual(repr(val), "<CustomValuesModel: text_field: abcdefg>")
@override_settings(CUSTOM_CONTENT_TYPES=['tests.SimpleModelWithManager'])
def test_field_creation(self):
builder2 = CustomFieldsBuilder('tests.CustomFieldsModel',
'tests.CustomValuesModel',
settings.CUSTOM_CONTENT_TYPES)
class TestCustomFieldsModel(builder2.create_fields()):
class Meta:
app_label = 'tests'
self.assertQuerysetEqual(ContentType.objects.filter(builder2.content_types_query),
ContentType.objects.filter(Q(app_label__in=['tests'],
model__in=['SimpleModelWithManager'])))
def test_mixin(self):
self.assertIn(self.cf, self.obj.get_custom_fields())
self.assertIn(self.cf, SimpleModelWithManager.get_model_custom_fields())
with self.assertRaises(ObjectDoesNotExist):
self.obj.get_custom_value(self.cf2)
val = CustomValuesModel.objects.create(custom_field=self.cf,
object_id=self.obj.pk,
value="123456")
val.save()
self.assertEqual("123456", self.obj.get_custom_value(self.cf).value)
self.obj.set_custom_value(self.cf, "abcdefg")
self.assertEqual("abcdefg", self.obj.get_custom_value(self.cf).value)
val.delete()
def test_field_model_clean(self):
cf = CustomFieldsUniqueModel.objects.create(content_type=self.simple_unique,
name='xxx',
label="Field not present anywhere",
data_type=CUSTOM_TYPE_TEXT)
cf.full_clean()
cf.save()
cf = CustomFieldsUniqueModel.objects.create(content_type=self.simple_unique,
name='xxx',
label="Field already in custom fields",
data_type=CUSTOM_TYPE_TEXT)
with self.assertRaises(ValidationError):
            cf.full_clean()
cf = CustomFieldsUniqueModel.objects.create(content_type=self.simple_unique,
name='name',
label="Field already present in model",
data_type=CUSTOM_TYPE_INTEGER)
with self.assertRaises(ValidationError):
cf.full_clean()
def test_value_model_clean(self):
val = CustomValuesModel.objects.create(custom_field=self.cf2,
|
QuLogic/burnman | burnman/data/input_raw_endmember_datasets/HHPH2013data_to_burnman.py | Python | gpl-2.0 | 3,130 | 0.01246 |
# BurnMan - a lower mantle toolkit
# Copyright (C) 2012-2014, Myhill, R., Heister, T., Unterborn, C., Rose, I. and Cottaar, S.
# Released under GPL v2 or later.
# This is a standalone program that converts a tabulated version of the Stixrude and Lithgow-Bertelloni data format into the standard burnman format (printed to stdout)
import sys
def read_dataset(datafile):
f=open(datafile,'r')
ds=[]
for line in f:
ds.append(line.decode('utf-8').split())
return ds
ds=read_dataset('HHPH2013_endmembers.dat')
print '# BurnMan - a lower mantle toolkit'
print '# Copyright (C) 2012, 2013, Heister, T., Unterborn, C., Rose, I. and Cottaar, S.'
print '# Released under GPL v2 or later.'
print ''
print '"""'
print 'HHPH_2013'
print 'Minerals from Holland et al 2013 and references therein'
print 'The values in this document are all in S.I. units,'
print 'unlike those in the original paper'
print 'File autogenerated using HHPHdata_to_burnman.py'
print '"""'
print ''
print 'from burnman.mineral import Mineral'
print 'from burnman.solidsolution import SolidSolution'
print 'from burnman.solutionmodel import *'
print 'from burnman.processchemistry import read_masses, dictionarize_formula, formula_mass'
print ''
print 'atomic_masses=read_masses()'
print ''
print '"""'
print 'ENDMEMBERS'
print '"""'
print ''
param_scales = [ -1., -1., #not numbers, so we won't scale
1.e3, 1.e3, #kJ -> J
1.0, # J/K/mol
1.e-5, # kJ/kbar/mol -> m^3/mol
                 1.e3, 1.e-2, 1.e3, 1.e3, # kJ -> J and table conversion for b
1.e-5, # table conversion
1.e8, # kbar -> Pa
1.0, # no scale for K'0
1.e-8] #GPa -> Pa # no scale for eta_s
formula='0'
for idx, m in enumerate(ds):
if idx == 0:
param_names=m
else:
        print 'class', m[0].lower(), '(Mineral):'
print ' def __init__(self):'
print ''.join([' formula=\'',m[1],'\''])
print ' formula = dictionarize_formula(formula)'
print ' self.params = {'
print ''.join([' \'name\': \'', m[0], '\','])
print ' \'formula\': formula,'
print ' \'equation_of_state\': \'hp_tmt\','
for pid, param in enumerate(m):
if pid > 1 and pid != 3 and pid<6:
print ' \''+param_names[pid]+'\':', float(param)*param_scales[pid], ','
print ' \'Cp\':', [round(float(m[i])*param_scales[i],10) for i in [6, 7, 8, 9]], ','
for pid, param in enumerate(m):
if pid > 9:
print ' \''+param_names[pid]+'\':', float(param)*param_scales[pid], ','
print ' \'n\': sum(formula.values()),'
print ' \'molar_mass\': formula_mass(formula, atomic_masses)}'
print ''
print ' self.uncertainties = {'
print ' \''+param_names[3]+'\':', float(m[3])*param_scales[3], '}'
print ' Mineral.__init__(self)'
print ''
|
tencrance/cool-config | python_tricks/call_str_repr.py | Python | mit | 510 | 0.018595 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2017/12/7 17:13
# @Author : Zhiwei Yang
# @File    : call_str_repr.py
class print_name(object):
def __init__(self,name):
self.name = name
class print_name_pro(object):
    def __init__(self, name):
self.name = name
def __str__(self):
return "%s" % self.name
if __name__ == '__main__':
    a = print_name("yang")
    print (a)  # this does not print nicely, so see class B (print_name_pro)
    b = print_name_pro("zhi")
print (b)
|
cidadania/e-cidadania | src/core/spaces/models.py | Python | apache-2.0 | 8,796 | 0.003183 |
# -*- coding: utf-8 -*-
#
# Copyright (c) 2013 Clione Software
# Copyright (c) 2010-2013 Cidadania S. Coop. Galega
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from datetime import datetime
from django.core.validators import RegexValidator
from django.db import models
from django.utils.translation import ugettext_lazy as _
from django.contrib.auth.models import User
from django.contrib.sites.models import Site
from core.spaces.file_validation import ContentTypeRestrictedFileField
from fields import StdImageField
from allowed_types import ALLOWED_CONTENT_TYPES
class Space(models.Model):
"""
    Spaces model. This model stores a "space" or "place" also known as a
participative process in reality. Every place has a minimum set of
settings for customization.
There are three main permission roles in every space: administrator
(admins), moderators (mods) and regular users (users).
"""
name = models.CharField(_('Name'), max_length=250, unique=True,
help_text=_('Max: 250 characters'))
url = models.CharField(_('URL'), max_length=100, unique=True,
validators=[RegexValidator(regex='^[a-z0-9_]+$',
message='Invalid characters in the space URL.')],
help_text=_('Valid characters are lowercase, digits and \
        underscore. This will be the accessible URL'))
description = models.TextField(_('Description'),
default=_('Write here your description.'))
pub_date = models.DateTimeField(_('Date of creation'), auto_now_add=True)
author = models.ForeignKey(User, blank=True, null=True,
verbose_name=_('Space creator'), help_text=_('Select a user that \
will be marked as creator of the space'))
logo = StdImageField(upload_to='spaces/logos', size=(100, 75, False),
help_text = _('Valid extensions are jpg, jpeg, png and gif'))
banner = StdImageField(upload_to='spaces/banners', size=(500, 75, False),
help_text = _('Valid extensions are jpg, jpeg, png and gif'))
public = models.BooleanField(_('Public space'), help_text=_("This will \
make the space visible to everyone, but registration will be \
necessary to participate."))
# Modules
mod_debate = models.BooleanField(_('Debate'))
mod_proposals = models.BooleanField(_('Proposals'))
mod_news = models.BooleanField(_('News'))
mod_cal = models.BooleanField(_('Calendar'))
mod_docs = models.BooleanField(_('Documents'))
mod_voting = models.BooleanField(_('Voting'))
class Meta:
ordering = ['name']
verbose_name = _('Space')
verbose_name_plural = _('Spaces')
get_latest_by = 'pub_date'
permissions = (
('view_space', 'Can view this space.'),
('admin_space', 'Can administrate this space.'),
('mod_space', 'Can moderate this space.')
)
def __unicode__(self):
return self.name
@models.permalink
def get_absolute_url(self):
return ('space-index', (), {
'space_url': self.url})
class Entity(models.Model):
"""
This model stores the name of the entities responsible for the creation
of the space or supporting it.
"""
name = models.CharField(_('Name'), max_length=100, unique=True)
website = models.CharField(_('Website'), max_length=100, null=True,
blank=True)
logo = models.ImageField(upload_to='spaces/logos', verbose_name=_('Logo'),
blank=True, null=True)
space = models.ForeignKey(Space, blank=True, null=True)
class Meta:
ordering = ['name']
verbose_name = _('Entity')
verbose_name_plural = _('Entities')
def __unicode__(self):
return self.name
class Document(models.Model):
"""
    This model stores documents for the space, like a document repository.
    There is no restriction on what a user can upload to the space.
:methods: get_file_ext, get_file_size
"""
title = models.CharField(_('Document title'), max_length=100,
help_text=_('Max: 100 characters'))
space = models.ForeignKey(Space, blank=True, null=True,
help_text=_('Change the space to whom belongs this document'))
docfile = ContentTypeRestrictedFileField(_('File'),
upload_to='spaces/documents/%Y/%m/%d',
content_types=ALLOWED_CONTENT_TYPES,
max_upload_size=26214400,
help_text=_('Permitted file types: DOC, DOCX, PPT, ODT, ODF, ODP, \
PDF, RST, TXT.'))
pub_date = models.DateTimeField(auto_now_add=True)
author = models.ForeignKey(User, verbose_name=_('Author'), blank=True,
null=True, help_text=_('Change the user that will figure as the \
author'))
def get_file_ext(self):
filename = self.docfile.name
extension = filename.split('.')
return extension[1].upper()
def get_file_size(self):
if self.docfile.size < 1023:
return str(self.docfile.size) + " Bytes"
elif self.docfile.size >= 1024 and self.docfile.size <= 1048575:
return str(round(self.docfile.size / 1024.0, 2)) + " KB"
elif self.docfile.size >= 1048576:
return str(round(self.docfile.size / 1024000.0, 2)) + " MB"
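    # Added worked example (hedged, not in the original): a 2,621,440-byte file
    # falls into the last branch, and round(2621440 / 1024000.0, 2) gives 2.56,
    # so get_file_size() returns "2.56 MB"; a 2,048-byte file returns "2.0 KB".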
class Meta:
ordering = ['pub_date']
verbose_name = _('Document')
verbose_name_plural = _('Documents')
get_latest_by = 'pub_date'
# There is no 'view-document' view, so I'll leave the get_absolute_url
    # method without permalink. Remember that the document files are accessed
# through the url() method in templates.
def get_absolute_url(self):
return '/spaces/%s/docs/%s' % (self.space.url, self.id)
class Event(models.Model):
"""
Meeting data model. Every space (process) has N meetings. This will
keep record of the assistants, meeting name, etc.
"""
title = models.CharField(_('Event name'), max_length=250,
help_text="Max: 250 characters")
space = models.ForeignKey(Space, blank=True, null=True)
user = models.ManyToManyField(User, verbose_name=_('Users'),
help_text=_('List of the users that will assist or assisted to the \
event.'))
pub_date = models.DateTimeField(auto_now_add=True)
event_author = models.ForeignKey(User, verbose_name=_('Created by'),
blank=True, null=True, related_name='meeting_author',
help_text=_('Select the user that will be designated as author.'))
event_date = models.DateTimeField(verbose_name=_('Event date'),
help_text=_('Select the date where the event is celebrated.'))
description = models.TextField(_('Description'), blank=True, null=True)
location = models.TextField(_('Location'), blank=True, null=True)
latitude = models.DecimalField(_('Latitude'), blank=True, null=True,
max_digits=17, decimal_places=15, help_text=_('Specify it in decimal'))
longitude = models.DecimalField(_('Longitude'), blank=True, null=True,
max_digits=17, decimal_places=15, help_text=_('Specify it in decimal'))
def is_due(self):
if self.event_date < datetime.now():
return True
else:
return False
class Meta:
ordering = ['event_date']
verbose_name = _('Event')
verbose_name_plural = _('Events')
get_latest_by = 'event_date'
permissions = (
('view_event', 'Can view this event'),
('admin_event', 'Can administrate this event'),
('mod_event', 'Can moderate this event'),
)
def __unicode__(self):
return self.title
@models.permalink
def get_absolute_url(self):
return ('view-event', (), {
|
yosshy/osclient2
|
osclient2/cinder/v2/volume_backup.py
|
Python
|
apache-2.0
| 4,420
| 0
|
# Copyright 2014-2017 by Akira Yoshiyama <akirayoshiyama@gmail.com>.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Resource class and its manager for volume backup on Block Storage V2 API
"""
from osclient2 import base
from osclient2 import mapper
from osclient2 import utils  # needed for utils.get_json_body below (import path assumed)
from . import volume_transfer
ATTRIBUTE_MAPPING = [
('id', 'id', mapper.Noop),
('name', 'display_name', mapper.Noop),
('description', 'description', mapper.Noop),
('availability_zone', 'availability_zone',
mapper.Resource('availability_zone')),
('source_volume', 'volume_id', mapper.Resource('cinder.volume')),
('size', 'size', mapper.Noop),
('object_count', 'object_count', mapper.Noop),
('container', 'container', mapper.Noop),
('created_at', 'created_at', mapper.DateTime),
('updated_at', 'updated_at', mapper.DateTime),
('status', 'status', mapper.Noop),
('fail_reason', 'fail_reason', mapper.Noop),
('has_dependent_backups', 'has_dependent_backups', mapper.Noop),
('is_incremental', 'incremental', mapper.Noop),
('is_incremental', 'is_incremental', mapper.Noop),
]
class Resource(base.Resource):
"""resource class for volume backups on Block Storage V2 API"""
_stable_state = ['available', 'error', 'error_deleting']
def restore(self, volume=None, transfer=None):
"""
Restore a volume from a volume backup
@keyword volume: Destination volume
@type volume: osclient2.cinder.v2.volume.Resource
@keyword transfer: Volume transfer
@type transfer: osclient2.cinder.v2.volume_transfer.Resource
@rtype: None
"""
transfer_name = None
if isinstance(transfer, volume_transfer.Resource):
transfer_name = transfer.name
self._http.post(self._url_resource_path, self._id, 'restore',
                        data=utils.get_json_body("restore",
                                                 volume=volume.get_id(),
                                                 name=transfer_name))
def delete(self, force=False):
"""
Delete a volume backup
@keyword force: Whether the deletion is forced
@type force: bool
@rtype: None
"""
if not force:
            super(Resource, self).delete()
            return
        self._http.post(self._url_resource_path, self._id, 'action',
data=utils.get_json_body("os-force_delete"))
class Manager(base.Manager):
"""manager class for volume backups on Block Storage V2 API"""
resource_class = Resource
service_type = 'volume'
_attr_mapping = ATTRIBUTE_MAPPING
_json_resource_key = 'backup'
_json_resources_key = 'backups'
_hidden_methods = ["update"]
_url_resource_list_path = '/backups/detail'
_url_resource_path = '/backups'
def create(self, name=None, description=None, source_volume=None,
container=None, is_incremental=False):
"""
Create a backup of a volume
        @keyword name: Backup name
@type name: str
@keyword description: Description
@type description: str
@keyword source_volume: Source volume
@type source_volume: osclient2.cinder.v2.volume.Resource
        @keyword container: Container in which to store the volume backup
        @type container: str
@keyword is_incremental: Whether the backup is incremental
@type is_incremental: bool
        @return: Created volume backup object
        @rtype: osclient2.cinder.v2.volume_backup.Resource
"""
return super(Manager, self).create(name=name,
description=description,
source_volume=source_volume,
container=container,
is_incremental=is_incremental)
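# Added usage sketch (hedged, not part of the original module): how the Manager
# and Resource above are typically combined. The `client` and `volume` objects
# below are assumed to exist already and are illustrative only.
#
#     backup = client.volume_backup.create(name='nightly',
#                                          description='nightly backup',
#                                          source_volume=volume,
#                                          container='backups',
#                                          is_incremental=True)
#     backup.restore(volume=volume)   # POST .../backups/<id>/restore
#     backup.delete(force=True)       # POST .../backups/<id>/action (os-force_delete)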
|
JaDogg/__py_playground
|
reference/peglet/examples/json.py
|
Python
|
mit
| 2,243
| 0.012929
|
"""
Example: parse JSON.
"""
from peglet import Parser, hug, join, attempt
literals = dict(true=True,
false=False,
null=None)
mk_literal = literals.get
mk_object = lambda *pairs: dict(pairs)
escape = lambda s: s.decode('unicode-escape')
mk_number = float
# Following http://www.json.org/
json_parse = Parser(r"""
start = _ value
object = { _ members } _ mk_object
| { _ } _ mk_object
members = pair , _ members
| pair
pair = string : _ value hug
array = \[ _ elements \] _ hug
| \[ _ \] _ hug
elements = value , _ elements
| value
value = string | number
| object | array
| (true|false|null)\b _ mk_literal
string = " chars " _ join
chars = char chars
|
char = ([^\x00-\x1f"\\])
| \\(["/\\])
| (\\[bfnrt]) escape
| (\\u) xd xd xd xd join escape
xd = ([0-9a-fA-F])
number = int frac exp _ join mk_number
| int frac _ join mk_number
| int exp _ join mk_number
| int _ join mk_number
int = (-?) (0) !\d
| (-?) ([1-9]\d*)
frac = ([.]\d+)
exp = ([eE][+-]?\d+)
_ = \s*
""", **globals())
# XXX The spec says "whitespace may be inserted between any pair of
# tokens, but leaves open just what's a token. So is the '-' in '-1' a
# token? Should I allow whitespace there?
## json_parse('[1,1]')
#. ((1.0, 1.0),)
## json_parse('true')
#. (True,)
## json_parse(r'"hey \b\n \u01ab o hai"')
#. (u'hey \x08\n \u01ab o hai',)
## json_parse('{"hey": true}')
#. ({'hey': True},)
## json_parse('[{"hey": true}]')
#. (({'hey': True},),)
## json_parse('[{"hey": true}, [-12.34]]')
#. (({'hey': True}, (-12.34,)),)
## json_parse('0')
#. (0.0,)
## json_parse('0.125e-2')
#. (0.00125,)
## attempt(json_parse, '0377')
## attempt(json_parse, '{"hi"]')
# Udacity CS212 problem 3.1:
## json_parse('["testing", 1, 2, 3]')
#. (('testing', 1.0, 2.0, 3.0),)
## json_parse('-123.456e+789')
#. (-inf,)
## json_parse('{"age": 21, "state":"CO","occupation":"rides the rodeo"}')
#. ({'age': 21.0, 'state': 'CO', 'occupation': 'rides the rodeo'},)
|
bbsan2k/nzbToMedia
|
core/transmissionrpc/torrent.py
|
Python
|
gpl-3.0
| 16,349
| 0.001529
|
# -*- coding: utf-8 -*-
# Copyright (c) 2008-2013 Erik Svensson <erik.public@gmail.com>
# Licensed under the MIT license.
import sys
import datetime
from core.transmissionrpc.constants import PRIORITY, RATIO_LIMIT, IDLE_LIMIT
from core.transmissionrpc.utils import Field, format_timedelta
from six import integer_types, string_types, text_type, iteritems
def get_status_old(code):
"""Get the torrent status using old status codes"""
mapping = {
(1 << 0): 'check pending',
(1 << 1): 'checking',
(1 << 2): 'downloading',
(1 << 3): 'seeding',
(1 << 4): 'stopped',
}
return mapping[code]
def get_status_new(code):
"""Get the torrent status using new status codes"""
mapping = {
0: 'stopped',
1: 'check pending',
2: 'checking',
3: 'download pending',
4: 'downloading',
        5: 'seed pending',
6: 'seeding',
}
return mapping[code]
class Torrent(object):
"""
Torrent is a class holding the data received from Transmission regarding a bittorrent transfer.
All fetched torrent fields are accessible through this class using attributes.
This class has a few convenience properties using the torrent data.
"""
def __init__(self, client, fields):
        if 'id' not in fields:
            raise ValueError('Torrent requires an id')
self._fields = {}
self._update_fields(fields)
self._incoming_pending = False
self._outgoing_pending = False
self._client = client
def _get_name_string(self, codec=None):
"""Get the name"""
if codec is None:
codec = sys.getdefaultencoding()
name = None
# try to find name
if 'name' in self._fields:
name = self._fields['name'].value
# if name is unicode, try to decode
if isinstance(name, text_type):
try:
name = name.encode(codec)
except UnicodeError:
name = None
return name
def __repr__(self):
tid = self._fields['id'].value
name = self._get_name_string()
if isinstance(name, str):
return '<Torrent {0:d} \"{1}\">'.format(tid, name)
else:
return '<Torrent {0:d}>'.format(tid)
def __str__(self):
name = self._get_name_string()
if isinstance(name, str):
return 'Torrent \"{0}\"'.format(name)
else:
return 'Torrent'
def __copy__(self):
return Torrent(self._client, self._fields)
def __getattr__(self, name):
try:
return self._fields[name].value
except KeyError:
raise AttributeError('No attribute {0}'.format(name))
def _rpc_version(self):
"""Get the Transmission RPC API version."""
if self._client:
return self._client.rpc_version
return 2
def _dirty_fields(self):
"""Enumerate changed fields"""
outgoing_keys = ['bandwidthPriority', 'downloadLimit', 'downloadLimited', 'peer_limit', 'queuePosition',
'seedIdleLimit', 'seedIdleMode', 'seedRatioLimit', 'seedRatioMode', 'uploadLimit',
'uploadLimited']
fields = []
for key in outgoing_keys:
if key in self._fields and self._fields[key].dirty:
fields.append(key)
return fields
def _push(self):
"""Push changed fields to the server"""
dirty = self._dirty_fields()
args = {}
for key in dirty:
args[key] = self._fields[key].value
self._fields[key] = self._fields[key]._replace(dirty=False)
if len(args) > 0:
self._client.change_torrent(self.id, **args)
def _update_fields(self, other):
"""
Update the torrent data from a Transmission JSON-RPC arguments dictionary
"""
if isinstance(other, dict):
for key, value in iteritems(other):
self._fields[key.replace('-', '_')] = Field(value, False)
elif isinstance(other, Torrent):
for key in list(other._fields.keys()):
self._fields[key] = Field(other._fields[key].value, False)
else:
raise ValueError('Cannot update with supplied data')
self._incoming_pending = False
def _status(self):
"""Get the torrent status"""
code = self._fields['status'].value
if self._rpc_version() >= 14:
return get_status_new(code)
else:
return get_status_old(code)
def files(self):
"""
Get list of files for this torrent.
This function returns a dictionary with file information for each file.
        The file information has the following fields:
::
{
<file id>: {
'name': <file name>,
'size': <file size in bytes>,
'completed': <bytes completed>,
'priority': <priority ('high'|'normal'|'low')>,
'selected': <selected for download>
}
...
}
"""
result = {}
if 'files' in self._fields:
files = self._fields['files'].value
indices = range(len(files))
priorities = self._fields['priorities'].value
wanted = self._fields['wanted'].value
for item in zip(indices, files, priorities, wanted):
selected = True if item[3] else False
priority = PRIORITY[item[2]]
result[item[0]] = {
'selected': selected,
'priority': priority,
'size': item[1]['length'],
'name': item[1]['name'],
'completed': item[1]['bytesCompleted']}
return result
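    # Added usage sketch (hedged, not in the original): iterating the files()
    # result; `torrent` stands for an already-fetched Torrent instance.
    #
    #     for file_id, info in torrent.files().items():
    #         pct = 100.0 * info['completed'] / info['size'] if info['size'] else 0.0
    #         print(file_id, info['name'], info['priority'], round(pct, 1))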
@property
def status(self):
"""
        Returns the torrent status. Is either one of 'check pending', 'checking',
        'download pending', 'downloading', 'seed pending', 'seeding' or 'stopped'.
        The first two are related to verification.
"""
return self._status()
@property
def progress(self):
"""Get the download progress in percent."""
try:
size = self._fields['sizeWhenDone'].value
left = self._fields['leftUntilDone'].value
return 100.0 * (size - left) / float(size)
except ZeroDivisionError:
return 0.0
@property
def ratio(self):
"""Get the upload/download ratio."""
return float(self._fields['uploadRatio'].value)
@property
def eta(self):
"""Get the "eta" as datetime.timedelta."""
eta = self._fields['eta'].value
if eta >= 0:
return datetime.timedelta(seconds=eta)
else:
raise ValueError('eta not valid')
@property
def date_active(self):
"""Get the attribute "activityDate" as datetime.datetime."""
return datetime.datetime.fromtimestamp(self._fields['activityDate'].value)
@property
def date_added(self):
"""Get the attribute "addedDate" as datetime.datetime."""
return datetime.datetime.fromtimestamp(self._fields['addedDate'].value)
@property
def date_started(self):
"""Get the attribute "startDate" as datetime.datetime."""
return datetime.datetime.fromtimestamp(self._fields['startDate'].value)
@property
def date_done(self):
"""Get the attribute "doneDate" as datetime.datetime."""
return datetime.datetime.fromtimestamp(self._fields['doneDate'].value)
def format_eta(self):
"""
Returns the attribute *eta* formatted as a string.
* If eta is -1 the result is 'not available'
* If eta is -2 the result is 'unknown'
* Otherwise eta is formatted as <days> <hours>:<minutes>:<seconds>.
"""
eta = self._fields['eta'].value
if eta == -1:
return 'not available'
elif eta == -2:
return 'unknown'
else:
return format_timedelta(self.eta)
def _get_download_li
|
charanpald/sandbox
|
sandbox/recommendation/MaxLocalAUC.py
|
Python
|
gpl-3.0
| 37,126
| 0.016134
|
import array
import itertools
import logging
import multiprocessing
import numpy
import scipy.sparse
import sharedmem
import sppy
import time
from sandbox.misc.RandomisedSVD import RandomisedSVD
from sandbox.recommendation.AbstractRecommender import AbstractRecommender
from sandbox.recommendation.IterativeSoftImpute import IterativeSoftImpute
from sandbox.recommendation.MaxAUCTanh import MaxAUCTanh
from sandbox.recommendation.MaxAUCHinge import MaxAUCHinge
from sandbox.recommendation.MaxAUCSquare import MaxAUCSquare
from sandbox.recommendation.MaxAUCLogistic import MaxAUCLogistic
from sandbox.recommendation.MaxAUCSigmoid import MaxAUCSigmoid
from sandbox.recommendation.RecommenderUtils import computeTestMRR, computeTestF1
from sandbox.recommendation.WeightedMf import WeightedMf
from sandbox.util.MCEvaluatorCython import MCEvaluatorCython
from sandbox.util.MCEvaluator import MCEvaluator
from sandbox.util.Sampling import Sampling
from sandbox.util.SparseUtilsCython import SparseUtilsCython
from sandbox.util.SparseUtils import SparseUtils
from sklearn.grid_search import ParameterGrid
def computeObjective(args):
"""
Compute the objective for a particular parameter set. Used to set a learning rate.
"""
X, testX, maxLocalAuc = args
U, V, trainMeasures, testMeasures, iterations, totalTime = maxLocalAuc.singleLearnModel(X, verbose=True)
obj = trainMeasures[-1, 0]
logging.debug("Final objective: " + str(obj) + " with t0=" + str(maxLocalAuc.t0) + " and alpha=" + str(maxLocalAuc.alpha))
return obj
def updateUVBlock(sharedArgs, methodArgs):
"""
Compute the objective for a particular parameter set. Used to set a learning rate.
"""
rowIsFree, colIsFree, iterationsPerBlock, gradientsPerBlock, U, V, muU, muV, lock = sharedArgs
learner, rowBlockSize, colBlockSize, indPtr, colInds, permutedRowInds, permutedColInds, gi, gp, gq, normGp, normGq, pid, loopInd, omegasList = methodArgs
while (iterationsPerBlock < learner.parallelStep).any():
#Find free block
lock.acquire()
inds = numpy.argsort(numpy.ravel(iterationsPerBlock))
foundBlock = False
#Find the block with smallest number of updates which is free
for i in inds:
rowInd, colInd = numpy.unravel_index(i, iterationsPerBlock.shape)
if rowIsFree[rowInd] and colIsFree[colInd]:
rowIsFree[rowInd] = False
colIsFree[colInd] = False
foundBlock = True
break
blockRowInds = permutedRowInds[rowInd*rowBlockSize:(rowInd+1)*rowBlockSize]
blockColInds = permutedColInds[colInd*colBlockSize:(colInd+1)*colBlockSize]
ind = iterationsPerBlock[rowInd, colInd] + loopInd
sigmaU = learner.getSigma(ind, learner.alpha, muU.shape[0])
sigmaV = learner.getSigma(ind, learner.alpha, muU.shape[0])
lock.release()
#Now update U and V based on the block
if foundBlock:
ind = iterationsPerBlock[rowInd, colInd] + loopInd
sigmaU = learner.getSigma(ind, learner.alpha, muU.shape[0])
sigmaV = learner.getSigma(ind, learner.alpha, muU.shape[0])
numIterations = gradientsPerBlock[rowInd, colInd]
indPtr2, colInds2 = omegasList[colInd]
learner.updateUV(indPtr2, colInds2, U, V, muU, muV, blockRowInds, blockColInds, gp, gq, normGp, normGq, ind, sigmaU, sigmaV, numIterations)
else:
time.sleep(3)
lock.acquire()
if foundBlock:
rowIsFree[rowInd] = True
colIsFree[colInd] = True
iterationsPerBlock[rowInd, colInd] += 1
lock.release()
def restrictOmega(indPtr, colInds, colIndsSubset):
"""
Take a set of nonzero indices for a matrix and restrict the columns to colIndsSubset.
"""
m = indPtr.shape[0]-1
newIndPtr = numpy.zeros(indPtr.shape[0], indPtr.dtype)
newColInds = array.array("I")
colIndsSubset = numpy.array(colIndsSubset, numpy.int)
ptr = 0
for i in range(m):
omegai = numpy.array(colInds[indPtr[i]:indPtr[i+1]], numpy.int)
#newOmegai = numpy.intersect1d(omegai, colIndsSubset, assume_unique=True)
#This way is 60% faster
total = numpy.concatenate((omegai, colIndsSubset))
counts = numpy.bincount(total)
newOmegai = numpy.where(counts>1)[0]
newIndPtr[i] = ptr
newIndPtr[i+1] = ptr + newOmegai.shape[0]
newColInds.extend(newOmegai)
ptr += newOmegai.shape[0]
newColInds = numpy.array(newColInds, dtype=colInds.dtype)
return newIndPtr, newColInds
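# Added worked example (hedged, not in the original): with indPtr=[0, 3],
# colInds=[1, 4, 7] and colIndsSubset=[4, 7, 9], the bincount trick keeps only
# the columns present in both lists (assuming each list is duplicate-free), so
# restrictOmega returns newIndPtr=[0, 2] and newColInds=[4, 7].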
class MaxLocalAUC(AbstractRecommender):
def __init__(self, k, w=0.9, alpha=0.05, eps=10**-6, lmbdaU=0.1, lmbdaV=0.1, maxIterations=50, stochastic=False, numProcesses=None):
"""
Create an object for maximising the local AUC with a penalty term using the matrix
decomposition UV.T
:param k: The rank of matrices U and V
:param w: The quantile for the local AUC - e.g. 1 means takes the largest value, 0.7 means take the top 0.3
:param alpha: The (initial) learning rate
:param eps: The termination threshold for ||dU|| and ||dV||
:param lmbda: The regularistion penalty for V
:stochastic: Whether to use stochastic gradient descent or gradient descent
"""
super(MaxLocalAUC, self).__init__(numProcesses)
self.alpha = alpha #Initial learning rate
self.beta = 0.75
self.bound = False
self.delta = 0.05
self.eps = eps
self.eta = 5
self.folds = 2
self.initialAlg = "rand"
self.itemExpP = 0.0 #Sample from power law between 0 and 1
self.itemExpQ = 0.0
self.k = k
self.lmbda = (lmbdaU+lmbdaV)/2
self.maxIterations = maxIterations
self.maxNormU = 100
self.maxNormV = 100
self.maxNorms = 2.0**numpy.arange(0, 8)
self.metric = "f1"
self.normalise = True
self.numAucSamples = 10
self.numRecordAucSamples = 100
self.numRowSamples = 30
self.numRuns = 200
self.loss = "hinge"
self.p = 10
self.parallelSGD = False
self.parallelStep = 1 #Number of iterations for each parallel updateUV (smaller gives better convergence)
self.printStep = 10000
self.q = 3
self.rate = "constant"
self.recordStep = 10
self.reg = True
self.rho = 1.0
self.scaleAlpha = True
self.startAverage = 30
self.stochastic = stochastic
self.t0 = 0.1 #Convergence speed - larger means we get to 0 faster
self.validationUsers = 0.1
self.w = w
#Model selection parameters
self.ks = 2**numpy.arange(3, 8)
self.lmbdas = 2.0**-numpy.arange(1, 7)
self.rhos = numpy.array([0, 0.1, 0.5, 1.0])
self.itemExps = numpy.array([0, 0.25, 0.5, 0.75, 1.0])
#Learning rate selection
self.alphas = 2.0**-numpy.arange(1, 7)
self.t0s = 2.0**-numpy.arange(0.0, 5.0)
def __str__(self):
outputStr = "MaxLocalAUC: "
attributes = vars(self)
for key, item in sorted(attributes.iteritems()):
if isinstance(item, int) or isinstance(item, float) or isinstance(item, str) or isinstance(item, bool):
outputStr += key + "=" + str(item) + " "
return outputStr
def computeBound(self, X, U, V, trainExp, delta):
"""
Compute a lower bound on the expectation of the loss based on Rademacher
theory.
"""
m, n = X.shape
Ru = numpy.linalg.norm(U)
Rv = numpy.linalg.norm(V)
X = X.toarray()
Xs = X.sum(1)
E = (X.T / Xs).T
EBar = numpy.ones(X.shape) - X
EBar = (EBar.T / (EBar.su
|
KAMI911/lactransformer
|
lactransformer/lasdiff.py
|
Python
|
mpl-2.0
| 6,565
| 0.003656
|
try:
import traceback
import argparse
import textwrap
import glob
import os
import logging
import datetime
import multiprocessing
from libs import LasPyConverter
except ImportError as err:
print('Error {0} import module: {1}'.format(__name__, err))
traceback.print_exc()
exit(128)
script_path = __file__
header = textwrap.dedent('''LAS Diff''')
class LasPyParameters:
def __init__(self):
# predefinied paths
self.parser = argparse.ArgumentParser(prog="lasdiff",
formatter_class=argparse.RawDescriptionHelpFormatter,
description='',
epilog=textwrap.dedent('''
example:
'''))
# reguired parameters
self.parser.add_argument('-i', type=str, dest='input', required=True,
help='required: input file or folder')
self.parser.add_argument('-o', type=str, dest='output', required=True,
help='required: output file or folder (d:\lasfiles\\tests\\results)')
# optional parameters
self.parser.add_argument('-input_format', type=str, dest='input_format', required=False, choices=['las', 'laz'],
help='optional: input format (default=las, laz is not implemented (yet))')
self.parser.add_argument('-cores', type=int, dest='cores', required=False, default=1,
help='optional: cores (default=1)')
self.parser.add_argument('-v', dest='verbose', required=False,
help='optional: verbose toggle (-v=on, nothing=off)', action='store_true')
self.parser.add_argument('-version', action='version', version=self.parser.prog)
def parse(self):
self.args = self.parser.parse_args()
##defaults
if self.args.verbose:
self.args.verbose = ' -v'
else:
self.args.verbose = ''
if self.args.input_format == None:
self.args.input_format = 'las'
if self.args.cores == None:
self.args.cores = 1
# ---------PUBLIC METHODS--------------------
def get_output(self):
return self.args.output
def get_input(self):
return self.args.input
def get_input_format(self):
return self.args.input_format
def get_verbose(self):
return self.args.verbose
def get_cores(self):
return self.args.cores
def DiffLas(parameters):
# Parse incoming parameters
source_file = parameters[0]
destination_file = parameters[1]
# Get name for this process
    current = multiprocessing.current_process()
proc_name = current.name
logging.info('[%s] Starting ...' % (proc_name))
logging.info(
'[%s] Creating diff of %s LAS PointCloud file and %s LAS PointCloud file ...' % (
proc_name, source_file, destination_file))
# Opening source LAS files for read and write
lasFiles = LasPyConverter.LasPyCompare(source_file, destination_file)
# Opening destination LAS file
logging.info('[%s] Opening %s LAS PointCloud file and %s LAS PointCloud file ...' % (
proc_name, source_file, destination_file))
lasFiles.OpenReanOnly()
logging.info('[%s] Comparing %s LAS PointCloud file and %s LAS PointCloud file ...' % (
proc_name, source_file, destination_file))
lasFiles.ComparePointCloud()
logging.info('[%s] Closing %s LAS PointCloud.' % (proc_name, destination_file))
lasFiles.Close()
logging.info('[%s] %s LAS PointCloud has closed.' % (proc_name, destination_file))
return 0
def SetLogging(logfilename):
logging.basicConfig(
filename=logfilename,
filemode='w',
format='%(asctime)s %(name)s %(levelname)s %(message)s', datefmt='%d-%m-%Y %H:%M:%S',
level=logging.DEBUG)
# define a Handler which writes INFO messages or higher to the sys.stderr
console = logging.StreamHandler()
console.setLevel(logging.INFO)
# set a format which is simpler for console use
formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s', datefmt='%d-%m-%Y %H:%M:%S')
# tell the handler to use this format
console.setFormatter(formatter)
# add the handler to the root logger
logging.getLogger('').addHandler(console)
def main():
logfilename = 'lasdiff_' + datetime.datetime.today().strftime('%Y%m%d_%H%M%S') + '.log'
SetLogging(logfilename)
logging.info(header)
lasconverterworkflow = LasPyParameters()
lasconverterworkflow.parse()
# File/Directory handler
inputfiles = lasconverterworkflow.get_input()
inputformat = lasconverterworkflow.get_input_format()
outputfiles = lasconverterworkflow.get_output()
outputpath = os.path.normpath(outputfiles)
cores = lasconverterworkflow.get_cores()
inputisdir = False
doing = []
if os.path.isdir(inputfiles):
inputisdir = True
inputfiles = glob.glob(os.path.join(inputfiles, '*' + inputformat))
if not os.path.exists(outputfiles):
os.makedirs(outputfiles)
for workfile in inputfiles:
if os.path.isfile(workfile) and os.path.isfile(os.path.join(outputpath, os.path.basename(workfile))):
logging.info('Adding %s to the queue.' % (workfile))
                doing.append([workfile,
                              os.path.join(outputpath, os.path.basename(workfile))])
            else:
logging.info('The %s is not file, or pair of comparable files. Skipping.' % (workfile))
elif os.path.isfile(inputfiles):
inputisdir = False
workfile = inputfiles
        if os.path.basename(outputfiles) != "":
doing.append([workfile, outputfiles])
else:
doing.append([workfile, os.path.join(outputpath, os.path.basename(workfile))])
logging.info('Adding %s to the queue.' % (workfile))
else:
# Not a file, not a dir
logging.error('Cannot found input LAS PointCloud file: %s' % (inputfiles))
exit(1)
# If we got one file, start only one process
if inputisdir is False:
cores = 1
if cores != 1:
pool = multiprocessing.Pool(processes=cores)
results = pool.map_async(DiffLas, doing)
pool.close()
pool.join()
else:
for d in doing:
DiffLas(d)
logging.info('Finished, exiting and go home ...')
if __name__ == '__main__':
main()
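# Added usage sketch (hedged, not part of the original script); the paths below
# are illustrative only, the flags are those defined by the argparse setup above:
#
#     python lasdiff.py -i d:\lasfiles\tests -o d:\lasfiles\tests\results -cores 4 -v
#     python lasdiff.py -i single_file.las -o results\single_file.las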
|
faneshion/MatchZoo
|
matchzoo/layers/matching_layer.py
|
Python
|
apache-2.0
| 5,753
| 0
|
"""An implementation of Matching Layer."""
import typing
import tensorflow as tf
from keras.engine import Layer
class MatchingLayer(Layer):
"""
Layer that computes a matching matrix between samples in two tensors.
:param normalize: Whether to L2-normalize samples along the
dot product axis before taking the dot product.
If set to True, then the output of the dot product
is the cosine proximity between the two samples.
:param matching_type: the similarity function for matching
:param kwargs: Standard layer keyword arguments.
Examples:
>>> import matchzoo as mz
>>> layer = mz.layers.MatchingLayer(matching_type='dot',
... normalize=True)
>>> num_batch, left_len, right_len, num_dim = 5, 3, 2, 10
>>> layer.build([[num_batch, left_len, num_dim],
... [num_batch, right_len, num_dim]])
"""
def __init__(self, normalize: bool = False,
matching_type: str = 'dot', **kwargs):
""":class:`MatchingLayer` constructor."""
super().__init__(**kwargs)
self._normalize = normalize
self._validate_matching_type(matching_type)
self._matching_type = matching_type
self._shape1 = None
self._shape2 = None
@classmethod
def _validate_matching_type(cls, matching_type: str = 'dot'):
valid_matching_type = ['dot', 'mul', 'plus', 'minus', 'concat']
if matching_type not in valid_matching_type:
raise ValueError(f"{matching_type} is not a valid matching type, "
f"{valid_matching_type} expected.")
def build(self, input_shape: list):
"""
Build the layer.
:param input_shape: the shapes of the input tensors,
            for MatchingLayer we need two input tensors.
"""
# Used purely for shape validation.
if not isinstance(input_shape, list) or len(input_shape) != 2:
raise ValueError('A `MatchingLayer` layer should be called '
'on a list of 2 inputs.')
self._shape1 = input_shape[0]
self._shape2 = input_shape[1]
for idx in 0, 2:
if self._shape1[idx] != self._shape2[idx]:
raise ValueError(
'Incompatible dimensions: '
f'{self._shape1[idx]} != {self._shape2[idx]}.'
f'Layer shapes: {self._shape1}, {self._shape2}.'
)
def call(self, inputs: list, **kwargs) -> typing.Any:
"""
The computation logic of MatchingLayer.
:param inputs: two input tensors.
"""
x1 = inputs[0]
x2 = inputs[1]
if self._matching_type == 'dot':
if self._normalize:
x1 = tf.math.l2_normalize(x1, axis=2)
x2 = tf.math.l2_normalize(x2, axis=2)
return tf.expand_dims(tf.einsum('abd,acd->abc', x1, x2), 3)
else:
if self._matching_type == 'mul':
def func(x, y):
return x * y
elif self._matching_type == 'plus':
def func(x, y):
return x + y
elif self._matching_type == 'minus':
def func(x, y):
return x - y
elif self._matching_type == 'concat':
def func(x, y):
return tf.concat([x, y], axis=3)
else:
raise ValueError(f"Invalid matching type."
f"{self._matching_type} received."
f"Mut be in `dot`, `mul`, `plus`, "
f"`minus` and `concat`.")
x1_exp = tf.stack([x1] * self._shape2[1], 2)
x2_exp = tf.stack([x2] * self._shape1[1], 1)
return func(x1_exp, x2_exp)
def compute_output_shape(self, input_shape: list) -> tuple:
"""
Calculate the layer output shape.
:param input_shape: the shapes of the input tensors,
            for MatchingLayer we need two input tensors.
"""
if not isinstance(input_shape, list) or len(input_shape) != 2:
raise ValueError('A `MatchingLayer` layer should be called '
'on a list of 2 inputs.')
shape1 = list(input_shape[0])
shape2 = list(input_shape[1])
if len(shape1) != 3 or len(shape2) != 3:
raise ValueError('A `MatchingLayer` layer should be called '
'on 2 inputs with 3 dimensions.')
if shape1[0] != shape2[0] or shape1[2] != shape2[2]:
            raise ValueError('A `MatchingLayer` layer should be called '
                             'on 2 inputs with same 0,2 dimensions.')
if self._matching_type in ['mul', 'plus', 'minus']:
return shape1[0], shape1[1], shape2[1], shape1[2]
elif self._matching_type == 'dot':
return shape1[0], shape1[1], shape2[1], 1
        elif self._matching_type == 'concat':
            return shape1[0], shape1[1], shape2[1], shape1[2] + shape2[2]
else:
raise ValueError(f"Invalid `matching_type`."
f"{self._matching_type} received."
f"Must be in `mul`, `plus`, `minus` "
f"`dot` and `concat`.")
def get_config(self) -> dict:
"""Get the config dict of MatchingLayer."""
config = {
'normalize': self._normalize,
'matching_type': self._matching_type,
}
base_config = super(MatchingLayer, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
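# Added note (hedged, not in the original): with the shapes from the class
# docstring (num_batch=5, left_len=3, right_len=2, num_dim=10),
# compute_output_shape returns (5, 3, 2, 1) for 'dot', (5, 3, 2, 10) for
# 'mul'/'plus'/'minus', and (5, 3, 2, 20) for 'concat'.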
|
AZQ1994/s4compiler
|
sr_mult.py
|
Python
|
gpl-3.0
| 5,281
| 0.065897
|
from list_node import ListNode
from instruction import Instruction, Subneg4Instruction
def sr_mult(WM, LM):
namespace_bak = WM.getNamespace(string=False)
WM.setNamespace(["sr","mult"])
c_0 = WM.const(0)
c_1 = WM.const(1)
c_m1 = WM.const(-1)
c_32 = WM.const(32)
a = WM.addDataWord(0, "arg1")
b = WM.addDataWord(0, "arg2")
ret_addr = WM.addDataPtrWord(0, "ret_addr")
temp = WM.addDataWord(0, "temp")
count = WM.addDataWord(0, "count")
hi = WM.addDataWord(0, "hi")
lo = WM.addDataWord(0, "lo")
sign = WM.addDataWord(0, "sign")
NEXT = WM.getNext()
#HALT = WM.getHalt()
LNs = (
LM.new(ListNode("sr_mult", sys = True)),
LM.new(ListNode(Subneg4Instruction(
c_1.getPtr(),
a.getPtr(),
temp.getPtr(),
WM.label("sr_mult_L010")
)), "sr_mult_start"),
LM.new(ListNode(Subneg4Instruction(
c_0.getPtr(),
c_0.getPtr(),
sign.getPtr(),
NEXT
))),
LM.new(ListNode(Subneg4Instruction(
c_0.getPtr(),
c_m1.getPtr(),
temp.getPtr(),
WM.label("sr_mult_L020")
))),
LM.new(ListNode(Subneg4Instruction(
a.getPtr(),
c_0.getPtr(),
a.getPtr(),
NEXT
)),"sr_mult_L010"),
LM.new(ListNode(Subneg4Instruction(
c_0.getPtr(),
c_1.getPtr(),
sign.getPtr(),
NEXT
))),
LM.new(ListNode(Subneg4Instruction(
c_1.getPtr(),
b.getPtr(),
temp.getPtr(),
WM.label("sr_mult_L030")
)),"sr_mult_L020"),
LM.new(ListNode(Subneg4Instruction(
c_32.getPtr(),
c_0.getPtr(),
count.getPtr(),
WM.label("sr_mult_L050")
))),
LM.new(ListNode(Subneg4Instruction(
b.getPtr(),
c_0.getPtr(),
b.getPtr(),
NEXT
)),"sr_mult_L030"),
LM.new(ListNode(Subneg4Instruction(
sign.getPtr(),
c_m1.getPtr(),
sign.getPtr(),
NEXT
))),
LM.new(ListNode(Subneg4Instruction(
c_32.getPtr(),
c_0.getPtr(),
count.getPtr(),
NEXT
)),"sr_mult_L040"),
LM.new(ListNode(Subneg4Instruction(
c_0.getPtr(),
c_0.getPtr(),
hi.getPtr(),
NEXT
)),"sr_mult_L050"),
LM.new(ListNode(Subneg4Instruction(
c_0.getPtr(),
c_0.getPtr(),
lo.getPtr(),
NEXT
))),
LM.new(ListNode(Subneg4Instruction(
hi.getPtr(),
c_0.getPtr(),
temp.getPtr(),
NEXT
)),"sr_mult_L100"),
LM.new(ListNode(Subneg4Instruction(
temp.getPtr(),
hi.getPtr(),
hi.getPtr(),
NEXT
))),
LM.new(ListNode(Subneg4Instruction(
lo.getPtr(),
c_m1.getPtr(),
temp.getPtr(),
WM.label("sr_mult_L110")
))),
LM.new(ListNode(Subneg4Instruction(
c_m1.getPtr(),
hi.getPtr(),
hi.getPtr(),
NEXT
))),
LM.new(ListNode(Subneg4Instruction(
lo.getPtr(),
c_0.getPtr(),
temp.getPtr(),
NEXT
)),"sr_mult_L110"),
LM.new(ListNode(Subneg4Instruction(
temp.getPtr(),
lo.getPtr(),
lo.getPtr(),
NEXT
))),
LM.new(ListNode(Subneg4Instruction(
a.getPtr(),
c_m1.getPtr(),
temp.getPtr(),
WM.label("sr_mult_L800")
))),
LM.new(ListNode(Subneg4Instruction(
b.getPtr(),
c_0.getPtr(),
temp.getPtr(),
NEXT
)),"sr_mult_L200"),
LM.new(ListNode(Subneg4Instruction(
temp.getPtr(),
lo.getPtr(),
lo.getPtr(),
WM.label("sr_mult_L300")
))),
LM.new(ListNode(Subneg4Instruction(
c_0.getPtr(),
b.getPtr(),
temp.getPtr(),
WM.label("sr_mult_L500")
))),
LM.new(ListNode(Subneg4Instruction(
b.getPtr(),
lo.getPtr(),
temp.getPtr(),
WM.label("sr_mult_L500")
))),
LM.new(ListNode(Subneg4Instruction(
c_0.getPtr(),
c_m1.getPtr(),
temp.getPtr(),
WM.label("sr_mult_L800")
))),
LM.new(ListNode(Subneg4Instruction(
b.getPtr(),
c_m1.getPtr(),
temp.getPtr(),
WM.label("sr_mult_L800")
)),"sr_mult_L300"),
LM.new(ListNode(Subneg4Instruction(
b.getPtr(),
lo.getPtr(),
temp.getPtr(),
WM.label("sr_mult_L800")
))),
LM.new(ListNode(Subneg4Instruction(
c_m1.getPtr(),
hi.getPtr(),
hi.getPtr(),
NEXT
)),"sr_mult_L500"),
LM.new(ListNode(Subneg4Instruction(
a.getPtr(),
c_0.getPtr(),
temp.getPtr(),
NEXT
)),"sr_mult_L800"),
LM.new(ListNode(Subneg4Instruction(
temp.getPtr(),
a.getPtr(),
a.getPtr(),
NEXT
))),
LM.new(ListNode(Subneg4Instruction(
c_m1.getPtr(),
count.getPtr(),
count.getPtr(),
WM.label("sr_mult_L100")
))),
LM.new(ListNode(Subneg4Instruction(
sign.getPtr(),
c_m1.getPtr(),
temp.getPtr(),
WM.label("sr_mult_L990")
)),"sr_mult_L900"),
LM.new(ListNode(Subneg4Instruction(
lo.getPtr(),
c_0.getPtr(),
lo.getPtr(),
NEXT
))),
LM.new(ListNode(Subneg4Instruction(
hi.getPtr(),
c_0.getPtr(),
hi.getPtr(),
NEXT
))),
LM.new(ListNode(Subneg4Instruction(
c_0.getPtr(),
hi.getPtr(),
temp.getPtr(),
NEXT
)),"sr_mult_L990"),
LM.new(ListNode(Subneg4Instruction(
c_0.getPtr(),
lo.getPtr(),
temp.getPtr(),
NEXT
))),
        LM.new(ListNode(Subneg4Instruction(
c_0.getPtr(),
c_m1.getPtr(),
temp.getPtr(),
ret_addr
)))
)
WM.setNamespace(namespace_bak)
return LNs, a, b, ret_addr, lo
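# Added reference sketch (hedged): a plain-Python 32-step shift-and-add multiply
# for comparison with the subneg4 routine above. This is NOT a verified
# transcription of sr_mult's exact semantics; it only illustrates the general
# hi:lo accumulator technique that a 32-iteration software multiply uses.
def _shift_add_mult32(a, b):
    """Return the (hi, lo) words of a signed 32x32 -> 64 bit multiply."""
    negative = (a < 0) != (b < 0)
    a, b = abs(a) & 0xFFFFFFFF, abs(b) & 0xFFFFFFFF
    hi = lo = 0
    for _ in range(32):
        # Shift the 64-bit accumulator hi:lo left by one bit.
        hi = ((hi << 1) | (lo >> 31)) & 0xFFFFFFFF
        lo = (lo << 1) & 0xFFFFFFFF
        # If the current top bit of a is set, add b into the accumulator.
        if a & 0x80000000:
            lo += b
            hi = (hi + (lo >> 32)) & 0xFFFFFFFF
            lo &= 0xFFFFFFFF
        a = (a << 1) & 0xFFFFFFFF
    if negative:
        full = (-((hi << 32) | lo)) & 0xFFFFFFFFFFFFFFFF
        hi, lo = full >> 32, full & 0xFFFFFFFF
    return hi, lo
# Example: _shift_add_mult32(3, 5) == (0, 15); _shift_add_mult32(-3, 5) gives the
# 64-bit two's-complement encoding of -15.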
|
openstack/horizon
|
openstack_dashboard/dashboards/project/networks/ports/urls.py
|
Python
|
apache-2.0
| 1,111
| 0
|
# Copyright 2012 NEC Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.conf.urls import url
from openstack_dashboard.dashboards.project.networks.ports import views
from openstack_dashboard.dashboards.project.networks.ports.extensions. \
allowed_address_pairs import views as addr_pairs_views
PORTS = r'^(?P<port_id>[^/]+)/%s$'
urlpatterns = [
url(PORTS % 'detail', views.DetailView.as_view(), name='detail'),
url(PORTS % 'addallowedaddresspairs',
addr_pairs_views.AddAllowedAddressPair.as_view(),
name='addallowedaddresspairs')
]
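# Added note (hedged, not in the original): with PORTS as defined above, these
# two patterns match URLs of the form "<port_id>/detail" and
# "<port_id>/addallowedaddresspairs" relative to the ports URL prefix, passing
# the captured port_id to the respective view.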
|
Azure/azure-sdk-for-python
|
sdk/network/azure-mgmt-network/azure/mgmt/network/v2019_04_01/models/_models.py
|
Python
|
mit
| 674,347
| 0.003676
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from azure.core.exceptions import HttpResponseError
import msrest.serialization
class AddressSpace(msrest.serialization.Model):
"""AddressSpace contains an array of IP address ranges that can be used by subnets of the virtual network.
:param address_prefixes: A list of address blocks reserved for this virtual network in CIDR
notation.
:type address_prefixes: list[str]
"""
_attribute_map = {
'address_prefixes': {'key': 'addressPrefixes', 'type': '[str]'},
}
def __init__(
self,
**kwargs
):
super(AddressSpace, self).__init__(**kwargs)
self.address_prefixes = kwargs.get('address_prefixes', None)
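# Added usage sketch (hedged, not part of the generated code):
#
#     space = AddressSpace(address_prefixes=['10.0.0.0/16', '10.1.0.0/24'])
#     space.address_prefixes   # -> ['10.0.0.0/16', '10.1.0.0/24']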
class Resource(msrest.serialization.Model):
"""Common resource representation.
Variables are only populated by the server, and will be ignored when sending a request.
:param id: Resource ID.
:type id: str
:ivar name: Resource name.
:vartype name: str
:ivar type: Resource type.
:vartype type: str
:param location: Resource location.
:type location: str
:param tags: A set of tags. Resource tags.
:type tags: dict[str, str]
"""
_validation = {
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
}
def __init__(
self,
**kwargs
):
super(Resource, self).__init__(**kwargs)
self.id = kwargs.get('id', None)
self.name = None
self.type = None
self.location = kwargs.get('location', None)
self.tags = kwargs.get('tags', None)
class ApplicationGateway(Resource):
"""Application gateway resource.
Variables are only populated by the server, and will be ignored when sending a request.
:param id: Resource ID.
:type id: str
:ivar name: Resource name.
:vartype name: str
:ivar type: Resource type.
:vartype type: str
:param location: Resource location.
:type location: str
:param tags: A set of tags. Resource tags.
:type tags: dict[str, str]
:param etag: A unique read-only string that changes whenever the resource is updated.
:type etag: str
:param zones: A list of availability zones denoting where the resource needs to come from.
:type zones: list[str]
:param identity: The identity of the application gateway, if configured.
:type identity: ~azure.mgmt.network.v2019_04_01.models.ManagedServiceIdentity
:param sku: SKU of the application gateway resource.
:type sku: ~azure.mgmt.network.v2019_04_01.models.ApplicationGatewaySku
:param ssl_policy: SSL policy of the application gateway resource.
:type ssl_policy: ~azure.mgmt.network.v2019_04_01.models.ApplicationGatewaySslPolicy
:ivar operational_state: Operational state of the application gateway resource. Possible values
include: "Stopped", "Starting", "Running", "Stopping".
:vartype operational_state: str or
~azure.mgmt.network.v2019_04_01.models.ApplicationGatewayOperationalState
:param gateway_ip_configurations: Subnets of the application gateway resource. For default
limits, see `Application Gateway limits
<https://docs.microsoft.com/azure/azure-subscription-service-limits#application-gateway-limits>`_.
:type gateway_ip_configurations:
list[~azure.mgmt.network.v2019_04_01.models.ApplicationGatewayIPConfiguration]
:param authentication_certificates: Authentication certificates of the application gateway
resource. For default limits, see `Application Gateway limits
<https://docs.microsoft.com/azure/azure-subscription-service-limits#application-gateway-limits>`_.
:type authentication_certificates:
    list[~azure.mgmt.network.v2019_04_01.models.ApplicationGatewayAuthenticationCertificate]
:param trusted_root_certificates: Trusted Root certificates of the application gateway
resource. For default limits, see `Application Gateway limits
    <https://docs.microsoft.com/azure/azure-subscription-service-limits#application-gateway-limits>`_.
:type trusted_root_certificates:
list[~azure.mgmt.network.v2019_04_01.models.ApplicationGatewayTrustedRootCertificate]
:param ssl_certificates: SSL certificates of the application gateway resource. For default
limits, see `Application Gateway limits
<https://docs.microsoft.com/azure/azure-subscription-service-limits#application-gateway-limits>`_.
:type ssl_certificates:
list[~azure.mgmt.network.v2019_04_01.models.ApplicationGatewaySslCertificate]
:param frontend_ip_configurations: Frontend IP addresses of the application gateway resource.
For default limits, see `Application Gateway limits
<https://docs.microsoft.com/azure/azure-subscription-service-limits#application-gateway-limits>`_.
:type frontend_ip_configurations:
list[~azure.mgmt.network.v2019_04_01.models.ApplicationGatewayFrontendIPConfiguration]
:param frontend_ports: Frontend ports of the application gateway resource. For default limits,
see `Application Gateway limits
<https://docs.microsoft.com/azure/azure-subscription-service-limits#application-gateway-limits>`_.
:type frontend_ports:
list[~azure.mgmt.network.v2019_04_01.models.ApplicationGatewayFrontendPort]
:param probes: Probes of the application gateway resource.
:type probes: list[~azure.mgmt.network.v2019_04_01.models.ApplicationGatewayProbe]
:param backend_address_pools: Backend address pool of the application gateway resource. For
default limits, see `Application Gateway limits
<https://docs.microsoft.com/azure/azure-subscription-service-limits#application-gateway-limits>`_.
:type backend_address_pools:
list[~azure.mgmt.network.v2019_04_01.models.ApplicationGatewayBackendAddressPool]
:param backend_http_settings_collection: Backend http settings of the application gateway
resource. For default limits, see `Application Gateway limits
<https://docs.microsoft.com/azure/azure-subscription-service-limits#application-gateway-limits>`_.
:type backend_http_settings_collection:
list[~azure.mgmt.network.v2019_04_01.models.ApplicationGatewayBackendHttpSettings]
:param http_listeners: Http listeners of the application gateway resource. For default limits,
see `Application Gateway limits
<https://docs.microsoft.com/azure/azure-subscription-service-limits#application-gateway-limits>`_.
:type http_listeners:
list[~azure.mgmt.network.v2019_04_01.models.ApplicationGatewayHttpListener]
:param url_path_maps: URL path map of the application gateway resource. For default limits, see
`Application Gateway limits
<https://docs.microsoft.com/azure/azure-subscription-service-limits#application-gateway-limits>`_.
:type url_path_maps: list[~azure.mgmt.network.v2019_04_01.models.ApplicationGatewayUrlPathMap]
:param request_routing_rules: Request routing rules of the application gateway resource.
:type request_routing_rules:
list[~azure.mgmt.network.v2019_04_01.models.ApplicationGatewayRequestRoutingRule]
:param rewrite_rule_sets: Rewrite rules for the application gateway resource.
:type rewrite_rule_sets:
list[~azure.mgmt.network.v2019_04_01.models.ApplicationGatewayRewriteRuleSet]
:param redirect_configurations: Redirect configurations of the application gateway resource.
For default limits, see `Application Gateway limits
<https://docs.microsoft.com/azure/azu
|
ohaz/Colours
|
main.py
|
Python
|
apache-2.0
| 1,269
| 0.002364
|
# -*- coding: utf-8 -*-
# Main kivy import
import kivy
# Additional kivy imports
from kivy.app import App
from kivy.config import Config
# Screens
from screens import screenmanager
from screens.mainscreen import MainScreen
from screens.ingamescreen import IngameScreen
# Cause program to end if the required kivy version is not installed
kivy.require('1.8.0')
__author__ = 'ohaz'
# ---------------
# Config settings
# ---------------
# Multitouch emulation creates red dots on the screen. We don't need multitouch, so we disable it
Config.set('input', 'mouse', 'mouse,disable_multitouch')
# ---------------------
# Local initialisations
# ---------------------
# Initialise the screen manager (screens/screenmanager.py)
screenmanager.init()
# Add the two screens to it
screenmanager.set_screens([MainScreen(name='main_menu'), IngameScreen(name='ingame')])
# Start with the main menu screen
screenmanager.change_to('main_menu')
class ColoursApp(App):
"""
The main Class.
Only needed for the build function.
"""
def build(self):
"""
Method to build the app.
:return: the screenmanager
"""
return screenmanager.get_sm()
# Create the app and run it
if __name__ == '__main__':
ColoursApp().run()
|
nachandr/cfme_tests
|
cfme/tests/containers/test_containers_smartstate_analysis.py
|
Python
|
gpl-2.0
| 5,838
| 0.002398
|
import random
from collections import namedtuple
import dateparser
import pytest
from cfme import test_requirements
from cfme.containers.image import Image
from cfme.containers.provider import ContainersProvider
from cfme.containers.provider import ContainersTestItem
from cfme.utils.appliance.implementations.ui import navigate_to
from cfme.utils.wait import wait_for
pytestmark = [
pytest.mark.meta(server_roles='+smartproxy'),
pytest.mark.usefixtures('setup_provider'),
pytest.mark.tier(1),
pytest.mark.provider([ContainersProvider], scope='function'),
test_requirements.containers
]
AttributeToVerify = namedtuple('AttributeToVerify', ['table', 'attr', 'verifier'])
TESTED_ATTRIBUTES__openscap_off = (
AttributeToVerify('configuration', 'OpenSCAP Results', bool),
AttributeToVerify('configuration', 'OpenSCAP HTML', lambda val: val == 'Available'),
AttributeToVerify('configuration', 'Last scan', dateparser.parse)
)
TESTED_ATTRIBUTES__openscap_on = TESTED_ATTRIBUTES__openscap_off + (
AttributeToVerify('compliance', 'Status', lambda val: val.lower() != 'never verified'),
AttributeToVerify('compliance', 'History', lambda val: val == 'Available')
)
TEST_ITEMS = (
ContainersTestItem(Image, 'openscap_off', is_openscap=False,
tested_attr=TESTED_ATTRIBUTES__openscap_off),
ContainersTestItem(Image, 'openscap_on', is_openscap=True,
tested_attr=TESTED_ATTRIBUTES__openscap_on)
)
NUM_SELECTED_IMAGES = 1
@pytest.fixture(scope='function')
def delete_all_container_tasks(appliance):
col = appliance.collections.tasks.filter({'tab': 'AllTasks'})
col.delete_all()
@pytest.fixture(scope='function')
def random_image_instance(appliance):
collection = appliance.collections.container_images
# add filter for select only active(not archived) images from redHat registry
filter_image_collection = collection.filter({'active': True, 'redhat_registry': True})
return random.sample(filter_image_collection.all(), NUM_SELECTED_IMAGES).pop()
@pytest.mark.polarion('10030')
def test_manage_policies_navigation(random_image_instance):
"""
Polarion:
assignee: juwatts
caseimportance: high
casecomponent: Containers
initialEstimate: 1/6h
"""
random_image_instance.assign_policy_profiles('OpenSCAP profile')
@pytest.mark.polarion('10031')
def test_check_compliance(random_image_instance):
"""
Polarion:
assignee: juwatts
caseimportance: high
casecomponent: Containers
initialEstimate: 1/6h
"""
random_image_instance.assign_policy_profiles('OpenSCAP profile')
random_image_instance.check_compliance()
def get_table_attr(instance, table_name, attr):
# Trying to read the table <table_name> attribute <attr>
view = navigate_to(instance, 'Details', force=True)
table = getattr(view.entities, table_name, None)
if table:
return table.read().get(attr)
@pytest.mark.parametrize(('test_item'), TEST_ITEMS)
def test_containers_smartstate_analysis(provider, test_item, soft_assert,
delete_all_container_tasks,
random_image_instance):
"""
Polarion:
assignee: juwatts
caseimportance: high
casecomponent: Containers
initialEstimate: 1/6h
"""
if test_item.is_openscap:
random_image_instance.assign_policy_profiles('OpenSCAP profile')
else:
random_image_instance.unassign_policy_profiles('OpenSCAP profile')
random_image_instance.perform_smartstate_analysis(wait_for_finish=True)
view = navigate_to(random_image_instance, 'Details')
for tbl, attr, verifier in test_item.tested_attr:
table = getattr(view.entities, tbl)
table_data = {k.lower(): v for k, v in table.read().items()}
if not soft_assert(attr.lower() in table_data,
f'{tbl} table has missing attribute \'{attr}\''):
continue
provider.refresh_provider_relationships()
wait_for_retval = wait_for(lambda: get_table_attr(random_image_instance, tbl, attr),
message='Trying to get attribute "{}" of table "{}"'.format(
attr, tbl),
delay=5, num_sec=120, silent_failure=True)
if not wait_for_retval:
soft_assert(False, 'Could not get attribute "{}" for "{}" table.'
.format(attr, tbl))
continue
value = wait_for_retval.out
soft_assert(verifier(value),
f'{tbl}.{attr} attribute has unexpected value ({value})')
@pytest.mark.parametrize(('test_item'), TEST_ITEMS)
def test_containers_smartstate_analysis_api(provider, test_item, soft_assert,
delete_all_container_tasks, random_image_instance):
"""
Test initiating a SmartState Analysis scan via the CFME API through the ManageIQ API Client
entity class.
RFE: BZ 1486362
    Polarion:
assignee: juwatts
caseimportance: high
casecomponent: Containers
initialEstimate: 1/6h
"""
if test_item.is_openscap:
random_image_instance.assign_policy_profiles('OpenSCAP profile')
else:
random_image_instance.unassign_policy_profiles('OpenSCAP profile')
    original_scan = random_image_instance.last_scan_attempt_on
random_image_instance.scan()
task = provider.appliance.collections.tasks.instantiate(
name=f"Container Image Analysis: '{random_image_instance.name}'", tab='AllTasks')
task.wait_for_finished()
soft_assert(original_scan != random_image_instance.last_scan_attempt_on,
                'SmartState Analysis scan has failed')
|
jendap/tensorflow
|
tensorflow/python/compiler/tensorrt/test/quantization_mnist_test.py
|
Python
|
apache-2.0
| 10,917
| 0.005954
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Script to test TF-TRT INT8 conversion without calibration on Mnist model."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=unused-import
from tensorflow.compiler.tf2tensorrt.python.ops import trt_ops
# pylint: enable=unused-import
from tensorflow.core.protobuf import config_pb2
from tensorflow.python import data
from tensorflow.python import keras
from tensorflow.python.compiler.tensorrt import trt_convert
from tensorflow.python.estimator.estimator import Estimator
from tensorflow.python.estimator.model_fn import EstimatorSpec
from tensorflow.python.estimator.model_fn import ModeKeys
from tensorflow.python.estimator.run_config import RunConfig
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import graph_util
from tensorflow.python.framework import importer
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.keras.datasets import mnist
from tensorflow.python.layers import layers
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import metrics
from tensorflow.python.ops import nn
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops.losses import losses
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.summary import summary
from tensorflow.python.training import saver
from tensorflow.python.training.adam import AdamOptimizer
from tensorflow.python.training.checkpoint_management import latest_checkpoint
from tensorflow.python.training.training_util import get_global_step
INPUT_NODE_NAME = 'input'
OUTPUT_NODE_NAME = 'output'
class QuantizationAwareTrainingMNISTTest(test_util.TensorFlowTestCase):
def _BuildGraph(self, x):
def _Quantize(x, r):
x = gen_array_ops.quantize_and_dequantize_v2(x, -r, r)
return x
def _DenseLayer(x, num_inputs, num_outputs, quantization_range, name):
"""Dense layer with quantized outputs.
Args:
x: input to the dense layer
num_inputs: number of input columns of x
num_outputs: number of output columns
quantization_range: the min/max range for quantization
name: name of the variable scope
Returns:
The output of the layer.
"""
with variable_scope.variable_scope(name):
kernel = variable_scope.get_variable(
'kernel',
shape=[num_inputs, num_outputs],
dtype=dtypes.float32,
initializer=keras.initializers.glorot_uniform())
bias = variable_scope.get_variable(
'bias',
shape=[num_outputs],
dtype=dtypes.float32,
initializer=keras.initializers.zeros())
x = math_ops.matmul(x, kernel)
x = _Quantize(x, quantization_range)
x = nn.bias_add(x, bias)
x = _Quantize(x, quantization_range)
return x
x = _Quantize(x, 1)
# Conv + Bias + Relu6
x = layers.conv2d(x, filters=32, kernel_size=3, use_bias=True)
x = nn.relu6(x)
# Conv + Bias + Relu6
x = layers.conv2d(x, filters=64, kernel_size=3, use_bias=True)
x = nn.relu6(x)
# Reduce
x = math_ops.reduce_mean(x, [1, 2])
x = _Quantize(x, 6)
# FC1
x = _DenseLayer(x, 64, 512, 6, name='dense')
x = nn.relu6(x)
# FC2
x = _DenseLayer(x, 512, 10, 25, name='dense_1')
x = array_ops.identity(x, name=OUTPUT_NODE_NAME)
return x
def _GetGraphDef(self, use_trt, max_batch_size, model_dir):
"""Get the frozen mnist GraphDef.
Args:
use_trt: whether use TF-TRT to convert the graph.
max_batch_size: the max batch size to apply during TF-TRT conversion.
model_dir: the model directory to load the checkpoints.
Returns:
The frozen mnist GraphDef.
"""
graph = ops.Graph()
with self.session(graph=graph) as sess:
with graph.device('/GPU:0'):
x = array_ops.placeholder(
shape=(None, 28, 28, 1), dtype=dtypes.float32, name=INPUT_NODE_NAME)
self._BuildGraph(x)
# Load weights
mnist_saver = saver.Saver()
checkpoint_file = latest_checkpoint(model_dir)
mnist_saver.restore(sess, checkpoint_file)
# Freeze
graph_def = graph_util.convert_variables_to_constants(
sess, sess.graph_def, output_node_names=[OUTPUT_NODE_NAME])
# Convert with TF-TRT
if use_trt:
logging.info('Number of nodes before TF-TRT conversion: %d',
len(graph_def.node))
graph_def = trt_convert.create_inference_graph(
graph_def,
outputs=[OUTPUT_NODE_NAME],
max_batch_size=max_batch_size,
precision_mode='INT8',
# There is a 2GB GPU memory limit for each test, so we set
# max_workspace_size_bytes to 256MB to leave enough room for TF
# runtime to allocate GPU memory.
max_workspace_size_bytes=1 << 28,
minimum_segment_size=2,
use_calibration=False,
)
logging.info('Number of nodes after TF-TRT conversion: %d',
len(graph_def.node))
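      # The whole converted network is expected to be fused into a single TRT engine.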
num_engines = len(
[1 for n in graph_def.node if str(n.op) == 'TRTEngineOp'])
self.assertEqual(1, num_engines)
return graph_def
  def _Run(self, is_training, use_trt, batch_size, num_epochs, model_dir):
"""Train or evaluate the model.
Args:
is_training: whether to train or evaluate the model. In training mode,
        quantization will be simulated where the quantize_and_dequantize_v2 are
placed.
use_trt: if true, use TRT INT8 mode for evaluation, which will perform
real quantization. Otherwise use native TensorFlow which will perform
simulated quantization. Ignored if is_training is True.
batch_size: batch size.
num_epochs: how many epochs to train. Ignored if is_training is False.
model_dir: where to save or load checkpoint.
Returns:
The Estimator evaluation result.
"""
# Get dataset
train_data, test_data = mnist.load_data()
def _PreprocessFn(x, y):
x = math_ops.cast(x, dtypes.float32)
x = array_ops.expand_dims(x, axis=2)
x = 2.0 * (x / 255.0) - 1.0
y = math_ops.cast(y, dtypes.int32)
return x, y
def _EvalInputFn():
mnist_x, mnist_y = test_data
dataset = data.Dataset.from_tensor_slices((mnist_x, mnist_y))
dataset = dataset.apply(
data.experimental.map_and_batch(
map_func=_PreprocessFn,
batch_size=batch_size,
num_parallel_calls=8))
dataset = dataset.repeat(count=1)
iterator = dataset.make_one_shot_iterator()
features, labels = iterator.get_next()
return features, labels
def _TrainInputFn():
mnist_x, mnist_y = train_data
dataset = data.Dataset.from_tensor_slices((mnist_x, mnist_y))
dataset = dataset.shuffle(2 * len(mnist_x))
dataset = dataset.apply(
data.experimental.map_and_batch(
map_func=_PreprocessFn,
batch_size=batch_size,
num_parallel_calls=8))
dataset = dataset.repeat(count=num_epochs)
iterator = dataset.make_one_shot_iterator()
features, labels = iterator.get_next()
return features, labels
def _ModelFn(features
|
lizardsystem/lizard-box
|
lizard_box/migrations/0008_auto__add_portaltab__add_layoutportaltab__chg_field_box_url.py
|
Python
|
gpl-3.0
| 5,651
| 0.007963
|
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'PortalTab'
db.create_table('lizard_box_portaltab', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('tab_type', self.gf('django.db.models.fields.IntegerField')(default=1)),
('destination_slug', self.gf('django.db.models.fields.SlugField')(max_length=50, db_index=True)),
('destination_url', self.gf('django.db.models.fields.URLField')(max_length=200)),
))
db.send_create_signal('lizard_box', ['PortalTab'])
# Adding model 'LayoutPortalTab'
db.create_table('lizard_box_layoutportaltab', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('layout', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['lizard_box.Layout'])),
('portal_tab', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['lizard_box.PortalTab'])),
('index', self.gf('django.db.models.fields.IntegerField')(default=100)),
))
db.send_create_signal('lizard_box', ['LayoutPortalTab'])
# Changing field 'Box.url'
db.alter_column('lizard_box_box', 'url', self.gf('django.db.models.fields.CharField')(max_length=200, null=True))
def backwards(self, orm):
# Deleting model 'PortalTab'
db.delete_table('lizard_box_portaltab')
# Deleting model 'LayoutPortalTab'
db.delete_table('lizard_box_layoutportaltab')
# Changing field 'Box.url'
db.alter_column('lizard_box_box', 'url', self.gf('django.db.models.fields.URLField')(max_length=200, null=True))
models = {
'lizard_box.box': {
'Meta': {'object_name': 'Box'},
'box_type': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
'icon_class': ('django.db.models.fields.CharField', [], {'max_length': '80', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '80'}),
'template': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'url': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'})
},
'lizard_box.column': {
'Meta': {'object_name': 'Column'},
'boxes': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['lizard_box.Box']", 'through': "orm['lizard_box.ColumnBox']", 'symmetrical': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'index': ('django.db.models.fields.IntegerField', [], {'default': '100'}),
'layout': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lizard_box.Layout']"}),
'width': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'})
},
'lizard_box.columnbox': {
'Meta': {'object_name': 'ColumnBox'},
'action_boxes': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "u'action_boxes'", 'symmetrical': 'False', 'to': "orm['lizard_box.Box']"}),
            'box': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lizard_box.Box']"}),
'column': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lizard_box.Column']"}),
'height': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'index': ('django.db.models.fields.IntegerField', [], {'default': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '80', 'null': 'True', 'blank': 'True'})
},
'lizard_box.layout': {
'Meta': {'object_name': 'Layout'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'portal_tabs': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['lizard_box.PortalTab']", 'through': "orm['lizard_box.LayoutPortalTab']", 'symmetrical': 'False'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'db_index': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '80'})
},
'lizard_box.layoutportaltab': {
'Meta': {'object_name': 'LayoutPortalTab'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'index': ('django.db.models.fields.IntegerField', [], {'default': '100'}),
'layout': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lizard_box.Layout']"}),
'portal_tab': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lizard_box.PortalTab']"})
},
'lizard_box.portaltab': {
'Meta': {'object_name': 'PortalTab'},
'destination_slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'db_index': 'True'}),
'destination_url': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'tab_type': ('django.db.models.fields.IntegerField', [], {'default': '1'})
}
}
complete_apps = ['lizard_box']
|
gotgenes/BiologicalProcessNetworks
|
bpn/mcmc/sabpn.py
|
Python
|
mit
| 3,485
| 0.003156
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
# Copyright (c) 2011 Chris D. Lasher & Phillip Whisenhunt
#
# This software is released under the MIT License. Please see
# LICENSE.txt for details.
"""A program to detect Process Linkage Networks using
Simulated Annealing.
"""
import collections
import itertools
import sys
from convutils import convutils
import bpn.cli
import bpn.structures
from defaults import (
SUPERDEBUG,
SUPERDEBUG_MODE,
LINKS_FIELDNAMES,
PARAMETERS_FIELDNAMES,
TRANSITIONS_FIELDNAMES,
DETAILED_TRANSITIONS_FIELDNAMES
)
# Configure all the logging stuff
import logging
logger = logging.getLogger('bpn.sabpn')
if SUPERDEBUG_MODE:
# A logging level below logging.DEBUG
logging.addLevelName(SUPERDEBUG, 'SUPERDEBUG')
logger.setLevel(SUPERDEBUG)
#stream_handler.setLevel(SUPERDEBUG)
import simulatedannealing
import states
import recorders
def main(argv=None):
cli_parser = bpn.cli.SaCli()
input_data = cli_parser.parse_args(argv)
logger.info("Constructing supporting data structures; this may "
"take a while...")
annotated_interactions = bpn.structures.AnnotatedInteractionsArray(
input_data.interactions_graph,
input_data.annotations_dict
)
logger.info("Considering %d candidate links in total." %
annotated_interactions.calc_num_links())
logger.info("Constructing Simulated Annealing")
if input_data.free_parameters:
logger.info("Using free parameter transitions.")
parameters_state_class = states.RandomTransitionParametersState
else:
parameters_state_class = states.PLNParametersState
if input_data.disable_swaps:
logger.info("Disabling swap transitions.")
links_state_class = states.NoSwapArrayLinksState
else:
links_state_class = states.ArrayLinksState
if input_data.detailed_transitions:
logger.info("Recording extra information for each state.")
transitions_csvfile = convutils.make_csv_dict_writer(
input_data.transitions_outfile,
DETAILED_TRANSITIONS_FIELDNAMES
)
else:
transitions_csvfile = convutils.make_csv_dict_writer(
input_data.transitions_outfile,
TRANSITIONS_FIELDNAMES
)
sa = simulatedannealing.ArraySimulatedAnnealing(
annotated_interactions,
input_data.activity_threshold,
input_data.transition_ratio,
num_steps=input_data.steps,
temperature=input_data.temperature,
end_temperature=input_data.end_temperature,
parameters_state_class=parameters_state_class,
links_state_class=links_state_class
)
logger.info("Beginning to Anneal. This may take a while...")
sa.run()
logger.info("Run completed.")
logger.info("Writing link results to %s" %
input_data.links_outfile.name)
links_out_csvwriter = convutils.make_csv_dict_writer(
input_data.links_outfile, LINKS_FIELDNAMES)
logger.info("Writing parameter results to %s" % (
input_data.parameters_outfile.name))
parameters_out_csvwriter = convutils.make_csv_dict_writer(
input_data.parameters_outfile, PARAMETERS_FIELDNAMES)
logger.info("Writing transitions data to %s." % (
input_data.transitions_outfile.name))
logger.info("Finished.")
if __name__ == '__main__':
main()
|
jimmycallin/master-thesis
|
architectures/svm_baseline/resources.py
|
Python
|
mit
| 5,218
| 0.002108
|
from misc_utils import get_config, get_logger, tokenize
from discourse_relation import DiscourseRelation
from collections import Counter, defaultdict
import json
import abc
import numpy as np
from os.path import join
import os
logger = get_logger(__name__)
class Resource(metaclass=abc.ABCMeta):
def __init__(self, path, classes):
self.path = path
self.classes = sorted(classes)
self.y_indices = {x: y for y, x in enumerate(self.classes)}
self.instances = list(self._load_instances(path))
@abc.abstractmethod
def _load_instances(self, path):
raise NotImplementedError("This class must be subclassed.")
class PDTBRelations(Resource):
def __init__(self, path, classes, separate_dual_classes, filter_type=None, skip_missing_classes=True):
self.skip_missing_classes = skip_missing_classes
self.separate_dual_classes = separate_dual_classes
self.filter_type = [] if filter_type is None else filter_type
super().__init__(path, classes)
def _load_instances(self, path):
with open(join(path, 'relations.json')) as file_:
for line in file_:
rel = DiscourseRelation(json.loads(line.strip()))
if (self.filter_type != [] or self.filter_type is not None) and rel.relation_type() not in self.filter_type:
continue
if self.separate_dual_classes:
for splitted in rel.split_up_senses():
if len(splitted.senses()) > 1:
raise ValueError("n_senses > 1")
if len(splitted.senses()) == 1 and splitted.senses()[0] not in self.y_indices:
if self.skip_missing_classes:
logger.debug("Sense class {} not in class list, skipping {}".format(splitted.senses()[0], splitted.relation_id()))
continue
yield splitted
else:
a_class_exist = any(r in self.y_indices for r in rel.senses())
if not a_class_exist:
if self.skip_missing_classes:
logger.debug("Sense {} classes not in class list, skipping {}".format(rel.senses(), rel.relation_id()))
continue
yield rel
def get_feature_tensor(self, extractors):
rels_feats = []
n_instances = 0
last_features_for_instance = None
for rel in self.instances:
n_instances += 1
feats = []
total_features_per_instance = 0
for extractor in extractors:
# These return matrices of shape (1, n_features)
# We concatenate them on axis 1
arg_rawtext = getattr(rel, extractor.argument)()
arg_tokenized = tokenize(arg_rawtext)
arg_feats = extractor.extract_features(arg_tokenized)
feats.append(arg_feats)
total_features_per_instance += extractor.n_features
if last_features_for_instance is not None:
# Making sure we have equal number of features per instance
assert total_features_per_instance == last_features_for_instance
rels_feats.append(np.concatenate(feats, axis=1))
feature_tensor = np.array(rels_feats)
assert_shape = (n_instances, 1, total_features_per_instance)
assert feature_tensor.shape == assert_shape, \
"Tensor shape mismatch. Is {}, should be {}".format(feature_tensor.shape, assert_shape)
return feature_tensor
def get_correct(self, indices=True):
"""
Returns answer indices.
"""
for rel in self.instances:
senses = rel.senses()
if self.separate_dual_classes:
if indices:
yield self.y_indices[senses[0]]
else:
yield senses[0]
else:
ys = [self.y_indices[sense] for sense in senses]
if indices:
yield ys
else:
yield senses
def store_results(self, results, store_path):
"""
Don't forget to use the official scoring script here.
"""
text_results = [self.classes[res] for res in results]
# Load test file
# Output json object with results
# Deal with multiple instances somehow
predicted_rels = []
for text_result, rel in zip(text_results, self.instances):
if rel.is_explicit():
rel_type = 'Explicit'
else:
rel_type = 'Implicit'
            predicted_rels.append(rel.to_output_format(text_result, rel_type)) # turn string representation into list instance first
# Store test file
if not os.path.exists(store_path):
os.makedirs(store_path)
with open(join(store_path, 'output.json'), 'w') as w:
for rel in predicted_rels:
w.write(json.dumps(rel) + '\n')
logger.info("Stored predicted output at {}".format(store_path))
|
txdywy/wiki20
|
wiki20/tests/models/test_auth.py
|
Python
|
gpl-2.0
| 1,523
| 0.009849
|
# -*- coding: utf-8 -*-
"""Test suite for the TG app's models"""
from __future__ import unicode_literals
from nose.tools import eq_
from wiki20 import model
from wiki20.tests.models import ModelTest
class TestGroup(ModelTest):
"""Unit test case for the ``Group`` model."""
klass = model.Group
attrs = dict(
group_name = "test_group",
        display_name = "Test Group"
)
class TestUser(ModelTest):
"""Unit test case for the ``User`` model."""
klass = model.User
attrs = dict(
user_name = "ignucius",
email_address = "ignucius@example.org"
)
def test_obj_creation_username(self):
"""The obj constructor must set the user name right"""
eq_(self.obj.user_name, "ignucius")
    def test_obj_creation_email(self):
"""The obj constructor must set the email right"""
eq_(self.obj.email_address, "ignucius@example.org")
def test_no_permissions_by_default(self):
"""User objects should have no permission by default."""
eq_(len(self.obj.permissions), 0)
def test_getting_by_email(self):
"""Users should be fetcheable by their email addresses"""
him = model.User.by_email_address("ignucius@example.org")
eq_(him, self.obj)
class TestPermission(ModelTest):
"""Unit test case for the ``Permission`` model."""
klass = model.Permission
attrs = dict(
permission_name = "test_permission",
description = "This is a test Description"
)
|
nicksergeant/snipt
|
views.py
|
Python
|
mit
| 1,815
| 0.000551
|
from annoying.decorators import ajax_request, render_to
from blogs.views import blog_list
from django.db.models import Count
from django.http import HttpResponseRedirect, HttpResponseBadRequest
from snipts.utils import get_lexers_list
from taggit.models import Tag
@render_to('homepage.html')
def homepage(request):
if request.blog_user:
return blog_list(request)
return {}
@ajax_request
def lexers(request):
lexers = get_lexers_list()
objects = []
for l in lexers:
try:
filters = l[2]
except IndexError:
filters = []
try:
mimetypes = l[3]
except IndexError:
mimetypes = []
objects.append({
'name': l[0],
'lexers': l[1],
'filters': filters,
'mimetypes': mimetypes
})
return {'objects': objects}
def login_redirect(request):
if request.user.is_authenticated():
return HttpResponseRedirect('/' + request.user.username + '/')
else:
return HttpResponseRedirect('/')
@render_to('tags.html')
def tags(request):
all_tags = Tag.objects.filter(snipt__public=True).order_by('name')
all_tags = all_tags.annotate(count=Count('taggit_taggeditem_items__id'))
    popular_tags = Tag.objects.filter(snipt__public=True)
popular_tags = popular_tags.annotate(
count=Count('taggit_taggeditem_items__id'))
popular_tags = popular_tags.order_by('-count')[:20]
popular_tags = sorted(popular_tags, key=lambda tag: tag.name)
return {
        'all_tags': all_tags,
'tags': popular_tags
}
@ajax_request
def user_api_key(request):
if not request.user.is_authenticated():
return HttpResponseBadRequest()
return {
'api_key': request.user.api_key.key
}
|
jennirinker/PyFAST
|
setup.py
|
Python
|
gpl-3.0
| 395
| 0.002532
|
""" Set-up script to install PyFAST locally
"""
from setuptools import setup
setup(name='pyfast',
version='0.1',
description='Tools for working with wind turbine simulator FAST',
url='https://github.com/jennirinker/PyFAST.git',
author='Jenni Rinker',
author_email='jennifer.m.rinker@gmail.com',
license='GPL',
packages=['pyfast'],
zip_safe=False)
|
abw333/dominoes
|
dominoes/__init__.py
|
Python
|
mit
| 675
| 0
|
from dominoes import players
from dominoes import search
from dominoes.board import Board
from dominoes.domino import Domino
from dominoes.exceptions import EmptyBoardException
from dominoes.exceptions import EndsMismatchException
from dominoes.exceptions import GameInProgressException
from dominoes.exceptions import GameOverException
from dominoes.exceptions import NoSuchDominoException
from dominoes.exceptions import NoSuchPlayerException
from dominoes.exceptions import SeriesOverException
from dominoes.game import Game
from dominoes.hand import Hand
from dominoes.result import Result
from dominoes.series import Series
from dominoes.skinny_board import SkinnyBoard
|
PrincetonUniversity/pywsse
|
wsse/client/requests/tests/test_auth.py
|
Python
|
lgpl-3.0
| 2,780
| 0.026278
|
# wsse/client/requests/tests/test_auth.py
# coding=utf-8
# pywsse
# Authors: Rushy Panchal, Naphat Sanguansin, Adam Libresco, Jérémie Lumbroso
# Date: September 1st, 2016
# Description: Test the requests authentication implementation
from django.contrib.staticfiles.testing import StaticLiveServerTestCase
from django.contrib.auth.models import User
from rest_framework import status
import requests
from wsse import settings
from wsse.compat import reverse_lazy
from wsse.server.django.wsse.models import UserSecret
from wsse.client.requests.auth import WSSEAuth
def setUpModule():
'''
Set up the module for running tests.
'''
# Set the nonce store to the Django store after saving the current settings
# so they can be restored later.
global __old_nonce_settings
__old_nonce_settings = (settings.NONCE_STORE, settings.NONCE_STORE_ARGS)
settings.NONCE_STORE = 'wsse.server.django.wsse.store.DjangoNonceStore'
settings.NONCE_STORE_ARGS = []
def tearDownModule():
'''
Tear down the module after running tests.
'''
	# Restore the nonce settings.
settings.NONCE_STORE, settings.NONCE_STORE_ARGS = __old_nonce_settings
class TestWSSEAuth(StaticLiveServerTestCase):
'''
Test authenticating through the `WSSEAuth` handler.
'''
endpoint = reverse_lazy('api-test')
def setUp(self):
'''
Set up the test cases.
'''
self.user = User.objects.create(username = 'username')
self.user_secret = UserSecret.objects.create(user = self.user)
self.auth = WSSEAuth('username', self.user_secret.secret)
self.base_url = '{}{}'.format(self.live_server_url, self.endpoint)
def test_auth(self):
'''
Perform valid authentication. The user should be authenticated.
'''
response = requests.get(self.base_url, auth = self.auth)
self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_auth_reuse(self):
'''
Reuse the same authentication handler. Both requests should succeed.
'''
response_a = requests.get(self.base_url, auth = self.auth)
response_b = requests.get(self.base_url, auth = self.auth)
self.assertEqual(response_a.status_code, status.HTTP_200_OK)
self.assertEqual(response_b.status_code, status.HTTP_200_OK)
def test_auth_incorrect_password(self):
'''
	Authenticate with an incorrect password. The authentication should fail.
'''
response = requests.get(self.base_url, auth = WSSEAuth('username',
'!' + self.user_secret.secret))
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
def test_auth_nonexistent_username(self):
'''
	Authenticate with a nonexistent user. The authentication should fail.
'''
response = requests.get(self.base_url, auth = WSSEAuth('nonexistentuser',
self.user_secret.secret))
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
|
novapost/django-email-change
|
src/email_change/__init__.py
|
Python
|
apache-2.0
| 1,088
| 0.001838
|
# -*- coding: utf-8 -*-
#
# This file is part of django-email-change.
#
# django-email-change adds support for email address change and confirmation.
#
# Development Web Site:
# - http://www.codetrax.org/projects/django-email-change
# Public Source Code Repository:
# - https://source.codetrax.org/hgroot/django-email-change
#
# Copyright 2010 George Notaras <gnot [at] g-loaded.eu>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
VERSION = (0, 2, 3, 'final', 0)
def get_version():
    version = '%d.%d.%d' % (VERSION[0], VERSION[1], VERSION[2])
return version
|
obutkalyuk/Python_15
|
fixture/session.py
|
Python
|
apache-2.0
| 1,157
| 0.002593
|
class SessionHelper:
def __init__(self, app):
self.app = app
def login(self, user, password):
wd = self.app.wd
self.app.open_home_page()
self.app.type_text("user", user)
self.app.type_text("pass", password)
        wd.find_element_by_css_selector("input[value='Login']").click()
def ensure_login(self, user, password):
wd = self.app.wd
if self.is_logged_in():
if self.is_logged_in_as(user):
return
else:
self.logout()
        self.login(user, password)
def logout(self):
wd = self.app.wd
wd.find_element_by_link_text("Logout").click()
def ensure_logout(self):
wd = self.app.wd
if self.is_logged_in():
self.logout()
def is_logged_in(self):
wd = self.app.wd
return len(wd.find_elements_by_link_text("Logout"))> 0
def is_logged_in_as(self, user):
wd = self.app.wd
return self.get_logged_user() == user
def get_logged_user(self):
wd = self.app.wd
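        # The element text is wrapped (e.g. "(username)"), so [1:-1] drops the first and last character.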
return wd.find_element_by_xpath("//form[@name='logout']/b").text[1:-1]
|
ysasaki6023/NeuralNetworkStudy
|
cifar02/analysis_DropTest.py
|
Python
|
mit
| 925
| 0.028108
|
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.cm
fig = plt.figure()
ax = {}
ax["DropOut"] = fig.add_subplot(121)
ax["NoDropOut"] = fig.add_subplot(122)
dList = {}
dList["DropOut"] = ["DropOut1","DropOut2","DropOut3"]
dList["NoDropOut"] = ["NoDropOut1","NoDropOut2"]
def myPlot(ax,dName):
cList = ["black","blue","red","green","cyan"]
for i,dfile in enumerate(dName):
print dfile
d = pd.read_csv("Output_DropTest/%s/output.dat"%dfile)
dTrain = d[d["mode"]=="Train"]
dTest = d[d["mode"]=="Test" ]
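        # Solid lines show training accuracy, dashed lines show test accuracy (both in %).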
        ax.plot(dTrain.epoch, dTrain.accuracy*100., linestyle="-" , color=cList[i], label=dfile)
        ax.plot(dTest .epoch, dTest .accuracy*100., linestyle="--", color=cList[i], label="")
ax.set_xlim(0,50)
ax.set_ylim(0,100)
    ax.legend(loc=4,fontsize=8)
ax.grid()
for k in dList:
myPlot(ax[k],dList[k])
plt.show()
|
mpearmain/BayesBoost
|
examples/usage.py
|
Python
|
apache-2.0
| 1,604
| 0.00187
|
from bayes_opt import BayesianOptimization
'''
Example of how to use this bayesian optimization package.
'''
# Lets find the maximum of a simple quadratic function of two variables
# We create the bayes_opt object and pass the function to be maximized
# together with the parameter names and their bounds.
bo = BayesianOptimization(lambda x, y: -x**2 -(y-1)**2 + 1, {'x': (-4, 4), 'y': (-3, 3)})
# One of the things we can do with this object is pass points
# which we want the algorithm to probe. A dictionary with the
# parameters names and a list of values to include in the search
# must be given.
bo.explore({'x': [-1, 3], 'y': [-2, 2]})
# Additionally, if we have any prior knowledge of the behaviour of
# the target function (even if not totally accurate) we can also
# tell that to the optimizer.
# Here we pass a dictionary with target values as keys of another
# dictionary with parameters names and their corresponding value.
bo.initialize({-2: {'x': 1, 'y': 0}, -1.251: {'x': 1, 'y': 1.5}})
# Once we are satisfied with the initialization conditions
# we let the algorithm do its magic by calling the maximize()
# method.
bo.maximize(n_iter=15)
# The output values can be accessed with self.res
print(bo.res['max'])
# If we are not satisfied with the current results we can pickup from
# where we left, maybe pass some more exploration points to the algorithm
# change any parameters we may choose, and the let it run again.
bo.explore({'x': [0.6], 'y': [-0.23]})
bo.maximize(n_iter=5, acq='pi')
# Finally, we take a look at the final results.
print(bo.res['max'])
print(bo.res['all'])
|
bjodah/pysym
|
pysym/util.py
|
Python
|
bsd-2-clause
| 2,569
| 0
|
# -*- coding: utf-8 -*-
from __future__ import (absolute_import, division, print_function)
from . import _wrap_numbers, Symbol, Number, Matrix
def symbols(s):
""" mimics sympy.symbols """
tup = tuple(map(Symbol, s.replace(',', ' ').split()))
if len(tup) == 1:
return tup[0]
else:
return tup
def symarray(prefix, shape):
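    """ mimics sympy.symarray: a numpy object array of Symbols named '<prefix>_<i>_<j>...' """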
import numpy as np
arr = np.empty(shape, dtype=object)
for index in np.ndindex(shape):
        arr[index] = Symbol('%s_%s' % (prefix, '_'.join(map(str, index))))
return arr
def lambdify(args, exprs):
"""
lambdify mimics sympy.lambdify
"""
try:
nargs = len(args)
except TypeError:
args = (args,)
nargs = 1
try:
nexprs = len(exprs)
except TypeError:
exprs = (exprs,)
nexprs = 1
@_wrap_numbers
def f(*inp):
if len(inp) != nargs:
raise TypeError("Incorrect number of arguments")
try:
len(inp)
except TypeError:
inp = (inp,)
subsd = dict(zip(args, inp))
return [expr.subs(subsd).evalf() for expr in exprs][
0 if nexprs == 1 else slice(None)]
return f
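# Illustrative usage (sketch, not from the original source; assumes pysym expressions
# support arithmetic like sympy's):
#     x, y = symbols('x y')
#     f = lambdify((x, y), x + 2*y)
#     f(3, 4)  # -> 11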
class Lambdify(object):
"""
Lambdify mimics symengine.Lambdify
"""
def __init__(self, syms, exprs):
self.syms = syms
self.exprs = exprs
def __call__(self, inp, out=None):
inp = tuple(map(Number.make, inp))
subsd = dict(zip(self.syms, inp))
def _eval(expr_iter):
return [expr.subs(subsd).evalf() for expr in expr_iter]
exprs = self.exprs
if out is not None:
try:
out.flat = _eval(exprs.flatten())
except AttributeError:
out.flat = _eval(exprs)
elif isinstance(exprs, Matrix):
import numpy as np
nr, nc = exprs.nrows, exprs.ncols
out = np.empty((nr, nc))
for ri in range(nr):
for ci in range(nc):
out[ri, ci] = exprs._get_element(
ri*nc + ci).subs(subsd).evalf()
return out
# return Matrix(nr, nc, _eval(exprs._get_element(i) for
# i in range(nr*nc)))
elif hasattr(exprs, 'reshape'):
# NumPy like container:
container = exprs.__class__(exprs.shape, dtype=float, order='C')
container.flat = _eval(exprs.flatten())
return container
else:
return _eval(exprs)
|
tommyip/zulip
|
zerver/webhooks/trello/view/card_actions.py
|
Python
|
apache-2.0
| 10,437
| 0.004791
|
from typing import Any, Dict, Mapping, Optional, Tuple
from .exceptions import UnknownUpdateCardAction
SUPPORTED_CARD_ACTIONS = [
u'updateCard',
u'createCard',
u'addLabelToCard',
u'removeLabelFromCard',
u'addMemberToCard',
u'removeMemberFromCard',
u'addAttachmentToCard',
u'addChecklistToCard',
u'commentCard',
u'updateCheckItemStateOnCard',
]
IGNORED_CARD_ACTIONS = [
'createCheckItem',
]
CREATE = u'createCard'
CHANGE_LIST = u'changeList'
CHANGE_NAME = u'changeName'
SET_DESC = u'setDesc'
CHANGE_DESC = u'changeDesc'
REMOVE_DESC = u'removeDesc'
ARCHIVE = u'archiveCard'
REOPEN = u'reopenCard'
SET_DUE_DATE = u'setDueDate'
CHANGE_DUE_DATE = u'changeDueDate'
REMOVE_DUE_DATE = u'removeDueDate'
ADD_LABEL = u'addLabelToCard'
REMOVE_LABEL = u'removeLabelFromCard'
ADD_MEMBER = u'addMemberToCard'
REMOVE_MEMBER = u'removeMemberFromCard'
ADD_ATTACHMENT = u'addAttachmentToCard'
ADD_CHECKLIST = u'addChecklistToCard'
COMMENT = u'commentCard'
UPDATE_CHECK_ITEM_STATE = u'updateCheckItemStateOnCard'
TRELLO_CARD_URL_TEMPLATE = u'[{card_name}]({card_url})'
ACTIONS_TO_MESSAGE_MAPPER = {
CREATE: u'created {card_url_template}.',
CHANGE_LIST: u'moved {card_url_template} from {old_list} to {new_list}.',
CHANGE_NAME: u'renamed the card from "{old_name}" to {card_url_template}.',
SET_DESC: u'set description for {card_url_template} to:\n~~~ quote\n{desc}\n~~~\n',
CHANGE_DESC: (u'changed description for {card_url_template} from\n' +
'~~~ quote\n{old_desc}\n~~~\nto\n~~~ quote\n{desc}\n~~~\n'),
REMOVE_DESC: u'removed description from {card_url_template}.',
ARCHIVE: u'archived {card_url_template}.',
REOPEN: u'reopened {card_url_template}.',
SET_DUE_DATE: u'set due date for {card_url_template} to {due_date}.',
CHANGE_DUE_DATE: u'changed due date for {card_url_template} from {old_due_date} to {due_date}.',
REMOVE_DUE_DATE: u'removed the due date from {card_url_template}.',
ADD_LABEL: u'added a {color} label with \"{text}\" to {card_url_template}.',
REMOVE_LABEL: u'removed a {color} label with \"{text}\" from {card_url_template}.',
ADD_MEMBER: u'added {member_name} to {card_url_template}.',
REMOVE_MEMBER: u'removed {member_name} from {card_url_template}.',
ADD_ATTACHMENT: u'added [{attachment_name}]({attachment_url}) to {card_url_template}.',
ADD_CHECKLIST: u'added the {checklist_name} checklist to {card_url_template}.',
COMMENT: u'commented on {card_url_template}:\n~~~ quote\n{text}\n~~~\n',
UPDATE_CHECK_ITEM_STATE: u'{action} **{item_name}** in **{checklist_name}** ({card_url_template}).'
}
def prettify_date(date_string: str) -> str:
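    # e.g. '2018-01-01T12:34:56.000Z' -> '2018-01-01 12:34:56 UTC'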
return date_string.replace('T', ' ').replace('.000', '').replace('Z', ' UTC')
def process_card_action(payload: Mapping[str, Any], action_type: str) -> Optional[Tuple[str, str]]:
proper_action = get_proper_action(payload, action_type)
if proper_action is not None:
return get_subject(payload), get_body(payload, proper_action)
return None
def get_proper_action(payload: Mapping[str, Any], action_type: str) -> Optional[str]:
if action_type == 'updateCard':
data = get_action_data(payload)
old_data = data['old']
card_data = data['card']
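        # Compare the previous ('old') values with the current card data to infer which update happened.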
if data.get('listBefore'):
return CHANGE_LIST
if old_data.get('name'):
return CHANGE_NAME
if old_data.get('desc') == "":
return SET_DESC
if old_data.get('desc'):
if card_data.get('desc') == "":
return REMOVE_DESC
else:
return CHANGE_DESC
if old_data.get('due', False) is None:
return SET_DUE_DATE
if old_data.get('due'):
if card_data.get('due', False) is None:
return REMOVE_DUE_DATE
else:
return CHANGE_DUE_DATE
if old_data.get('closed') is False and card_data.get('closed'):
return ARCHIVE
if old_data.get('closed') and card_data.get('closed') is False:
return REOPEN
# we don't support events for when a card is moved up or down
# within a single list
if old_data.get('pos'):
return None
raise UnknownUpdateCardAction()
return action_type
def get_subject(payload: Mapping[str, Any]) -> str:
return get_action_data(payload)['board'].get('name')
def get_body(payload: Mapping[str, Any], action_type: str) -> str:
message_body = ACTIONS_TO_FILL_BODY_MAPPER[action_type](payload, action_type)
creator = payload['action']['memberCreator'].get('fullName')
return u'{full_name} {rest}'.format(full_name=creator, rest=message_body)
def get_added_checklist_body(payload: Mapping[str, Any], action_type: str) -> str:
data = {
'checklist_name': get_action_data(payload)['checklist'].get('name'),
}
return fill_appropriate_message_content(payload, action_type, data)
def get_update_check_item_body(payload: Mapping[str, Any], action_type: str) -> str:
action = get_action_data(payload)
state = action['checkItem']['state']
data = {
'action': 'checked' if state == 'complete' else 'unchecked',
'checklist_name': action['checklist'].get('name'),
        'item_name': action['checkItem'].get('name'),
}
return fill_appropriate_message_content(payload, action_type, data)
def get_added_attachment_body(payload: Mapping[str, Any], action_type: str) -> str:
data = {
'attachment_url': get_action_data(payload)['attachment'].get('url'),
'attachment_name': get_action_data(payload)['attachment'].get('name'),
}
return fill_appropriate_message_content(payload, action_type, data)
def get_updated_card_body(payload: Mapping[str, Any], action_type: str) -> str:
data = {
'card_name': get_card_name(payload),
'old_list': get_action_data(payload)['listBefore'].get('name'),
'new_list': get_action_data(payload)['listAfter'].get('name'),
}
return fill_appropriate_message_content(payload, action_type, data)
def get_renamed_card_body(payload: Mapping[str, Any], action_type: str) -> str:
data = {
'old_name': get_action_data(payload)['old'].get('name'),
'new_name': get_action_data(payload)['old'].get('name'),
}
return fill_appropriate_message_content(payload, action_type, data)
def get_added_label_body(payload: Mapping[str, Any], action_type: str) -> str:
data = {
'color': get_action_data(payload).get('value'),
'text': get_action_data(payload).get('text'),
}
return fill_appropriate_message_content(payload, action_type, data)
def get_managed_member_body(payload: Mapping[str, Any], action_type: str) -> str:
data = {
'member_name': payload['action']['member'].get('fullName')
}
return fill_appropriate_message_content(payload, action_type, data)
def get_comment_body(payload: Mapping[str, Any], action_type: str) -> str:
data = {
'text': get_action_data(payload)['text'],
}
return fill_appropriate_message_content(payload, action_type, data)
def get_managed_due_date_body(payload: Mapping[str, Any], action_type: str) -> str:
data = {
'due_date': prettify_date(get_action_data(payload)['card'].get('due'))
}
return fill_appropriate_message_content(payload, action_type, data)
def get_changed_due_date_body(payload: Mapping[str, Any], action_type: str) -> str:
data = {
'due_date': prettify_date(get_action_data(payload)['card'].get('due')),
'old_due_date': prettify_date(get_action_data(payload)['old'].get('due'))
}
return fill_appropriate_message_content(payload, action_type, data)
def get_managed_desc_body(payload: Mapping[str, Any], action_type: str) -> str:
data = {
'desc': prettify_date(get_action_data(payload)['card']['desc'])
}
return fill_appropriate_message_content(payload, action_type, data)
def get_changed_desc_body(payload: Mapping[str, Any], action_type: str) -> str:
data = {
'desc': prettify_date(get_action_data(payload)['card']['desc']),
'old_desc': prettify_dat
|
cydcowley/Imperial-Visualizations
|
visuals_EM/Waves and Dielectrics/plotly_arrows.py
|
Python
|
mit
| 2,137
| 0.003745
|
import numpy as np
import plotly.graph_objs as go
def p2c(r, theta, phi):
"""Convert polar unit vector to cartesians"""
return [r * np.sin(theta) * np.cos(phi),
r * np.sin(theta) * np.sin(phi),
r * np.cos(theta)]
# return [-r * np.cos(theta),
# r * np.sin(theta) * np.sin(phi),
# r * np.sin(theta) * np.cos(phi)]
class Arrow:
def __init__(self, theta, out, width=5, color='rgb(0,0,0)'):
"""
Args:
            theta (float) - radians [0, π]
out (bool) - True if outgoing, False if incoming (to the origin)
width (int) - line thickness
color (hex/rgb) - line color
"""
self.theta = theta
self.out = out
self.width = width
self.color = color
wing_length, wing_angle = self._find_wing_coord()
        shaft_xyz = p2c(1., self.theta, 0)
wings_xyz = [p2c(wing_length, self.theta + wing_angle, 0),
p2c(wing_length, self.theta - wing_angle, 0)]
self.shaft = go.Scatter3d(
x=[0, shaft_xyz[0]],
y=[0, shaft_xyz[1]],
z=[0, shaft_xyz[2]],
showlegend=False, mode='lines', line={'width': self.width, 'color': self.color}
)
self.wings = go.Scatter3d(
x=[wings_xyz[0][0], shaft_xyz[0] / 2., wings_xyz[1][0]],
y=[wings_xyz[0][1], shaft_xyz[1] / 2., wings_xyz[1][1]],
z=[wings_xyz[0][2], shaft_xyz[2] / 2., wings_xyz[1][2]],
showlegend=False, mode='lines', line={'width': self.width, 'color': self.color}
)
self.data = [self.shaft, self.wings]
def _find_wing_coord(self):
"""Finds polar coordinates of arrowhead wing ends"""
frac = 0.1
r = 0.5
sin45 = np.sin(np.pi / 4.)
if self.out == True:
d = r - frac * sin45
elif self.out == False:
d = r + frac * sin45
else:
raise TypeError("arg: out must be True or False")
a = np.sqrt(frac**2 * sin45**2 + d**2)
alpha = np.arccos(d / a)
return [a, alpha]
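# Illustrative usage (sketch added here, not part of the original module; the angle and
# colour below are arbitrary):
#     arrow = Arrow(theta=np.pi / 3, out=True, width=5, color='rgb(200,30,30)')
#     fig = go.Figure(data=arrow.data)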
|
mcanaves/django-tenant-schemas
|
examples/tenant_tutorial/tenant_tutorial/views.py
|
Python
|
mit
| 1,030
| 0.000971
|
from customers.models import Client
from django.conf import settings
from django.db import utils
from django.views.generic import TemplateView
from tenant_schemas.utils import remove_www
class HomeView(TemplateView):
template_name = "index_public.html"
    def get_context_data(self, **kwargs):
context = super(HomeView, self).get_context_data(**kwargs)
hostname_without_port = remove_www(self.request.get_host().split(':')[0])
try:
Client.objects.get(schema_name='public')
except utils.DatabaseError:
context['need_sync'] = True
            context['shared_apps'] = settings.SHARED_APPS
context['tenants_list'] = []
return context
except Client.DoesNotExist:
context['no_public_tenant'] = True
context['hostname'] = hostname_without_port
if Client.objects.count() == 1:
context['only_public_tenant'] = True
context['tenants_list'] = Client.objects.all()
return context
|
hastexo/django-oscar-vat_moss
|
oscar_vat_moss/order/models.py
|
Python
|
bsd-3-clause
| 375
| 0
|
from oscar_vat_moss import fields
from oscar.apps.address.abstract_models import AbstractShippingAddress
from oscar.apps.address.abstract_models import AbstractBillingAddress
class ShippingAddress(AbstractShippingAddress):
vatin = fields.vatin()
class BillingAddress(AbstractBillingAddress):
vatin = fields.vatin()
from oscar.apps.order.models import * # noqa
|
konradxyz/cloudify-manager
|
rest-service/manager_rest/workflow_client.py
|
Python
|
apache-2.0
| 1,585
| 0
|
#########
# Copyright (c) 2013 GigaSpaces Technologies Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# * See the License for the specific language governing permissions and
# * limitations under the License.
from manager_rest.celery_client import celery_client as client
class WorkflowClient(object):
@staticmethod
def execute_workflow(name,
workflow,
blueprint_id,
deployment_id,
execution_id,
execution_parameters=None):
        task_name = workflow['operation']
task_queue = '{}_workflows'.format(deployment_id)
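        # Each deployment gets its own Celery queue, named '<deployment_id>_workflows'.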
execution_parameters['__cloudify_context'] = {
'workflow_id': name,
'blueprint_id': blueprint_id,
'deployment_id': deployment_id,
'execution_id': execution_id
}
client().execute_task(task_name=task_name,
task_queue=task_queue,
task_id=execution_id,
kwargs=execution_parameters)
def workflow_client():
return WorkflowClient()
|
pombredanne/https-gitlab.lrde.epita.fr-vcsn-vcsn
|
tests/python/proper.py
|
Python
|
gpl-3.0
| 11,408
| 0.006837
|
#! /usr/bin/env python
import vcsn
from test import *
algos = ['distance', 'inplace', 'separate']
# check INPUT EXP ALGORITHM
# -------------------------
def check_algo(i, o, algo):
i = vcsn.automaton(i)
o = vcsn.automaton(o)
print("using algorithm: ", algo)
print("checking proper")
# We call sort().strip() everywhere to avoid seeing differences caused by the
# different numbering of the states between the algorithms
CHECK_EQ(o.sort().strip(), i.proper(algo=algo).sort().strip())
# Since we remove only states that _become_ inaccessible,
# i.proper(prune = False).accessible() is not the same as
# i.proper(): in the former case we also removed the non-accessible
# states.
print("checking proper(prune = False)")
CHECK_EQ(o.accessible(),
i.proper(prune=False, algo=algo).accessible())
# FIXME: Because proper uses copy, state numbers are changed.
#
# FIXME: cannot use is_isomorphic because some of our test cases
# have unreachable states, which is considered invalid by
# is_isomorphic.
print("checking idempotence")
if i.proper(algo=algo).is_accessible():
CHECK_ISOMORPHIC(i.proper(algo=algo), i.proper(algo=algo).proper(algo=algo))
else:
CHECK_EQ(i.proper(algo=algo).sort().strip(),
i.proper(algo=algo).proper(algo=algo).sort().strip())
def check_fail_algo(aut, algo):
a = vcsn.automaton(aut)
try:
a.proper(algo=algo)
FAIL(r"invalid \\e-cycle not detected")
except RuntimeError:
PASS()
def check(i, o, algs=algos):
for algo in algs:
check_algo(i, o, algo)
def check_fail(i, algs=algos):
for algo in algs:
check_fail_algo(i, algo)
## --------------------------------------- ##
## lao, r: check the computation of star. ##
## --------------------------------------- ##
check(r'''context = "lao, r"
$ -> 0 <3>
0 -> 1 <5>
1 -> 1 <.5>\e
1 -> 2 <7>\e
2 -> $ <11>
''',
'''
digraph
{
vcsn_context = "lao, r"
rankdir = LR
edge [arrowhead = vee, arrowsize = .6]
{
node [shape = point, width = 0]
I0
F1
}
{
node [shape = circle, style = rounded, width = 0.5]
0
1
}
I0 -> 0 [label = "<3>"]
0 -> 1 [label = "<5>"]
1 -> F1 [label = "<154>"]
}
''')
## -------------------------------------------- ##
## law_char, r: check the computation of star. ##
## -------------------------------------------- ##
check(r'''digraph
{
vcsn_context = "law_char(ab), r"
I -> 0 -> F
0 -> 0 [label = "<.5>\\e"]
}''','''digraph
{
vcsn_context = "wordset<char_letters(ab)>, r"
rankdir = LR
edge [arrowhead = vee, arrowsize = .6]
{
node [shape = point, width = 0]
I0
F0
}
{
node [shape = circle, style = rounded, width = 0.5]
0
}
I0 -> 0
0 -> F0 [label = "<2>"]
}''')
## ------------- ##
## law_char, b. ##
## ------------- ##
check(r'''digraph
{
vcsn_context = "law_char(ab), b"
rankdir = LR
edge [arrowhead = vee, arrowsize = .6]
{
node [shape = point, width = 0]
I0
F1
}
{
node [shape = circle, style = rounded, width = 0.5]
0
1
2
}
I0 -> 0
0 -> 1 [label = "a"]
0 -> 2 [label = "\\e"]
1 -> F1
  1 -> 0 [label = "\\e"]
1 -> 2 [label = "a"]
2 -> 0 [label = "a"]
2 -> 1 [label = "\\e"]
}''', '''digraph
{
vcsn_context = "wordset<char_letters(ab)>, b"
rankdir = LR
  edge [arrowhead = vee, arrowsize = .6]
{
node [shape = point, width = 0]
I0
F0
F1
F2
}
{
node [shape = circle, style = rounded, width = 0.5]
0
1
2
}
I0 -> 0
0 -> F0
0 -> 0 [label = "a"]
0 -> 1 [label = "a"]
0 -> 2 [label = "a"]
1 -> F1
1 -> 0 [label = "a"]
1 -> 1 [label = "a"]
1 -> 2 [label = "a"]
2 -> F2
2 -> 0 [label = "a"]
2 -> 1 [label = "a"]
2 -> 2 [label = "a"]
}''')
## ------------------------------------------------- ##
## law_char, z: invalid \e-cycle (weight is not 0). ##
## ------------------------------------------------- ##
check_fail(r'''digraph
{
vcsn_context = "law_char(ab), z"
rankdir = LR
edge [arrowhead = vee, arrowsize = .6]
{
node [shape = point, width = 0]
I0
F1
}
{
node [shape = circle, style = rounded, width = 0.5]
0
1
2
3
}
I0 -> 0
0 -> 1 [label = "<2>a"]
0 -> 2 [label = "<-1>\\e"]
1 -> F1
1 -> 0 [label = "<-1>\\e"]
2 -> 1 [label = "<-1>\\e"]
2 -> 3 [label = "<2>a"]
3 -> 0 [label = "<2>a"]
}''')
## ------------- ##
## law_char, z. ##
## ------------- ##
check(r'''digraph
{
vcsn_context = "law_char(ab), z"
rankdir = LR
edge [arrowhead = vee, arrowsize = .6]
{
node [shape = point, width = 0]
I0
F1
}
{
node [shape = circle, style = rounded, width = 0.5]
0
1
2
3
}
I0 -> 0
0 -> 1 [label = "<2>a"]
0 -> 2 [label = "<-1>a"]
1 -> F1
1 -> 0 [label = "<-1>\\e"]
2 -> 1 [label = "<-1>\\e"]
2 -> 3 [label = "<2>a"]
3 -> 0 [label = "<2>a"]
}''', '''digraph
{
vcsn_context = "wordset<char_letters(ab)>, z"
rankdir = LR
edge [arrowhead = vee, arrowsize = .6]
{
node [shape = point, width = 0]
I0
F1
F2
}
{
node [shape = circle, style = rounded, width = 0.5]
0
1
2
3
}
I0 -> 0
0 -> 1 [label = "<2>a"]
0 -> 2 [label = "<-1>a"]
1 -> F1
1 -> 1 [label = "<-2>a"]
1 -> 2 [label = "a"]
2 -> F2 [label = "<-1>"]
2 -> 1 [label = "<2>a"]
2 -> 2 [label = "<-1>a"]
2 -> 3 [label = "<2>a"]
3 -> 0 [label = "<2>a"]
}''')
## ---------------------------------- ##
## law_char, zmin: invalid \e-cycle. ##
## ---------------------------------- ##
check_fail(r'''digraph
{
vcsn_context = "law_char(ab), zmin"
rankdir = LR
edge [arrowhead = vee, arrowsize = .6]
{
node [shape = point, width = 0]
I0
F1
}
{
node [shape = circle, style = rounded, width = 0.5]
0
1
2
}
I0 -> 0 [label = "<0>"]
0 -> 1 [label = "<2>a"]
0 -> 2 [label = "<-1>\\e"]
1 -> F1 [label = "<0>"]
1 -> 0 [label = "<-1>\\e"]
1 -> 2 [label = "<2>a"]
2 -> 0 [label = "<2>a"]
2 -> 1 [label = "<-1>\\e"]
}''')
## ---------------------------- ##
## lan_char, zr: a long cycle. ##
## ---------------------------- ##
# FIXME(ap): with distance, weights are equivalent but not the same
check(r'''digraph
{
vcsn_context = "lan_char(z), expressionset<lal_char(abcd), q>"
rankdir = LR
node [shape = circle]
{
node [shape = point, width = 0]
I
F
}
{ 0 1 2 3 4 }
I -> 0
0 -> 1 [label = "<a>\\e"]
1 -> 2 [label = "<b>\\e"]
2 -> 3 [label = "<c>\\e"]
3 -> 0 [label = "<d>\\e"]
0 -> 4 [label = "z"]
4 -> F
}''', r'''digraph
{
vcsn_context = "letterset<char_letters(z)>, expressionset<letterset<char_letters(abcd)>, q>"
rankdir = LR
edge [arrowhead = vee, arrowsize = .6]
{
node [shape = point, width = 0]
I0
F1
}
{
node [shape = circle, style = rounded, width = 0.5]
0
1
}
I0 -> 0
0 -> 1 [label = "<(abcd)*>z"]
1 -> F1
}''', [algo for algo in algos if algo != 'distance'])
## ----------------------------------------- ##
## lan_char, zr: remove now-useless states. ##
## ----------------------------------------- ##
# Check that we remove states that _end_ without incoming transitions,
# but leave states that were inaccessible before the elimination of
# the spontaneous transitions.
# FIXME(ap): with distance, inaccessible states get pruned
check(r'''digraph
{
vcsn_context = "lan_char(z), expressionset<lal_char(abcdefgh), q>"
rankdir = LR
node [shape = circle]
{
node [shape = point, width = 0]
I
F
}
{ 0 1 2 3 4 5 6 7 8 9 }
I -> 0
0 -> 3 [label = "<a>\\e"]
0 -> 5 [label = "<b>\\e"]
1 -> 2 [label = "<c>\\e"]
3 -> 4 [label = "<d>\\e"]
5 -> 6 [label = "<e>\\e"]
7 -> 8 [label = "<f>\\e"]
6 -> 9 [label = "<g>\\e"]
8 -> 9 [label = "<h>\\e"]
9 -> F
}''', '''digraph
{
vcsn_context = "letterset<char_letters(z)>, expressionset<letterset<char_letters(abcdefgh)>, q>"
rankdir = LR
edge [arrowhead = vee, arrowsize = .6]
{
node [shape = point, width = 0]
I0
F0
F2
}
{
node [shape = circle, style = rounded, width = 0.5]
0
1 [color = DimGray]
2 [color = DimGray]
}
I0 -> 0
0
|
rocky/python3-trepan
|
trepan/processor/parse/tok.py
|
Python
|
gpl-3.0
| 1,426
| 0
|
class Token:
"""
Class representing a token.
kind: the kind of token, e.g. filename, number, other
value: specific instance value, e.g. "/tmp/foo.c", or 5
    offset: byte offset from start of parse string
"""
def __init__(self, kind, value=None, offset=None):
self.offset = offset
self.kind = kind
self.value = value
def __eq__(self, o):
""" '==', but it's okay if offset is different"""
if isinstance(o, Token):
            # Both are tokens: compare kind and value
# It's okay if offsets are different
return (self.kind == o.kind)
else:
return self.kind == o
def __repr__(self):
return str(self.kind)
def __repr1__(self, indent, sib_num=''):
return self.format(line_prefix=indent, sib_num=sib_num)
def __str__(self):
return self.format(line_prefix='')
def format(self, line_prefix='', sib_num=None):
if sib_num:
sib_num = "%d." % sib_num
else:
sib_num = ''
prefix = ('%s%s' % (line_prefix, sib_num))
offset_opname = '%5s %-10s' % (self.offset, self.kind)
if not self.value:
return "%s%s" % (prefix, offset_opname)
return "%s%s %s" % (prefix, offset_opname, self.value)
def __hash__(self):
return hash(self.kind)
def __getitem__(self, i):
raise IndexError
|
rdmilligan/SaltwashAR
|
scripts/features/base/speaking.py
|
Python
|
gpl-3.0
| 476
| 0.006303
|
# Copyright (C) 2015 Ross D Milligan
# GNU GENERAL PUBLIC LICENSE Version 3 (full notice can be found at https://github.com/rdmilligan/SaltwashAR)
class Speaking:
# initialize speaking
def __init__(self, text_to_speech):
self.is_speaking = False
self.text_to_speech = text_to_speech
# text to speech
def _text_to_speech(self, text):
        self.is_speaking = True
self.text_to_speech.convert(text)
self.is_speaking = False
|
Dev-Cloud-Platform/Dev-Cloud
|
dev_cloud/cc1/src/ec2/helpers/query.py
|
Python
|
apache-2.0
| 3,363
| 0.000595
|
# -*- coding: utf-8 -*-
# @COPYRIGHT_begin
#
# Copyright [2010-2014] Institute of Nuclear Physics PAN, Krakow, Poland
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# @COPYRIGHT_end
"""@package src.ec2.helpers.query
@copyright Copyright (c) 2012 Institute of Nuclear Physics PAS <http://www.ifj.edu.pl/>
@author Oleksandr Gituliar <gituliar@gmail.com>
"""
from datetime import datetime
import urllib
from ec2.base.auth import _sign_parameters_ver2
def query(parameters, aws_key=None, aws_secret=None, endpoint=None,
method=None, secure=False):
parameters.setdefault('SignatureMethod', 'HmacSHA256')
parameters.setdefault('SignatureVersion', '2')
parameters['AWSAccessKeyId'] = aws_key
parameters['Timestamp'] = datetime.utcnow().strftime("%Y-%m-%dT%H:%M:%SZ")
parameters['Version'] = "2012-03-01"
# set Signature
signature = _sign_parameters_ver2(
parameters,
aws_secret,
endpoint=endpoint,
method=method,
)
parameters['Signature'] = signature
# build request
protocol = 'http' if not secure else 'https'
query_parameters = urllib.urlencode(parameters)
if method == 'GET':
request = ("%s://%s/?%s" % (protocol, endpoint, query_parameters),)
elif method == 'POST':
request = ("%s://%s" % (protocol, endpoint), query_parameters)
else:
raise Exception('Unsupported %s method: %s' % (protocol.upper(), method))
response = urllib.urlopen(*request).read()
return request, response
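# Illustrative call (sketch, not from the original source; the key, secret and endpoint
# below are placeholders):
#     request, response = query({'Action': 'DescribeInstances'},
#                               aws_key='AKIA...', aws_secret='...',
#                               endpoint='ec2.example.com', method='GET')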
def get_instance_tags(cluster_manager):
instances = cluster_manager.user.vm.get_list()
tags = []
for instance in instances:
tags.append({'resource-id': 'i-' + str(instance['vm_id']),
'key': 'Name',
'resource-type': 'instance',
'value': instance['name']})
return tags
def get_instance_name_tag(cluster_manager, id):
instance = cluster_manager.user.vm.get_by_id({'vm_id': id})
tags = {'resource-id': 'i-' + str(instance['vm_id']),
'key': 'Name',
'resource-type': 'instance',
'value': instance['name']}
return tags
def get_volume_name_tag(cluster_manager, id):
volume = cluster_manager.user.storage_image.get_by_id({'vm_id': id})
tags = {'resource-id': 'i-' + str(volume['storage_image_id']),
'key': 'Name',
'resource-type': 'volume',
'value': volume['name']}
return tags
def get_volume_tags(cluster_manager):
volumes = cluster_manager.user.storage_image.get_list()
tags = []
for volume in volumes:
tags.append({'resource-id': 'vol-' + str(volume['storage_image_id']),
'key': 'Name',
'resource-type': 'volume',
'value': volume['name']})
return tags
|
Eyepea/aiohttp
|
aiohttp/client.py
|
Python
|
apache-2.0
| 26,125
| 0.000153
|
"""HTTP Client for asyncio."""
import asyncio
import base64
import hashlib
import json
import os
import sys
import traceback
import warnings
from multidict import CIMultiDict, MultiDict, MultiDictProxy, istr
from yarl import URL
from . import connector as connector_mod
from . import client_exceptions, client_reqrep, hdrs, http, payload
from .client_exceptions import * # noqa
from .client_exceptions import (ClientError, ClientOSError, ServerTimeoutError,
WSServerHandshakeError)
from .client_reqrep import * # noqa
from .client_reqrep import ClientRequest, ClientResponse
from .client_ws import ClientWebSocketResponse
from .connector import * # noqa
from .connector import TCPConnector
from .cookiejar import CookieJar
from .helpers import (PY_35, CeilTimeout, TimeoutHandle, _BaseCoroMixin,
deprecated_noop, sentinel)
from .http import WS_KEY, WebSocketReader, WebSocketWriter
from .streams import FlowControlDataQueue
__all__ = (client_exceptions.__all__ + # noqa
client_reqrep.__all__ + # noqa
connector_mod.__all__ + # noqa
('ClientSession', 'ClientWebSocketResponse', 'request'))
# 5 Minute default read and connect timeout
DEFAULT_TIMEOUT = 5 * 60
class ClientSession:
"""First-class interface for making HTTP requests."""
_source_traceback = None
_connector = None
requote_redirect_url = True
def __init__(self, *, connector=None, loop=None, cookies=None,
headers=None, skip_auto_headers=None,
auth=None, json_serialize=json.dumps,
request_class=ClientRequest, response_class=ClientResponse,
ws_response_class=ClientWebSocketResponse,
version=http.HttpVersion11,
cookie_jar=None, connector_owner=True, raise_for_status=False,
read_timeout=sentinel, conn_timeout=None,
auto_decompress=True):
implicit_loop = False
if loop is None:
if connector is not None:
loop = connector._loop
else:
implicit_loop = True
loop = asyncio.get_event_loop()
if connector is None:
connector = TCPConnector(loop=loop)
if connector._loop is not loop:
raise RuntimeError(
"Session and connector has to use same event loop")
self._loop = loop
if loop.get_debug():
self._source_traceback = traceback.extract_stack(sys._getframe(1))
if implicit_loop and not loop.is_running():
warnings.warn("Creating a client session outside of coroutine is "
"a very dangerous idea", ResourceWarning,
stacklevel=2)
context = {'client_session': self,
'message': 'Creating a client session outside '
'of coroutine'}
if self._source_traceback is not None:
context['source_traceback'] = self._source_traceback
loop.call_exception_handler(context)
if cookie_jar is None:
cookie_jar = CookieJar(loop=loop)
self._cookie_jar = cookie_jar
if cookies is not None:
self._cookie_jar.update_cookies(cookies)
self._connector = connector
self._connector_owner = connector_owner
self._default_auth = auth
self._version = version
self._json_serialize = json_serialize
self._read_timeout = (read_timeout if read_timeout is not sentinel
else DEFAULT_TIMEOUT)
self._conn_timeout = conn_timeout
self._raise_for_status = raise_for_status
self._auto_decompress = auto_decompress
# Convert to list of tuples
if headers:
headers = CIMultiDict(headers)
else:
headers = CIMultiDict()
self._default_headers = headers
if skip_auto_headers is not None:
self._skip_auto_headers = frozenset([istr(i)
for i in skip_auto_headers])
else:
self._skip_auto_headers = frozenset()
self._request_class = request_class
self._response_class = response_class
self._ws_response_class = ws_response_class
def __del__(self, _warnings=warnings):
if not self.closed:
self.close()
_warnings.warn("Unclosed client session {!r}".format(self),
ResourceWarning)
context = {'client_session': self,
'message': 'Unclosed client session'}
if self._source_traceback is not None:
context['source_traceback'] = self._source_traceback
self._loop.call_exception_handler(context)
def request(self, method, url, **kwargs):
"""Perform HTTP request."""
return _RequestContextManager(self._request(method, url, **kwargs))
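    # Illustrative usage only (not part of this module): request() wraps the
    # _request coroutine below in a context manager, so a typical call from
    # @asyncio.coroutine code looks roughly like this (the URL is hypothetical):
    #
    #     session = ClientSession()
    #     resp = yield from session.request('GET', 'http://example.com')
    #     body = yield from resp.text()
    #     resp.close()
    #     session.close()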
@asyncio.coroutine
def _request(self, method, url, *,
params=None,
data=None,
json=None,
headers=None,
skip_auto_headers=None,
auth=None,
allow_redirects=True,
max_redirects=10,
encoding=None,
compress=None,
chunked=None,
expect100=False,
read_until_eof=True,
proxy=None,
proxy_auth=None,
timeout=sentinel):
# NOTE: timeout clamps existing connect and read timeouts. We cannot
# set the default to None because we need to detect if the user wants
# to use the existing timeouts by setting timeout to None.
if encoding is not None:
warnings.warn(
"encoding parameter is not supported, "
"please use FormData(charset='utf-8') instead",
DeprecationWarning)
if self.closed:
raise RuntimeError('Session is closed')
if data is not None and json is not None:
raise ValueError(
'data and json parameters can not be used at the same time')
elif json is not None:
data = payload.JsonPayload(json, dumps=self._json_serialize)
if not isinstance(chunked, bool) and chunked is not None:
warnings.warn(
'Chunk size is deprecated #1615', DeprecationWarning)
redirects = 0
history = []
version = self._version
# Merge with default headers and transform to CIMultiDict
headers = self._prepare_headers(headers)
if auth is None:
auth = self._default_auth
# It would be confusing if we support explicit Authorization header
# with `auth` argument
if (headers is not None and
auth is not None and
hdrs.AUTHORIZATION in headers):
raise ValueError("Can't combine `Authorization` header with "
"`auth` argument")
skip_headers = set(self._skip_auto_headers)
if skip_auto_headers is not None:
for i in skip_auto_headers:
skip_headers.add(istr(i))
if proxy is not None:
proxy = URL(proxy)
# timeout is cumulative for all request operations
# (request, redirects, responses, data consuming)
tm = TimeoutHandle(
self._loop,
            timeout if timeout is not sentinel else self._read_timeout)
handle = tm.start()
|
timer = tm.timer()
try:
with timer:
while True:
url = URL(url).with_fragment(None)
cookies = self._cookie_jar.filter_cookies(url)
req = self._request_class(
method, url, params=params, headers=headers,
skip_auto_headers=skip_headers, data=data,
cookies=cookies, auth=auth, version=version,
compress=compress, chunke
|
m-weigand/ccd_tools
|
src/ddplot/ddplot.py
|
Python
|
gpl-3.0
| 16,512
| 0.000666
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
ddplot.py can create plots from plot results created using dd_single.py and
dd_time.py
Copyright 2014,2015 Maximilian Weigand
This program is free software: you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
Foundation, either version 3 of the License, or (at your option) any later
version.
This program is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program. If not, see <http://www.gnu.org/licenses/>.
"""
# from memory_profiler import *
from optparse import OptionParser
import os
import numpy as np
from NDimInv.plot_helper import *
import NDimInv.elem as elem
# import dd_single
import NDimInv
# import lib_dd.plot as lDDp
import sip_formats.convert as SC
import matplotlib.pylab as plt
import matplotlib as mpl
def handle_cmd_options():
parser = OptionParser()
parser.add_option("-i", "--dir", type='string', metavar='DIR',
help="dd_single/dd_time result directory default " +
"results",
default="results", dest="result_dir")
parser.add_option("--range", dest="spec_ranges", type="string",
help="Pixel range(s) to plot. Separate by ';' and " +
"start with 1. For example: \"1;2;4;5\". " +
"Also allowed are ranges: \"2-10\", and open ranges: " +
"\"5-\" (default: -1 (all))",
default=None)
# parser.add_option("--nr_cpus", type='int', metavar='NR',
# help="Output directory", default=1,
# dest="nr_cpus")
parser.add_option('-o', "--output", type='str', metavar='DIR',
help="Output directory (default: filtered_results)",
default='filtered_results',
dest="output_dir")
(options, args) = parser.parse_args()
return options, args
def _get_result_type(directory):
"""Use heuristics to determine the type of result dir that we deal with
Possible types are 'ascii' and 'ascii_audit'
"""
if not os.path.isdir(directory):
raise Exception('Directory does not exist: {0}'.format(directory))
if os.path.isdir(directory + os.sep + 'stats_and_rms'):
return 'ascii'
else:
return 'ascii_audit'
def load_ascii_audit_data(directory):
"""We need:
* frequencies
* data (rmag_rpha, cre_cim)
* forward response (rmag_rpha, cre_cim)
* RTD
"""
data = {}
frequencies = np.loadtxt(directory + os.sep + 'frequencies.dat')
data['frequencies'] = frequencies
with open(directory + os.sep + 'data.dat', 'r') as fid:
[fid.readline() for x in range(0, 3)]
header = fid.readline().strip()
index = header.find('format:')
data_format = header[index + 8:].strip()
subdata = np.loadtxt(fid)
temp = SC.convert(data_format, 'rmag_rpha', subdata)
rmag, rpha = SC.split_data(temp)
data['d_rmag'] = rmag
data['d_rpha'] = rpha
temp = SC.convert(data_format, 'cre_cim', subdata)
rmag, rpha = SC.split_data(temp)
data['d_cre'] = rmag
data['d_cim'] = rpha
temp = SC.convert(data_format, 'rre_rim', subdata)
rmag, rpha = SC.split_data(temp)
data['d_rre'] = rmag
data['d_rim'] = rpha
with open(directory + os.sep + 'f.dat', 'r') as fid:
[fid.readline() for x in range(0, 3)]
header = fid.readline().strip()
index = header.find('format:')
data_format = header[index + 8:].strip()
subdata = np.loadtxt(fid)
temp = SC.convert(data_format, 'rmag_rpha', subdata)
rmag, rpha = SC.split_data(temp)
data['f_rmag'] = rmag
data['f_rpha'] = rpha
temp = SC.convert(data_format, 'cre_cim', subdata)
rmag, rpha = SC.split_data(temp)
data['f_cre'] = rmag
data['f_cim'] = rpha
temp = SC.convert(data_format, 'rre_rim', subdata)
rmag, rpha = SC.split_data(temp)
data['f_rre'] = rmag
data['f_rim'] = rpha
data['rtd'] = np.atleast_2d(
np.loadtxt(directory + os.sep + 'm_i.dat', skiprows=4))
data['tau'] = np.loadtxt(directory + os.sep + 'tau.dat', skiprows=4)
return data
def load_ascii_data(directory):
data = {}
frequencies = np.loadtxt(directory + os.sep + 'frequencies.dat')
data['frequencies'] = frequencies
data_format = open(
directory + os.sep + 'data_format.dat', 'r').readline().strip()
subdata = np.loadtxt(directory + os.sep + 'data.dat')
temp = SC.convert(data_format, 'rmag_rpha', subdata)
rmag, rpha = SC.split_data(temp)
data['d_rmag'] = rmag
data['d_rpha'] = rpha
temp = SC.convert(data_format, 'cre_cim', subdata)
rmag, rpha = SC.split_data(temp)
data['d_cre'] = rmag
data['d_cim'] = rpha
temp = SC.convert(data_format, 'rre_rim', subdata)
rmag, rpha = SC.split_data(temp)
data['d_rre'] = rmag
data['d_rim'] = rpha
f_format = open(
directory + os.sep + 'f_format.dat', 'r').readline().strip()
subdata = np.loadtxt(directory + os.sep + 'f.dat')
temp = SC.convert(f_format, 'rmag_rpha', subdata)
rmag, rpha = SC.split_data(temp)
data['f_rmag'] = rmag
data['f_rpha'] = rpha
temp = SC.convert(f_format, 'cre_cim', subdata)
rmag, rpha = SC.split_data(temp)
data['f_cre'] = rmag
data['f_cim'] = rpha
temp = SC.convert(f_format, 'rre_rim', subdata)
rmag, rpha = SC.split_data(temp)
data['f_rre'] = rmag
data['f_rim'] = rpha
data['rtd'] = np.atleast_2d(
np.loadtxt(directory + os.sep + 'stats_and_rms' + os.sep +
'm_i_results.dat'))
data['tau'] = np.loadtxt(directory + os.sep + 'tau.dat')
return data
def extract_indices_from_range_str(filter_string, max_index=None):
"""
Extract indices (e.g. for spectra or pixels) from a range string. The
string must have the following format: Separate different ranges by ';',
first index is 1.
If max_index is provided, open ranges are allowed, as well as "-1" for all.
Examples:
"1;2;4;5"
"2-10"
"5-"
"-1"
Parameters
----------
    filter_string : string to be parsed according to the format specifications
        above
    max_index : (default: None). Provide the maximum index for the ranges to
        allow open ranges and "-1" for all indices
"""
if(filter_string is None):
return None
sections = filter_string.split(';')
filter_ids = []
# now look for ranges and expand if necessary
for section in sections:
filter_range = section.split('-')
if(len(filter_range) == 2):
start = filter_range[0]
end = filter_range[1]
# check for an open range, e.g. 4-
if(end == ''):
if(max_index is not None):
end = max_index
else:
continue
filter_ids += list(range(int(start) - 1, int(end)))
else:
filter_ids.append(int(section) - 1)
return filter_ids
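# Hedged usage sketch (not part of the original script): exercising the range
# parser above with the formats described in its docstring; returned indices
# are zero-based.
if __name__ == '__main__':
    assert extract_indices_from_range_str("1;2;4;5") == [0, 1, 3, 4]
    assert extract_indices_from_range_str("2-4") == [1, 2, 3]
    assert extract_indices_from_range_str("5-", max_index=8) == [4, 5, 6, 7]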
def plot_data(data, options):
nr_specs = data['d_rmag'].shape[0]
indices = extract_indices_from_range_str(options.spec_ranges,
nr_specs)
if indices is None:
indices = list(range(0, nr_specs))
frequencies = data['frequencies']
for index in indices:
fig, axes = plt.subplots(1, 5, figsize=(14, 3))
# Magnitude and phase values
ax = axes[0]
ax.semilogx(frequencies, data['d_rmag'][index, :], '.', color='k')
ax.semilogx(frequencies, data['f_rmag'][index, :], '-', color='k')
ax.set_xlabel('frequency [Hz]')
ax.set_ylabel(r'$|\rho|~[\O
|
softlayer/softlayer-python
|
SoftLayer/CLI/subnet/__init__.py
|
Python
|
mit
| 23
| 0
|
""
|
"Network subnets.
|
"""
|
SeeSpotRun/rmlint
|
tests/test_options/test_match_without_extension.py
|
Python
|
gpl-3.0
| 718
| 0.001393
|
#!/usr/bin/env python3
# encoding: utf-8
from nose import with_setup
from tests.utils import *
@with_setup(usual_setup_func, usual_teardown_func)
def test_negative():
create_file('xxx', 'b.png')
create_file('xxx', 'a.png')
create_file('xxx', 'a')
head, *data, footer = run_rmlint('-i')
assert footer['total_files'] == 3
assert footer['total_lint_size'] == 0
assert footer['duplicates'] == 0
@with_setup(usual_setup_func, usual_teardown_func)
def test_positive():
    create_file('xxx', 'a.png')
create_file('xxx', 'a.jpg')
head, *data, footer = run_rmlint('-i')
assert footer['total_files'] == 2
assert footer['total_lint_size'] == 3
assert footer['duplicates'] == 1
|
ericlee0803/surrogate-GCP
|
gp/GPhelpers.py
|
Python
|
bsd-3-clause
| 2,253
| 0.030626
|
import GPy
import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import norm
# ~~~~WARNING: ONLY SUPPORT FOR 1D RIGHT NOW~~~~ #
# TODO
def batch():
pass
# Calculates GP from input and output vectors X and Y respectively
def calcGP(X, Y, kernel='rbf', variance=1., lengthscale=1.):
# Reshape in 1D Case to proper column vector form
if(len(X.shape) == 1):
X = np.reshape(X, (len(X),1))
if(len(Y.shape) == 1):
Y = np.reshape(Y, (len(Y),1))
if(kernel=='rbf'):
        kernel = GPy.kern.RBF(input_dim=1, variance=variance, lengthscale=lengthscale)
m = GPy.models.GPRegression(X,Y,kernel)
return m
else:
print('Kernel is not supported, please use one that is supported or use the default RBF Kernel')
return None
# Updates GP with a set of new function evaluations Y at points X
def updateGP(model, kernel, Xnew, Ynew):
# Reshape in 1D Case
if(len(Xnew.shape) == 1):
        Xnew = np.reshape(Xnew, (len(Xnew),1))
    if(len(Ynew.shape) == 1):
        Ynew = np.reshape(Ynew, (len(Ynew),1))
X = np.append(model.X, Xnew, 0)
    Y = np.append(model.Y, Ynew, 0)
m = GPy.models.GPRegression(X,Y,kernel)
return m
# Using Expected Improvement, send out a number of further evaluations
# -batchsize = number of new evals
# -fidelity = number of points used to estimate EI
# -bounds = determines how new evals points are spaced
def batchNewEvals_EI(model, bounds=1, batchsize=50, fidelity=100):
P, ei = compute_ei(model, fidelity)
idx = np.argmax(ei)
xnew = P[idx]
X = np.linspace(xnew-bounds, xnew+bounds, num=batchsize)
return X
# Calculates EI given means mu and variances sigma2
def compute_ei_inner(ybest, mu, sigma2):
sigma = np.sqrt(sigma2)
u = (ybest - mu) / sigma
ucdf = norm.cdf(u)
updf = norm.pdf(u)
ei = sigma * (updf + u * ucdf)
return ei
# Takes in GP model from GPy and computes EI at points P
# We are assuming minimization, and thus ybest represents the smallest point we have so far
def compute_ei(model, numsamples):
P = np.linspace(model.X[0], model.X[-1], num=numsamples)
    ybest = np.amin(model.Y)
P = np.reshape(P, [len(P), 1])
mu, sigma2 = model.predict(P)
return P, compute_ei_inner(ybest, mu, sigma2)
def plotGP(model):
fig = model.plot()
regfig = GPy.plotting.show(fig)
regfig.savefig('GPmodel.png')
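# Hedged end-to-end sketch (not part of the original module): fit a 1D GP on a
# toy function, score candidates by expected improvement and pick a batch of
# follow-up evaluation points. Only the helpers defined above plus basic
# numpy/GPy calls are used; exact GPy behaviour may vary between versions.
if __name__ == '__main__':
    Xt = np.linspace(0, 10, 20)
    Yt = np.sin(Xt)
    model = calcGP(Xt, Yt)                       # default RBF kernel
    P, ei = compute_ei(model, numsamples=100)    # EI over a candidate grid
    Xnext = batchNewEvals_EI(model, bounds=0.5, batchsize=10, fidelity=100)
    print(Xnext)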
|
RPGOne/Skynet
|
scikit-learn-c604ac39ad0e5b066d964df3e8f31ba7ebda1e0e/examples/mixture/plot_gmm.py
|
Python
|
bsd-3-clause
| 2,817
| 0
|
"""
=================================
Gaussian Mixture Model Ellipsoids
=================================
Plot the confidence ellipsoids of a mixture of two Gaussians with EM
and variational Dirichlet process.
Both models have access to five components with which to fit the
data. Note that the EM model will necessarily use all five components
while the DP model will effectively only use as many as are needed for
a good fit. This is a property of the Dirichlet Process prior. Here we
can see that the EM model splits some components arbitrarily, because it
is trying to fit too many components, while the Dirichlet Process model
adapts its number of states automatically.
This example doesn't show it, as we're in a low-dimensional space, but
another advantage of the Dirichlet process model is that it can fit
full covariance matrices effectively even when there are fewer examples
per cluster than there are dimensions in the data, due to
regularization properties of the inference algorithm.
"""
import itertools
import numpy as np
from scipy import linalg
import matplotlib.pyplot as plt
import matplotlib as mpl
from sklearn import mixture
# Number of samples per component
n_samples = 500
# Generate random sample, two components
np.random.seed(0)
C = np.array([[0., -0.1], [1.7, .4]])
X = np.r_[np.dot(np.random.randn(n_samples, 2), C),
.7 * np.random.randn(n_samples, 2) + np.array([-6, 3])]
# Fit a mixture of Gaussians with EM using five components
gmm = mixture.GMM(n_components=5, covariance_type='full')
gmm.fit(X)
# Fit a Dirichlet process mixture of Gaussians using five components
dpgmm = mixture.DPGMM(n_components=5, covariance_type='full')
dpgmm.fit(X)
color_iter = itertools.cycle(['r', 'g', 'b', 'c', 'm'])
for i, (clf, title) in enumerate([(gmm, 'GMM'),
(dpgmm, 'Dirichlet Process GMM')]):
splot = plt.subplot(2, 1, 1 + i)
Y_ = clf.predict(X)
for i, (mean, covar, color) in enumerate(zip(
clf.means_, clf._get_covars(), color_iter)):
v, w = linalg.eigh(covar)
|
u = w[0] / linalg.norm(w[0])
        # as the DP will not use every component it has access to
# unless it needs it, we shouldn't plot the redundant
# components.
if not np.any(Y_ == i):
continue
plt.scatter(X[Y_ == i, 0], X[Y_ == i, 1], .8, color=color)
# Plot an ellipse to show the Gaussian component
angle = np.arctan(u[1] / u[0])
angle = 180 * angle / np.pi # convert to degrees
ell = mpl.patches.Ellipse(mean, v[0], v[1], 180 + angle, color=color)
ell.set_clip_box(splot.bbox)
ell.set_alpha(0.5)
splot.add_artist(ell)
plt.xlim(-10, 10)
plt.ylim(-3, 6)
plt.xticks(())
plt.yticks(())
plt.title(title)
plt.show()
|
jeremymcrae/mupit
|
mupit/mutation_rates.py
|
Python
|
mit
| 8,652
| 0.007397
|
"""
Copyright (c) 2016 Genome Research Ltd.
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
of the Software, and to permit persons to whom the Software is furnished to do
so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
from __future__ import division
import tempfile
import urllib
import pandas
from mupit.gtf import convert_gtf
from mupit.util import is_url
def get_default_rates(rates_url="http://www.nature.com/ng/journal/v46/n9/extref/ng.3050-S2.xls",
gencode_url="ftp://ftp.sanger.ac.uk/pub/gencode/Gencode_human/release_19/gencode.v19.annotation.gtf.gz"):
""" obtain the table of mutation rates from the Samocha et al paper
    Rates for all genes can be obtained from the supplementary material of
Samocha et al. Nature Genetics 2014 doi:10.1038/ng.3050
Args:
rates_url: url to supplementary mutation rates table
        gencode_url: url to gencode, or local path. This is required to identify
chromosomes for the genes in the rates data, since we need to know
            the chromosome in order to correct rates on chromosome X.
Returns:
dataframe of mutation rates, with an extra column for summed lof rate
"""
rates = pandas.read_excel(rates_url, sheetname="mutation_probabilities")
# convert rates from log-scaled values, so we can later multiply by the
# number of transmissions
columns = ["syn", "mis", "non", "splice_site", "frameshift"]
rates[columns] = 10 ** rates[columns]
# sort out the required columns and names.
rates["hgnc"] = rates["gene"]
gencode = load_gencode(gencode_url)
recode = dict(zip(gencode["hgnc"], gencode["chrom"]))
rates["chrom"] = rates["hgnc"].map(recode)
rates = rates[["hgnc", "chrom", "syn", "mis", "splice_site", "frameshift", "non"]]
return rates
def load_gencode(path):
""" load gencode table with HGNC symbols and chromosome coordinates
Args:
path: path to gzipped tab-separated table of gencode gene entries. This
can be either a url, or local path.
Returns:
        pandas dataframe of HGNC symbols and genome coordinates
"""
gencode = convert_gtf(path)
    # restrict ourselves to protein coding genes (or genes which are protein
# coding in at least some individuals)
gencode = gencode[gencode["gene_type"].isin(["protein_coding",
"polymorphic_pseudogene"])]
gencode = gencode[gencode["feature"] == "gene"]
# get the required column names, and strip out all unnecessary columns
gencode["hgnc"] = gencode["gene_name"]
gencode["chrom"] = [ x.strip("chr") for x in gencode["seqname"].astype(str) ]
gencode = gencode[["hgnc", "chrom", "start", "end"]].copy()
return gencode
def get_expected_mutations(rates, male, female):
""" gets numbers of expected mutation per gene
Loads gene-based mutation rates, in order to determine the expected number
of mutations per gene, given the number of studied probands and adjusts for
sex-chromosome transmissions.
This defaults to the gene-based mutation rates from Nature Genetics
46:944-950 (2014) doi:10.1038/ng.3050, but we can pass in other gene-based
mutation rate datasets.
Args:
rates: pandas dataframe containing per-gene mutation
male: number of male probands in the dataset
female: number of female probands in the dataset
Returns:
a dataframe of mutation rates for genes under different mutation
classes.
"""
if rates is None:
rates = get_default_rates()
autosomal = 2 * (male + female)
expected = rates[["hgnc", "chrom"]].copy()
# account for how different pandas versions sum series with only NA
kwargs = {}
if pandas.__version__ >= '0.22.0':
kwargs = {'min_count': 1}
# get the number of expected mutations, given the number of transmissions
expected["lof_indel"] = rates["frameshift"] * autosomal
expected["lof_snv"] = (rates[["non", "splice_site"]].sum(axis=1, skipna=True, **kwargs)) * autosomal
expected["missense_indel"] = (rates["frameshift"] / 9) * autosomal
expected["missense_snv"] = rates["mis"] * autosomal
expected["synonymous_snv"] = rates["syn"] * autosomal
# correct for the known ratio of indels to nonsense, and for transmissions
# on the X-chromosome
expected = adjust_indel_rates(expected)
expected = correct_for_x_chrom(expected, male, female)
# subset to the columns we need to estimate enrichment probabilities
expected = expected[["hgnc", "chrom", "lof_indel", "lof_snv",
"missense_indel", "missense_snv", "synonymous_snv"]]
return expected
def correct_for_x_chrom(expected, male_n, female_n):
""" correct mutations rates for sex-chromosome transmission rates
Args:
expected: gene-based data frame, containing rates for different mutation
classes.
male_n: number of trios with male offspring
female_n: number of trios with female offspring
Returns:
a dataframe of mutation rates for genes under different mutation
classes.
"""
# Calculate the number of transmissions for autosomal, male and female
# transmissions. The number of transmissions from males is equal to the
# number of female probands (since only females receive a chrX from their
# fathers). Likewise, all offspring receive a chrX from their mothers, so
# the number of transmissions from females equals the number of probands.
autosomal = 2 * (male_n + female_n)
female_transmissions = male_n + female_n
male_transmissions = female_n
# get scaling factors using the alpha from the most recent SFHS (Scottish
# Family Health Study) phased de novo data.
alpha = 3.4
male_factor = 2 / (1 + (1 / alpha))
female_factor = 2 / (1 + alpha)
# correct the non-PAR chrX genes for fewer transmissions and lower rate
# (dependent on alpha)
chrX = expected["chrom"].isin(["X", "chrX"])
x_factor = ((male_transmissions * male_factor) + (female_transmissions * female_factor)) / autosomal
x_factor = pandas.Series([x_factor] * len(chrX), index=expected.index)
x_factor[~chrX] = 1
expected["missense_snv"] *= x_factor
expected["missense_indel"] *= x_factor
expected["lof_snv"] *= x_factor
expected["lof_indel"] *= x_factor
expected["synonymous_snv"] *= x_factor
return expected
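# Worked example (illustrative only): with male_n = 2 and female_n = 3,
# autosomal = 2 * (2 + 3) = 10, female_transmissions = 5 and
# male_transmissions = 3. Using alpha = 3.4, male_factor = 2 / (1 + 1/3.4)
# ~= 1.545 and female_factor = 2 / (1 + 3.4) ~= 0.455, so the chrX scaling is
# ((3 * 1.545) + (5 * 0.455)) / 10 ~= 0.69, i.e. non-PAR chrX rates are scaled
# to roughly 69% of the autosomal expectation while autosomes keep a factor of 1.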
def adjust_indel_rates(expected):
""" adapt indel rates for lower rate estimate from validated de novos
The indel mutation rates from Samocha et al., Nature Genetics 46:944-950
assume that the overall indel mutation rate is 1.25-fold greater than the
overall nonsense mutation rate, ie there are 1.25 times as many frameshifts
as nonsense mutations. We have our own estimates for the ratio, derived from
our de novo validation efforts, which we shall apply in place of the Samocha
et al ratios.
Args:
rates: data frame of mutation rates.
Returns:
the rates data frame, with adjusted indel rates.
"""
# the following numbers were derived from the DDD 4K dataset.
nonsense_n
|
Yelp/elastalert
|
elastalert/kibana_discover.py
|
Python
|
apache-2.0
| 5,644
| 0.003189
|
# -*- coding: utf-8 -*-
# flake8: noqa
import datetime
import logging
import json
import os.path
import prison
import urllib.parse
from .util import EAException
from .util import lookup_es_key
from .util import ts_add
kibana_default_timedelta = datetime.timedelta(minutes=10)
kibana5_kibana6_versions = frozenset(['5.6', '6.0', '6.1', '6.2', '6.3', '6.4', '6.5', '6.6', '6.7', '6.8'])
kibana7_versions = frozenset(['7.0', '7.1', '7.2', '7.3'])
def generate_kibana_discover_url(rule, match):
''' Creates a link for a kibana discover app. '''
discover_app_url = rule.get('kibana_discover_app_url')
    if not discover_app_url:
logging.warning(
'Missing kibana_discover_app_url for rule %s' % (
                rule.get('name', '<MISSING NAME>')
)
)
return None
kibana_version = rule.get('kibana_discover_version')
if not kibana_version:
logging.warning(
'Missing kibana_discover_version for rule %s' % (
rule.get('name', '<MISSING NAME>')
)
)
return None
index = rule.get('kibana_discover_index_pattern_id')
if not index:
logging.warning(
'Missing kibana_discover_index_pattern_id for rule %s' % (
rule.get('name', '<MISSING NAME>')
)
)
return None
columns = rule.get('kibana_discover_columns', ['_source'])
filters = rule.get('filter', [])
if 'query_key' in rule:
query_keys = rule.get('compound_query_key', [rule['query_key']])
else:
query_keys = []
timestamp = lookup_es_key(match, rule['timestamp_field'])
timeframe = rule.get('timeframe', kibana_default_timedelta)
from_timedelta = rule.get('kibana_discover_from_timedelta', timeframe)
from_time = ts_add(timestamp, -from_timedelta)
to_timedelta = rule.get('kibana_discover_to_timedelta', timeframe)
to_time = ts_add(timestamp, to_timedelta)
if kibana_version in kibana5_kibana6_versions:
globalState = kibana6_disover_global_state(from_time, to_time)
appState = kibana_discover_app_state(index, columns, filters, query_keys, match)
elif kibana_version in kibana7_versions:
globalState = kibana7_disover_global_state(from_time, to_time)
appState = kibana_discover_app_state(index, columns, filters, query_keys, match)
else:
logging.warning(
'Unknown kibana discover application version %s for rule %s' % (
kibana_version,
rule.get('name', '<MISSING NAME>')
)
)
return None
return "%s?_g=%s&_a=%s" % (
os.path.expandvars(discover_app_url),
urllib.parse.quote(globalState),
urllib.parse.quote(appState)
)
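# Illustrative sketch only (all rule/match values below are hypothetical): a
# rule needs at least kibana_discover_app_url, kibana_discover_version,
# kibana_discover_index_pattern_id and timestamp_field for a link to be built:
#
#     rule = {
#         'name': 'example rule',
#         'kibana_discover_app_url': 'http://kibana:5601/app/kibana#/discover',
#         'kibana_discover_version': '7.3',
#         'kibana_discover_index_pattern_id': 'logs-*',
#         'timestamp_field': '@timestamp',
#     }
#     match = {'@timestamp': '2019-09-01T00:30:00Z'}
#     url = generate_kibana_discover_url(rule, match)
#
# The result is the app URL with prison-encoded _g (time range) and _a (index,
# columns, filters) query strings appended.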
def kibana6_disover_global_state(from_time, to_time):
return prison.dumps( {
'refreshInterval': {
'pause': True,
'value': 0
},
'time': {
'from': from_time,
'mode': 'absolute',
'to': to_time
}
} )
def kibana7_disover_global_state(from_time, to_time):
return prison.dumps( {
'filters': [],
'refreshInterval': {
'pause': True,
'value': 0
},
'time': {
'from': from_time,
'to': to_time
}
} )
def kibana_discover_app_state(index, columns, filters, query_keys, match):
app_filters = []
if filters:
bool_filter = { 'must': filters }
app_filters.append( {
'$state': {
'store': 'appState'
},
'bool': bool_filter,
'meta': {
'alias': 'filter',
'disabled': False,
'index': index,
'key': 'bool',
'negate': False,
'type': 'custom',
'value': json.dumps(bool_filter, separators=(',', ':'))
},
} )
for query_key in query_keys:
query_value = lookup_es_key(match, query_key)
if query_value is None:
app_filters.append( {
'$state': {
'store': 'appState'
},
'exists': {
'field': query_key
},
'meta': {
'alias': None,
'disabled': False,
'index': index,
'key': query_key,
'negate': True,
'type': 'exists',
'value': 'exists'
}
} )
else:
app_filters.append( {
'$state': {
'store': 'appState'
},
'meta': {
'alias': None,
'disabled': False,
'index': index,
'key': query_key,
'negate': False,
'params': {
'query': query_value,
'type': 'phrase'
},
'type': 'phrase',
'value': str(query_value)
},
'query': {
'match': {
query_key: {
'query': query_value,
'type': 'phrase'
}
}
}
} )
return prison.dumps( {
'columns': columns,
'filters': app_filters,
'index': index,
'interval': 'auto'
} )
|
testmana2/test
|
Plugins/VcsPlugins/vcsPySvn/SvnStatusMonitorThread.py
|
Python
|
gpl-3.0
| 6,360
| 0.00173
|
# -*- coding: utf-8 -*-
# Copyright (c) 2006 - 2015 Detlev Offenbach <detlev@die-offenbachs.de>
#
"""
Module implementing the VCS status monitor thread class for Subversion.
"""
from __future__ import unicode_literals
import os
import pysvn
from VCS.StatusMonitorThread import VcsStatusMonitorThread
import Preferences
class SvnStatusMonitorThread(VcsStatusMonitorThread):
"""
Class implementing the VCS status monitor thread class for Subversion.
"""
def __init__(self, interval, project, vcs, parent=None):
"""
Constructor
@param interval new interval in seconds (integer)
@param project reference to the project object (Project)
@param vcs reference to the version control object
@param parent reference to the parent object (QObject)
"""
VcsStatusMonitorThread.__init__(self, interval, project, vcs, parent)
def _performMonitor(self):
"""
Protected method implementing the monitoring action.
This method populates the statusList member variable
with a list of strings giving the status in the first column and the
path relative to the project directory starting with the third column.
The allowed status flags are:
<ul>
<li>"A" path was added but not yet comitted</li>
<li>"M" path has local changes</li>
<li>"O" path was removed</li>
<li>"R" path was deleted and then re-added</li>
<li>"U" path needs an update</li>
<li>"Z" path contains a conflict</li>
<li>" " path is back at normal</li>
</ul>
@return tuple of flag indicating successful operation (boolean) and
a status message in case of non successful operation (string)
"""
self.shouldUpdate = False
client = pysvn.Client()
client.exception_style = 1
client.callback_get_login = \
self.__clientLoginCallback
client.callback_ssl_server_trust_prompt = \
self.__clientSslServerTrustPromptCallback
cwd = os.getcwd()
os.chdir(self.projectDir)
try:
allFiles = client.status(
'.', recurse=True, get_all=True, ignore=True,
update=not Preferences.getVCS("MonitorLocalStatus"))
states = {}
for file in allFiles:
uptodate = True
if file.repos_text_status != pysvn.wc_status_kind.none:
uptodate = uptodate and \
file.repos_text_status != pysvn.wc_status_kind.modified
if file.repos_prop_status != pysvn.wc_status_kind.none:
uptodate = uptodate and \
file.repos_prop_status != pysvn.wc_status_kind.modified
status = ""
if not uptodate:
status = "U"
self.shouldUpdate = True
elif file.text_status == pysvn.wc_status_kind.conflicted or \
file.prop_status == pysvn.wc_status_kind.conflicted:
status = "Z"
elif file.text_status == pysvn.wc_status_kind.deleted or \
file.prop_status == pysvn.wc_status_kind.deleted:
status = "O"
|
elif file.text_status == pysvn.wc_status_kind.modified or \
file.prop_status == pysvn.wc_status_kind.modified:
status = "M"
elif file.text_status == pysvn.wc_status_kind.added or \
file.prop_status == pysvn.wc_status_kind.added:
status = "A"
                elif file.text_status == pysvn.wc_status_kind.replaced or \
file.prop_status == pysvn.wc_status_kind.replaced:
status = "R"
if status:
states[file.path] = status
try:
if self.reportedStates[file.path] != status:
self.statusList.append(
"{0} {1}".format(status, file.path))
except KeyError:
self.statusList.append(
"{0} {1}".format(status, file.path))
for name in list(self.reportedStates.keys()):
if name not in states:
self.statusList.append(" {0}".format(name))
self.reportedStates = states
res = True
statusStr = self.tr(
"Subversion status checked successfully (using pysvn)")
except pysvn.ClientError as e:
res = False
statusStr = e.args[0]
os.chdir(cwd)
return res, statusStr
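    # Note (illustrative, derived from the loop above): entries appended to
    # self.statusList look like "M src/foo.py" or "U docs/readme.txt"; a path
    # that has returned to normal is reported with a blank status flag in the
    # first column, e.g. "  src/foo.py".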
def __clientLoginCallback(self, realm, username, may_save):
"""
Private method called by the client to get login information.
@param realm name of the realm of the requested credentials (string)
@param username username as supplied by subversion (string)
@param may_save flag indicating, that subversion is willing to save
the answers returned (boolean)
@return tuple of four values (retcode, username, password, save).
Retcode should be True, if username and password should be used
by subversion, username and password contain the relevant data
as strings and save is a flag indicating, that username and
password should be saved. Always returns (False, "", "", False).
"""
return (False, "", "", False)
def __clientSslServerTrustPromptCallback(self, trust_dict):
"""
Private method called by the client to request acceptance for a
ssl server certificate.
@param trust_dict dictionary containing the trust data
@return tuple of three values (retcode, acceptedFailures, save).
Retcode should be true, if the certificate should be accepted,
acceptedFailures should indicate the accepted certificate failures
and save should be True, if subversion should save the certificate.
Always returns (False, 0, False).
"""
return (False, 0, False)
|
ubiquill/Potluck
|
src/view/Changes.py
|
Python
|
gpl-2.0
| 2,296
| 0.003486
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright © 2011 Thomas Schreiber
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
# by Thomas Schreiber <ubiquill@gmail.com>
#
from PyQt4.QtGui import *
from PyQt4.QtCore import *
from view.changesUi import Ui_changeSummary
import string
class ChangeWin(QDialog):
"""A QDialog that lists changes before they are commited.
:param QDialog: Parent class.
"""
def __init__(self, parent):
"""Initialize ChangeWin.
:param parent: Caller.
"""
QDialog.__init__(self, parent)
self.ui=Ui_changeSummary()
self.ui.setupUi(self)
def setChanges(self, changeDict):
"""Add changes to ChangeWin.
:param changeDict: Dictionary of changes.
"""
installString = ''
upgradeString = ''
removeString = ''
for app in changeDict['repoInstalls']:
installString += app + ' '
for app in changeDict['aurInstalls']:
installString += app + ' '
for app in changeDict['aurBuildDeps']:
installString += app + ' '
for app in changeDict['aurDeps']:
installString += app + ' '
for app in changeDict['repoUpgrades']:
upgradeString += app + ' '
for app in changeDict['aurUpgrades']:
upgradeString += app + ' '
for app in changeDict['removes']:
removeString += app + ' '
self.ui.toInstallEdit.setText(installString)
self.ui.toUpgradeEdit.setText(upgradeString)
self.ui.toRemoveEdit.setText(removeString)
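        # Illustrative input only: changeDict is expected to provide the keys
        # iterated above, e.g.
        #   {'repoInstalls': ['vim'], 'aurInstalls': [], 'aurBuildDeps': [],
        #    'aurDeps': [], 'repoUpgrades': ['linux'], 'aurUpgrades': [],
        #    'removes': ['nano']}
        # which would fill the install, upgrade and remove text fields with
        # "vim ", "linux " and "nano " respectively.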
# vim: set ts=4 sw=4 noet:
|
scrapinghub/dateparser
|
dateparser/data/date_translation_data/sw.py
|
Python
|
bsd-3-clause
| 4,155
| 0
|
info = {
"name": "sw",
"date_order": "DMY",
"january": [
"jan",
"januari"
],
"february": [
"feb",
"februari"
],
"march": [
"mac",
"machi"
],
"april": [
"apr",
"aprili"
],
"may": [
"mei"
],
"june": [
"jun",
"juni"
],
"july": [
"jul",
"julai"
],
"august": [
"ago",
"agosti"
],
"september": [
"sep",
"septemba"
],
"october": [
"okt",
"oktoba"
],
"november": [
"nov",
"novemba"
],
"december": [
"des",
"desemba"
],
"monday": [
"jumatatu"
],
"tuesday": [
"jumanne"
],
"wednesday": [
"jumatano"
],
"thursday": [
"alhamisi"
],
"friday": [
"ijumaa"
],
"saturday": [
"jumamosi"
],
"sunday": [
"jumapili"
],
"am": [
"am",
"asubuhi"
],
"pm": [
"mchana",
"pm"
],
"year": [
"mwaka"
],
"month": [
"mwezi"
],
"week": [
"wiki"
],
"day": [
"siku"
],
"hour": [
"saa"
],
"minute": [
"dak",
"dakika"
],
"second": [
"sek",
"sekunde"
],
"relative-type": {
"0 day ago": [
"leo"
],
"0 h
|
our ago": [
"saa hii"
],
"0 minute ago": [
"dakika hii"
],
"0 month ago": [
"mwezi huu"
],
"0 second ago": [
"sasa hivi"
],
"0 week ago": [
"wiki hii"
],
"0 year ago": [
"mwaka huu"
],
"1 day ago": [
"jana"
],
"1 month ago": [
"mwezi uliopita"
],
"1
|
week ago": [
"wiki iliyopita"
],
"1 year ago": [
"mwaka uliopita"
],
"in 1 day": [
"kesho"
],
"in 1 month": [
"mwezi ujao"
],
"in 1 week": [
"wiki ijayo"
],
"in 1 year": [
"mwaka ujao"
]
},
"relative-type-regex": {
"\\1 day ago": [
"siku (\\d+) iliyopita",
"siku (\\d+) zilizopita"
],
"\\1 hour ago": [
"saa (\\d+) iliyopita",
"saa (\\d+) zilizopita"
],
"\\1 minute ago": [
"dakika (\\d+) iliyopita",
"dakika (\\d+) zilizopita"
],
"\\1 month ago": [
"miezi (\\d+) iliyopita",
"mwezi (\\d+) uliopita"
],
"\\1 second ago": [
"sekunde (\\d+) iliyopita",
"sekunde (\\d+) zilizopita"
],
"\\1 week ago": [
"wiki (\\d+) iliyopita",
"wiki (\\d+) zilizopita"
],
"\\1 year ago": [
"miaka (\\d+) iliyopita",
"mwaka (\\d+) uliopita"
],
"in \\1 day": [
"baada ya siku (\\d+)"
],
"in \\1 hour": [
"baada ya saa (\\d+)"
],
"in \\1 minute": [
"baada ya dakika (\\d+)"
],
"in \\1 month": [
"baada ya miezi (\\d+)",
"baada ya mwezi (\\d+)"
],
"in \\1 second": [
"baada ya sekunde (\\d+)"
],
"in \\1 week": [
"baada ya wiki (\\d+)"
],
"in \\1 year": [
"baada ya miaka (\\d+)",
"baada ya mwaka (\\d+)"
]
},
"locale_specific": {
"sw-CD": {
"name": "sw-CD",
"week": [
"juma"
]
},
"sw-KE": {
"name": "sw-KE"
},
"sw-UG": {
"name": "sw-UG"
}
},
"skip": [
" ",
"'",
",",
"-",
".",
"/",
";",
"@",
"[",
"]",
"|",
","
]
}
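# Illustrative note (the actual matching logic lives elsewhere in dateparser):
# an entry such as "\\1 day ago": ["siku (\\d+) zilizopita"] means that a
# Swahili phrase like "siku 3 zilizopita" is normalised to "3 day ago" before
# the generic relative-date parser runs.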
|
jackkiej/SickRage
|
tests/notifier_tests.py
|
Python
|
gpl-3.0
| 10,384
| 0.003371
|
# coding=UTF-8
# URL: https://github.com/SickRage/SickRage
#
# This file is part of SickRage.
#
# SickRage is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# SickRage is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with SickRage. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
###
# As a test case, there are instances in which it is necessary to call protected members of
# classes in order to test those classes. Therefore we will be pylint disable protected-access
###
# pylint: disable=line-too-long
"""
Test notifiers
"""
import os.path
import sys
import unittest
sys.path.insert(1, os.path.abspath(os.path.join(os.path.dirname(__file__), '../lib')))
sys.path.insert(1, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
from sickbeard import db
from sickbeard.tv import TVEpisode, TVShow
from sickbeard.webserve import Home
from sickbeard.notifiers.emailnotify import Notifier as EmailNotifier
from sickbeard.notifiers.prowl import Notifier as ProwlNotifier
from sickrage.helper.encoding import ss
import tests.test_lib as test
class NotifierTests(test.SickbeardTestDBCase): # pylint: disable=too-many-public-methods
"""
Test notifiers
"""
@classmethod
def setUpClass(cls):
num_legacy_shows = 3
num_shows = 3
num_episodes_per_show = 5
cls.mydb = db.DBConnection()
cls.legacy_shows = []
cls.shows = []
# Per-show-notifications were originally added for email notifications only. To add
# this feature to other notifiers, it was necessary to alter the way text is stored in
# one of the DB columns. Therefore, to test properly, we must create some shows that
# store emails in the old method (legacy method) and then other shows that will use
# the new method.
for show_counter in range(100, 100 + num_legacy_shows):
show = TVShow(1, show_counter)
show.name = "Show " + str(show_counter)
show.episodes = []
for episode_counter in range(0, num_episodes_per_show):
episode = TVEpisode(show, test.SEASON, episode_counter)
episode.name = "Episode " + str(episode_counter + 1)
episode.quality = "SDTV"
show.episodes.append(episode)
show.saveToDB()
cls.legacy_shows.append(show)
for show_counter in range(200, 200 + num_shows):
show = TVShow(1, show_counter)
show.name = "Show " + str(show_counter)
show.episodes = []
for episode_counter in range(0, num_episodes_per_show):
episode = TVEpisode(show, test.SEASON, episode_counter)
episode.name = "Episode " + str(episode_counter + 1)
episode.quality = "SDTV"
show.episodes.append(episode)
show.saveToDB()
cls.shows.append(show)
def setUp(self):
"""
Set up tests
"""
self._debug_spew("\n\r")
@unittest.skip('Not yet implemented')
def test_boxcar(self):
"""
Test boxcar notifications
"""
pass
def test_email(self):
"""
Test email notifications
"""
email_notifier = EmailNotifier()
# Per-show-email notifications were added early on and utilized a different format than the other notifiers.
# Therefore, to test properly (and ensure backwards compatibility), this routine will test shows that use
# both the old and the new storage methodology
legacy_test_emails = "email-1@address.com,email2@address.org,email_3@address.tv"
test_emails = "email-4@address.com,email5@address.org,email_6@address.tv"
for show in self.legacy_shows:
showid = self._get_showid_by_showname(show.name)
self.mydb.action("UPDATE tv_shows SET notify_list = ? WHERE show_id = ?", [legacy_test_emails, showid])
for show in self.shows:
showid = self._get_showid_by_showname(show.name)
Home.saveShowNotifyList(show=showid, emails=test_emails)
# Now, iterate through all shows using the email list generation routines that are used in the notifier proper
shows = self.legacy_shows + self.shows
for show in shows:
for episode in show.episodes:
ep_name = ss(episode._format_pattern('%SN - %Sx%0E - %EN - ') + episode.quality) # pylint: disable=protected-access
show_name = email_notifier._parseEp(ep_name) # pylint: disable=protected-access
recipients = email_notifier._generate_recipients(show_name) # pylint: disable=protected-access
self._debug_spew("- Email Notifications for " + show.name + " (episode: " + episode.name + ") will be sent to:")
for email in recipients:
self._debug_spew("-- " + email.strip())
self._debug_spew("\n\r")
return True
@unittest.skip('Not yet implemented')
def test_emby(self):
"""
Test emby notifications
"""
pass
@unittest.skip('Not yet implemented')
def test_freemobile(self):
"""
Test freemobile notifications
"""
pass
@unittest.skip('Not yet implemented')
    def test_growl(self):
"""
Test growl notifications
"""
pass
@unittest.skip('Not yet implemented')
def test_kodi(self):
"""
Test kodi notifications
"""
pass
@unittest.skip('Not yet implemented')
def test_libnotify(self):
"""
Test libnotify notifications
"""
pass
@unittest.skip('Not yet implemented')
def test_nma(self):
|
"""
Test nma notifications
"""
pass
@unittest.skip('Not yet implemented')
def test_nmj(self):
"""
Test nmj notifications
"""
pass
@unittest.skip('Not yet implemented')
def test_nmjv2(self):
"""
Test nmjv2 notifications
"""
pass
@unittest.skip('Not yet implemented')
def test_plex(self):
"""
Test plex notifications
"""
pass
def test_prowl(self):
"""
Test prowl notifications
"""
prowl_notifier = ProwlNotifier()
# Prowl per-show-notifications only utilize the new methodology for storage; therefore, the list of legacy_shows
# will not be altered (to preserve backwards compatibility testing)
test_prowl_apis = "11111111111111111111,22222222222222222222"
for show in self.shows:
showid = self._get_showid_by_showname(show.name)
Home.saveShowNotifyList(show=showid, prowlAPIs=test_prowl_apis)
# Now, iterate through all shows using the Prowl API generation routines that are used in the notifier proper
for show in self.shows:
for episode in show.episodes:
ep_name = ss(episode._format_pattern('%SN - %Sx%0E - %EN - ') + episode.quality) # pylint: disable=protected-access
show_name = prowl_notifier._parse_episode(ep_name) # pylint: disable=protected-access
recipients = prowl_notifier._generate_recipients(show_name) # pylint: disable=protected-access
self._debug_spew("- Prowl Notifications for " + show.name + " (episode: " + episode.name + ") will be sent to:")
for api in recipients:
self._debug_spew("-- " + api.strip())
self._debug_spew("\n\r")
return True
@unittest.skip('Not yet implemented
|
cshallue/models
|
research/differential_privacy/multiple_teachers/analysis.py
|
Python
|
apache-2.0
| 10,988
| 0.012013
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""
This script computes bounds on the privacy cost of training the
student model from noisy aggregation of labels predicted by teachers.
It should be used only after training the student (and therefore the
teachers as well). We however include the label files required to
reproduce key results from our paper (https://arxiv.org/abs/1610.05755):
the epsilon bounds for MNIST and SVHN students.
The command that computes the epsilon bound associated
with the training of the MNIST student model (100 label queries
with a (1/20)*2=0.1 epsilon bound each) is:
python analysis.py
--counts_file=mnist_250_teachers_labels.npy
--indices_file=mnist_250_teachers_100_indices_used_by_student.npy
The command that computes the epsilon bound associated
with the training of the SVHN student model (1000 label queries
with a (1/20)*2=0.1 epsilon bound each) is:
python analysis.py
--counts_file=svhn_250_teachers_labels.npy
--max_examples=1000
--delta=1e-6
"""
import os
import math
import numpy as np
from six.moves import xrange
import tensorflow as tf
from differential_privacy.multiple_teachers.input import maybe_download
# These parameters can be changed to compute bounds for different failure rates
# or different model predictions.
tf.flags.DEFINE_integer("moments",8, "Number of moments")
tf.flags.DEFINE_float("noise_eps", 0.1, "Eps value for each call to noisymax.")
tf.flags.DEFINE_float("delta", 1e-5, "Target value of delta.")
tf.flags.DEFINE_float("beta", 0.09, "Value of beta for smooth sensitivity")
tf.flags.DEFINE_string("counts_file","","Numpy matrix with raw counts")
tf.flags.DEFINE_string("indices_file","",
"File containting a numpy matrix with indices used."
"Optional. Use the first max_examples indices if this is not provided.")
tf.flags.DEFINE_integer("max_examples",1000,
"Number of examples to use. We will use the first"
" max_examples many examples from the counts_file"
" or indices_file to do the privacy cost estimate")
tf.flags.DEFINE_float("too_small", 1e-10, "Small threshold to avoid log of 0")
tf.flags.DEFINE_bool("input_is_counts", False, "False if labels, True if counts")
FLAGS = tf.flags.FLAGS
def compute_q_noisy_max(counts, noise_eps):
"""returns ~ Pr[outcome != winner].
Args:
counts: a list of scores
noise_eps: privacy parameter for noisy_max
Returns:
q: the probability that outcome is different from true winner.
"""
# For noisy max, we only get an upper bound.
  # Pr[ j beats i*] \leq (2+gap(j,i*)) / (4 exp(gap(j,i*)))
# proof at http://mathoverflow.net/questions/66763/
# tight-bounds-on-probability-of-sum-of-laplace-random-variables
winner = np.argmax(counts)
counts_normalized = noise_eps * (counts - counts[winner])
counts_rest = np.array(
[counts_normalized[i] for i in xrange(len(counts)) if i != winner])
q = 0.0
for c in counts_rest:
gap = -c
q += (gap + 2.0) / (4.0 * math.exp(gap))
return min(q, 1.0 - (1.0/len(counts)))
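# Worked example (illustrative only): for counts = np.array([10, 5]) and
# noise_eps = 0.1, the winner is index 0 and the single competing gap is
# 0.1 * (10 - 5) = 0.5, so q = (0.5 + 2.0) / (4.0 * exp(0.5)) ~= 0.379,
# which is below the 1 - 1/len(counts) = 0.5 cap and is returned as-is.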
def compute_q_noisy_max_approx(counts, noise_eps):
"""returns ~ Pr[outcome != winner].
Args:
counts: a list of scores
noise_eps: privacy parameter for noisy_max
Returns:
q: the probability that outcome is different from true winner.
"""
# For noisy max, we only get an upper bound.
  # Pr[ j beats i*] \leq (2+gap(j,i*)) / (4 exp(gap(j,i*)))
# proof at http://mathoverflow.net/questions/66763/
# tight-bounds-on-probability-of-sum-of-laplace-random-variables
# This code uses an approximation that is faster and easier
# to get local sensitivity bound on.
winner = np.argmax(counts)
counts_normalized = noise_eps * (counts - counts[winner])
counts_rest = np.array(
[counts_normalized[i] for i in xrange(len(counts)) if i != winner])
gap = -max(counts_rest)
q = (len(counts) - 1) * (gap + 2.0) / (4.0 * math.exp(gap))
return min(q, 1.0 - (1.0/len(counts)))
def logmgf_exact(q, priv_eps, l):
"""Computes the logmgf value given q and privacy eps.
The bound used is the min of three terms. The first term is from
https://arxiv.org/pdf/1605.02065.pdf.
The second term is based on the fact that when event has probability (1-q) for
q close to zero, q can only change by exp(eps), which corresponds to a
much smaller multiplicative change in (1-q)
The third term comes directly from the privacy guarantee.
Args:
q: pr of non-optimal outcome
priv_eps: eps parameter for DP
l: moment to compute.
Returns:
Upper bound on logmgf
"""
if q < 0.5:
t_one = (1-q) * math.pow((1-q) / (1 - math.exp(priv_eps) * q), l)
t_two = q * math.exp(priv_eps * l)
t = t_one + t_two
try:
log_t = math.log(t)
except ValueError:
print("Got ValueError in math.log for values :" + str((q, priv_eps, l, t)))
log_t = priv_eps * l
else:
log_t = priv_eps * l
return min(0.5 * priv_eps * priv_eps * l * (l + 1), log_t, priv_eps * l)
def logmgf_from_counts(counts, noise_eps, l):
"""
ReportNoisyMax mechanism with noise_eps with 2*noise_eps-DP
in our setting where one count can go up by one and another
can go down by 1.
"""
q = compute_q_noisy_max(counts, noise_eps)
return logmgf_exact(q, 2.0 * noise_eps, l)
def sens_at_k(counts, noise_eps, l, k):
"""Return sensitivity at distane k.
Args:
counts: an array of scores
noise_eps: noise parameter used
l: moment whose sensitivity is being computed
k: distance
Returns:
sensitivity: at distance k
"""
counts_sorted = sorted(counts, reverse=True)
if 0.5 * noise_eps * l > 1:
print("l too large to compute sensitivity")
return 0
# Now we can assume that at k, gap remains positive
# or we have reached the point where logmgf_exact is
# determined by the first term and ind of q.
if counts[0] < counts[1] + k:
return 0
counts_sorted[0] -= k
counts_sorted[1] += k
val = logmgf_from_counts(counts_sorted, noise_eps, l)
counts_sorted[0] -= 1
counts_sorted[1] += 1
val_changed = logmgf_from_counts(counts_sorted, noise_eps, l)
return val_changed - val
def smoothed_sens(counts, noise_eps, l, beta):
"""Compute beta-smooth sensitivity.
Args:
    counts: array of scores
noise_eps: noise parameter
l: moment of interest
beta: smoothness parameter
Returns:
smooth_sensitivity: a beta smooth upper bound
"""
k = 0
smoothed_sensitivity = sens_at_k(counts, noise_eps, l, k)
while k < max(counts):
k += 1
sensitivity_at_k = sens_at_k(counts, noise_eps, l, k)
smoothed_sensitivity = max(
smoothed_sensitivity,
math.exp(-beta * k) * sensitivity_at_k)
if sensitivity_at_k == 0.0:
break
return smoothed_sensitivity
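# In formula form (restating the loop above), the returned value approximates
# the beta-smooth sensitivity
#     S_beta = max over k of  exp(-beta * k) * sens_at_k(counts, noise_eps, l, k)
# with the search over k cut off once the sensitivity at distance k reaches 0.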
def main(unused_argv):
##################################################################
# If we are reproducing results from paper https://arxiv.org/abs/1610.05755,
# download the required binaries with label information.
##################################################################
# Binaries for MNIST results
paper_binaries_mnist = \
["https://github.com/npapernot/multiple-teachers-for-privacy/blob/master/mnist_250_teachers_labels.npy?raw=true",
"https://github.com/npapernot/multiple-teachers-for-privacy/blob/master/mnist_250_teachers_100_indices_used_by_student.npy?raw=true"]
if FLAGS.counts_file == "mnist_250_teachers_labels.npy" \
or FLAGS.indices_file == "mnist_250_teachers_100_indices_used_by_student.npy":
maybe_download(paper_binaries_mnist, os.getcwd())
# Binar
|
k3idii/pykpyk
|
datatools.py
|
Python
|
mit
| 955
| 0.034555
|
import itertools
import struct
def ip4_to_int32(str_ip, order='>'):
return struct.unpack(order+"I",struct.pack("BBBB",*map(int,str_ip.split("."))))[0]
def int32_to_ip4(big_int, order='>'):
return '.'.join(map(str, struct.unpack("BBBB",struct.pack(order+"I",big_int))))
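# Hedged usage sketch (not part of the original module): round-tripping a
# dotted-quad address through the two converters above; 0x01020304 == 16909060.
if __name__ == '__main__':
    assert ip4_to_int32("1.2.3.4") == 16909060
    assert int32_to_ip4(16909060) == "1.2.3.4"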
def gen_get_n_bit(data, bit=1):
    mask = 1 << bit
for c in data:
yield 1 if ord(c) & mask else 0
def get_n_bit(data, bit=1):
    return list(gen_get_n_bit(data, bit))
def byte_to_bin_str(byte):
return "{0:0>b}".format(byte)
def byte_to_bin_arr(byte):
return [int(x) for x in byte_to_bin_str(byte)]
def gen_chunks(data, size):
    for i in range(1 + len(data)//size):
yield data[i*size:(i+1)*size]
def chunks(data, size):
return list(gen_chunks(data, size))
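# Example (illustrative): chunks("abcdefg", 3) -> ['abc', 'def', 'g']. When
# len(data) is an exact multiple of size the final element is an empty chunk,
# e.g. chunks("abcdef", 3) -> ['abc', 'def', ''].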
def compare_collection(col, func_cmp):
for pair in itertools.combinations(col, 2):
if not func_cmp(*pair):
return False
return True
|
def gen_pairs(col):
    return itertools.combinations(col, 2)
|
swtp1v07/Savu
|
savu/data/structures.py
|
Python
|
apache-2.0
| 17,375
| 0
|
# Copyright 2014 Diamond Light Source Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
.. module:: structures
:platform: Unix
:synopsis: Classes which describe the main data types for passing between
plugins
.. moduleauthor:: Mark Basham <scientificsoftware@diamond.ac.uk>
"""
import numpy as np
import h5py
import logging
from mpi4py import MPI
from savu.core.utils import logmethod
NX_CLASS = 'NX_class'
# Core Direction Keywords
CD_PROJECTION = 'core_dir_projection'
CD_SINOGRAM = 'core_dir_sinogram'
CD_ROTATION_AXIS = 'core_dir_rotation_axis'
CD_PATTERN = 'core_dir_pattern'
class SliceAvailableWrapper(object):
"""
    This class takes 2 datasets, one available boolean ndarray, and 1 data
    ndarray. Its purpose is to provide slices from the data array only if data
    has been put there, and to allow a convenient way to put slices into the
data array, and set the available array to True
"""
def __init__(self, avail, data):
"""
:param avail: The available boolean ndArray
:type avail: boolean ndArray
:param data: The data ndArray
:type data: any ndArray
"""
self.avail = avail
self.data = data
def __getitem__(self, item):
if self.avail[item].all():
return self.data[item]
else:
return None
def __setitem__(self, item, value):
self.data[item] = value
self.avail[item] = True
def __getattr__(self, name):
"""
Delegate everything else to the data class
"""
value = self.data.__getattribute__(name)
return value
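# Illustrative usage only (array shapes are hypothetical): wrap a data array
# together with an availability mask so that reads return None until the
# corresponding region has been written.
#
#     avail = np.zeros((10, 10), dtype=bool)
#     data = np.empty((10, 10))
#     wrapper = SliceAvailableWrapper(avail, data)
#     wrapper[0:5, :] = 1.0      # stores the slice and marks it available
#     wrapper[0:5, :]            # -> the stored values
#     wrapper[5:, :]             # -> None, nothing written there yet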
class SliceAlwaysAvailableWrapper(SliceAvailableWrapper):
"""
This class takes 1 data ndarray. Its purpose is to provide slices from the
data array in the same way as the SliceAvailableWrapper but assuming the
data is always available (for example in the case of the input file)
"""
def __init__(self, data):
"""
:param data: The data ndArray
:type data: any ndArray
"""
super(SliceAlwaysAvailableWrapper, self).__init__(None, data)
@logmethod
def __getitem__(self, item):
return self.data[item]
@logmethod
def __setitem__(self, item, value):
self.data[item] = value
class PassThrough(object):
"""
Interface Class describing when the input data of a plugin is also the
output
"""
def __init__(self):
super(PassThrough, self).__init__()
class Data(object):
"""
Baseclass for all data
"""
def __init__(self):
super(Data, self).__init__()
self.backing_file = None
self.data = None
self.base_path = None
self.core_directions = {}
@logmethod
def complete(self):
"""
Closes the backing file and completes work
"""
if self.backing_file is not None:
logging.debug("Completing file %s %s", self.base_path,
self.backing_file.filename)
self.backing_file.close()
self.backing_file = None
def external_link(self):
return h5py.ExternalLink(self.backing_file.filename,
self.base_path)
def get_slice_list(self, frame_type):
if frame_type in self.core_directions.keys():
it = np.nditer(self.data, flags=['multi_index'])
dirs_to_remove = list(self.core_directions[frame_type])
dirs_to_remove.sort(reverse=True)
for direction in dirs_to_remove:
it.remove_axis(direction)
mapping_list = range(len(it.multi_index))
dirs_to_remove.sort()
for direction in dirs_to_remove:
mapping_list.insert(direction, -1)
mapping_array = np.array(mapping_list)
slice_list = []
while not it.finished:
tup = it.multi_index + (slice(None),)
slice_list.append(tuple(np.array(tup)[mapping_array]))
it.iternext()
return slice_list
return None
def get_data_shape(self):
"""
Simply returns the shape of the main data array
"""
return self.data.shape
class RawTimeseriesData(Data):
"""
Descriptor for raw timeseries data
"""
def __init__(self):
super(RawTimeseriesData, self).__init__()
self.image_key = None
self.rotation_angle = None
self.control = None
self.center_of_rotation = None
@logmethod
def populate_from_nx_tomo(self, path):
"""
Populate the RawTimeseriesData from an NXTomo defined NeXus file
:param path: The full path of the NeXus file to load.
:type path: str
"""
self.backing_file = h5py.File(path, 'r')
logging.debug("Creating file '%s' '%s'", 'tomo_entry',
self.backing_file.filename)
data = self.backing_file['entry1/tomo_entry/instrument/detector/data']
self.data = SliceAlwaysAvailableWrapper(data)
image_key = self.backing_file[
'entry1/tomo_entry/instrument/detector/image_key']
self.image_key = SliceAlwaysAvailableWrapper(image_key)
rotation_angle = \
self.backing_file['entry1/tomo_entry/sample/rotation_angle']
self.rotation_angle = SliceAlwaysAvailableWrapper(rotation_angle)
control = self.backing_file['entry1/tomo_entry/control/data']
self.control = SliceAlwaysAvailableWrapper(control)
self.core_directions[CD_PROJECTION] = (1, 2)
self.core_directions[CD_SINOGRAM] = (0, 2)
self.core_directions[CD_ROTATION_AXIS] = (0, )
@logmethod
def create_backing_h5(self, path, group_name, data, mpi=False,
new_shape=None):
"""
Create a h5 backend for this RawTimeseriesData
:param path: The full path of the NeXus file to use as a backend
:type path: str
:param data: The structure from which this can be created
:type data: savu.structure.RawTimeseriesData
:param mpi: if an MPI process, provide MPI package here, default None
:type mpi: package
"""
self.backing_file = None
if mpi:
self.backing_file = h5py.File(path, 'w', driver='mpio',
comm=MPI.COMM_WORLD)
else:
self.backing_file = h5py.File(path, 'w')
if self.backing_file is None:
raise IOError("Failed to open the hdf5 file")
logging.debug("Creating file '%s' '%s'", self.base_path,
self.backing_file.filename)
self.base_path = group_name
if not isinstance(data, RawTimeseriesData):
raise ValueError("data is not a RawTimeseries
|
Data")
self.core_directions[CD_PROJECTION] = (1, 2)
self.core_directions[CD_SINOGRAM] = (0, 2)
self.core_directions[CD_ROTATION_AXIS] = 0
data_shape = new_shape
if data_shape is None:
data_shape = data.data.shape
data_type = np.double
image_key_shape = data.image_key.shape
image_key_type = data.image_key.dtype
rotation_angle_shape = data.rotation_angle.shape
rotation_angle_type = data.rotation_angle.dtype
control_shape = data.control.shape
control_type = data.control.dtype
cor_shape = (data.data.shape[self.core_directions[CD_ROTATION_AXIS]],)
cor_type = np.double
group = self.backing_file.create_group(group_name)
group.attrs[NX_CLASS] = 'NXdata'
data_val
adrn/gala | gala/integrate/setup_package.py | Python | mit | 1,672 | 0
from distutils.core import Extension
from collections import defaultdict
def get_extensions():
import numpy as np
exts = []
# malloc
mac_incl_path = "/usr/include/malloc"
cfg = defaultdict(list)
cfg['include_dirs'].append(np.get_include())
cfg['include_dirs'].append(mac_incl_path)
cfg['include_dirs'].append('gala/potential')
cfg['extra_compile_args'].append('--std=gnu99')
cfg['sources'].append('gala/integrate/cyintegrators/leapfrog.pyx')
cfg['sources'].append('gala/potential/potential/src/cpotential.c')
exts.append(Extension('gala.integrate.cyintegrators.leapfrog', **cfg))
cfg = defaultdict(list)
cfg['include_dirs'].append(np.get_include())
cfg['include_dirs'].append(mac_incl_path)
cfg['include_dirs'].append('gala/potential')
cfg['extra_compile_args'].append('--std=gnu99')
cfg['sources'].append('gala/potential/hamiltonian/src/chamiltonian.c')
cfg['sources'].append('gala/potential/potential/src/cpotential.c')
cfg['sources'].append('gala/integrate/cyintegrators/dop853.pyx')
cfg['sources'].append('gala/integrate/cyintegrators/dopri/dop853.c')
exts.append(Extension('gala.integrate.cyintegrators.dop853', **cfg))
cfg = defaultdict(list)
cfg['include_dirs'].append(np.get_include())
cfg['include_dirs'].append(mac_incl_path)
cfg['include_dirs'].append('gala/potential')
    cfg['extra_compile_args'].append('--std=gnu99')
cfg['sources'].append('gala/integrate/cyintegrators/ruth4.pyx')
cfg['sources'].append('gala/potential/potential/src/cpotential.c')
    exts.append(Extension('gala.integrate.cyintegrators.ruth4', **cfg))
return exts
google/fuzzbench | analysis/coverage_data_utils.py | Python | apache-2.0 | 9,404 | 0.000638
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility functions for coverage data calculation."""
import collections
import itertools
import json
import posixpath
from typing import Dict, List, Tuple
import tempfile
import pandas as pd
from analysis import data_utils
from common import filestore_utils
from common import logs
logger = logs.Logger('coverage_data_utils')
def fuzzer_and_benchmark_to_key(fuzzer: str, benchmark: str) -> str:
"""Returns the key representing |fuzzer| and |benchmark|."""
return fuzzer + ' ' + benchmark
def key_to_fuzzer_and_benchmark(key: str) -> Tuple[str, str]:
"""Returns a tuple containing the fuzzer and the benchmark represented by
|key|."""
return tuple(key.split(' '))
def get_experiment_filestore_path_for_fuzzer_benchmark(
fuzzer: str,
benchmark: str,
df: pd.DataFrame,
) -> str:
"""Returns the experiment filestore
|
path for |fuzzer| and |benchmark| in
|df|. Returns an arbitrary filestore path if there are multiple."""
df = df[df['fuzzer'] == fuzzer]
df = df[df['benchmark'] == benchmark]
experiment_filestore_paths = get_experiment_filestore_paths(df)
fuzzer_benchmark_filestore_path = experiment_filestore_paths[0]
if len(experiment_filestore_paths) != 1:
logger.warning(
'Multiple cov filestores (%s) for this fuzzer (%s) benchmark (%s) '
'pair. Using first: %s.', experiment_filestore_paths, fuzzer,
benchmark, fuzzer_benchmark_filestore_path)
return fuzzer_benchmark_filestore_path
def get_experiment_filestore_paths(df: pd.DataFrame) -> List[str]:
"""Returns a list of experiment filestore paths from |df|."""
return list((df['experiment_filestore'] + '/' + df['experiment']).unique())
def get_coverage_report_filestore_path(fuzzer: str, benchmark: str,
df: pd.DataFrame) -> str:
"""Returns the filestore path of the coverage report for |fuzzer| on
|benchmark| for |df|."""
exp_filestore_path = get_experiment_filestore_path_for_fuzzer_benchmark(
fuzzer, benchmark, df)
return posixpath.join(exp_filestore_path, 'coverage', 'reports', benchmark,
fuzzer, 'index.html')
def get_covered_regions_dict(experiment_df: pd.DataFrame) -> Dict:
"""Combines json files for different fuzzer-benchmark pair in
|experiment_df| and returns a dictionary of the covered regions."""
fuzzers_and_benchmarks = set(
zip(experiment_df.fuzzer, experiment_df.benchmark))
arguments = [(fuzzer, benchmark,
get_experiment_filestore_path_for_fuzzer_benchmark(
fuzzer, benchmark, experiment_df))
for fuzzer, benchmark in fuzzers_and_benchmarks]
result = itertools.starmap(get_fuzzer_benchmark_covered_regions_and_key,
arguments)
return dict(result)
def get_fuzzer_benchmark_covered_regions_filestore_path(
fuzzer: str, benchmark: str, exp_filestore_path: str) -> str:
"""Returns the path to the covered regions json file in the |filestore| for
|fuzzer| and |benchmark|."""
return posixpath.join(exp_filestore_path, 'coverage', 'data', benchmark,
fuzzer, 'covered_regions.json')
def get_fuzzer_covered_regions(fuzzer: str, benchmark: str, filestore: str):
"""Returns the covered regions dict for |fuzzer| from the json file in the
filestore."""
src_file = get_fuzzer_benchmark_covered_regions_filestore_path(
fuzzer, benchmark, filestore)
with tempfile.NamedTemporaryFile() as dst_file:
if filestore_utils.cp(src_file, dst_file.name,
expect_zero=False).retcode:
logger.warning('covered_regions.json file: %s could not be copied.',
src_file)
return {}
with open(dst_file.name) as json_file:
return json.load(json_file)
def get_fuzzer_benchmark_covered_regions_and_key(
fuzzer: str, benchmark: str, filestore: str) -> Tuple[str, Dict]:
"""Accepts |fuzzer|, |benchmark|, |filestore|.
Returns a tuple containing the fuzzer benchmark key and the regions covered
by the fuzzer on the benchmark."""
fuzzer_benchmark_covered_regions = get_fuzzer_covered_regions(
fuzzer, benchmark, filestore)
key = fuzzer_and_benchmark_to_key(fuzzer, benchmark)
return key, fuzzer_benchmark_covered_regions
def get_unique_region_dict(benchmark_coverage_dict: Dict) -> Dict:
"""Returns a dictionary containing the covering fuzzers for each unique
region, where the |threshold| defines which regions are unique."""
region_dict = collections.defaultdict(list)
unique_region_dict = {}
threshold_count = 1
for fuzzer in benchmark_coverage_dict:
for region in benchmark_coverage_dict[fuzzer]:
region_dict[region].append(fuzzer)
for region, fuzzers in region_dict.items():
if len(fuzzers) <= threshold_count:
unique_region_dict[region] = fuzzers
return unique_region_dict
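# Illustrative worked example (hypothetical data, not from the module): with
#     benchmark_coverage_dict = {'afl': {(1, 2), (3, 4)}, 'libfuzzer': {(1, 2)}}
# region (1, 2) is covered by both fuzzers and is dropped, while (3, 4) is
# covered only by 'afl', so get_unique_region_dict(...) returns
#     {(3, 4): ['afl']}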
def get_unique_region_cov_df(unique_region_dict: Dict,
fuzzer_names: List[str]) -> pd.DataFrame:
"""Returns a DataFrame where the two columns are fuzzers and the number of
unique regions covered."""
fuzzers = collections.defaultdict(int)
for region in unique_region_dict:
for fuzzer in unique_region_dict[region]:
fuzzers[fuzzer] += 1
dict_to_transform = {'fuzzer': [], 'unique_regions_covered': []}
for fuzzer in fuzzer_names:
covered_num = fuzzers[fuzzer]
dict_to_transform['fuzzer'].append(fuzzer)
dict_to_transform['unique_regions_covered'].append(covered_num)
return pd.DataFrame(dict_to_transform)
def get_benchmark_cov_dict(coverage_dict, benchmark):
"""Returns a dictionary to store the covered regions of each fuzzer. Uses a
set of tuples to store the covered regions."""
benchmark_cov_dict = {}
for key, covered_regions in coverage_dict.items():
current_fuzzer, current_benchmark = key_to_fuzzer_and_benchmark(key)
if current_benchmark == benchmark:
covered_regions_in_set = set()
for region in covered_regions:
covered_regions_in_set.add(tuple(region))
benchmark_cov_dict[current_fuzzer] = covered_regions_in_set
return benchmark_cov_dict
def get_benchmark_aggregated_cov_df(coverage_dict, benchmark):
"""Returns a dataframe where each row represents a fuzzer and its aggregated
coverage number."""
dict_to_transform = {'fuzzer': [], 'aggregated_edges_covered': []}
for key, covered_regions in coverage_dict.items():
current_fuzzer, current_benchmark = key_to_fuzzer_and_benchmark(key)
if current_benchmark == benchmark:
dict_to_transform['fuzzer'].append(current_fuzzer)
dict_to_transform['aggregated_edges_covered'].append(
len(covered_regions))
return pd.DataFrame(dict_to_transform)
def get_pairwise_unique_coverage_table(benchmark_coverage_dict, fuzzers):
"""Returns a table that shows the unique coverage between each pair of
fuzzers.
The pairwise unique coverage table is a square matrix where each
row and column represents a fuzzer, and each cell contains a number
showing the regions covered by the fuzzer of the column but not by
the fuzzer of the row."""
pairwise_unique_coverage_values = []
for fuzzer_in_row in fuzzers:
row = []
for fuzzer_in_col in fuzzers:
pairwise_unique_coverage_valu
brodie/cram | cram/_test.py | Python | gpl-2.0 | 7,518 | 0.000532
"""Utilities for running individual tests"""
import itertools
import os
import re
import time
from cram._diff import esc, glob, regex, unified_diff
from cram._process import PIPE, STDOUT, execute
__all__ = ['test', 'testfile']
_needescape = re.compile(br'[\x00-\x09\x0b-\x1f\x7f-\xff]').search
_escapesub = re.compile(br'[\x00-\x09\x0b-\x1f\\\x7f-\xff]').sub
_escapemap = dict((bytes([i]), br'\x%02x' % i) for i in range(256))
_escapemap.update({b'\\': b'\\\\', b'\r': br'\r', b'\t': br'\t'})
def _escape(s):
"""Like the string-escape codec, but doesn't escape quotes"""
return (_escapesub(lambda m: _escapemap[m.group(0)], s[:-1]) +
b' (esc)\n')
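# Illustrative example (assumed input, not part of cram): the trailing newline
# is kept out of the substitution and an ' (esc)' marker is appended, so an
# output line containing a raw 0xe9 byte,
#     _escape(b'  caf\xe9\n')
# would come back roughly as
#     b'  caf\\xe9 (esc)\n'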
def test(lines, shell='/bin/sh', indent=2, testname=None, env=None,
cleanenv=True, debug=False):
r"""Run test lines and return input, output, and diff.
This returns a 3-tuple containing the following:
(list of lines in test, same list with actual output, diff)
diff is a generator that yields the diff between the two lists.
If a test exits with return code 80, the actual output is set to
None and diff is set to [].
Note that the TESTSHELL environment variable is available in the
test (set to the specified shell). However, the TESTDIR and
TESTFILE environment variables are not available. To run actual
test files, see testfile().
Example usage:
>>> refout, postout, diff = test([b' $ echo hi\n',
... b' [a-z]{2} (re)\n'])
>>> refout == [b' $ echo hi\n', b' [a-z]{2} (re)\n']
True
>>> postout == [b' $ echo hi\n', b' hi\n']
True
>>> bool(diff)
False
lines may also be a single bytes string:
>>> refout, postout, diff = test(b' $ echo hi\n bye\n')
>>> refout == [b' $ echo hi\n', b' bye\n']
True
>>> postout == [b' $ echo hi\n', b' hi\n']
True
>>> bool(diff)
True
>>> (b''.join(diff) ==
... b'--- \n+++ \n@@ -1,2 +1,2 @@\n $ echo hi\n- bye\n+ hi\n')
True
:param lines: Test input
:type lines: bytes or collections.Iterable[bytes]
:param shell: Shell to run test in
:type shell: bytes or str or list[bytes] or list[str]
:param indent: Amount of indentation to use for shell commands
:type indent: int
:param testname: Optional test file name (used in diff output)
:type testname: bytes or None
:param env: Optional environment variables for the test shell
:type env: dict or None
:param cleanenv: Whether or not to sanitize the environment
:type cleanenv: bool
:param debug: Whether or not to run in debug mode (don't capture stdout)
:type debug: bool
    :return: Input, output, and diff iterables
:rtype: (list[bytes], list[bytes], collections.Iterable[bytes])
"""
indent = b' ' * indent
cmdline = indent + b'$ '
conline = indent + b'> '
salt = b'CRAM%.5f' % time.time()
if env is None:
env = os.environ.copy()
if cleanenv:
for s in ('LANG', 'LC_ALL', 'LANGUAGE'):
env[s] = 'C'
env['TZ'] = 'GMT'
env['CDPATH'] = ''
env['COLUMNS'] = '80'
env['GREP_OPTIONS'] = ''
if isinstance(lines, bytes):
lines = lines.splitlines(True)
if isinstance(shell, (bytes, str)):
shell = [shell]
env['TESTSHELL'] = shell[0]
if debug:
stdin = []
for line in lines:
if not line.endswith(b'\n'):
line += b'\n'
if line.startswith(cmdline):
stdin.append(line[len(cmdline):])
elif line.startswith(conline):
stdin.append(line[len(conline):])
execute(shell + ['-'], stdin=b''.join(stdin), env=env)
return ([], [], [])
after = {}
refout, postout = [], []
i = pos = prepos = -1
stdin = []
for i, line in enumerate(lines):
if not line.endswith(b'\n'):
line += b'\n'
refout.append(line)
if line.startswith(cmdline):
after.setdefault(pos, []).append(line)
prepos = pos
pos = i
stdin.append(b'echo %s %d $?\n' % (salt, i))
stdin.append(line[len(cmdline):])
elif line.startswith(conline):
after.setdefault(prepos, []).append(line)
stdin.append(line[len(conline):])
elif not line.startswith(indent):
after.setdefault(pos, []).append(line)
stdin.append(b'echo %s %d $?\n' % (salt, i + 1))
output, retcode = execute(shell + ['-'], stdin=b''.join(stdin),
stdout=PIPE, stderr=STDOUT, env=env)
if retcode == 80:
return (refout, None, [])
pos = -1
ret = 0
for i, line in enumerate(output[:-1].splitlines(True)):
out, cmd = line, None
if salt in line:
out, cmd = line.split(salt, 1)
if out:
if not out.endswith(b'\n'):
out += b' (no-eol)\n'
if _needescape(out):
out = _escape(out)
postout.append(indent + out)
if cmd:
ret = int(cmd.split()[1])
if ret != 0:
postout.append(indent + b'[%d]\n' % ret)
postout += after.pop(pos, [])
pos = int(cmd.split()[0])
postout += after.pop(pos, [])
if testname:
diffpath = testname
        errpath = diffpath + b'.err'
else:
diffpath = errpath = b''
diff = unified_diff(refout, postout, diffpath, errpath,
matchers=[esc, glob, regex])
for firstline in diff:
        return refout, postout, itertools.chain([firstline], diff)
return refout, postout, []
def testfile(path, shell='/bin/sh', indent=2, env=None, cleanenv=True,
debug=False, testname=None):
"""Run test at path and return input, output, and diff.
This returns a 3-tuple containing the following:
(list of lines in test, same list with actual output, diff)
diff is a generator that yields the diff between the two lists.
If a test exits with return code 80, the actual output is set to
None and diff is set to [].
Note that the TESTDIR, TESTFILE, and TESTSHELL environment
variables are available to use in the test.
:param path: Path to test file
:type path: bytes or str
:param shell: Shell to run test in
:type shell: bytes or str or list[bytes] or list[str]
:param indent: Amount of indentation to use for shell commands
:type indent: int
:param env: Optional environment variables for the test shell
:type env: dict or None
:param cleanenv: Whether or not to sanitize the environment
:type cleanenv: bool
:param debug: Whether or not to run in debug mode (don't capture stdout)
:type debug: bool
:param testname: Optional test file name (used in diff output)
:type testname: bytes or None
:return: Input, output, and diff iterables
:rtype: (list[bytes], list[bytes], collections.Iterable[bytes])
"""
f = open(path, 'rb')
try:
abspath = os.path.abspath(path)
env = env or os.environ.copy()
env['TESTDIR'] = os.fsdecode(os.path.dirname(abspath))
env['TESTFILE'] = os.fsdecode(os.path.basename(abspath))
if testname is None: # pragma: nocover
testname = os.path.basename(abspath)
return test(f, shell, indent=indent, testname=testname, env=env,
cleanenv=cleanenv, debug=debug)
finally:
f.close()
Serpens/small_bioinfo | blast_fasta.py | Python | gpl-3.0 | 1,496 | 0.007353
#!/usr/bin/env python
import os, sys
import urllib2  # needed for the HTTPError handling in run_blast below
from Bio import SeqIO
from Bio.Blast import NCBIWWW
from time import sleep
from helpers import parse_fasta, get_opts
def usage():
print """Usage: blast_fasta.py [OPTIONS] seqs.fasta
Options:
-t blast_type blastn, blastp, blastx, tblastn, tblastx, default: blastn
-o out_dir output directory, default: current directory
-p out_prefix prefix for output file names, default: blast_
"""
def run_blast(seq, blast_type='blastn'):
delay = 2
while True:
try:
result = NCBIWWW.qblast(blast_type, 'nr', seq.format('fasta')).getvalue()
sleep(delay)
return result
except urllib2.HTTPError: # something went wrong, increase delay and try again
delay *= 2
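# Illustrative usage sketch (file name is an assumption, not part of the
# script): run_blast doubles its throttle delay on each urllib2.HTTPError and
# retries, and after a successful query sleeps for the current delay so
# back-to-back requests to NCBI stay spaced out.
#     for seq in parse_fasta('example.fasta'):
#         xml = run_blast(seq, blast_type='blastp')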
if __name__=='__main__':
    try:
opts = get_opts(sys.argv[1:], 't:o:p:')
fasta_path = opts[1][0]
opts = dict(opts[0])
except:
usage()
exit(1)
blast_type = opts.get('-t', 'blastn')
out_dir = opts.get('-o', os.getcwd())
out_prefix = opts.get('-p', 'blast_')
seqs = parse_fasta(fasta_path)
if not os.path.exists(out_dir):
os.makedirs(out_dir)
for seq in seqs:
print 'Running BLAST for ' + seq.id + '... ',
blast_xml_str = run_blast(seq, blast_type)
with open(os.path.join(out_dir, out_prefix + seq.id + '.xml'), 'w') as f:
f.write(blast_xml_str)
print 'completed'
GreenVars/diary | tests/logdb_test.py | Python | mit | 2,455 | 0.002037
from diary import DiaryDB, Event
import unittest
import sqlite3
import os.path
class TestDiaryDB(unittest.TestCase):
TEMP_DB_PATH = os.path.join(os.path.dirname(__file__),
                                'testing_dir', 'temp.db')
SIMPLE_EVENT = Event("INFO", "LEVEL")
def setUp(self):
self.logdb = DiaryDB(self.TEMP_DB_PATH)
self.logdb_default = DiaryDB()
@classmethod
def tearDownClass(cls):
import os
os.remove(cls.TEMP_DB_PATH)
    def test_constructs_correctly(self):
self.assertIsInstance(self.logdb.conn, sqlite3.Connection)
self.assertIsInstance(self.logdb.cursor, sqlite3.Cursor)
def test_creates_table(self):
table = self.logdb.cursor.execute('''SELECT name FROM sqlite_master
WHERE type="table" AND name="logs"
''').fetchone()[0]
self.assertEquals(table, 'logs')
def test_creates_table_already_exists(self):
self.logdb.create_tables()
tables = self.logdb.cursor.execute('''SELECT name FROM sqlite_master
WHERE type="table" AND name="logs"
''').fetchall()
self.assertEquals(len(tables), 1)
def test_log(self):
self.logdb.log(self.SIMPLE_EVENT)
entry = self.logdb.cursor.execute('''SELECT * FROM logs ORDER BY
inputDT ASC LIMIT 1''').fetchone()
self.assertEquals(entry[0], self.SIMPLE_EVENT.dt)
self.assertEquals(entry[1], self.SIMPLE_EVENT.level)
self.assertEquals(entry[2], self.SIMPLE_EVENT.info)
def test_close(self):
self.logdb.close()
with self.assertRaises(sqlite3.ProgrammingError,
msg="Cannot operate on a closed database."):
self.logdb.conn.execute("SELECT 1 FROM logs LIMIT 1")
def test_default_path(self):
self.logdb_default.log(self.SIMPLE_EVENT)
entry = self.logdb_default.cursor.execute('''SELECT * FROM logs ORDER BY
inputDT DESC LIMIT 1''').fetchone()
self.assertEquals(entry[0], self.SIMPLE_EVENT.dt)
self.assertEquals(entry[1], self.SIMPLE_EVENT.level)
self.assertEquals(entry[2], self.SIMPLE_EVENT.info)
self.logdb_default.close()
if __name__ == '__main__':
unittest.main()
jlafon/django-rest-framework-oauth | tests/test_oauth.py | Python | mit | 19,202 | 0.003854
import time
import datetime
import oauth2 as oauth
from provider import scope as oauth2_provider_scope
from rest_framework.test import APIClient
from rest_framework_oauth.authentication import (
oauth2_provider,
OAuthAuthentication,
OAuth2Authentication
)
from rest_framework import status, permissions
from rest_framework.views import APIView
from django.conf.urls import patterns, include, url
from django.http import HttpResponse
from django.utils.http import urlencode
from django.test import TestCase
from django.contrib.auth.models import User
class OAuth2AuthenticationDebug(OAuth2Authentication):
allow_query_params_token = True
class MockView(APIView):
permission_classes = (permissions.IsAuthenticated,)
def get(self, request):
return HttpResponse({'a': 1, 'b': 2, 'c': 3})
def post(self, request):
return HttpResponse({'a': 1, 'b': 2, 'c': 3})
def put(self, request):
return HttpResponse({'a': 1, 'b': 2, 'c': 3})
urlpatterns = patterns(
'',
(r'^oauth/$', MockView.as_view(authentication_classes=[OAuthAuthentication])),
(
r'^oauth-with-scope/$',
MockView.as_view(
authentication_classes=[OAuthAuthentication],
permission_classes=[permissions.TokenHasReadWriteScope]
)
),
url(r'^auth/', include('rest_framework.urls', namespace='rest_framework')),
url(r'^oauth2/', include('provider.oauth2.urls', namespace='oauth2')),
url(r'^oauth2-test/$', MockView.as_view(authentication_classes=[OAuth2Authentication])),
url(r'^oauth2-test-debug/$', MockView.as_view(authentication_classes=[OAuth2AuthenticationDebug])),
url(
r'^oauth2-with-scope-test/$',
MockView.as_view(
authentication_classes=[OAuth2Authentication],
permission_classes=[permissions.TokenHasReadWriteScope]
)
)
)
class OAuthTests(TestCase):
"""OAuth 1.0a authentication"""
urls = 'tests.test_oauth'
def setUp(self):
# these imports are here because oauth is optional and hiding them in try..except block or compat
# could obscure problems if something breaks
from oauth_provider.models import Consumer, Scope
from oauth_provider.models import Token as OAuthToken
from oauth_provider import consts
self.consts = consts
self.csrf_client = APIClient(enforce_csrf_checks=True)
self.username = 'john'
self.email = 'lennon@thebeatles.com'
self.password = 'password'
self.user = User.objects.create_user(self.username, self.email, self.password)
self.CONSUMER_KEY = 'consumer_key'
self.CONSUMER_SECRET = 'consumer_secret'
self.TOKEN_KEY = "token_key"
self.TOKEN_SECRET = "token_secret"
self.consumer = Consumer.objects.create(
key=self.CONSUMER_KEY, secret=self.CONSUMER_SECRET,
name='example', user=self.user, status=self.consts.ACCEPTED
)
self.scope = Scope.objects.create(name="resource name", url="api/")
self.token = OAuthToken.objects.create(
user=self.user, consumer=self.consumer, scope=self.scope,
token_type=OAuthToken.ACCESS, key=self.TOKEN_KEY, secret=self.TOKEN_SECRET,
is_approved=True
)
def _create_authorization_header(self):
params = {
'oauth_version': "1.0",
'oauth_nonce': oauth.generate_nonce(),
'oauth_timestamp': int(time.time()),
'oauth_token': self.token.key,
'oauth_consumer_key': self.consumer.key
}
req = oauth.Request(method="GET", url="http://example.com
|
", parameters=params)
signature_method = oauth.SignatureMethod_PLAINTEXT()
req.sign_request(signature_method, self.consumer, self.token)
return req.to_header()["Authorization"]
def _create_authorization_url_parameters(self):
params = {
'oauth_version': "1.0",
'oauth_nonce': oauth.generate_nonce(),
'oauth_timestamp': int(time.time()),
'oauth_token': self.token.key,
'oauth_consumer_key': self.consumer.key
}
req = oauth.Request(method="GET", url="http://example.com", parameters=params)
signature_method = oauth.SignatureMethod_PLAINTEXT()
req.sign_request(signature_method, self.consumer, self.token)
return dict(req)
def test_post_form_passing_oauth(self):
"""Ensure POSTing form over OAuth with correct credentials passes and does not require CSRF"""
auth = self._create_authorization_header()
response = self.csrf_client.post('/oauth/', {'example': 'example'}, HTTP_AUTHORIZATION=auth)
self.assertEqual(response.status_code, 200)
def test_post_form_repeated_nonce_failing_oauth(self):
"""Ensure POSTing form over OAuth with repeated auth (same nonces and timestamp) credentials fails"""
auth = self._create_authorization_header()
response = self.csrf_client.post('/oauth/', {'example': 'example'}, HTTP_AUTHORIZATION=auth)
self.assertEqual(response.status_code, 200)
        # simulate replay attack: auth header contains an already used (nonce, timestamp) pair
response = self.csrf_client.post('/oauth/', {'example': 'example'}, HTTP_AUTHORIZATION=auth)
self.assertIn(response.status_code, (status.HTTP_401_UNAUTHORIZED, status.HTTP_403_FORBIDDEN))
def test_post_form_token_removed_failing_oauth(self):
"""Ensure POSTing when there is no OAuth access token in db fails"""
self.token.delete()
auth = self._create_authorization_header()
response = self.csrf_client.post('/oauth/', {'example': 'example'}, HTTP_AUTHORIZATION=auth)
self.assertIn(response.status_code, (status.HTTP_401_UNAUTHORIZED, status.HTTP_403_FORBIDDEN))
def test_post_form_consumer_status_not_accepted_failing_oauth(self):
"""Ensure POSTing when consumer status is anything other than ACCEPTED fails"""
for consumer_status in (self.consts.CANCELED, self.consts.PENDING, self.consts.REJECTED):
self.consumer.status = consumer_status
self.consumer.save()
auth = self._create_authorization_header()
response = self.csrf_client.post('/oauth/', {'example': 'example'}, HTTP_AUTHORIZATION=auth)
self.assertIn(response.status_code, (status.HTTP_401_UNAUTHORIZED, status.HTTP_403_FORBIDDEN))
def test_post_form_with_request_token_failing_oauth(self):
"""Ensure POSTing with unauthorized request token instead of access token fails"""
self.token.token_type = self.token.REQUEST
self.token.save()
auth = self._create_authorization_header()
response = self.csrf_client.post('/oauth/', {'example': 'example'}, HTTP_AUTHORIZATION=auth)
self.assertIn(response.status_code, (status.HTTP_401_UNAUTHORIZED, status.HTTP_403_FORBIDDEN))
def test_post_form_with_urlencoded_parameters(self):
"""Ensure POSTing with x-www-form-urlencoded auth parameters passes"""
params = self._create_authorization_url_parameters()
auth = self._create_authorization_header()
response = self.csrf_client.post('/oauth/', params, HTTP_AUTHORIZATION=auth)
self.assertEqual(response.status_code, 200)
def test_get_form_with_url_parameters(self):
"""Ensure GETing with auth in url parameters passes"""
params = self._create_authorization_url_parameters()
response = self.csrf_client.get('/oauth/', params)
self.assertEqual(response.status_code, 200)
def test_post_hmac_sha1_signature_passes(self):
"""Ensure POSTing using HMAC_SHA1 signature method passes"""
params = {
'oauth_version': "1.0",
'oauth_nonce': oauth.generate_nonce(),
'oauth_timestamp': int(time.time()),
'oauth_token': self.token.key,
'oauth_consumer_key': self.consumer.key
}
req = oauth.Request(method="POST", url="http://testserver/oauth/", parameters=params)
signature_method = oauth.SignatureMethod_HMAC_
daneoshiga/gpodder | src/gpodder/gtkui/interface/__init__.py | Python | gpl-3.0 | 764 | 0.001309
# -*- coding: utf-8 -*-
#
# gPodder - A media aggregator and podcast client
# Copyright (c) 2005-2015 Thomas Perl and the gPodder Team
#
# gPodder is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# gPodder is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
blasterbug/doc2md.py | doc2md.py | Python | gpl-2.0 | 7,823 | 0.005497
#!/usr/bin/python2
# -*- coding: utf-8 -*-
"""
doc2md.py generates Python documentation in the Markdown (md) format. It was
written to automatically generate documentation that can be put on Github
or Bitbucket wiki pages. It is initially based on Ferry Boender's pydocmd.
It is as of yet not very complete and is more of a Proof-of-concept than a
fully-fledged tool. Markdown is also a very restricted format and every
implementation works subtly, or completely, differently. This means output
may be different on different converters.
## Usage
$ python doc2md.py module [...]
doc2md.py scans every python file (.py) given and generates the documentation
in a subfolder `doc`.
## Example output
- http://github.com/blasterbug/doc2md.py/wiki/doc2md
- http://github.com/blasterbug/SmileANN/wiki/neuron
- http://github.com/blasterbug/SmileANN/wiki/faces
"""
import sys
import os
import imp
import inspect
__author__ = "Benjamin Sientzoff"
__version__ = "0.1.2b"
__maintainer__ = "Benjamin Sientzoff (blasterbug)"
__license__ = "GNU GPL V2"
def remove_extension( fl ):
"""
    Remove the extension from the program file name
    """
    # does not handle multiple dots
return str(fl).split('.')[0]
def fmt_doc(doc, indent=''):
"""
Format a doc-string.
"""
s = ''
for line in doc.lstrip().splitlines():
s += '%s%s \n' % (indent, line.strip())
return s.rstrip()
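# Illustrative worked example (assumed input, not part of doc2md): each line is
# stripped, prefixed with `indent`, and given trailing whitespace for Markdown
# line breaks, so
#     fmt_doc("first line\n    second line", indent='> ')
# yields something like
#     '> first line \n> second line'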
def insp_file(file_name):
"""
Inspect a file and return module information
"""
mod_inst = imp.load_source(remove_extension( file_name ), file_name)
if not mod_inst:
sys.stderr.write("Failed to import '%s'\n" % (file_name))
sys.exit(2)
mod_name = inspect.getmodulename(file_name)
if not mod_name:
mod_name = os.path.splitext(os.path.basename(file_name))[0]
return insp_mod(mod_name, mod_inst)
def insp_mod(mod_name, mod_inst):
"""
Inspect a module return doc, vars, functions and classes.
"""
info = {
'name': mod_name,
'inst': mod_inst,
'author': {},
'doc': '',
'vars': [],
'functions': [],
'classes': [],
}
# Get module documentation
mod_doc = inspect.getdoc(mod_inst)
if mod_doc:
info['doc'] = mod_doc
for attr_name in ['author', 'copyright', 'license', 'version', 'maintainer', 'email']:
if hasattr(mod_inst, '__%s__' % (attr_name)):
info['author'][attr_name] = getattr(mod_inst, '__%s__' % (attr_name))
# Get module global vars
for member_name, member_inst in inspect.getmembers(mod_inst):
if not member_name.startswith('_') and \
not inspect.isfunction(member_inst) and \
not inspect.isclass(member_inst) and \
not inspect.ismodule(member_inst) and \
member_inst.__module__ == mod_name and \
member_name not in mod_inst.__builtins__:
info['vars'].append( (member_name, member_inst) )
# Get module functions
    functions = inspect.getmembers(mod_inst, inspect.isfunction)
if functions:
for func_name, func_inst in functions:
if func_inst.__module__ == mod_name :
info['functions'].append(insp_method(func_name, func_inst))
# Get module classes
classes = inspect.getmembers(mod_inst, inspect.isclass)
if classes:
for class_name, class_inst in classes:
if class_inst.__module__ == mod_name :
info['classes'].append(insp_class(class_name, class_inst))
return info
def insp_class(class_name, class_inst):
"""
Inspect class and return doc, methods.
"""
info = {
'name': class_name,
'inst': class_inst,
'doc': '',
'methods': [],
}
# Get class documentation
class_doc = inspect.getdoc(class_inst)
# if class_doc:
#info['doc'] = fmt_doc(class_doc)
# Get class methods
methods = inspect.getmembers(class_inst, inspect.ismethod)
for method_name, method_inst in methods:
info['methods'].append(insp_method(method_name, method_inst))
return info
def insp_method(method_name, method_inst):
"""
Inspect a method and return arguments, doc.
"""
info = {
'name': method_name,
'inst': method_inst,
'args': [],
'doc': ''
}
# Get method arguments
method_args = inspect.getargspec(method_inst)
for arg in method_args.args:
if arg != 'self':
info['args'].append(arg)
    # Apply default argument values to arguments
if method_args.defaults:
a_pos = len(info['args']) - len(method_args.defaults)
for pos, default in enumerate(method_args.defaults):
info['args'][a_pos + pos] = '%s=%s' % (info['args'][a_pos + pos], default)
# Print method documentation
method_doc = inspect.getdoc(method_inst)
if method_doc:
info['doc'] = fmt_doc(method_doc)
return info
def to_markdown( text_block ) :
"""
Markdownify an inspect file
:param text_block: inspect file to turn to Markdown
:return: Markdown doc into a string
"""
doc_output = ("# %s \n" % file_i['name'] )
doc_output += file_i['doc'] + ' \n'
author = ''
if 'author' in file_i['author']:
author += file_i['author']['author'] + ' '
if 'email' in file_i['author']:
author += '<%s>' % (file_i['author']['email'])
if author:
doc_output += str("\n __Author__: %s \n" % author )
author_attrs = [
('Version', 'version'),
('Copyright', 'copyright'),
('License', 'license'),
]
for attr_friendly, attr_name in author_attrs:
if attr_name in file_i['author']:
doc_output += " __%s__: %s \n" % (attr_friendly, file_i['author'][attr_name])
if file_i['vars']:
doc_output += "\n## Variables\n"
for var_name, var_inst in file_i['vars']:
doc_output += " - `%s`: %s\n" % (var_name, var_inst)
if file_i['functions']:
doc_output += "\n\n## Functions\n"
for function_i in file_i['functions']:
if function_i['name'].startswith('_'):
continue
doc_output += "\n\n### `%s(%s)`\n" % (function_i['name'], ', '.join(function_i['args']))
if function_i['doc']:
doc_output += "%s" % (function_i['doc'])
else:
doc_output += "No documentation for this function "
if file_i['classes']:
doc_output += "\n\n## Classes\n"
for class_i in file_i['classes']:
doc_output += "\n\n### class `%s()`\n" % (class_i['name'])
if class_i['doc']:
doc_output += "%s " % (class_i['doc'])
else:
doc_output += "No documentation for this class "
doc_output += "\n\n### Methods:\n"
for method_i in class_i['methods']:
if method_i['name'] != '__init__' and method_i['name'].startswith('_'):
continue
doc_output += "\n\n#### def `%s(%s)`\n" % (method_i['name'], ', '.join(method_i['args']))
doc_output += "%s " % (method_i['doc'])
return doc_output
if __name__ == '__main__':
if 1 < len(sys.argv) :
doc_dir = "doc"
for arg in sys.argv[1:] :
file_i = insp_file(arg)
doc_content = to_markdown(file_i)
if not os.path.exists( doc_dir ) :
os.makedirs( doc_dir )
doc_file = open( doc_dir + "/" + remove_extension(arg) + ".md", 'w')
sys.stdout.write( "Writing documentation for %s in doc/\n" % arg )
doc_file.write( doc_content )
doc_file.close()
else:
sys.stderr.write('Usage: %s <file.py>\n' % (sys.argv[0]))
sys.exit(1)
talapus/Ophidian | Academia/Modules/list_all_installed_modules.py | Python | bsd-3-clause | 147 | 0
#!/usr/bin/env python
import sys
import textwrap
names = sorted(sys.modules.keys())
name_text = ', '.join(names)
print textwrap.fill(name_text)
manene/SAE_Sampling | spark/genGraph.py | Python | mit | 715 | 0
#!/usr/bin/python
# -*- coding:utf-8 -*-
# Created Time: Fri Jul 17 00:58:20 2015
# Purpose: generate a bidirected graph
# Mail: hewr2010@gmail.com
__author__ = "Wayne Ho"
import sys
import random
if __name__ == "__main__":
if len(sys.argv) < 3:
print("./%s [#vertices] [#edges]" % sys.argv[0])
exit()
    n, m = int(sys.argv[1]), int(sys.argv[2])
print n, m
pool = {}
for i in xrange(m):
while True:
            x, y = int(n * random.random()), int(n * random.random())
if x > y:
x, y = y, x
if x == y:
continue
if (x, y) not in pool:
break
pool[(x, y)] = True
print x, y
mwbetrg/skripbatak | createlp2016table.py | Python | bsd-3-clause | 2,171 | 0.004606
import site
import sys, os
from peewee import *
import datetime
import time
import calendar
import sqlite3
import gzip
import shutil
#-----------------------------------------------------------------------
if os.path.exists('/storage/extSdCard'):
database = SqliteDatabase('/storage/extSdCard/mydb/lessonplan2010.db', **{})
backupdir = '/storage/extSdCard/dbbackup/'
db = '/storage/extSdCard/mydb/lessonplan2010.db'
else:
database = SqliteDatabase('lessonplan2010.db', **{})
class BaseModel(Model):
class Meta:
database = database
class Lessonplanbank(BaseModel):
activity1 = CharField(null=True)
activity2 = CharField(null=True)
assimilation = CharField(null=True)
bank = PrimaryKeyField(db_column='bank_id', null=True)
content = CharField(null=True)
duration = CharField(null=True)
exercise = TextField(null=True)
handout = TextField(null=True)
impact = CharField(null=True)
level = CharField(null=True)
lo1 = CharField(null=True)
lo2 = CharField(null=True)
lo3 = CharField(null=True)
note = CharField(null=True)
theme = CharField(null=True)
tingkatan = CharField(null=True)
topic = CharField(null=True)
week = IntegerField(null=True)
class Meta:
        db_table = 'lessonplanbank'
class Lessonplan2016(BaseModel):
activity1 = CharField(null=True)
activity2 = CharField(null=True)
assimilation = CharField(null=True)
content = CharField(null=True)
date = IntegerField(null=True)
duration = CharField(null=True)
exercise = TextField(null=True)
handout = TextField(null=True)
impact = CharField(null=True)
lo1 = CharField(null=True)
lo2 = CharField(null=True)
lo3 = CharField(null=True)
note = CharField(null=True)
theme = CharField(null=True)
timeend = CharField(null=True)
timestart = CharField(null=True)
tingkatan = CharField(null=True)
topic = CharField(null=True)
week = CharField(null=True)
class Meta:
db_table = 'lessonplan2016'
database.connect()
def create_tables():
database.connect()
database.create_tables([Lessonplan2016,])
create_tables()
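# Illustrative usage sketch (field values are made up, not part of the script):
# once the tables exist, rows can be added and queried through peewee, e.g.
#     Lessonplan2016.create(week='1', tingkatan='4', topic='Algebra',
#                           timestart='0800', timeend='0840')
#     for lp in Lessonplan2016.select().where(Lessonplan2016.week == '1'):
#         print lp.topic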
Pistachitos/Sick-Beard | sickbeard/notifiers/growl.py | Python | gpl-3.0 | 6,111 | 0.014237
# Author: Nic Wolfe <nic@wolfeden.ca>
# URL: http://code.google.com/p/sickbeard/
#
# This file is part of Sick Beard.
#
# Sick Beard is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Sick Beard is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Sick Beard. If not, see <http://www.gnu.org/licenses/>.
import socket
import sickbeard
from sickbeard import logger, common
from sickbeard.exceptions import ex
from lib.growl import gntp
class GrowlNotifier:
def test_notify(self, host, password):
self._sendRegistration(host, password, 'Test')
return self._sendGrowl("Test Growl", "Testing Growl settings from Sick Beard", "Test", host, password, force=True)
def notify_snatch(self, ep_name):
if sickbeard.GROWL_NOTIFY_ONSNATCH:
self._sendGrowl(common.notifyStrings[common.NOTIFY_SNATCH], ep_name)
def notify_download(self, ep_name):
if sickbeard.GROWL_NOTIFY_ONDOWNLOAD:
self._sendGrowl(common.notifyStrings[common.NOTIFY_DOWNLOAD], ep_name)
def notify_subtitle_download(self, ep_name, lang):
if sickbeard.GROWL_NOTIFY_ONSUBTITLEDOWNLOAD:
self._sendGrowl(common.notifyStrings[common.NOTIFY_SUBTITLE_DOWNLOAD], ep_name + ": " + lang)
def _send_growl(self, options,message=None):
#Send Notification
notice = gntp.GNTPNotice()
#Required
notice.add_header('Application-Name',options['app'])
        notice.add_header('Notification-Name',options['name'])
notice.add_header('Notification-Title',options['title'])
if options['password']:
notice.set_password(options['password'])
#Optional
if options['sticky']:
notice.add_header('Notification-Sticky',options['sticky'])
if options['priority']:
notice.add_header('Notification-Priority',options['priority'])
if options['icon']:
            notice.add_header('Notification-Icon', 'https://raw.github.com/midgetspy/Sick-Beard/master/data/images/sickbeard.png')
if message:
notice.add_header('Notification-Text',message)
response = self._send(options['host'],options['port'],notice.encode(),options['debug'])
if isinstance(response,gntp.GNTPOK): return True
return False
def _send(self, host,port,data,debug=False):
if debug: print '<Sending>\n',data,'\n</Sending>'
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((host,port))
s.send(data)
response = gntp.parse_gntp(s.recv(1024))
s.close()
        if debug: print '<Received>\n',response,'\n</Received>'
return response
def _sendGrowl(self, title="Sick Beard Notification", message=None, name=None, host=None, password=None, force=False):
if not sickbeard.USE_GROWL and not force:
return False
if name == None:
name = title
if host == None:
hostParts = sickbeard.GROWL_HOST.split(':')
else:
hostParts = host.split(':')
if len(hostParts) != 2 or hostParts[1] == '':
port = 23053
else:
port = int(hostParts[1])
growlHosts = [(hostParts[0],port)]
opts = {}
opts['name'] = name
opts['title'] = title
opts['app'] = 'SickBeard'
opts['sticky'] = None
opts['priority'] = None
opts['debug'] = False
if password == None:
opts['password'] = sickbeard.GROWL_PASSWORD
else:
opts['password'] = password
opts['icon'] = True
for pc in growlHosts:
opts['host'] = pc[0]
opts['port'] = pc[1]
logger.log(u"Sending growl to "+opts['host']+":"+str(opts['port'])+": "+message)
try:
return self._send_growl(opts, message)
except socket.error, e:
logger.log(u"Unable to send growl to "+opts['host']+":"+str(opts['port'])+": "+ex(e))
return False
def _sendRegistration(self, host=None, password=None, name='Sick Beard Notification'):
opts = {}
if host == None:
hostParts = sickbeard.GROWL_HOST.split(':')
else:
hostParts = host.split(':')
if len(hostParts) != 2 or hostParts[1] == '':
port = 23053
else:
port = int(hostParts[1])
opts['host'] = hostParts[0]
opts['port'] = port
if password == None:
opts['password'] = sickbeard.GROWL_PASSWORD
else:
opts['password'] = password
opts['app'] = 'SickBeard'
opts['debug'] = False
#Send Registration
register = gntp.GNTPRegister()
register.add_header('Application-Name', opts['app'])
register.add_header('Application-Icon', 'https://raw.github.com/midgetspy/Sick-Beard/master/data/images/sickbeard.png')
register.add_notification('Test', True)
for i in common.notifyStrings:
register.add_notification(common.notifyStrings[i], True)
if opts['password']:
register.set_password(opts['password'])
try:
return self._send(opts['host'],opts['port'],register.encode(),opts['debug'])
except socket.error, e:
logger.log(u"Unable to send growl to "+opts['host']+":"+str(opts['port'])+": "+str(e).decode('utf-8'))
return False
notifier = GrowlNotifier
candlepin/virt-who | virtwho/virt/kubevirt/client.py | Python | gpl-2.0 | 4,273 | 0.000468
#
# Copyright 2019 Red Hat, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
# Refer to the README and COPYING files for full details of the license
#
from __future__ import absolute_import
import json
import ssl
import urllib3
from urllib3.util.timeout import Timeout
from virtwho.virt.kubevirt import config
_TIMEOUT = 60
class KubeClient:
def __init__(self, path, version, insecure):
cfg = config.Configuration()
cl = config._get_kube_config_loader_for_yaml_file(path)
cl.load_and_set(cfg)
if insecure:
self._pool_manager = urllib3.PoolManager(
num_pools=4,
maxsize=4,
cert_reqs=ssl.CERT_NONE,
assert_hostname=False
)
else:
cert_reqs = ssl.CERT_REQUIRED
ca_certs = cfg.ssl_ca_cert
cert_file = cfg.cert_file
key_file = cfg.key_file
self._pool_manager = urllib3.PoolManager(
num_pools=4,
maxsize=4,
cert_reqs=cert_reqs,
ca_certs=ca_certs,
cert_file=cert_file,
key_file=key_file
)
self.host = cfg.host
self.token = cfg.token
if not version:
self._version = self._kubevirt_version()
else:
self._version = version
def get_nodes(self):
return self._request('/api/v1/nodes')
def get_vms(self):
return self._request('/apis/kubevirt.io/' + self._version + '/virtualmachineinstances')
def _kubevirt_version(self):
versions = self._request('/apis/kubevirt.io')
return versions['preferredVersion']['version']
def _request(self, path):
header_params = {}
header_params['Accept'] = 'application/json'
header_params['Content-Type'] = 'application/json'
header_params['Authorization'] = self.token
url = self.host + path
try:
timeout = Timeout(connect=_TIMEOUT, read=_TIMEOUT)
r = self._pool_manager.request(
"GET",
url,
fields=None,
preload_content=True,
headers=header_params,
timeout=timeout
)
except urllib3.exceptions.SSLError as e:
msg = "{0}\n{1}".format(type(e).__name__, str(e))
raise ApiException(status=0, reason=msg)
data = r.data.decode('utf8')
if not 200 <= r.status <= 299:
raise ApiException(http_resp=r)
# fetch data from response object
try:
data = json.loads(data)
except ValueError:
data = r.data
return data
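# Illustrative usage sketch (kubeconfig path and version are assumptions, not
# part of virt-who): the client reads connection details from a kubeconfig file
# and then issues authenticated GETs against the cluster, e.g.
#     client = KubeClient('/root/.kube/config', 'v1alpha3', insecure=False)
#     nodes = client.get_nodes()['items']
#     vms = client.get_vms()['items']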
class ApiException(Exception):
def __init__(self, status=None, reason=None, http_resp=None):
if http_resp:
self.status = http_resp.status
self.reason = http_resp.reason
self.body = http_resp.data
self.headers = http_resp.getheaders()
else:
self.status = status
self.reason = reason
self.body = None
self.headers = None
def __str__(self):
"""
Custom error messages for exception
"""
error_message = "({0})\n"\
"Reason: {1}\n".format(self.status, self.reason)
if self.headers:
error_message += "HTTP response headers: {0}\n".format(self.headers)
if self.body:
error_message += "HTTP response body: {0}\n".format(self.body)
return error_message
weaver-viii/h2o-3 | h2o-py/tests/testdir_algos/glm/pyunit_covtype_get_future_model.py | Python | apache-2.0 | 1,261 | 0.014274
import sys
sys.path.insert(1, "../../../")
import h2o
import random
def test_get_future_model(ip,port):
covtype=h2o.upload_file(h2o.locate("smalldata/covtype/covtype.altered.gz"))
myY=54
myX=list(set(range(54)) - set([20,28])) # Cols 21 and 29 are constant, so must be explicitly ignored
# Set response to be indicator of a particular class
res_class=random.sample(range(1,5), 1)[0]
covtype[myY] = covtype[myY] == res_class
covtype[myY] = covtype[myY].asfactor()
# L2: alpha=0, lambda=0
covtype_h2o1 = h2o.start_glm_job(y=covtype[myY], x=covtype[myX], family="binomial", alpha=[0], Lambda=[0])
# Elastic: alpha=0.5, lambda=1e-4
covtype_h2o2 = h2o.start_glm_job(y=covtype[myY], x=covtype[myX], family="binomial", alpha=[0.5], Lambda=[1e-4])
# L1: alpha=1, lambda=1e-4
    covtype_h2o3 = h2o.start_glm_job(y=covtype[myY], x=covtype[myX], family="binomial", alpha=[1], Lambda=[1e-4])
covtype_h2o1 = h2o.get_future_model(covtype_h2o1)
print(covtype_h2o1)
covtype_h2o2 = h2o.get_future_model(covtype_h2o2)
print(covtype_h2o2)
covtype_h2o3 = h2o.get_future_model(covtype_h2o3)
print(covtype_h2o3)
if __name__ == "__main__":
h2o.run_test(sys.argv, test_get_future_model)
FeiZhan/Algo-Collection | answers/hackerrank/Check Subset.py | Python | mit | 423 | 0.016627
#@result Submitted a few seconds ago • Score: 10.00 Status: Accepted Test Case #0: 0s Test Case #1: 0.01s Test Case #2: 0s Test Case #3: 0s Test Case #4: 0.01s Test Case #5: 0s
for i in range(int(raw_input())): #More than 4 lines will result in 0 score. Blank lines won't be counted.
a = int(raw_input()); A = set(raw_input().split())
b = int(raw_input()); B = set(raw_input().split())
print B == B.union(A)
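# Illustrative note (example values, not part of the submission): B == B.union(A)
# is True exactly when A is a subset of B, e.g.
#     A = {'1', '2'}; B = {'1', '2', '3'}  ->  B.union(A) == B  ->  True
#     A = {'4'};      B = {'1', '2', '3'}  ->  B.union(A) != B  ->  False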
cynapse/cynin | src/ubify.cyninv2theme/ubify/cyninv2theme/browser/addcontentselector.py | Python | gpl-3.0 | 3,531 | 0.016709
###############################################################################
#cyn.in is an open source Collaborative Knowledge Management Appliance that
#enables teams to seamlessly work together on files, documents and content in
#a secure central environment.
#
#cyn.in v2 an open source appliance is distributed under the GPL v3 license
#along with commercial support options.
#
#cyn.in is a Cynapse Invention.
#
#Copyright (C) 2008 Cynapse India Pvt. Ltd.
#
#This program is free software: you can redistribute it and/or modify it under
#the terms of the GNU General Public License as published by the Free Software
#Foundation, either version 3 of the License, or any later version and observe
#the Additional Terms applicable to this program and must display appropriate
#legal notices. In accordance with Section 7(b) of the GNU General Public
#License version 3, these Appropriate Legal Notices must retain the display of
#the "Powered by cyn.in" AND "A Cynapse Invention" logos. You should have
#received a copy of the detailed Additional Terms License with this program.
#
#This program is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
#Public License for more details.
#
#You should have received a copy of the GNU General Public License along with
#this program. If not, see <http://www.gnu.org/licenses/>.
#
#You can contact Cynapse at support@cynapse.com with any problems with cyn.in.
#For any queries regarding the licensing, please send your mails to
# legal@cynapse.com
#
#You can also contact Cynapse at:
#802, Building No. 1,
#Dheeraj Sagar, Malad(W)
#Mumbai-400064, India
###############################################################################
from Products.Five import BrowserView
from Products.Five.browser.pagetemplatefile import ViewPageTemplateFile
from ubify.policy.config import spacesdefaultaddablenonfolderishtypes
from zope.component import getUtility
from zope.app.publisher.interfaces.browser import IBrowserMenu
class AddContentSelector(BrowserView):
"""Contains backend code for the addcontentselector
"""
template = ViewPageTemplateFile('addcontentselector.pt')
allowed_types = spacesdefaultaddablenonfolderishtypes + ('ContentSpace',)
def __call__(self):
return self.template()
def isAllowedtoAdd(self,typename):
menu = getUtility(IBrowserMenu, name='plone_contentmenu_factory')
self.currentcontextmenu = []
object_typename = self.context.portal_type
if object_typename in ('RecycleBin','Plone Site'):
self.currentcontextmenu = []
else:
self.currentcontextmenu = menu.getMenuItems(self.context, self.request)
self.allowedhere = False
if len(self.currentcontextmenu) > 0:
temp_list = [action for action in self.currentcontextmenu if action.has_key('extra') and action['extra'].has_key('id') and action['extra']['id'].lower() == typename.lower()]
self.allowedhere = len(temp_list) > 0
self.displaycurrentcontexttitle = ""
if self.allowedhere:
self.displaycurrentcontexttitle = self.context.Title()
if object_typename == 'ContentRoot':
self.displaycurrentcontexttitle = "Home"
asedunov/intellij-community | python/helpers/pydev/_pydevd_bundle/pydevd_utils.py | Python | apache-2.0 | 7,871 | 0.004447
from __future__ import nested_scopes
import traceback
import os
try:
from urllib import quote
except:
from urllib.parse import quote # @UnresolvedImport
import inspect
from _pydevd_bundle.pydevd_constants import IS_PY3K
import sys
from _pydev_bundle import pydev_log
def save_main_module(file, module_name):
# patch provided by: Scott Schlesier - when script is run, it does not
# use globals from pydevd:
# This will prevent the pydevd script from contaminating the namespace for the script to be debugged
# pretend pydevd is not the main module, and
# convince the file to be debugged that it was loaded as main
sys.modules[module_name] = sys.modules['__main__']
sys.modules[module_name].__name__ = module_name
from imp import new_module
m = new_module('__main__')
sys.modules['__main__'] = m
if hasattr(sys.modules[module_name], '__loader__'):
setattr(m, '__loader__', getattr(sys.modules[module_name], '__loader__'))
m.__file__ = file
return m
def to_number(x):
if is_string(x):
try:
n = float(x)
return n
except ValueError:
pass
l = x.find('(')
if l != -1:
y = x[0:l-1]
#print y
try:
n = float(y)
return n
except ValueError:
pass
return None
def compare_object_attrs(x, y):
try:
if x == y:
return 0
x_num = to_number(x)
y_num = to_number(y)
if x_num is not None and y_num is not None:
if x_num - y_num<0:
return -1
else:
return 1
if '__len__' == x:
return -1
if '__len__' == y:
return 1
return x.__cmp__(y)
except:
if IS_PY3K:
return (to_string(x) > to_string(y)) - (to_string(x) < to_string(y))
else:
return cmp(to_string(x), to_string(y))
def cmp_to_key(mycmp):
'Convert a cmp= function into a key= function'
class K(object):
def __init__(self, obj, *args):
self.obj = obj
def __lt__(self, other):
return mycmp(self.obj, other.obj) < 0
def __gt__(self, other):
return mycmp(self.obj, other.obj) > 0
def __eq__(self, other):
return mycmp(self.obj, other.obj) == 0
def __le__(self, other):
return mycmp(self.obj, other.obj) <= 0
def __ge__(self, other):
return mycmp(self.obj, other.obj) >= 0
def __ne__(self, other):
return mycmp(self.obj, other.obj) != 0
return K
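# Illustrative usage sketch (sample values, not part of pydevd): cmp_to_key
# wraps an old-style three-way comparison so it can drive sorted() on Python 3
# as well, e.g.
#     sorted(['__len__', 'b', '10', '2'], key=cmp_to_key(compare_object_attrs))
# which puts '__len__' first and orders numeric-looking strings numerically.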
if IS_PY3K:
def is_string(x):
return isinstance(x, str)
else:
def is_string(x):
return isinstance(x, basestring)
def to_string(x):
if is_string(x):
return x
else:
return str(x)
def print_exc():
if traceback:
traceback.print_exc()
if IS_PY3K:
def quote_smart(s, safe='/'):
return quote(s, safe)
else:
def quote_smart(s, safe='/'):
if isinstance(s, unicode):
s = s.encode('utf-8')
return quote(s, safe)
def get_clsname_for_code(code, frame):
clsname = None
if len(code.co_varnames) > 0:
# We are checking the first argument of the function
# (`self` or `cls` for methods).
first_arg_name = code.co_varnames[0]
if first_arg_name in frame.f_locals:
first_arg_obj = frame.f_locals[first_arg_name]
if inspect.isclass(first_arg_obj): # class method
first_arg_class = first_arg_obj
else: # instance method
first_arg_class = first_arg_obj.__class__
func_name = code.co_name
if hasattr(first_arg_class, func_name):
method = getattr(first_arg_class, func_name)
func_code = None
if hasattr(method, 'func_code'): # Python2
func_code = method.func_code
elif hasattr(method, '__code__'): # Python3
func_code = method.__code__
if func_code and func_code == code:
clsname = first_arg_class.__name__
return clsname
def _get_project_roots(project_roots_cache=[]):
# Note: the project_roots_cache is the same instance among the many calls to the method
if not project_roots_cache:
roots = os.getenv('IDE_PROJECT_ROOTS', '').split(os.pathsep)
pydev_log.debug("IDE_PROJECT_ROOTS %s\n" % roots)
new_roots = []
for root in roots:
new_roots.append(os.path.normcase(root))
        project_roots_cache.append(new_roots)
return project_roots_cache[-1] # returns the project roots with case normalized
def _get_library_roots(library_roots_cache=[]):
    # Note: the library_roots_cache is the same instance among the many calls to the method
if not library_roots_cache:
roots = os.getenv('LIBRARY_ROOTS', '').split(os.pathsep)
pydev_log.debug("LIBRARY_ROOTS %s\n" % roots)
new_roots = []
for root in roots:
new_roots.append(os.path.normcase(root))
library_roots_cache.append(new_roots)
return library_roots_cache[-1] # returns the project roots with case normalized
def not_in_project_roots(filename, filename_to_not_in_scope_cache={}):
# Note: the filename_to_not_in_scope_cache is the same instance among the many calls to the method
try:
return filename_to_not_in_scope_cache[filename]
except:
project_roots = _get_project_roots()
original_filename = filename
if not os.path.isabs(filename) and not filename.startswith('<'):
filename = os.path.abspath(filename)
filename = os.path.normcase(filename)
for root in project_roots:
if filename.startswith(root):
filename_to_not_in_scope_cache[original_filename] = False
break
else: # for else (only called if the break wasn't reached).
filename_to_not_in_scope_cache[original_filename] = True
if not filename_to_not_in_scope_cache[original_filename]:
# additional check if interpreter is situated in a project directory
library_roots = _get_library_roots()
for root in library_roots:
if root != '' and filename.startswith(root):
filename_to_not_in_scope_cache[original_filename] = True
# at this point it must be loaded.
return filename_to_not_in_scope_cache[original_filename]
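# Editor's note (illustrative assumption): with IDE_PROJECT_ROOTS=/home/user/proj in the
# environment, not_in_project_roots('/home/user/proj/app.py') returns False, while a file
# that also sits under a LIBRARY_ROOTS entry is reported as outside the project.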
def is_filter_enabled():
return os.getenv('PYDEVD_FILTERS') is not None
def is_filter_libraries():
is_filter = os.getenv('PYDEVD_FILTER_LIBRARIES') is not None
pydev_log.debug("PYDEVD_FILTER_LIBRARIES %s\n" % is_filter)
return is_filter
def _get_stepping_filters(filters_cache=[]):
if not filters_cache:
filters = os.getenv('PYDEVD_FILTERS', '').split(';')
pydev_log.debug("PYDEVD_FILTERS %s\n" % filters)
new_filters = []
for new_filter in filters:
new_filters.append(new_filter)
filters_cache.append(new_filters)
return filters_cache[-1]
def is_ignored_by_filter(filename, filename_to_ignored_by_filters_cache={}):
try:
return filename_to_ignored_by_filters_cache[filename]
except:
import fnmatch
for stepping_filter in _get_stepping_filters():
if fnmatch.fnmatch(filename, stepping_filter):
pydev_log.debug("File %s ignored by filter %s" % (filename, stepping_filter))
filename_to_ignored_by_filters_cache[filename] = True
break
else:
filename_to_ignored_by_filters_cache[filename] = False
return filename_to_ignored_by_filters_cache[filename]
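# Editor's note (illustrative assumption): PYDEVD_FILTERS is read as a ';'-separated list of
# fnmatch patterns, so e.g. PYDEVD_FILTERS="*/site-packages/*;*/django/*" would make
# is_ignored_by_filter() return True for any filename matching either pattern.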
|
icyflame/batman
|
pywikibot/userinterfaces/gui.py
|
Python
|
mit
| 21,774
| 0.000413
|
# -*- coding: utf-8 -*-
"""
A window with a unicode textfield where the user can edit.
Useful for editing the contents of an article.
"""
from __future__ import absolute_import, unicode_literals
#
# (C) Rob W.W. Hooft, 2003
# (C) Daniel Herding, 2004
# Wikiwichtel
# (C) Pywikibot team, 2008-2014
#
# Distributed under the terms of the MIT license.
#
__version__ = '$Id$'
#
import sys
if sys.version_info[0] > 2:
import tkinter as Tkinter
from tkinter.scrolledtext import ScrolledText
from tkinter import simpledialog as tkSimpleDialog
else:
import Tkinter
from ScrolledText import ScrolledText
import tkSimpleDialog
from idlelib import SearchDialog, ReplaceDialog, configDialog
from idlelib.configHandler import idleConf
from idlelib.MultiCall import MultiCallCreator
import pywikibot
from pywikibot import __url__
class TextEditor(ScrolledText):
"""A text widget with some editing enhancements.
A lot of code here is copied or adapted from the idlelib/EditorWindow.py
file in the standard Python distribution.
"""
def __init__(self, master=None, **kwargs):
# get default settings from user's IDLE configuration
        currentTheme = idleConf.CurrentTheme()
textcf = dict(padx=5, wrap='word', undo='True',
foreground=idleConf.GetHighlight(currentTheme,
'normal', fgBg='fg'),
background=idleConf.GetHighlight(currentTheme,
'normal', fgBg='bg'),
highlightcolor=idleConf.GetHighlight(currentTheme,
'hilite', fgBg='fg'),
highlightbackground=idleConf.GetHighlight(currentTheme,
'hilite',
fgBg='bg'),
insertbackground=idleConf.GetHighlight(currentTheme,
'cursor',
fgBg='fg'),
width=idleConf.GetOption('main', 'EditorWindow', 'width'),
height=idleConf.GetOption('main', 'EditorWindow',
'height')
)
fontWeight = 'normal'
if idleConf.GetOption('main', 'EditorWindow', 'font-bold', type='bool'):
fontWeight = 'bold'
textcf['font'] = (idleConf.GetOption('main', 'EditorWindow', 'font'),
idleConf.GetOption('main', 'EditorWindow',
'font-size'),
fontWeight)
# override defaults with any user-specified settings
textcf.update(kwargs)
ScrolledText.__init__(self, master, **textcf)
def add_bindings(self):
# due to IDLE dependencies, this can't be called from __init__
# add key and event bindings
self.bind("<<cut>>", self.cut)
self.bind("<<copy>>", self.copy)
self.bind("<<paste>>", self.paste)
self.bind("<<select-all>>", self.select_all)
self.bind("<<remove-selection>>", self.remove_selection)
self.bind("<<find>>", self.find_event)
self.bind("<<find-again>>", self.find_again_event)
self.bind("<<find-selection>>", self.find_selection_event)
self.bind("<<replace>>", self.replace_event)
self.bind("<<goto-line>>", self.goto_line_event)
self.bind("<<del-word-left>>", self.del_word_left)
self.bind("<<del-word-right>>", self.del_word_right)
keydefs = {'<<copy>>': ['<Control-Key-c>', '<Control-Key-C>'],
'<<cut>>': ['<Control-Key-x>', '<Control-Key-X>'],
'<<del-word-left>>': ['<Control-Key-BackSpace>'],
'<<del-word-right>>': ['<Control-Key-Delete>'],
'<<end-of-file>>': ['<Control-Key-d>', '<Control-Key-D>'],
'<<find-again>>': ['<Control-Key-g>', '<Key-F3>'],
'<<find-selection>>': ['<Control-Key-F3>'],
'<<find>>': ['<Control-Key-f>', '<Control-Key-F>'],
'<<goto-line>>': ['<Alt-Key-g>', '<Meta-Key-g>'],
'<<paste>>': ['<Control-Key-v>', '<Control-Key-V>'],
'<<redo>>': ['<Control-Shift-Key-Z>'],
'<<remove-selection>>': ['<Key-Escape>'],
'<<replace>>': ['<Control-Key-h>', '<Control-Key-H>'],
'<<select-all>>': ['<Control-Key-a>'],
'<<undo>>': ['<Control-Key-z>', '<Control-Key-Z>'],
}
for event, keylist in keydefs.items():
if keylist:
self.event_add(event, *keylist)
def cut(self, event):
if self.tag_ranges("sel"):
self.event_generate("<<Cut>>")
return "break"
def copy(self, event):
if self.tag_ranges("sel"):
self.event_generate("<<Copy>>")
return "break"
def paste(self, event):
self.event_generate("<<Paste>>")
return "break"
def select_all(self, event=None):
self.tag_add("sel", "1.0", "end-1c")
self.mark_set("insert", "1.0")
self.see("insert")
return "break"
def remove_selection(self, event=None):
self.tag_remove("sel", "1.0", "end")
self.see("insert")
def del_word_left(self, event):
self.event_generate('<Meta-Delete>')
return "break"
def del_word_right(self, event=None):
self.event_generate('<Meta-d>')
return "break"
def find_event(self, event=None):
if not self.tag_ranges("sel"):
found = self.tag_ranges("found")
if found:
self.tag_add("sel", found[0], found[1])
else:
self.tag_add("sel", "1.0", "1.0+1c")
SearchDialog.find(self)
return "break"
def find_again_event(self, event=None):
SearchDialog.find_again(self)
return "break"
def find_selection_event(self, event=None):
SearchDialog.find_selection(self)
return "break"
def replace_event(self, event=None):
ReplaceDialog.replace(self)
return "break"
def find_all(self, s):
"""
Highlight all occurrences of string s, and select the first one.
If the string has already been highlighted, jump to the next occurrence
after the current selection. (You cannot go backwards using the
button, but you can manually place the cursor anywhere in the
document to start searching from that point.)
"""
if hasattr(self, "_highlight") and self._highlight == s:
try:
if self.get(Tkinter.SEL_FIRST, Tkinter.SEL_LAST) == s:
return self.find_selection_event(None)
else:
# user must have changed the selection
found = self.tag_nextrange('found', Tkinter.SEL_LAST)
except Tkinter.TclError:
# user must have unset the selection
found = self.tag_nextrange('found', Tkinter.INSERT)
if not found:
# at last occurrence, scroll back to the top
found = self.tag_nextrange('found', 1.0)
if found:
self.do_highlight(found[0], found[1])
else:
# find all occurrences of string s;
# adapted from O'Reilly's Python in a Nutshell
# remove previous uses of tag 'found', if any
self.tag_remove('found', '1.0', Tkinter.END)
if s:
self._highlight = s
# start from the beginning (and when we come to the end, stop)
idx = '1.0'
while True:
# find next occurrence, exit loop if no more
idx = self.search(s, idx, nocase=1, stopindex=Tkinter.END)
if not idx:
break
|
Robpol86/Flask-Statics-Helper
|
flask_statics/helpers.py
|
Python
|
mit
| 1,406
| 0.004979
|
"""Helper functions for Flask-Statics."""
from flask_statics import resource_base
from flask_statics import resource_definitions
def priority(var):
"""Prioritizes resource position in the final HTML. To be fed into sorted(key=).
    JavaScript consoles throw errors if Bootstrap's js file is mentioned before jQuery. Using this function,
    such errors can be avoided. Used internally.
Positional arguments:
var -- value sent by list.sorted(), which is a value in Statics().all_variables.
Returns:
Either a number if sorting is enforced for the value in `var`, or returns `var` itself.
"""
order = dict(JQUERY='0', BOOTSTRAP='1')
return order.get(var, var)
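# Editor's note (illustrative): because JQUERY maps to '0' and BOOTSTRAP to '1' while every
# other name sorts as itself,
#     sorted(['BOOTSTRAP', 'ANGULAR', 'JQUERY'], key=priority)
# yields ['JQUERY', 'BOOTSTRAP', 'ANGULAR'], keeping jQuery ahead of Bootstrap's js file.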
def get_resources(minify=False):
"""Find all resources which subclass ResourceBase.
Keyword arguments:
minify -- select minified resources if available.
Returns:
Dictionary of available resources. Keys are resource names (part of the config variable names), values are dicts
with css and js keys, and tuples of resources as values.
"""
all_resources = dict()
subclasses = resource_base.ResourceBase.__subclasses__() + resource_definitions.ResourceAngular.__subclasses__()
for resource in subclasses:
obj = resource(minify)
all_resources[resource.RESOURCE_NAME] = dict(css=tuple(obj.resources_css), js=tuple(obj.resources_js))
return all_resources
|
tswicegood/seeder
|
seeder/tests.py
|
Python
|
gpl-3.0
| 11,995
| 0.009587
|
from django.test import TestCase as DjangoTestCase
from django.conf import settings
from seeder.models import *
from seeder.posters import TwitterPoster
from random import randint as random
from datetime import datetime
import time
import mox
import re
def generate_random_authorized_account():
    u = User(username = "foo" + str(random(10000, 99999)))
u.save()
return AuthorizedAccount.objects.create(user = u)
def generate_random_seeder(account = None):
if account is None:
account = generate_random_authorized_account()
return Seeder.objects.create(
twitter_id = random(1000, 9999),
authorized_for = account
)
def generate_random_token(seeder = None):
if seeder is None:
seeder = generate_random_seeder()
return Token.objects.create(
seeder = seeder,
oauth_token = "some token" + str(random(10, 100)),
oauth_token_secret = "some token secret" + str(random(10, 100))
)
def generate_random_update(account = None):
if account is None:
account = generate_random_authorized_account()
return Update.objects.create(
posted_by = account,
original_text = "Hello from Seeder!"
)
def generate_mock_poster(update):
poster = mox.MockObject(TwitterPoster)
poster.post(update)
mox.Replay(poster)
return poster
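# Editor's note: mox mocks follow a record/replay/verify cycle -- poster.post(update) above
# records the expected call, mox.Replay() switches the mock into replay mode, and
# mox.Verify(poster) in the tests below fails if SeededUpdate.send() never called post().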
class TestCase(DjangoTestCase):
def assertPubDateBetween(self, obj, begin, end):
self.assertTrue(obj.pub_date > begin and obj.pub_date < end)
def tearDown(self):
models = (AuthorizedAccount, Token, Seeder, Update, SeededUpdate,)
for model in models:
[obj.delete() for obj in model.objects.all()]
class TestOfSeededUpate(TestCase):
def test_has_a_future_timestamp(self):
foo = SeededUpdate.objects.create(
seeder = generate_random_seeder(),
update = generate_random_update()
)
self.assertTrue(datetime.now() < foo.pub_date)
def test_retrieves_updates_based_on_availability(self):
first = SeededUpdate.objects.create(
seeder = generate_random_seeder(),
update = generate_random_update(),
pub_date = datetime.now()
)
second = SeededUpdate.objects.create(
seeder = generate_random_seeder(),
update = generate_random_update(),
pub_date = datetime.fromtimestamp(time.time() + 1)
)
self.assertEqual(1, len(SeededUpdate.objects.currently_available()))
time.sleep(1.1)
self.assertEqual(2, len(SeededUpdate.objects.currently_available()))
    def test_retrieves_updates_that_have_not_been_sent(self):
first = SeededUpdate.objects.create(
seeder = generate_random_seeder(),
update = generate_random_update(),
pub_date = datetime.now()
)
second = SeededUpdate.objects.create(
seeder = generate_random_seeder(),
update = generate_random_update(),
pub_date = datetime.now()
)
self.assertEqual(2, len(SeededUpdate.objects.currently_available()))
        first.has_sent = 1
first.save()
self.assertEqual(1, len(SeededUpdate.objects.currently_available()))
def test_send_calls_on_poster(self):
update = SeededUpdate.objects.create(
seeder = generate_random_seeder(),
update = generate_random_update()
)
poster = generate_mock_poster(update)
update.send(poster)
mox.Verify(poster)
def test_send_marks_updates_as_sent(self):
update = SeededUpdate.objects.create(
seeder = generate_random_seeder(),
update = generate_random_update(),
pub_date = datetime.now()
)
self.assertEqual(len(SeededUpdate.objects.currently_available()), 1,
"sanity check to ensure value seeded update is present")
update.send(generate_mock_poster(update))
self.assertEqual(len(SeededUpdate.objects.currently_available()), 0,
"SeededUpdate should not be available after being sent")
class TestOfUpdate(TestCase):
def test_creates_seeded_updates_on_save(self):
# sanity check
self.assertEqual(0, len(SeededUpdate.objects.all()))
a = generate_random_authorized_account()
[generate_random_seeder(a) for i in range(10)]
update = Update.objects.create(
posted_by = a,
original_text = "Hello from Seeder!"
)
self.assertEqual(10, len(SeededUpdate.objects.all()))
def test_all_seeded_updates_have_pub_dates_between_1_and_30_minutes(self):
a = generate_random_authorized_account()
generate_random_seeder(a)
update = Update.objects.create(
posted_by = a,
original_text = "Hello from Seeder!"
)
seeded_update = SeededUpdate.objects.get(update = update)
# only uses 59 seconds to avoid possible race condition where
# more than a second elapses between creation and the time this
# test runs
begin_datetime = datetime.fromtimestamp(time.time() + 59)
end_datetime = datetime.fromtimestamp(time.time() + (60 * 30) + 1)
self.assertPubDateBetween(seeded_update, begin_datetime, end_datetime)
def test_only_creates_new_seeded_updates_on_new(self):
a = generate_random_authorized_account()
generate_random_seeder(a)
update = generate_random_update(a)
self.assertEqual(len(SeededUpdate.objects.all()), 1,
"Sanity check")
update.save()
self.assertEqual(len(SeededUpdate.objects.all()), 1,
"Should only create SeededUpdates on save when new")
def test_only_creates_for_non_expired_seeders(self):
a = generate_random_authorized_account()
s1 = generate_random_seeder(a)
s2 = generate_random_seeder(a)
s2.set_expires_on_in_days(-1)
s2.save()
update = generate_random_update(a)
self.assertEquals(len(SeededUpdate.objects.all()), 1,
"should only create one SeededUpdate since on has expired")
class TestOfAuthorizedAccount(TestCase):
def test_default_account_returns_default_account(self):
a = generate_random_authorized_account()
a.twitter_id = settings.SEEDER['default_twitter_id']
a.save()
default_account = AuthorizedAccount.objects.default_account()
self.assertEqual(settings.SEEDER['default_twitter_id'], default_account.twitter_id)
def test_only_pulls_seeders_that_have_not_expired(self):
a = generate_random_authorized_account()
s = generate_random_seeder(a)
self.assertEquals(len(a.seeder_set.currently_available()), 1,
"sanity check: seeder_set.currently_available() should be one")
s.expires_on = datetime.fromtimestamp(time.time() - 60)
s.save()
self.assertEquals(len(a.seeder_set.currently_available()), 0,
"seeder_set.currently_available() should have no seeders")
class TestOfSeeder(TestCase):
def test_automatically_expires_in_30_days(self):
seeder = generate_random_seeder()
expected_expires_on = datetime.fromtimestamp(time.time() + 60*60*24*30).date()
self.assertEquals(seeder.expires_on.date(), expected_expires_on,
"seeder.expires_on should default to 30 days")
def test_can_set_by_expires_by_day(self):
seeder = generate_random_seeder()
seeder.set_expires_on_in_days(7)
self.assertEquals(seeder.expires_on.date(), datetime.fromtimestamp(time.time() + 60*60*24*7).date(),
"seeder.expires_on should be 7 days in the future")
def test_can_take_a_string_as_parameter(self):
seeder = generate_random_seeder()
try:
seeder.set_expires_on_in_days("7")
except TypeError:
self.fail("seeder.set_expires_on_in_days() unable to handle a string")
def generate_mock_settings():
return mox.MockObject(settings)
class StubTwitterApi(object):
number_of_calls = 0
calls = []
def __init__(self, *args, **kwargs):
|
rvanlaar/easy-transifex
|
src/transifex/transifex/teams/tests/models.py
|
Python
|
bsd-2-clause
| 1,066
| 0.005629
|
from transifex.projects.models import Project
from transifex.teams.models import Team
from transifex.txcommon.tests import base
class TestTeamModels(base.BaseTestCase):
def test_available_teams(self):
"""
Test whether monkey-patch of Project class with a 'available_teams'
instance method returns the desired result.
"""
# There must be only 1 'pt_BR' team
self.assertEquals(self.project.available_teams.count(), 1)
# Create a new 'ar' team for self.project
team = Team.objects.get_or_create(language=self.language_ar,
project=self.project, creator=self.user['maintainer'])[0]
# Create a secondary project and set it to outsource access to self.project
project = Project.objects.get_or_create(slug="foo",
defaults={'name':"Foo Project"},
source_language=self.language_en)[0]
project.outsource = self.project
# There must be 2 teams. One 'pt_BR' and a 'ar' one.
self.assertEquals(project.available_teams.count(), 2)
|
tomaszkax86/Colobot-Model-Converter
|
colobotformat.py
|
Python
|
bsd-2-clause
| 11,044
| 0.006249
|
#-*- coding: utf-8 -*-
# Implements Colobot model formats
# Copyright (c) 2014 Tomasz Kapuściński
import modelformat
import geometry
import struct
class ColobotNewTextFormat(modelformat.ModelFormat):
def __init__(self):
self.description = 'Colobot New Text format'
def get_extension(self):
return 'txt'
def read(self, filename, model, params):
input_file = open(filename, 'r')
triangle = geometry.Triangle()
materials = []
while True:
line = input_file.readline()
# eof
if len(line) == 0:
break
# comments are ignored
if line[0] == '#':
continue
# remove eol
if line[len(line)-1] == '\n':
line = line[:len(line)-1]
            values = line.split(' ')
cmd = values[0]
if cmd == 'version':
model.version = int(values[1])
elif cmd == 'triangles':
continue
elif cmd == 'p1':
triangle.vertices[0] = parse_vertex(values)
elif cmd == 'p2':
triangle.vertices[1] = parse_vertex(values)
elif cmd == 'p3':
triangle.vertices[2] = parse_vertex(values)
elif cmd == 'mat':
triangle.material = parse_material(values)
elif cmd == 'tex1':
triangle.material.texture = values[1]
elif cmd == 'tex2':
triangle.material.texture2 = values[1]
elif cmd == 'var_tex2':
continue
elif cmd == 'lod_level':
triangle.material.lod = int(values[1])
elif cmd == 'state':
triangle.material.state = int(values[1])
mat_final = None
for mat in materials:
if triangle.material == mat:
mat_final = mat
if mat_final is None:
mat_final = triangle.material
materials.append(mat_final)
triangle.material = mat_final
model.triangles.append(triangle)
triangle = geometry.Triangle()
input_file.close()
return True
def write(self, filename, model, params):
output_file = open(filename, 'w')
version = 2
if 'version' in params:
version = int(params['version'])
# write header
output_file.write('# Colobot text model\n')
output_file.write('\n')
output_file.write('### HEAD\n')
output_file.write('version ' + str(version) + '\n')
output_file.write('total_triangles ' + str(len(model.triangles)) + '\n')
output_file.write('\n')
output_file.write('### TRIANGLES\n')
# write triangles
for triangle in model.triangles:
# write vertices
for i in range(3):
vertex = triangle.vertices[i]
output_file.write('p{} c {} {} {}'.format(i+1, vertex.x, vertex.y, vertex.z))
output_file.write(' n {} {} {}'.format(vertex.nx, vertex.ny, vertex.nz))
output_file.write(' t1 {} {}'.format(vertex.u1, vertex.v1))
output_file.write(' t2 {} {}\n'.format(vertex.u2, vertex.v2))
mat = triangle.material
dirt = 'N'
dirt_texture = ''
if 'dirt' in params:
dirt = 'Y'
dirt_texture = params['dirt']
output_file.write('mat dif {} {} {} {}'.format(mat.diffuse[0], mat.diffuse[1], mat.diffuse[2], mat.diffuse[3]))
output_file.write(' amb {} {} {} {}'.format(mat.ambient[0], mat.ambient[1], mat.ambient[2], mat.ambient[3]))
output_file.write(' spc {} {} {} {}\n'.format(mat.specular[0], mat.specular[1], mat.specular[2], mat.specular[3]))
output_file.write('tex1 {}\n'.format(mat.texture))
output_file.write('tex2 {}\n'.format(dirt_texture))
output_file.write('var_tex2 {}\n'.format(dirt))
if version == 1:
output_file.write('lod_level 0\n')
output_file.write('state ' + str(mat.state) + '\n')
output_file.write('\n')
output_file.close()
return True
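# Editor's note: parse_vertex() and parse_material(), called by read() above, are defined
# elsewhere in this module and are not part of this excerpt. A minimal sketch of a vertex
# parser, assuming the token layout emitted by write() above
# ('pN c x y z n nx ny nz t1 u v t2 u v'); the geometry.Vertex name is an assumption:
def _parse_vertex_sketch(values):
    vertex = geometry.Vertex()  # assumed constructor; the real helper may differ
    vertex.x, vertex.y, vertex.z = [float(v) for v in values[2:5]]
    vertex.nx, vertex.ny, vertex.nz = [float(v) for v in values[6:9]]
    vertex.u1, vertex.v1 = float(values[10]), float(values[11])
    vertex.u2, vertex.v2 = float(values[13]), float(values[14])
    return vertex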
class ColobotOldFormat(modelformat.ModelFormat):
def __init__(self):
self.description = 'Colobot Old Binary format'
def get_extension(self):
return 'mod'
def read(self, filename, model, params):
input_file = open(filename, 'rb')
# read header
version_major = struct.unpack('=i', input_file.read(4))[0]
version_minor = struct.unpack('=i', input_file.read(4))[0]
triangle_count = struct.unpack('=i', input_file.read(4))[0]
if version_major != 1 or version_minor != 2:
print('Unsupported format version: {}.{}'.format(version_major, version_minor))
return False
# read and ignore padding
input_file.read(40)
materials = []
for index in range(triangle_count):
triangle = geometry.Triangle()
# used, selected, 2 byte padding
input_file.read(4)
for vertex in triangle.vertices:
# position, normal, uvs
floats = struct.unpack('=ffffffffff', input_file.read(40))
vertex.x = floats[0]
vertex.y = floats[1]
vertex.z = floats[2]
vertex.nx = floats[3]
vertex.ny = floats[4]
vertex.nz = floats[5]
vertex.u1 = floats[6]
vertex.v1 = floats[7]
vertex.u2 = floats[8]
vertex.v2 = floats[9]
# material colors
floats = struct.unpack('=fffffffffffffffff', input_file.read(17 * 4))
mat = triangle.material
for i in range(4):
mat.diffuse[i] = floats[0 + i]
mat.ambient[i] = floats[4 + i]
mat.specular[i] = floats[8 + i]
# texture name
chars = input_file.read(20)
for i in range(20):
if chars[i] == '\0':
mat.texture = struct.unpack('={}s'.format(i), chars[:i])[0]
break
values = struct.unpack('=ffiHHHH', input_file.read(20))
mat.state = values[2]
dirt = values[3]
if dirt != 0:
mat.texture2 = 'dirty{:02d}.png'.format(dirt)
# optimizing materials
replaced = False
for material in materials:
if mat == material:
triangle.material = material
replaced = True
break
if not replaced:
materials.append(mat)
model.triangles.append(triangle)
# end of triangle
input_file.close()
return True
def write(self, filename, model, params):
output_file = open(filename, 'wb')
# write header
output_file.write(struct.pack('i', 1)) # version major
output_file.write(struct.pack('i', 2)) # version minor
output_file.write(struct.pack('i', len(model.triangles))) # total triangles
# padding
for x in range(10):
output_file.write(struct.pack('i', 0))
# triangles
for triangle in model.triangles:
output_file.write(struct.pack('=B', True)) # used
output_file.write(struct.pack('=B', False)) # selected ?
output_file.write(struct.pack('=H', 0)) # padding (2 bytes)
# write vertices
for vertex in triangle.vertices:
output_file.write(struct.pack('=fff', vertex.x, vertex.y, vertex.z)) # verte
|
gjlawran/ckanext-bcgov
|
ckanext/bcgov/scripts/save_orgs.py
|
Python
|
agpl-3.0
| 1,656
| 0.010266
|
# Copyright 2015, Province of British Columbia
# License: https://github.com/bcgov/ckanext-bcgov/blob/master/license
import json
import urllib2
import urllib
import pprint
from base import (site_url, api_key)
org_filename = './data/orgs_list.json'
data_string = json.dumps({'all_fields' : True})
org_list = []
try :
request = urllib2.Request(site_url + '/api/3/action/organization_list')
request.add_header('Authorization', api_key)
response = urllib2.urlopen(request, data_string)
assert response.code == 200
response_dict = json.loads(response.read())
assert response_dict['success'] is True
org_list = response_dict['result']
# pprint.pprint(user_list)
except Exception, e:
pass
#Create a dictionary of org_name : org_id
#We need this dictionary to get the id of each org when creating organizations
orgs_dict = {}
for org in org_list :
members = []
data_dict = {'id' : org['id'], 'object_type' : 'user'}
data_string = urllib.quote(json.dumps(data_dict))
try :
request = urllib2.Request(site_url + '/api/3/action/member_list')
request.add_header('Authorization', api_key)
response = urllib2.urlopen(request, data_string)
assert response.code == 200
response_dict = json.loads(response.read())
assert response_dict['success'] is True
members = response_dict['result']
        # pprint.pprint(user_list)
except Exception, e:
pass
org_dict = {'id' : org['id'], 'members' : members}
orgs_dict[org['name']] = org_dict
with open(org_filename, 'w') as org_file:
org_file.write(json.dumps(orgs_dict))
|
peckhams/topoflow
|
topoflow/gui/Input_Dialog.py
|
Python
|
mit
| 21,814
| 0.011094
|
# Add ability to pass TF_Input_Dialog a data structure
# instead of an XML filename. XML file is only for
# initial creation, but for updates or showing user-
# modified settings, need this other option.
#---------------------------------------------------------
#!/usr/bin/env python
# August 8 & 11, 2008
# February 10, 2009
# April 21-28, 2009
# S.D. Peckham
import wx
import xml.dom.minidom
import time
import webbrowser ## standard Python module
## import wx.html
import glob
from TF_Input_Var_Box import * ################
# Check these out later.
# import urllib
# import xmllib (deprecated: use xml.sax instead)
# import htmllib
#-------------------------------------------------------------
# class TF_In_Variable_Info
# class TF_In_Variable_List
# class TF_Process_Info
# class TF_Input_Dialog
# In_Variable_Notebook
# In_Variable_Wizard #### (not ready yet)
# Timestep_Box
# Button_Box
# On_Type_Choice
# On_OK
# On_Help
# On_Cancel
# Read_XML_Info
#----------------------------------------------------------------
class TF_In_Variable_Info():
def __init__(self, name='', label='', vtype='Scalar',
value='', units='', type_choices=None):
self.name = name
self.label = label
self.type = vtype
self.value = value
self.units = units
if (type_choices == None):
self.type_choices = ['Scalar', 'Time series', \
'Grid', 'Grid sequence']
# __init__()
#----------------------------------------------------------------
class TF_In_Variable_List():
def __init__(self, n_variables=1):
#-----------------
# First approach
#-----------------
## self.variables = []
## for each in range(n_variables):
## self.variables.append( TF_Variable_Info() )
#------------------------------------------
# Alternate approach with some advantages
#------------------------------------------
self.var_names = []
self.var_labels = []
self.var_types = []
self.var_values = []
self.var_units = []
self.var_type_choices = []
# __init__()
#----------------------------------------------------------------
class TF_Process_Info():
def __init__(self, name='Infiltration',
n_layers=1, n_variables=1):
#----------------------------------------
# Each layer has a list of TF variables
#----------------------------------------
self.n_layers = n_layers
self.n_variables = n_variables # (per layer)
self.layers = []
for each in range(n_layers):
self.layers.append( TF_In_Variable_List(n_variables=n_variables) )
self.timestep = TF_In_Variable_Info()
self.help_file = ''
# __init__()
#-------------------------------------------------------------
class TF_Input_Dialog(wx.Frame):
#-----------------------------------------------------
# Notes: This class is for creating TopoFlow input
# dialogs, which all use a similar template.
# Initial settings, labels, etc. are read
# from the XML file provided
# Default is wx.ALIGN_LEFT for labels & text
#-----------------------------------------------------
def __init__(self, parent=None, id=-1,
main_frame=None,
xml_file="infil_richards_1D.xml"):
#-----------------------
# Try to find XML file
#-----------------------
files = glob.glob(xml_file)
if (len(files) == 0):
msg = "Can't find the XML file:\n\n"
msg += (xml_file + "\n")
dialog = wx.MessageBox(msg, caption='SORRY,')
return
#--------------------------------------------
# Set XML file to read info from, including
# the name of the HTML help file
#--------------------------------------------
self.xml_file = xml_file
self.Read_XML_Info()
#----------------------------------
# Get title for this input dialog
#----------------------------------
title = self.proc_info.proc_name + " : " + \
self.proc_info.method_name + \
" Input Dialog"
#-------------------------------------------
# Initialize a wxPython frame, add a panel
#-------------------------------------------
wx.Frame.__init__(self, parent, id, title)
panel = wx.Panel(self, -1)
sizer = wx.BoxSizer(wx.VERTICAL)
panel.SetBackgroundColour('Light Blue')
self.panel = panel
self.main_sizer = sizer
#--------------------------------------------------
# Saving main_frame allows collected values to be
# stored in its state before this one closes.
#--------------------------------------------------
        self.main_frame = main_frame
self.parent = parent
self.title = title
self.vgap = 10
self.hgap = 6
#-------------------------------------------------
# Later move these into Variable_Box() or remove
        #-------------------------------------------------
self.type_code = {'Scalar':0, 'Time series':1, \
'Grid':2, 'Grid sequence':3}
self.type_name = {0:'Scalar', 1:'Time series', \
2:'Grid', 3:'Grid sequence'}
#-----------------------------------------
# Create objects to appear in the dialog
#-----------------------------------------
if (self.proc_info.n_layers == 1):
data = self.proc_info.layers[0]
var_box = TF_Input_Var_Box(parent=self.panel,
main_frame=self.main_frame,
data=data)
else:
var_box = self.In_Variable_Notebook()
## var_box = self.In_Variable_Wizard()
ADD_TIME_BOX = (self.proc_info.timestep.value != '')
if (ADD_TIME_BOX):
time_box = self.Timestep_Box()
button_bar = self.Button_Box()
#--------------------------------
# Add objects to the main sizer
#--------------------------------
border1 = self.vgap
border2 = 2 * self.vgap
sizer.Add(var_box, 0, wx.ALL, border1)
if (ADD_TIME_BOX):
sizer.Add(time_box, 0, wx.ALL, border1)
sizer.Add(button_bar, 0, wx.ALL, border2)
panel.SetSizer(sizer)
sizer.Fit(self)
self.Show() # (need here if called)
self.Centre()
## sizer.SetSizeHints(self) # (not needed)
#--------------------------------------------
# Doesn't work for some reason (see p. 328)
#--------------------------------------------
# self.SetSizer(sizer)
# self.Fit()
# __init__()
#----------------------------------------------------------------
def In_Variable_Notebook(self):
notebook = wx.Notebook(self.panel, style=wx.BK_TOP)
k = 0
n_layers = self.proc_info.n_layers
for k in range(n_layers):
data = self.proc_info.layers[k]
kstr = str(k+1)
label = "Layer " + kstr + " variables"
page = TF_Input_Var_Box(parent=notebook, \
data=data, \
box_label=label)
notebook.AddPage(page, "Layer " + kstr)
return notebook
# In_Variable_Notebook()
#----------------------------------------------------------------
def In_Variable_Wizard(self):
pass
# In_Variable_Wizard()
#----------------------------------------------------------------
def Timestep_Box(self):
#---------------------------------------------
# Create
|
ctu-osgeorel-proj/bp-pesek-2016
|
src/position_correction.py
|
Python
|
gpl-2.0
| 7,629
| 0.003016
|
# -*- coding: utf-8 -*-
"""
/***************************************************************************
SuroLeveling
A QGIS plugin
todo
-------------------
begin : 2016-02-12
git sha : $Format:%H$
copyright : (C) 2016 by Ondřej Pešek
email : ondra.lobo@seznam.cz
***************************************************************************/
/***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************/
"""
from PyQt4.QtCore import QSettings, QTranslator, qVersion, QCoreApplication, Qt
from PyQt4.QtGui import QAction, QIcon
# Initialize Qt resources from file resources.py
import resources
# Import the code for the DockWidget
#from suro_leveling_dockwidget import PositionCorrectionDockWidget#SuroLevelingDockWidget
from position_correction_dockwidget import PositionCorrectionDockWidget
import os.path
class PositionCorrection:#SuroLeveling:
"""QGIS Plugin Implementation."""
def __init__(self, iface):
"""Constructor.
:param iface: An interface instance that will be passed to this class
which provides the hook by which you can manipulate the QGIS
application at run time.
:type iface: QgsInterface
"""
# Save reference to the QGIS interface
self.iface = iface
# initialize plugin directory
self.plugin_dir = os.path.dirname(__file__)
# initialize locale
locale = QSettings().value('locale/userLocale')[0:2]
locale_path = os.path.join(
self.plugin_dir,
'i18n',
'SuroLeveling_{}.qm'.format(locale))
if os.path.exists(locale_path):
self.translator = QTranslator()
self.translator.load(locale_path)
if qVersion() > '4.3.3':
QCoreApplication.installTranslator(self.translator)
# Declare instance attributes
self.actions = []
self.menu = self.tr(u'&GPS position lag correction')
#print "** INITIALIZING SuroLeveling"
self.pluginIsActive = False
self.dockwidget = None
# noinspection PyMethodMayBeStatic
def tr(self, message):
"""Get the translation for a string using Qt translation API.
We implement this ourselves since we do not inherit QObject.
:param message: String for translation.
:type message: str, QString
:returns: Translated version of message.
:rtype: QString
"""
# noinspection PyTypeChecker,PyArgumentList,PyCallByClass
return QCoreApplication.translate('GPS position lag correction', message)
def add_action(
self,
icon_path,
text,
callback,
enabled_flag=True,
add_to_menu=True,
status_tip=None,
whats_this=None,
parent=None):
"""Add a toolbar icon to the toolbar.
:param icon_path: Path to the icon for this action. Can be a resource
path (e.g. ':/plugins/foo/bar.png') or a normal file system path.
:type icon_path: str
:param text: Text that should be shown in menu items for this action.
:type text: str
:param callback: Function to be called when the action is triggered.
:type callback: function
:param enabled_flag: A flag indicating if the action should be enabled
by default. Defaults to True.
:type enabled_flag: bool
:param add_to_menu: Flag indicating whether the action should also
be added to the menu. Defaults to True.
:type add_to_menu: bool
:param status_tip: Optional text to show in a popup when mouse pointer
hovers over the action.
:type status_tip: str
:param parent: Parent widget for the new action. Defaults None.
:type parent: QWidget
:param whats_this: Optional text to show in the status bar when the
mouse pointer hovers over the action.
:returns: The action that was created. Note that the action is also
added to self.actions list.
:rtype: QAction
"""
icon = QIcon(icon_path)
action = QAction(icon, text, parent)
action.triggered.connect(callback)
action.setEnabled(enabled_flag)
if status_tip is not None:
action.setStatusTip(status_tip)
if whats_this is not None:
action.setWhatsThis(whats_this)
if add_to_menu:
self.iface.addPluginToMenu(
self.menu,
action)
self.actions.append(action)
self.iface.addToolBarIcon(action)
return action
def initGui(self):
"""Create the menu entries and toolbar icons inside the QGIS GUI."""
icon_path = ':/plugins/SuroLeveling/icon.png'
self.add_action(
icon_path,
text=self.tr(u'GPS position lag correction'),
callback=self.run,
            parent=self.iface.mainWindow())
#--------------------------------------------------------------------------
    #def onClosePlugin(self):  # commented out to allow the plugin to be opened a second time
# """Cleanup necessary items here when plugin dockwidget is closed"""
    #    print "** CLOSING SuroLeveling"
# disconnects
# self.dockwidget.closingPlugin.disconnect(self.onClosePlugin)
# remove this statement if dockwidget is to remain
# for reuse if plugin is reopened
        # Commented next statement since it causes QGIS crashes
# when closing the docked window:
# self.dockwidget = None
# self.pluginIsActive = False
def unload(self):
"""Removes the plugin menu item and icon from QGIS GUI."""
#print "** UNLOAD SuroLeveling"
for action in self.actions:
self.iface.removePluginMenu(
self.tr(u'&GPS position lag correction'),
action)
self.iface.removeToolBarIcon(action)
#--------------------------------------------------------------------------
def run(self):
"""Run method that loads and starts the plugin"""
if not self.pluginIsActive:
self.pluginIsActive = True
#print "** STARTING SuroLeveling"
# dockwidget may not exist if:
# first run of plugin
# removed on close (see self.onClosePlugin method)
if self.dockwidget == None:
# Create the dockwidget (after translation) and keep reference
self.dockwidget = PositionCorrectionDockWidget()#SuroLevelingDockWidget()
# connect to provide cleanup on closing of dockwidget
                # self.dockwidget.closingPlugin.connect(self.onClosePlugin)  # commented out to allow the plugin to be opened a second time
# show the dockwidget
self.iface.addDockWidget(Qt.LeftDockWidgetArea, self.dockwidget)
self.dockwidget.show()
self.pluginIsActive = False
|
HanWenfang/syncless
|
test/wsgi_test.py
|
Python
|
apache-2.0
| 9,998
| 0.005501
|
#! /usr/local/bin/stackless2.6
# by pts@fazekas.hu at Sat Apr 24 00:25:31 CEST 2010
import errno
import logging
import socket
import sys
import unittest
from syncless import coio
from syncless import wsgi
def TestApplication(env, start_response):
if env['PATH_INFO'] == '/answer':
start_response('200 OK', [('Content-Type', 'text/plain')])
return ('Forty-two.',)
if env['PATH_INFO'] == '/':
start_response('200 OK', [('Content-Type', 'text/html')])
return ['Hello, World!']
if env['PATH_INFO'] == '/save':
start_response('200 OK', [('Content-Type', 'text/plain')])
return [env['wsgi.input'].readline().upper()]
if env['PATH_INFO'] == 'policy-file':
start_response('200 OK', [('Content-Type', 'text/plain')])
return 'I hope you like our policy.'
  # A run-time error is caught by the wsgi module if this is reached.
TEST_DATE = wsgi.GetHttpDate(1234567890) # 2009-02-13
def CallWsgiWorker(accepted_socket, do_multirequest=True):
env = {}
wsgi.PopulateDefaultWsgiEnv(env, ('127.0.0.1', 80))
peer_name = ('127.0.0.1', 2)
wsgi.WsgiWorker(accepted_socket, peer_name, TestApplication, env, TEST_DATE,
do_multirequest, None)
def ParseHttpResponse(data):
head = 'Status: '
i = data.find('\n\n')
j = data.find('\n\r\n')
if i >= 0 and i < j:
head += data[:i]
body = data[i + 2:]
elif j >= 0:
head += data[:j]
body = data[j + 3:]
else:
raise ValueError('missing HTTP response headers: %r' % data)
# TODO(pts): Don't parse line continuations.
head = dict(line.split(': ', 1) for line in
head.rstrip('\r').replace('\r\n', '\n').split('\n'))
return head, body
def SplitHttpResponses(data):
"""Split a string containing multiple HTTP responses.
Returns:
List of strings (individual HTTP responses).
"""
return ['HTTP/1.' + item for item in data.split('HTTP/1.')[1:]]
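# Editor's note (illustrative): SplitHttpResponses('HTTP/1.1 200 OK\r\n...HTTP/1.0 200 OK\r\n...')
# returns ['HTTP/1.1 200 OK\r\n...', 'HTTP/1.0 200 OK\r\n...'], one string per pipelined response.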
class WsgiTest(unittest.TestCase):
# TODO(pts): Write more tests, especially for error responses.
# TODO(pts): Test HEAD requests.
def testDate(self):
self.assertEqual('Fri, 13 Feb 2009 23:31:30 GMT', TEST_DATE)
def AssertHelloResponse(self, head, body, http_version='1.0'):
self.assertEqual('Hello, World!', body)
self.assertEqual('HTTP/%s 200 OK' % http_version, head['Status'])
self.assertEqual('13', head['Content-Length'])
self.assertTrue('syncless' in head['Server'].lower(), head['Server'])
self.assertEqual(TEST_DATE, head['Date'])
self.assertEqual('text/html', head['Content-Type'])
def AssertAnswerResponse(self, head, body, http_version='1.0',
is_new_date=False):
self.assertEqual('Forty-two.', body)
self.assertEqual('HTTP/%s 200 OK' % http_version, head['Status'])
self.assertEqual('10', head['Content-Length'])
self.assertTrue('syncless' in head['Server'].lower(), head['Server'])
if is_new_date:
self.assertNotEqual(TEST_DATE, head['Date'])
self.assertTrue(head['Date'].endswith(' GMT'), head['Date'])
else:
self.assertEqual(TEST_DATE, head['Date'])
self.assertEqual('text/plain', head['Content-Type'])
def AssertSaveResponse(self, head, body, http_version='1.0',
is_new_date=False, msg='FOO\n'):
self.assertEqual(msg, body)
self.assertEqual('HTTP/%s 200 OK' % http_version, head['Status'])
self.assertEqual(str(len(msg)), head['Content-Length'])
self.assertTrue('syncless' in head['Server'].lower(), head['Server'])
if is_new_date:
self.assertNotEqual(TEST_DATE, head['Date'])
self.assertTrue(head['Date'].endswith(' GMT'), head['Date'])
else:
self.assertEqual(TEST_DATE, head['Date'])
self.assertEqual('text/plain', head['Content-Type'])
def testSingleRequestWithoutCr(self):
a, b = coio.socketpair(socket.AF_UNIX, socket.SOCK_STREAM)
b.sendall('GET / HTTP/1.0\n\n')
b.shutdown(1)
CallWsgiWorker(a)
head, body = ParseHttpResponse(b.recv(8192))
self.assertEqual('close', head['Connection'])
self.AssertHelloResponse(head, body)
def testSingleGetRequest(self):
a, b = coio.socketpair(socket.AF_UNIX, socket.SOCK_STREAM)
b.sendall('GET / HTTP/1.0\r\n\r\n')
b.shutdown(1)
CallWsgiWorker(a)
head, body = ParseHttpResponse(b.recv(8192))
self.assertEqual('close', head['Connection'])
self.AssertHelloResponse(head, body)
def testSingleTooLongRequest(self):
a, b = coio.socketpair(socket.AF_UNIX, socket.SOCK_STREAM)
    max_size = min(coio.max_nonblocking_pipe_write_size, 33333)
request = 'GET / HTTP/1.0\r\n'
request += 'Xy: Z\r\n' * ((max_size - len(request)) / 7)
assert len(request) <= max_size
b.sendall(request)
b.shutdown(1)
CallWsgiWorker(a)
try:
response = b.recv(8192)
except IOError, e:
if e.errno != errno.ECONNRESET:
raise
      # Some Linux 2.6.32 systems raise ECONNRESET if the peer is very fast
# to close the connection. The exact reasons why we get ECONNRESET
# instead of just an EOF sometimes is unclear to me.
response = ''
self.assertEqual('', response)
def testSinglePolicyFileRequest(self):
a, b = coio.socketpair(socket.AF_UNIX, socket.SOCK_STREAM)
b.sendall('<policy-file-request/>\0foobar')
b.shutdown(1)
CallWsgiWorker(a)
response = b.recv(8192)
self.assertEqual('I hope you like our policy.', response)
def testSinglePostRequest(self):
a, b = coio.socketpair(socket.AF_UNIX, socket.SOCK_STREAM)
b.sendall('POST /save HTTP/1.0\r\nContent-Length: 7\r\n'
'X-1: Z\r\n\r\nfoo\nbar')
b.shutdown(1)
CallWsgiWorker(a)
head, body = ParseHttpResponse(b.recv(8192))
self.assertEqual('close', head['Connection'])
self.AssertSaveResponse(head, body)
def testContinuableHTTP10Request(self):
a, b = coio.socketpair(socket.AF_UNIX, socket.SOCK_STREAM)
b.sendall('GET / HTTP/1.0\r\nConnection: keep-alive\r\n\r\n')
b.shutdown(1)
CallWsgiWorker(a)
head, body = ParseHttpResponse(b.recv(8192))
self.assertEqual('Keep-Alive', head['Connection'])
self.AssertHelloResponse(head, body)
def testContinuableHTTP11Request(self):
a, b = coio.socketpair(socket.AF_UNIX, socket.SOCK_STREAM)
b.sendall('GET /?foo=bar HTTP/1.1\r\n\r\n')
b.shutdown(1)
CallWsgiWorker(a)
head, body = ParseHttpResponse(b.recv(8192))
self.assertEqual('Keep-Alive', head['Connection'])
self.AssertHelloResponse(head, body, http_version='1.1')
def testTwoSequentialHTTP11GetFirstRequests(self):
a, b = coio.socketpair(socket.AF_UNIX, socket.SOCK_STREAM)
b.sendall('GET / HTTP/1.1\r\n\r\n')
CallWsgiWorker(a, do_multirequest=False)
head, body = ParseHttpResponse(b.recv(8192))
self.assertEqual('Keep-Alive', head['Connection'])
self.AssertHelloResponse(head, body, http_version='1.1')
b.sendall('GET /answer?foo=bar HTTP/1.1\r\n\r\n')
b.shutdown(1)
CallWsgiWorker(a)
head, body = ParseHttpResponse(b.recv(8192))
self.assertEqual('Keep-Alive', head['Connection'])
self.AssertAnswerResponse(head, body, http_version='1.1')
def testTwoSequentialHTTP11PostFirstRequests(self):
a, b = coio.socketpair(socket.AF_UNIX, socket.SOCK_STREAM)
b.sendall('POST /save HTTP/1.1\r\nContent-Length: 7\r\n\r\nfoo\nbar')
CallWsgiWorker(a, do_multirequest=False)
head, body = ParseHttpResponse(b.recv(8192))
self.assertEqual('Keep-Alive', head['Connection'])
self.AssertSaveResponse(head, body, http_version='1.1')
b.sendall('GET /answer?foo=bar HTTP/1.1\r\n\r\n')
b.shutdown(1)
CallWsgiWorker(a)
head, body = ParseHttpResponse(b.recv(8192))
self.assertEqual('Keep-Alive', head['Connection'])
self.AssertAnswerResponse(head, body, http_version='1.1')
def testThreePipelinedHTTP11GetRequests(self):
a, b = coio.socketpair(socket.AF_UNIX, socket.SOCK_STREAM)
b.sendall('GET / HTTP/1.1\r\n\r\n'
'GET /answer?foo=x+y&bar= HTTP/1.0\r\n\r\n'
'GET /unreached... HTTP/1.1\r\n\r\n')
CallWsgiWorker(a)
responses = SplitHttpResponses(b.recv(8192))
# The WsgiWorker doesn't respond to request 2 (/unr
|
Locu/djredis
|
djredis/__init__.py
|
Python
|
mit
| 38
| 0
|
# coding: utf-8
__version__ = '0.1.0'
| |
doyubkim/fluid-engine-dev
|
src/tests/python_tests/test_cell_centered_scalar_grid.py
|
Python
|
mit
| 12,197
| 0
|
"""
Copyright (c) 2018 Doyub Kim
I am making my contributions/submissions to this project solely in my personal
capacity and am not conveying any rights to any intellectual property of any
third parties.
"""
import numpy as np
import pyjet
from pytest import approx
from pytest_utils import *
cnt = 0
def test_grid2():
global cnt
a = pyjet.CellCenteredScalarGrid2(resolution=(3, 4),
gridSpacing=(1, 2),
gridOrigin=(7, 5))
assert a.resolution == (3, 4)
assert_vector_similar(a.origin, (7, 5))
assert_vector_similar(a.gridSpacing, (1, 2))
assert_bounding_box_similar(
a.boundingBox, pyjet.BoundingBox2D((7, 5), (10, 13)))
f = a.cellCenterPosition
assert_vector_similar(f(0, 0), (7.5, 6))
b = pyjet.CellCenteredScalarGrid2(resolution=(3, 4),
gridSpacing=(1, 2),
gridOrigin=(7, 5))
assert a.hasSameShape(b)
def func(i, j):
global cnt
assert i >= 0 and i < 3
assert j >= 0 and j < 4
cnt += 1
cnt = 0
a.forEachCellIndex(func)
assert cnt == 12
def test_scalar_grid2():
global cnt
a = pyjet.CellCenteredScalarGrid2(resolution=(3, 4),
gridSpacing=(1, 2),
gridOrigin=(7, 5))
a.resize(resolution=(12, 7),
gridSpacing=(3, 4),
gridOrigin=(9, 2))
assert a.resolution == (12, 7)
assert_vector_similar(a.origin, (9, 2))
assert_vector_similar(a.gridSpacing, (3, 4))
for j in range(a.resolution.y):
for i in range(a.resolution.x):
assert a[i, j] == 0.0
a[5, 6] = 17.0
assert a[5, 6] == 17.0
a.fill(42.0)
for j in range(a.resolution.y):
for i in range(a.resolution.x):
assert a[i, j] == 42.0
def func(pt):
return pt.x ** 2 + pt.y ** 2
a.fill(func)
pos = a.dataPosition()
acc = np.array(a.dataAccessor(), copy=False)
for j in range(a.resolution.y):
for i in range(a.resolution.x):
pt = pos(i, j)
assert func(pt) == a[i, j]
assert func(pt) == approx(a.sample(pt))
assert acc[j, i] == a[i, j]
# Can't compare to analytic solution because FDM with such a coarse
# grid will return inaccurate results by design.
assert_vector_similar(a.gradientAtDataPoint(i, j), a.gradient(pt))
            assert a.laplacianAtDataPoint(i, j) == a.laplacian(pt)
def func(i, j):
global cnt
assert i >= 0 and i < a.resolution.x
assert j >= 0 and j < a.resolution.y
cnt += 1
cnt = 0
a.forEachDataPointIndex(func)
assert cnt == a.resolution.x * a.resolution.y
blob = a.serialize()
b = pyjet.CellCenteredScalarGrid2()
b.deserialize(blob)
    assert b.resolution == (12, 7)
assert_vector_similar(b.origin, (9, 2))
assert_vector_similar(b.gridSpacing, (3, 4))
for j in range(a.resolution.y):
for i in range(a.resolution.x):
assert a[i, j] == b[i, j]
def test_cell_centered_scalar_grid2():
# CTOR
a = pyjet.CellCenteredScalarGrid2()
assert a.resolution == (1, 1)
assert_vector_similar(a.origin, (0.0, 0.0))
assert_vector_similar(a.gridSpacing, (1.0, 1.0))
a = pyjet.CellCenteredScalarGrid2((3, 4), (1, 2), (7, 5))
assert a.resolution == (3, 4)
assert_vector_similar(a.origin, (7, 5))
assert_vector_similar(a.gridSpacing, (1, 2))
a = pyjet.CellCenteredScalarGrid2(resolution=(3, 4),
gridSpacing=(1, 2),
gridOrigin=(7, 5))
assert a.resolution == (3, 4)
assert_vector_similar(a.origin, (7, 5))
assert_vector_similar(a.gridSpacing, (1, 2))
a = pyjet.CellCenteredScalarGrid2(resolution=(3, 4),
domainSizeX=12.0,
gridOrigin=(7, 5))
assert a.resolution == (3, 4)
assert_vector_similar(a.origin, (7, 5))
assert_vector_similar(a.gridSpacing, (4, 4))
# Properties
a = pyjet.CellCenteredScalarGrid2(resolution=(3, 4),
gridSpacing=(1, 2),
gridOrigin=(7, 5))
assert_vector_similar(a.dataSize, (3, 4))
assert_vector_similar(a.dataOrigin, (7.5, 6))
# Modifiers
b = pyjet.CellCenteredScalarGrid2(resolution=(6, 3),
gridSpacing=(5, 9),
gridOrigin=(1, 2))
a.fill(42.0)
for j in range(a.resolution.y):
for i in range(a.resolution.x):
assert a[i, j] == 42.0
a.swap(b)
assert a.resolution == (6, 3)
assert_vector_similar(a.origin, (1, 2))
assert_vector_similar(a.gridSpacing, (5, 9))
assert b.resolution == (3, 4)
assert_vector_similar(b.origin, (7, 5))
assert_vector_similar(b.gridSpacing, (1, 2))
for j in range(a.resolution.y):
for i in range(a.resolution.x):
assert a[i, j] == 0.0
for j in range(b.resolution.y):
for i in range(b.resolution.x):
assert b[i, j] == 42.0
a.set(b)
assert a.resolution == (3, 4)
assert_vector_similar(a.origin, (7, 5))
assert_vector_similar(a.gridSpacing, (1, 2))
for j in range(a.resolution.y):
for i in range(a.resolution.x):
assert a[i, j] == 42.0
c = a.clone()
assert c.resolution == (3, 4)
assert_vector_similar(c.origin, (7, 5))
assert_vector_similar(c.gridSpacing, (1, 2))
for j in range(c.resolution.y):
for i in range(c.resolution.x):
assert c[i, j] == 42.0
# ------------------------------------------------------------------------------
def test_grid3():
global cnt
a = pyjet.CellCenteredScalarGrid3(resolution=(3, 4, 5),
gridSpacing=(1, 2, 3),
gridOrigin=(7, 5, 3))
assert a.resolution == (3, 4, 5)
assert_vector_similar(a.origin, (7, 5, 3))
assert_vector_similar(a.gridSpacing, (1, 2, 3))
assert_bounding_box_similar(
a.boundingBox, pyjet.BoundingBox3D((7, 5, 3), (10, 13, 18)))
f = a.cellCenterPosition
assert_vector_similar(f(0, 0, 0), (7.5, 6, 4.5))
b = pyjet.CellCenteredScalarGrid3(resolution=(3, 4, 5),
gridSpacing=(1, 2, 3),
gridOrigin=(7, 5, 3))
assert a.hasSameShape(b)
def func(i, j, k):
global cnt
assert i >= 0 and i < 3
assert j >= 0 and j < 4
assert k >= 0 and k < 5
cnt += 1
cnt = 0
a.forEachCellIndex(func)
assert cnt == 60
def test_scalar_grid3():
global cnt
a = pyjet.CellCenteredScalarGrid3(resolution=(3, 4, 5),
gridSpacing=(1, 2, 3),
gridOrigin=(7, 5, 3))
a.resize(resolution=(12, 7, 2),
gridSpacing=(3, 4, 5),
gridOrigin=(9, 2, 5))
assert a.resolution == (12, 7, 2)
assert_vector_similar(a.origin, (9, 2, 5))
assert_vector_similar(a.gridSpacing, (3, 4, 5))
for k in range(a.resolution.z):
for j in range(a.resolution.y):
for i in range(a.resolution.x):
assert a[i, j, k] == 0.0
a[5, 6, 1] = 17.0
assert a[5, 6, 1] == 17.0
a.fill(42.0)
for k in range(a.resolution.z):
for j in range(a.resolution.y):
for i in range(a.resolution.x):
assert a[i, j, k] == 42.0
def func(pt):
return pt.x ** 2 + pt.y ** 2 + pt.z ** 2
a.fill(func)
pos = a.dataPosition()
acc = np.array(a.dataAccessor(), copy=False)
for k in range(a.resolution.z):
for j in range(a.resolution.y):
for i in range(a.resolution.x):
pt = pos(i, j, k)
assert func(pt) == a[i, j, k]
assert func(pt) == approx(a.sample(pt))
assert acc[k, j, i] == a[i, j, k]
|
jeremiahyan/odoo
|
addons/l10n_be_edi/tests/test_ubl.py
|
Python
|
gpl-3.0
| 8,636
| 0.002432
|
# -*- coding: utf-8 -*-
from odoo.addons.account_edi.tests.common import AccountEdiTestCommon
from odoo.tests import tagged
from odoo import Command
@tagged('post_install_l10n', 'post_install', '-at_install')
class TestL10nBeEdi(AccountEdiTestCommon):
@classmethod
def setUpClass(cls, chart_template_ref='l10n_be.l10nbe_chart_template', edi_format_ref='l10n_be_edi.edi_efff_1'):
super().setUpClass(chart_template_ref=chart_template_ref, edi_format_ref=edi_format_ref)
# ==== Init ====
cls.tax_10_include = cls.env['account.tax'].create({
'name': 'tax_10_include',
'amount_type': 'percent',
'amount': 10,
'type_tax_use': 'sale',
'price_include': True,
'include_base_amount': True,
'sequence': 10,
})
cls.tax_20 = cls.env['account.tax'].create({
'name': 'tax_20',
'amount_type': 'percent',
'amount': 20,
'invoice_repartition_line_ids': [
(0, 0, {'factor_percent': 100.0, 'repartition_type': 'base'}),
(0, 0, {'factor_percent': 40.0, 'repartition_type': 'tax'}),
(0, 0, {'factor_percent': 60.0, 'repartition_type': 'tax'}),
],
'refund_repartition_line_ids': [
(0, 0, {'factor_percent': 100.0, 'repartition_type': 'base'}),
(0, 0, {'factor_percent': 40.0, 'repartition_type': 'tax'}),
(0, 0, {'factor_percent': 60.0, 'repartition_type': 'tax'}),
],
'type_tax_use': 'sale',
'sequence': 20,
})
cls.tax_group = cls.env['account.tax'].create({
'name': 'tax_group',
'amount_type': 'group',
'amount': 0.0,
'type_tax_use': 'sale',
'children_tax_ids': [(6, 0, (cls.tax_10_include + cls.tax_20).ids)],
})
cls.partner_a.vat = 'BE0477472701'
# ==== Invoice ====
cls.invoice = cls.env['account.move'].create({
'move_type': 'out_invoice',
'journal_id': cls.journal.id,
'partner_id': cls.partner_b.id,
'invoice_date': '2017-01-01',
'date': '2017-01-01',
'currency_id': cls.currency_data['currency'].id,
'invoice_line_ids': [(0, 0, {
'product_id': cls.product_a.id,
'product_uom_id': cls.env.ref('uom.product_uom_dozen').id,
'price_unit': 275.0,
'quantity': 5,
'discount': 20.0,
'tax_ids': [(6, 0, cls.tax_20.ids)],
})],
})
cls.expected_invoice_efff_values = '''
<Invoice>
<UBLVersionID>2.0</UBLVersionID>
<ID>INV/2017/00001</ID>
<IssueDate>2017-01-01</IssueDate>
<InvoiceTypeCode>380</InvoiceTypeCode>
<DocumentCurrencyCode>Gol</DocumentCurrencyCode>
<AccountingSupplierParty>
<Party>
<PartyName>
<Name>company_1_data</Name>
</PartyName>
<Language>
<LocaleCode>en_US</LocaleCode>
</Language>
<PostalAddress/>
<Contact>
<Name>company_1_data</Name>
</Contact>
</Party>
</AccountingSupplierParty>
<AccountingCustomerParty>
<Party>
<PartyName>
<Name>partner_b</Name>
</PartyName>
<Language>
<LocaleCode>en_US</LocaleCode>
</Language>
<PostalAddress/>
<Contact>
<Name>partner_b</Name>
</Contact>
</Party>
</AccountingCustomerParty>
<PaymentMeans>
<PaymentMeansCode listID="UN/ECE 4461">31</PaymentMeansCode>
<PaymentDueDate>2017-01-01</PaymentDueDate>
<InstructionID>INV/2017/00001</InstructionID>
</PaymentMeans>
<TaxTotal>
            <TaxAmount currencyID="Gol">220.000</TaxAmount>
</TaxTotal>
<LegalMonetaryTotal>
<LineExtensionAmount currencyID="Gol">1100.000</LineExtensionAmount>
<TaxExclusiveAmount currencyID="Gol">1100.000</TaxExclusiveAmount>
<TaxInclusiveAmount currencyID="Gol">1320.000</TaxInclusiveAmount>
<PrepaidAmount currencyID="Gol">0.000</PrepaidAmount>
<PayableAmount currencyID="Gol">1320.000</PayableAmount>
</LegalMonetaryTotal>
<InvoiceLine>
<ID>___ignore___</ID>
<Note>Discount (20.0 %)</Note>
<InvoicedQuantity>5.0</InvoicedQuantity>
<LineExtensionAmount currencyID="Gol">1100.000</LineExtensionAmount>
<TaxTotal>
<TaxAmount currencyID="Gol">220.000</TaxAmount>
</TaxTotal>
<Item>
<Description>product_a</Description>
<Name>product_a</Name>
</Item>
<Price>
<PriceAmount currencyID="Gol">275.000</PriceAmount>
</Price>
</InvoiceLine>
</Invoice>
'''
####################################################
# Test export
####################################################
def test_efff_simple_case(self):
''' Test the generated Facturx Edi attachment without any modification of the invoice. '''
self.assert_generated_file_equal(self.invoice, self.expected_invoice_efff_values)
def test_efff_group_of_taxes(self):
self.invoice.write({
'invoice_line_ids': [(1, self.invoice.invoice_line_ids.id, {'tax_ids': [Command.set(self.tax_group.ids)]})],
})
applied_xpath = '''
<xpath expr="//TaxTotal/TaxAmount" position="replace">
<TaxAmount currencyID="Gol">320.000</TaxAmount>
</xpath>
<xpath expr="//LegalMonetaryTotal/LineExtensionAmount" position="replace">
<LineExtensionAmount currencyID="Gol">1000.000</LineExtensionAmount>
</xpath>
<xpath expr="//LegalMonetaryTotal/TaxExclusiveAmount" position="replace">
<TaxExclusiveAmount currencyID="Gol">1000.000</TaxExclusiveAmount>
</xpath>
<xpath expr="//InvoiceLine/LineExtensionAmount" position="replace">
<LineExtensionAmount currencyID="Gol">1000.000</LineExtensionAmount>
</xpath>
<xpath expr="//InvoiceLine/TaxTotal" position="replace">
<TaxTotal>
<TaxAmount currencyID="Gol">100.000</TaxAmount>
</TaxTotal>
<TaxTotal>
<TaxAmount currencyID="Gol">220.000</TaxAmount>
</TaxTotal>
</xpath>
'''
self.assert_generated_file_equal(self.invoice, self.expected_invoice_efff_values, applied_xpath)
####################################################
# Test import
####################################################
def test_invoice_edi_xml_update(self):
invoice = self._create_empty_vendor_bill()
invoice_count = len(self.env['account.move'].search([]))
self.update_invoice_from_file('l10n_be_edi', 'test_xml_file', 'efff_test.xml', invoice)
self.assertEqual(len(self.env['account.move'].search([])), invoice_count)
self.assertEqual(invoice.amount_total, 666.50)
self.assertEqual(invoice.amount_tax, 115.67)
self.assertEqual(invoice.partner_id, self.partner_a)
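# Sketch, not part of the original test: the asserted totals are consistent with a single
# 21% Belgian VAT rate, which is an assumption about the efff_test.xml fixture.
_untaxed = 666.50 - 115.67          # 550.83
assert round(_untaxed * 0.21, 2) == 115.67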
Cisco-Talos/pyrebox | volatility/volatility/plugins/overlays/windows/vista_sp12_x64_syscalls.py | Python | gpl-2.0 | 43,395 | 0.053693
# Volatility
# Copyright (c) 2008-2013 Volatility Foundation
#
# This file is part of Volatility.
#
# Volatility is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# Volatility is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Volatility. If not, see <http://www.gnu.org/licenses/>.
#
"""
@author: MHL
@license: GNU General Public License 2.0
@contact: michael.ligh@mnin.org
This file provides support for Vista SP1 and SP2 x64
"""
syscalls = [
[
'NtMapUserPhysicalPagesScatter', # 0x0
'NtWaitForSingleObject', # 0x1
'NtCallbackReturn', # 0x2
'NtReadFile', # 0x3
'NtDeviceIoControlFile', # 0x4
'NtWriteFile', # 0x5
'NtRemoveIoCompletion', # 0x6
'NtReleaseSemaphore', # 0x7
'NtReplyWaitReceivePort', # 0x8
'NtReplyPort', # 0x9
'NtSetInformationThread', # 0xa
'NtSetEvent', # 0xb
'NtClose', # 0xc
'NtQueryObject', # 0xd
'NtQueryInformationFile', # 0xe
'NtOpenKey', # 0xf
'NtEnumerateValueKey', # 0x10
'NtFindAtom', # 0x11
'NtQueryDefaultLocale', # 0x12
'NtQueryKey', # 0x13
'NtQueryValueKey', # 0x14
'NtAllocateVirtualMemory', # 0x15
'NtQueryInformationProcess', # 0x16
'NtWaitForMultipleObjects32', # 0x17
'NtWriteFileGather', # 0x18
'NtSetInformationProcess', # 0x19
'NtCreateKey', # 0x1a
'NtFreeVirtualMemory', # 0x1b
'NtImpersonateClientOfPort', # 0x1c
'NtReleaseMutant', # 0x1d
'NtQueryInformationToken', # 0x1e
'NtRequestWaitReplyPort', # 0x1f
'NtQueryVirtualMemory', # 0x20
'NtOpenThreadToken', # 0x21
'NtQueryInformationThread', # 0x22
'NtOpenProcess', # 0x23
'NtSetInformationFile', # 0x24
    'NtMapViewOfSection', # 0x25
'NtAccessCheckAndAuditAlarm', # 0x26
'NtUnmapViewOfSection', # 0x27
'NtReplyWaitReceivePortEx', # 0x28
'NtTerminateProcess', # 0x29
'NtSetEventBoostPriority', # 0x2a
'NtReadFileScatter', # 0x2b
    'NtOpenThreadTokenEx', # 0x2c
'NtOpenProcessTokenEx', # 0x2d
'NtQueryPerformanceCounter', # 0x2e
'NtEnumerateKey', # 0x2f
'NtOpenFile', # 0x30
'NtDelayExecution', # 0x31
'NtQueryDirectoryFile', # 0x32
'NtQuerySystemInformation', # 0x33
'NtOpenSection', # 0x34
'NtQueryTimer', # 0x35
'NtFsControlFile', # 0x36
'NtWriteVirtualMemory', # 0x37
'NtCloseObjectAuditAlarm', # 0x38
'NtDuplicateObject', # 0x39
'NtQueryAttributesFile', # 0x3a
'NtClearEvent', # 0x3b
'NtReadVirtualMemory', # 0x3c
'NtOpenEvent', # 0x3d
'NtAdjustPrivilegesToken', # 0x3e
'NtDuplicateToken', # 0x3f
'NtContinue', # 0x40
'NtQueryDefaultUILanguage', # 0x41
'NtQueueApcThread', # 0x42
'NtYieldExecution', # 0x43
'NtAddAtom', # 0x44
'NtCreateEvent', # 0x45
'NtQueryVolumeInformationFile', # 0x46
'NtCreateSection', # 0x47
'NtFlushBuffersFile', # 0x48
'NtApphelpCacheControl', # 0x49
'NtCreateProcessEx', # 0x4a
'NtCreateThread', # 0x4b
'NtIsProcessInJob', # 0x4c
'NtProtectVirtualMemory', # 0x4d
'NtQuerySection', # 0x4e
'NtResumeThread', # 0x4f
'NtTerminateThread', # 0x50
'NtReadRequestData', # 0x51
'NtCreateFile', # 0x52
'NtQueryEvent', # 0x53
'NtWriteRequestData', # 0x54
'NtOpenDirectoryObject', # 0x55
'NtAccessCheckByTypeAndAuditAlarm', # 0x56
'NtQuerySystemTime', # 0x57
'NtWaitForMultipleObjects', # 0x58
'NtSetInformationObject', # 0x59
'NtCancelIoFile', # 0x5a
'NtTraceEvent', # 0x5b
'NtPowerInformation', # 0x5c
'NtSetValueKey', # 0x5d
'NtCancelTimer', # 0x5e
'NtSetTimer', # 0x5f
'NtAcceptConnectPort', # 0x60
'NtAccessCheck', # 0x61
'NtAccessCheckByType', # 0x62
'NtAccessCheckByTypeResultList', # 0x63
'NtAccessCheckByTypeResultListAndAuditAlarm', # 0x64
'NtAccessCheckByTypeResultListAndAuditAlarmByHandle', # 0x65
'NtAcquireCMFViewOwnership', # 0x66
'NtAddBootEntry', # 0x67
'NtAddDriverEntry', # 0x68
'NtAdjustGroupsToken', # 0x69
'NtAlertResumeThread', # 0x6a
'NtAlertThread', # 0x6b
'NtAllocateLocallyUniqueId', # 0x6c
'NtAllocateUserPhysicalPages', # 0x6d
'NtAllocateUuids', # 0x6e
'NtAlpcAcceptConnectPort', # 0x6f
'NtAlpcCancelMessage', # 0x70
'NtAlpcConnectPort', # 0x71
'NtAlpcCreatePort', # 0x72
'NtAlpcCreatePortSection', # 0x73
'NtAlpcCreateResourceReserve', # 0x74
'NtAlpcCreateSectionView', # 0x75
'NtAlpcCreateSecurityContext', # 0x76
'NtAlpcDeletePortSection', # 0x77
'NtAlpcDeleteResourceReserve', # 0x78
'NtAlpcDeleteSectionView', # 0x79
'NtAlpcDeleteSecurityContext', # 0x7a
'NtAlpcDisconnectPort', # 0x7b
'NtAlpcImpersonateClientOfPort', # 0x7c
'NtAlpcOpenSenderProcess', # 0x7d
'NtAlpcOpenSenderThread', # 0x7e
'NtAlpcQueryInformation', # 0x7f
'NtAlpcQueryInformationMessage', # 0x80
'NtAlpcRevokeSecurityContext', # 0x81
'NtAlpcSendWaitReceivePort', # 0x82
'NtAlpcSetInformation', # 0x83
'NtAreMappedFilesTheSame', # 0x84
'NtAssignProcessToJobObject', # 0x85
'NtCancelDeviceWakeupRequest', # 0x86
'NtCancelIoFileEx', # 0x87
'NtCancelSynchronousIoFile', # 0x88
'NtCommitComplete', # 0x89
'NtCommitEnlistment', # 0x8a
'NtCommitTransaction', # 0x8b
'NtCompactKeys', # 0x8c
'NtCompareTokens', # 0x8d
'NtCompleteConnectPort', # 0x8e
'NtCompressKey', # 0x8f
'NtConnectPort', # 0x90
'NtCreateDebugObject', # 0x91
'NtCreateDirectoryObject', # 0x92
'NtCreateEnlistment', # 0x93
'NtCreateEventPair', # 0x94
'NtCreateIoCompletion', # 0x95
'NtCreateJobObject', # 0x96
'NtCreateJobSet', # 0x97
'NtCreateKeyTransacted', # 0x98
'NtCreateKeyedEvent', # 0x99
'NtCreateMailslotFile', # 0x9a
'NtCreateMutant', # 0x9b
'NtCreateNamedPipeFile', # 0x9c
'NtCreatePagingFile', # 0x9d
'NtCreatePort', # 0x9e
'NtCreatePrivateNamespace', # 0x9f
'NtCreateProcess', # 0xa0
'NtCreateProfile', # 0xa1
'NtCreateResourceManager', # 0xa2
'NtCreateSemaphore', # 0xa3
'NtCreateSymbolicLinkObject', # 0xa4
'NtCreateThreadEx', # 0xa5
'NtCreateTimer', # 0xa6
'NtCreateToken', # 0xa7
'NtCreateTransaction', # 0xa8
'NtCreateTransactionManager', # 0xa9
'NtCreateUserProcess', # 0xaa
'NtCreateWaitablePort', # 0xab
'NtCreateWorkerFactory', # 0xac
'NtDebugActiveProcess', # 0xad
'NtDebugContinue', # 0xae
'NtDeleteAtom', # 0xaf
'NtDeleteBootEntry', # 0xb0
'NtDeleteDriverEntry', # 0xb1
'NtDeleteFile', # 0xb2
'NtDeleteKey', # 0xb3
'NtDeleteObjectAuditAlarm', # 0xb4
'NtDeletePrivateNamespace', # 0xb5
'NtDeleteValueKey', # 0xb6
'NtDisplayString', # 0xb7
'NtEnumerateBootEntries', # 0xb8
'NtEnumerateDriverEntries', # 0xb9
'NtEnumerateSystemEnvironmentValuesEx', # 0xba
'NtEnumerateTransactionObject', # 0xbb
'NtExtendSection', # 0xbc
'NtFilterToken', # 0xbd
'NtFlushInstallUILanguage', # 0xbe
'NtFlushInstructionCache', # 0xbf
'NtFlushKey', # 0xc0
'NtFlushProcessWriteBuffers', # 0xc1
'NtFlushVirtualMemory', # 0xc2
'NtFlushWriteBuffer', # 0xc3
'NtFreeUserPhysicalPages', # 0xc4
'NtFreezeRegistry', # 0xc5
'NtFreezeTransactions', # 0xc6
'NtGetContextThread', # 0xc7
'NtGetCurrentProcessorNumber', # 0xc8
'NtGetDevicePowerState', # 0xc9
'NtGetMUIRegistryInfo', # 0xca
'NtGetNextProcess', # 0xcb
'NtGetNextThread', # 0xcc
'NtGetNlsSectionPtr', # 0xcd
'NtGetNotificationResourceManager', # 0xce
'NtGetPlugPlayEvent', # 0xcf
'NtGetWriteWatch', # 0xd0
'NtImpersonateAnonymousToken', # 0xd1
'NtImpersonateThread', # 0xd2
'NtInitializeNlsFiles', # 0xd3
'NtInitializeRegistry', # 0xd4
TeamSPoon/logicmoo_workspace | packs_web/butterfly/lib/python3.7/site-packages/xdis/opcodes/opcode_30.py | Python | mit | 1,967 | 0.004067
# (C) Copyright 2017, 2019-2020 by Rocky Bernstein
"""
CPython 3.0 bytecode opcodes
This is like Python 3.0's opcode.py with some classification
of stack usage.
"""
from xdis.opcodes.base import (
def_op,
extended_format_ATTR,
extended_format_CALL_FUNCTION,
finalize_opcodes,
format_MAKE_FUNCTION_default_argc,
format_extended_arg,
init_opdata,
jrel_op,
rm_op,
update_pj2,
)
import xdis.opcodes.opcode_31 as opcode_31
version = 3.0
python_implementation = "CPython"
l = locals()
init_opdata(l, opcode_31, version)
# These are in Python 3.x but not in Python 3.0
rm_op(l, 'JUMP_IF_FALSE_OR_POP', 111)
rm_op(l, 'JUMP_IF_TRUE_OR_POP', 112)
rm_op(l, 'POP_JUMP_IF_FALSE', 114)
rm_op(l, 'POP_JUMP_IF_TRUE', 115)
rm_op(l, 'LIST_APPEND', 145)
rm_op(l, 'MAP_ADD', 147)
# These are in 3.0 but are not in 3.1 or they have
# different opcode numbers. Note: As a result of opcode value
# changes, these have to be applied *after* removing ops (with
# the same name).
#          OP NAME          OPCODE POP PUSH
#--------------------------------------------
def_op(l, 'SET_ADD', 17, 2, 0) # Calls set.add(TOS1[-i], TOS).
# Used to implement set comprehensions.
def_op(l, 'LIST_APPEND', 18, 2, 0) # Calls list.append(TOS1, TOS).
# Used to implement list comprehensions.
jrel_op(l, 'JUMP_IF_FALSE', 111, 1, 1)
jrel_op(l, 'JUMP_IF_TRUE', 112, 1, 1)
# Yes, pj2 not pj3 - Python 3.0 is more like 2.7 here with its
# JUMP_IF rather than POP_JUMP_IF.
update_pj2(globals(), l)
opcode_arg_fmt = {
'MAKE_FUNCTION': format_MAKE_FUNCTION_default_argc,
'EXTENDED_ARG': format_extended_arg,
}
opcode_extended_fmt = {
"LOAD_ATTR": extended_format_ATTR,
"CALL_FUNCTION": extended_format_CALL_FUNCTION,
"STORE_ATTR": extended_format_ATTR,
}
finalize_opcodes(l)
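# Usage sketch, not part of the original module; it assumes init_opdata/def_op maintain
# CPython-style opname/opmap tables in `l`, mirroring the stdlib opcode module.
if __name__ == "__main__":
    assert l["opmap"]["JUMP_IF_FALSE"] == 111   # from the jrel_op call above
    assert l["opname"][17] == "SET_ADD"         # from the def_op call above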
WimpyAnalytics/django-readonly-schema | readonly/readonly/settings/local.py | Python | mit | 1,156 | 0.00519
""" Local dev settings and globals. """
from base import *
""" DEBUG CONFIGURATION """
# See: https://docs.djangoproject.com/en/dev/ref/settings/#debug
DEBUG = True
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-debug
TEMPLATE_DEBUG = DEBUG
""" LOGGING """
for logger_key in LOGGING['loggers'].keys():
LOGGING['loggers'][logger_key]['level'] = 'DEBUG'
for handler_key in LOGGING['handlers'].keys():
LOGGING['handlers'][handler_key]['level'] = 'DEBUG'
""" TOOLBAR CONFIGURATION """
# See: https://github.com/django-debug-toolbar/django-debug-toolbar#installation
INSTALLED_APPS += (
'debug_toolbar',
)
# See: https://github.com/django-debug-toolbar/django-debug-toolbar#installation
INTERNAL_IPS = ('127.0.0.1',)
# See: https://github.com/django-debug-toolbar/django-debug-toolbar#installation
# Should be added as soon as possible in the order
MIDDLEWARE_CLASSES = ('debug_toolbar.middleware.DebugToolbarMiddleware',) + MIDDLEWARE_CLASSES
# See: https://github.com/django-debug-toolbar/django-debug-toolbar#installation
DEBUG_TOOLBAR_PATCH_SETTINGS = False
DEBUG_TOOLBAR_CONFIG = {
'SHOW_TEMPLATE_CONTEXT': True,
}
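# Sketch, not part of the original settings: the DEBUG override loops above assume base.py
# defines a dictConfig-style LOGGING with 'handlers' and 'loggers' sections, e.g.:
_example_logging = {
    'version': 1,
    'handlers': {'console': {'class': 'logging.StreamHandler', 'level': 'INFO'}},
    'loggers': {'django': {'handlers': ['console'], 'level': 'INFO'}},
}
for _key in _example_logging['loggers']:
    _example_logging['loggers'][_key]['level'] = 'DEBUG'
assert _example_logging['loggers']['django']['level'] == 'DEBUG'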
aacebedo/build-utilities | setup.py | Python | lgpl-3.0 | 2,674 | 0.008975
#!/usr/bin/env python3
#
# build-utilities
# Copyright (c) 2015, Alexandre ACEBEDO, All rights reserved.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 3.0 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library.
#
"""
Setup script for build-utilities
"""
import sys
import os
from platform import python_version
try:
import versioneer
except Exception as e:
sys.exit("versioneer for python3 is missing")
try:
from setuptools import setup, find_packages
except Exception as e:
sys.exit("setuptools for python3 is missing")
from setuptools.command.install import install
class InstallCommand(install):
user_options = install.user_options + [
        ('prefix=', None, 'Install prefix path')
]
def initialize_options(self):
install.initialize_options(self)
self.prefix = None
def finalize_options(self):
#print('The custom option for install is ', self.custom_option)
install.finalize_options(self)
def run(self):
if self.prefix != None:
os.environ["PYTHONPATH"] = os.path.join(self.prefix,"lib","python{}.{}".format(python_version()[0],python_version()[2]),"site-packages")
install.run(self)
def process_setup():
"""
Setup function
"""
if sys.version_info < (3,0):
sys.exit("build-utilities only supports python3. Please run setup.py with python3.")
cmds = versioneer.get_cmdclass()
cmds["install"] = InstallCommand
setup(
name="build-utilities",
version=versioneer.get_version(),
cmdclass=cmds,
packages=find_packages("src"),
package_dir ={'':'src'},
install_requires=['GitPython>=2.0', 'progressbar2>=2.0.0'],
author="Alexandre ACEBEDO",
        author_email="Alexandre ACEBEDO",
        description="Build utilities for python and go projects.",
license="LGPLv3",
keywords="build go python",
url="http://github.com/aacebedo/build-utilities",
entry_points={'console_scripts':
['build-utilities = buildutilities.__main__:BuildUtilities.main']}
)
if __name__ == "__main__":
process_setup()
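# Side note, not part of the original script: InstallCommand.run builds the site-packages
# path by indexing characters of python_version() ("3.7.3"[0] and [2] -> "python3.7"),
# which breaks for two-digit minor versions such as 3.10. A more robust sketch:
def _site_packages_for(prefix):
    return os.path.join(
        prefix, "lib",
        "python{}.{}".format(sys.version_info.major, sys.version_info.minor),
        "site-packages")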
ZeitOnline/zeit.content.cp | src/zeit/content/cp/blocks/teaser.py | Python | bsd-3-clause | 7,905 | 0
from zeit.cms.i18n import MessageFactory as _
import copy
import grokcore.component as grok
import lxml.objectify
import zeit.cms.content.property
import zeit.cms.interfaces
import zeit.cms.syndication.feed
import zeit.cms.syndication.interfaces
import zeit.content.cp.blocks.block
import zeit.content.cp.interfaces
import zeit.edit.interfaces
import zope.component
import zope.container.interfaces
import zope.interface
import zope.schema
class TeaserBlock(
zeit.content.cp.blocks.block.Block,
zeit.cms.syndication.feed.ContentList):
zope.interface.implementsOnly(
zeit.content.cp.interfaces.ITeaserBlock,
zeit.cms.syndication.interfaces.IFeed,
zope.container.interfaces.IContained)
type = 'teaser'
force_mobile_image = zeit.cms.content.property.ObjectPathAttributeProperty(
'.', 'force_mobile_image', zeit.content.cp.interfaces.ITeaserBlock[
'force_mobile_image'])
def __init__(self, context, xml):
super(TeaserBlock, self).__init__(context, xml)
if self.xml.get('module') == 'teaser':
if isinstance(self.layout, zeit.content.cp.layout.NoBlockLayout):
raise ValueError(_(
'No default teaser layout defined for this area.'))
self.layout = self.layout
assert self.xml.get('module') != 'teaser'
@property
def entries(self):
        # overridden so that super.insert() and updateOrder() work
return self.xml
@property
def layout(self):
id = self.xml.get('module')
source = zeit.content.cp.interfaces.ITeaserBlock['layout'].source(
self)
layout = source.find(id)
if layout:
return layout
return zeit.content.cp.interfaces.IArea(self).default_teaser_layout \
or zeit.content.cp.layout.NoBlockLayout(self)
@layout.setter
def layout(self, layout):
self._p_changed = True
self.xml.set('module', layout.id)
TEASERBLOCK_FIELDS = (
set(zope.schema.getFieldNames(
zeit.content.cp.interfaces.ITeaserBlock)) -
set(zeit.cms.content.interfaces.IXMLRepresentation)
)
def update(self, other):
if not zeit.content.cp.interfaces.ITeaserBlock.providedBy(other):
raise ValueError('%r is not an ITeaserBlock' % other)
# Copy teaser contents.
for content in other:
self.append(content)
# Copy block properties (including __name__ and __parent__)
for name in self.TEASERBLOCK_FIELDS:
setattr(self, name, getattr(other, name))
class Factory(zeit.content.cp.blocks.block.BlockFactory):
produces = TeaserBlock
title = _('List of teasers')
@grok.adapter(zeit.content.cp.interfaces.IArea,
zeit.cms.interfaces.ICMSContent,
int)
@grok.implementer(zeit.edit.interfaces.IElement)
def make_block_from_content(container, content, position):
block = Factory(container)(position)
block.insert(0, content)
return block
@grok.adapter(zeit.content.cp.interfaces.ITeaserBlock)
@grok.implementer(zeit.edit.interfaces.IElementReferences)
def cms_content_iter(context):
for teaser in context:
yield teaser
@grok.adapter(zeit.content.cp.interfaces.ICenterPage)
@grok.implementer(zeit.content.cp.interfaces.ITeaseredContent)
def extract_teasers_from_cp(context):
for region in context.values():
for area in region.values():
for teaser in zeit.content.cp.interfaces.ITeaseredContent(area):
yield teaser
@grok.adapter(zeit.content.cp.interfaces.IArea)
@grok.implementer(zeit.content.cp.interfaces.ITeaseredContent)
def extract_teasers_from_area(context):
for teaser in context.filter_values(
zeit.content.cp.interfaces.ITeaserBlock):
for content in list(teaser):
yield content
def extract_manual_teasers(context):
for teaser in context.values():
if not zeit.content.cp.interfaces.ITeaserBlock.providedBy(teaser):
continue
for content in list(teaser):
yield content
@grok.subscribe(
zeit.content.cp.interfaces.ITeaserBlock,
zope.container.interfaces.IObjectMovedEvent)
def change_layout_if_not_allowed_in_new_area(context, event):
# Getting a default layout can mean that the current layout is not allowed
# in this area (can happen when a block was moved between areas). Thus, we
# want to change the XML to actually reflect the new default layout.
if context.layout.is_default(context):
context.layout = context.layout
@grok.subscribe(
zeit.content.cp.interfaces.ITeaserBlock,
zope.container.interfaces.IObjectAddedEvent)
def apply_layout_for_added(context, event):
"""Set layout for new teasers only."""
area = context.__parent__
if not area.apply_teaser_layouts_automatically:
return
# XXX The overflow_blocks handler also listens to the IObjectAddedEvent and
# may have removed this item from the container. Since overflow_blocks
# retrieves the item via a getitem access, it is newly created from the XML
# node. That means `context is not context.__parent__[context.__name__]`.
# Since it is not the same object, changes to the newly created object will
# not be reflected in the context given to event handlers. So we need a
# guard here to check if overflow_blocks has removed the item and skip the
# method in case it has. (Modifying __parent__ of context does not seem
    # like a good idea, hell might break loose. So let's just forget about this
    # possibility.)
if context.__name__ not in area.keys():
return
if area.keys().index(context.__name__) == 0:
context.layout = area.first_teaser_layout
else:
context.layout = area.default_teaser_layout
@grok.subscribe(
zeit.content.cp.interfaces.IArea,
zeit.edit.interfaces.IOrderUpdatedEvent)
def set_layout_to_default_when_moved_down_from_first_position(area, event):
if not area.apply_teaser_layouts_automatically:
return
# XXX The overflow_blocks handler listens to the IObjectAddedEvent and may
# have removed this item from the container. In that case we have to do
# nothing, since checking the layout is handled by the new container.
if event.old_order[0] not in area.keys():
return
previously_first = area[event.old_order[0]]
if (zeit.content.cp.interfaces.ITeaserBlock.providedBy(
previously_first) and
            area.values().index(previously_first)) > 0:
previously_first.layout = area.default_teaser_layout
@grok.adapter(zeit.content.cp.interfaces.ITeaserBlock)
@grok.implementer(zeit.content.cp.interfaces.IRenderedXML)
def rendered_xml_teaserblock(context):
container = getattr(
lxml.objectify.E, context.xml.tag)(**context.xml.attrib)
# Render non-content items like topiclinks.
for child in context.xml.getchildren():
# BBB: xinclude is not generated anymore, but some might still exist.
if child.tag not in [
'block', '{http://www.w3.org/2003/XInclude}include']:
container.append(copy.copy(child))
# Render content.
for entry in context:
node = zope.component.queryAdapter(
entry, zeit.content.cp.interfaces.IRenderedXML, name="content")
if node is not None:
container.append(node)
return container
@grok.adapter(zeit.cms.interfaces.ICMSContent, name="content")
@grok.implementer(zeit.content.cp.interfaces.IRenderedXML)
def rendered_xml_cmscontent(context):
if not context.uniqueId:
return None
block = lxml.objectify.E.block(
uniqueId=context.uniqueId, href=context.uniqueId)
updater = zeit.cms.content.interfaces.IXMLReferenceUpdater(context)
updater.update(block, suppress_errors=True)
return block
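# Sketch, not part of the original module: the shape of node that rendered_xml_cmscontent
# builds, using a made-up uniqueId; IXMLReferenceUpdater would add further attributes.
def _example_block_node(unique_id='http://xml.zeit.de/example'):
    node = lxml.objectify.E.block(uniqueId=unique_id, href=unique_id)
    assert node.tag == 'block' and node.get('uniqueId') == unique_id
    return node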