| repo_name | path | language | license | size | score |
|---|---|---|---|---|---|
| obi-two/Rebelion | data/scripts/templates/object/tangible/deed/pet_deed/shared_kimogila_deed.py | Python | mit | 691 | 0.037627 |
#### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
result = Tangible()
result.template = "object/tangible/deed/pet_deed/shared_kimogila_deed.iff"
result.attribute_template_id = 2
result.stfName("pet_de
|
ed","kimogila")
#### BEGIN MODIFICATIONS ####
result.setStringAttribute("radial_filename", "radials/deed_datapad.py")
result.setStringAttribute("deed_pcd", "object/intangible/pet/shared_kimogila_hue.iff")
result.setStringAttribute("deed_mobile", "object/mobile/shared_kimogila_hue.iff")
#### END MODIFICATIONS ####
return result
| alanfranz/duplicity | testing/overrides/gettext.py | Python | gpl-2.0 | 1,496 | 0.004016 |
# -*- Mode:Python; indent-tabs-mode:nil; tab-width:4; encoding:utf8 -*-
#
# Copyright 2014 Michael Terry <mike@mterry.name>
#
# This file is part of duplicity.
#
# Duplicity is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 2 of the License, or (at your
# option) any later version.
#
# Duplicity is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with duplicity; if not, write to the Free Software Foundation,
# Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
# This is just a small override to the system gettext.py which allows us to
# always return a string with fancy unicode characters, which will notify us
# if we ever get a unicode->ascii translation by accident.
def translation(*args, **kwargs):
class Translation:
ZWSP = u"" # ZERO WIDTH SPACE, basically an invisible space separator
def install(self, **kwargs):
import __builtin__
__builtin__.__dict__['_'] = lambda x: x + self.ZWSP
def ungettext(self, one, more, n):
if n == 1: return one + self.ZWSP
else: return more + self.ZWSP
return Translation()
| KaoticEvil/Sopel-Modules | horoscope.py | Python | gpl-3.0 | 1,835 | 0.002725 |
'''
A simple module for Sopel (http://sopel.chat) to get horoscope
information from a JSON API and put it back into chat.
All original code written by: BluDragyn. (www.bludragyn.net)
This module is released under the terms of the GPLv3
(https://www.gnu.org/licenses/gpl-3.0.en.html)
If you use and like this module, please send me an email
(dragyn@bludragyn.net) or drop in to see me on Occultus IRC
(irc.occultus.net), I hang out in #sacredpaths
'''
import json
import urllib3
from sopel import module
@module.commands('hs', 'horoscope')
def horoscope(bot, trigger):
signs = set(['aquarius', 'Aquarius',
'pisces', 'Pisces',
'aries', 'Aries',
'taurus', 'Taurus',
'gemini', 'Gemini',
'cancer', 'Cancer',
'leo', 'Leo',
'virgo', 'Virgo',
'libra', 'Libra',
'scorpio', 'Scorpio',
'sagittarius', 'Sagittarius',
'capricorn', 'Capricorn'])
sign = trigger.group(2)
nick = trigger.nick
if sign in signs:
sign = sign.lower()
hs = get_hs(sign)
sign = sign.capitalize()
bot.say('Today\'s horoscope for ' + sign + ' is: ' + hs)
else:
bot.say(nick + ', please use a valid zodiac sign and try again.')
def get_hs(sunsign):
http = urllib3.PoolManager()
url = 'http://sandipbgt.com/theastrologer/api/horoscope/' \
+ sunsign + '/today/'
response = http.request('GET', url)
raw = json.loads(response.data.decode('utf8'))
hscope = raw['horoscope']
if not hscope:
hscope = 'There was an error getting the horoscope right now.\
Please try again later.'
return hscope
else:
hscope = hscope[:-59]
return hscope
| brunosmmm/hdltools | hdltools/vecgen/generate.py | Python | mit | 4,978 | 0.000201 |
"""Generate pass."""
from scoff.ast.visits.syntax import (
SyntaxChecker,
SyntaxCheckerError,
SyntaxErrorDescriptor,
)
class VecgenPass(SyntaxChecker):
"""Visit AST and generate intermediate code."""
_CONFIG_DIRECTIVES = ("register_size",)
_SYNTAX_ERR_INVALID_VAL = SyntaxCheckerError("invalid value", "v001")
_SYNTAX_ERR_INVALID_NAME = SyntaxCheckerError(
"unknown name: '{name}'", "v002"
)
_SYNTAX_ERR_TIME_PAST = SyntaxErrorDescriptor(
"t001",
"absolute time is in the past",
"absolute time is in the past, current time is {cur}, requested is {req}",
)
_SYNTAX_ERRORS = {"t001": _SYNTAX_ERR_TIME_PAST}
def __init__(self, *args, **kwargs):
"""Initialize."""
super().__init__(*args, **kwargs)
self._sequence = []
self._definitions = {}
self._directives = {}
self._current_time = 0
def visit_ConfigurationDirective(self, node):
"""Visit configuration directive."""
if node.directive not in self._CONFIG_DIRECTIVES:
# unknown, ignore for now
return
if node.directive in self._directives:
# re-define, warning
pass
# store
self._directives[node.directive] = node.value
def visit_ValueDefinition(self, node):
"""Visit value definition."""
if node.name in self._definitions:
# re-define, warning
pass
self._definitions[node.name] = node.value
def visit_InitialElement(self, node):
"""Visit initial element."""
if isinstance(node.val, str):
# symbol lookup
if node.val not in self._definitions:
raise self.get_error_from_code(node, "v002", name=node.val)
node.val = self._definitions[node.val]
self._current_time += 1
return {"event": "initial", "value": node.val}
def visit_SequenceElement(self, node):
"""Visit sequence element."""
if isinstance(node.mask, str):
# symbol lookup
if node.mask not in self._definitions:
raise self.get_error_from_code(node, "v002", name=node.mask)
node.mask = self._definitions[node.mask]
if node.time is None:
self._current_time += 1
# insert relative time
time = {"mode": "rel", "delta": 1}
else:
if node.time["mode"] == "rel":
self._current_time += node.time["delta"]
else:
abs_time = node.time["time"]
if abs_time < self._current_time:
# time is in the past, cannot be
raise self.get_error_from_code(
node, "t001", cur=self._current_time, req=abs_time
)
self._current_time = abs_time
time = node.time
return {"event": node.event, "mask": node.mask, "time": time}
def visit_HexValue(self, node):
"""Visit hexadecimal value."""
try:
value = int(node.val, 16)
except ValueError:
raise self._SYNTAX_ERR_INVALID_VAL
return value
def visit_BinValue(self, node):
"""Visit binary value."""
try:
value = int(node.val, 2)
except ValueError:
raise self._SYNTAX_ERR_INVALID_VAL
return value
    def visit_AbsTimeValue(self, node):
"""Visit absolute time value."""
if node.time < 0:
raise self._SYNTAX_ERR_INVALID_VAL
return {"mode": "abs", "time": node.time}
def visit_RelTimeValue(self, node):
"""Visit relative time value."""
if node.time < 0:
raise self._SYNTAX_ERR_INVALID_VAL
return {"mode": "rel", "delta": node.time}
def visit_VectorDescription(self, node):
"""Visit AST root."""
ir = self._directives
ir["sequence"] = [node.initial] + node.sequence
        # doesn't return, not sure why
self._sequence = ir
return ir
def visit_BitwiseBinOperation(self, node):
"""Visit binary bitwise operation."""
if node.op == "<<":
return node.lhs << node.rhs
elif node.op == ">>":
return node.lhs >> node.rhs
elif node.op == "|":
return node.lhs | node.rhs
elif node.op == "&":
return node.lhs & node.rhs
elif node.op == "^":
return node.lhs ^ node.rhs
else:
# cannot happen!
return None
def visit_BitwiseNegate(self, node):
"""Visit negation."""
return ~node.val
def visit_BooleanExpr(self, node):
"""Visit boolean expression."""
return node.op
def visit_Comment(self, node):
"""Visit comment."""
def visit(self, node):
"""Perform visit."""
ret = super().visit(node)
return self._sequence
| F5Networks/f5-ansible | ansible_collections/f5networks/f5_modules/plugins/doc_fragments/f5ssh.py | Python | gpl-3.0 | 3,698 | 0.004327 |
# -*- coding: utf-8 -*-
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
class ModuleDocFragment(object):
# Standard F5 documentation fragment
DOCUMENTATION = r'''
options:
provider:
description:
- A dict object containing connection details.
type: dict
version_added: "1.0.0"
suboptions:
password:
description:
- The password for the user account used to connect to the BIG-IP.
- You may omit this option by setting the environment variable C(F5_PASSWORD).
type: str
required: true
aliases: [ pass, pwd ]
server:
description:
- The BIG-IP host.
- You may omit this option by setting the environment variable C(F5_SERVER).
type: str
required: true
server_port:
description:
- The BIG-IP server port.
- You may omit this option by setting the environment variable C(F5_SERVER_PORT).
type: int
default: 22
user:
description:
- The username to connect to the BIG-IP with. This user must have
administrative privileges on the device.
- You may omit this option by setting the environment variable C(F5_USER).
type: str
required: true
validate_certs:
description:
- If C(no), SSL certificates are not validated. Use this only
on personally controlled sites using self-signed certificates.
- You may omit this option by setting the environment variable C(F5_VALIDATE_CERTS).
type: bool
default: yes
timeout:
description:
- Specifies the timeout in seconds for communicating with the network device
for either connecting or sending commands. If the timeout is
exceeded before the operation is completed, the module will error.
type: int
ssh_keyfile:
description:
- Specifies the SSH keyfile to use to authenticate the connection to
the remote device. This argument is only used for I(cli) transports.
        - You may omit this option by setting the environment variable C(ANSIBLE_NET_SSH_KEYFILE).
type: path
transport:
description:
- Configures the transport connection to use when connecting to the
remote device.
type: str
choices: ['cli']
default: cli
no_f5_teem:
description:
- If C(yes), TEEM telemetry data is not sent to F5.
- You may omit this option by setting the environment variable C(F5_TELEMETRY_OFF).
- Previously used variable C(F5_TEEM) is deprecated as its name was confusing.
default: no
type: bool
auth_provider:
description:
      - Configures the auth provider used to obtain authentication tokens from the remote device.
- This option is really used when working with BIG-IQ devices.
type: str
notes:
- For more information on using Ansible to manage F5 Networks devices see U(https://www.ansible.com/integrations/networks/f5).
- Requires BIG-IP software version >= 12.
- The F5 modules only manipulate the running configuration of the F5 product. To ensure that BIG-IP
specific configuration persists to disk, be sure to include at least one task that uses the
M(f5networks.f5_modules.bigip_config) module to save the running configuration. Refer to the module's documentation for
the correct usage of the module to save your running configuration.
'''
| apache/allura | Allura/allura/tests/functional/test_feeds.py | Python | apache-2.0 | 3,296 | 0.000303 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from formencode.variabledecode import variable_encode
from allura.tests import TestController
from allura.tests import decorators as td
from allura.lib import helpers as h
class TestFeeds(TestController):
def setUp(self):
TestController.setUp(self)
self._setUp()
@td.with_wiki
@td.with_tracker
def _setUp(self):
self.app.get('/wiki/')
self.app.get('/bugs/')
self.app.post(
'/bugs/save_ticket',
params=variable_encode(dict(
ticket_form=dict(
ticket_num='',
labels='',
assigned_to='',
milestone='',
summary='This is a ticket',
status='open',
description='This is a description'))),
status=302)
title = 'Descri\xe7\xe3o e Arquitetura'
self.app.post(
h.urlquote('/wiki/%s/update' % title),
params=dict(
title=title.encode('utf-8'),
text="Nothing much",
labels='',
),
status=302)
self.app.get(h.urlquote('/wiki/%s/' % title))
def test_project_feed(self):
self.app.get('/feed.rss')
self.app.get('/feed.atom')
@td.with_wiki
def test_wiki_feed(self):
self.app.get('/wiki/feed.rss')
self.app.get('/wiki/feed.atom')
@td.with_wiki
def test_wiki_page_feed(self):
self.app.post('/wiki/Root/update', params={
'title': 'Root',
'text': '',
'labels': '',
})
self.app.get('/wiki/Root/feed.rss')
self.app.get('/wiki/Root/feed.atom')
@td.with_tracker
def test_ticket_list_feed(self):
self.app.get('/bugs/feed.rss')
self.app.get('/bugs/feed.atom')
@td.with_tracker
def test_ticket_feed(self):
self.app.get('/bugs/1/feed.rss')
        r = self.app.get('/bugs/1/feed.atom')
self.app.post('/bugs/1/update_ticket', params=dict(
assigned_to='',
ticket_num='',
labels='',
summary='This is a new ticket',
status='unread',
milestone='',
            description='This is another description'), extra_environ=dict(username='root'))
r = self.app.get('/bugs/1/feed.atom')
assert '=&gt' in r
assert '\n+' in r
| damahou/sagewui | sagewui/blueprints/static_paths.py | Python | gpl-3.0 | 1,622 | 0 |
# (c) 2015 J Miguel Farto, jmfarto@gmail.com
r'''
Additional static paths
'''
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import os
from flask import Blueprint
from flask import current_app as app
from flask.helpers import send_from_directory
from ..config import JMOL_PATH
from ..config import JSMOL_PATH
from ..config import J2S_PATH
from ..config import THREEJS_PATH
static_paths = Blueprint('static_paths', __name__)
@static_paths.route('/css/<path:filename>')
def css(filename):
# send_static file secures filename
return app.send_static_file(os.path.join('sage', 'css', filename))
@static_paths.route('/images/<path:filename>')
@static_paths.route('/favicon.ico', defaults={'filename': 'favicon.ico'})
def images(filename):
# send_static file secures filename
return app.send_static_file(os.path.join('sage', 'images', filename))
@static_paths.route('/javascript/<path:filename>')
@static_paths.route('/java/<path:filename>')
def static_file(filename):
return app.send_static_file(filename)
@static_paths.route('/java/jmol/<path:filename>')
def jmol(filename):
return send_from_directory(JMOL_PATH, filename)
@static_paths.route('/jsmol/<path:filename>')
def jsmol(filename):
return send_from_directory(JSMOL_PATH, filename)
@static_paths.route('/j2s/<path:filename>')
def j2s(filename):
return send_from_directory(J2S_PATH, filename)
@static_paths.route('/threejs/<path:filename>')
def threejs(filename):
return send_from_directory(THREEJS_PATH, filename)
| unioslo/pybofh | bofh/formatting.py | Python | gpl-3.0 | 11,183 | 0 |
# -*- coding: utf-8 -*-
#
# Copyright 2010-2019 University of Oslo, Norway
#
# This file is part of pybofh.
#
# pybofh is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# pybofh is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with pybofh; if not, see <https://www.gnu.org/licenses/>.
"""
This module consists of formatting utils for displaying responses from the
XMLRPC server in a human readable form.
Most notably is the parsing and formatting according to the hints
(format suggestions) given by the server.
Not all commands will have format suggestions. An XMLRPC command will either:
- Not use format suggestions and return a pre-formatted string
- Use format suggestions and return a dictionary or list of dictionaries.
For commands with format suggestions, the formatting class
:py:class:`SuggestionFormatter` is used. For all other commands,
:py:class:`StringFormatter` is used.
Format suggestions
------------------
A format suggestion is a dictionary with the following keys and values:
hdr
An optional header line (string)
str_vars
Either a string, a tuple or a list of tuples.
If str_vars is a string, it will be outputted directly.
If str_vars is a tuple, it should contain two or three items:
1. A format string (e.g. "foo=%s, bar=%s")
2. A list of keys from the bofh response to use for formatting the
string (e.g. ['foo_value', 'bar_value'])
3. An optional sub header
If str_vars is a list of tuples, each tuple should be on the format
mentioned. Each tuple is only formatted and added to the output if the
keys in the tuple exists in the bofh response.
"""
"""
from __future__ import absolute_import, unicode_literals
import abc
import logging
import six
from six.moves import reduce
logger = logging.getLogger(__name__)
class FieldRef(object):
"""
A field reference for format suggestions.
Field references from format suggestions are strings that identify the
expected names and types of fields in result sets.
Each reference is a string that follow one of the following syntaxes:
- <name>
- <name>:<type>:<params>
The only currently supported <type> is "date", which expects a date format
as <params>
"""
def __init__(self, field_name, field_type=None, field_params=None):
self.name = field_name
self.type = field_type or None
self.params = field_params or None
def __repr__(self):
return '<{cls.__name__} {obj.name}>'.format(cls=type(self), obj=self)
@classmethod
def from_str(cls, field_ref):
try:
field_name, field_type, field_params = field_ref.split(":", 2)
except ValueError:
field_name, field_type, field_params = (field_ref, None, None)
return cls(field_name,
field_type=field_type,
field_params=field_params)
def sdf2strftime(sdf_string):
"""
Convert java SimpleDateFormat to strftime.
The bofhd server returns date formatting hints in a
`java.text.SimpleDateFormat` syntax, because reasons.
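    Example: ``sdf2strftime("yyyy-MM-dd HH:mm:ss")`` returns ``"%Y-%m-%d %H:%M:%S"``.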
"""
conversions = (
# (subst, with),
("yyyy", "%Y"),
("MM", "%m"),
("dd", "%d"),
("HH", "%H"),
("mm", "%M"),
("ss", "%S"),
)
return reduce(lambda f, r: f.replace(*r), conversions, sdf_string)
def get_formatted_field(field_ref, data_set):
"""
Format a single field value from a data set
:type field_ref: FieldRef
:param field_ref:
a reference to a field in the data set
:type data_set: dict
:param data_set:
a data set in the result from running a command
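    Example: a value of ``None`` is rendered as a placeholder, so
    ``get_formatted_field(FieldRef.from_str("name"), {"name": None})`` returns
    ``"<not set>"``.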
"""
value = data_set[field_ref.name]
# Convert value according to field_type and field_data
if field_ref.type is None:
pass
elif field_ref.type == 'date':
format_str = str(sdf2strftime(field_ref.params))
value = value.strftime(format_str) if value else value
else:
raise ValueError("invalid field_ref type %r" % (field_ref.type, ))
if value is None:
return "<not set>"
else:
return value
class FormatItem(object):
"""
Formatter for a bofh response data set.
The formatter consists of a format string, field references to map into the
format string, and an optional header.
"""
def __init__(self, format_str, fields=None, header=None):
"""
:param str format_str:
A format string, e.g. ``"foo: %s, bar: %s"``.
:param tuple fields:
FieldRef references to insert into the format string.
:param str header:
An optional header for the format string.
"""
self.format_str = format_str
self.fields = tuple(fields or ())
self.header = header
def __repr__(self):
return '<FormatItem fields=%r>' % (tuple(f.name for f in self.fields),)
def mismatches(self, data_set):
"""
Get a tuple of field references missing in a data_set
:type data_set: dict
        :param data_set: A partial response (item).
:rtype: tuple
:returns:
Returns missing field names (keys) missing in the data_set.
"""
        return tuple(f.name for f in self.fields if f.name not in data_set)
def match(self, data_set):
"""
Check if this FormatItem applies to a given data set.
:type data_set: dict
        :param data_set: A partial response (item).
:rtype: bool
:returns:
True if the data_set contains all required field references in
self.field.
"""
return not bool(self.mismatches(data_set))
    def format(self, data_set):
"""
Format a given data set with this FormatItem.
:type data_set: dict
:rtype: six.text_type
"""
values = tuple(get_formatted_field(f, data_set)
for f in self.fields)
return self.format_str % values
class FormatSuggestion(object):
"""
Format suggestion for a bofh command.
The format suggestion is a collection of :py:class:`FormatItem` formatters
for items (usually dicts) in a bofhd server response.
"""
key_header = "hdr"
key_string_vars = "str_vars"
def __init__(self, items, header=None):
self.items = items
self.header = header
def __len__(self):
return len(self.items)
def __iter__(self):
return iter(self.items)
@staticmethod
def _iter_format_strings(string_vars):
"""Generate FormatItems from a sequence of str_vars."""
if isinstance(string_vars, six.string_types):
# For some reason, we got a single format string rather than a
# sequence of (format, (vars, ...)) tuples.
yield FormatItem(string_vars, None, None)
return
for t in string_vars:
if len(t) == 3:
format_str, field_refs, sub_header = t
# TODO: What's the deal here?
# Looks to be a fix for an issue where a format
# suggestion had swapped sub_header and format_str?!
if "%" in sub_header:
format_str, sub_header = sub_header, None
elif len(t) == 2:
format_str, field_refs = t
sub_header = None
else:
raise ValueError("invalid tuple length (%d)" % (len(t), ))
fields = map(FieldRef.from_str, field_refs or ())
yield FormatItem(format_str, fields=fields, header=sub_header)
@classmethod
def from_dict(cls, suggestion_response):
"""
Create a FormatSuggestion() from
| googleapis/python-dialogflow-cx | google/cloud/dialogflowcx_v3beta1/services/pages/async_client.py | Python | apache-2.0 | 32,025 | 0.001624 |
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import OrderedDict
import functools
import re
from typing import Dict, Optional, Sequence, Tuple, Type, Union
import pkg_resources
from google.api_core.client_options import ClientOptions
from google.api_core import exceptions as core_exceptions
from google.api_core import gapic_v1
from google.api_core import retry as retries
from google.auth import credentials as ga_credentials # type: ignore
from google.oauth2 import service_account # type: ignore
try:
OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault]
except AttributeError: # pragma: NO COVER
OptionalRetry = Union[retries.Retry, object] # type: ignore
from google.cloud.dialogflowcx_v3beta1.services.pages import pagers
from google.cloud.dialogflowcx_v3beta1.types import fulfillment
from google.cloud.dialogflowcx_v3beta1.types import page
from google.cloud.dialogflowcx_v3beta1.types import page as gcdc_page
from google.protobuf import field_mask_pb2 # type: ignore
from .transports.base import PagesTransport, DEFAULT_CLIENT_INFO
from .transports.grpc_asyncio import PagesGrpcAsyncIOTransport
from .client import PagesClient
class PagesAsyncClient:
"""Service for managing
[Pages][google.cloud.dialogflow.cx.v3beta1.Page].
"""
_client: PagesClient
DEFAULT_ENDPOINT = PagesClient.DEFAULT_ENDPOINT
DEFAULT_MTLS_ENDPOINT = PagesClient.DEFAULT_MTLS_ENDPOINT
entity_type_path = staticmethod(PagesClient.entity_type_path)
parse_entity_type_path = staticmethod(PagesClient.parse_entity_type_path)
flow_path = staticmethod(PagesClient.flow_path)
parse_flow_path = staticmethod(PagesClient.parse_flow_path)
intent_path = staticmethod(PagesClient.intent_path)
parse_intent_path = staticmethod(PagesClient.parse_intent_path)
page_path = staticmethod(PagesClient.page_path)
parse_page_path = staticmethod(PagesClient.parse_page_path)
transition_route_group_path = staticmethod(PagesClient.transition_route_group_path)
parse_transition_route_group_path = staticmethod(
PagesClient.parse_transition_route_group_path
)
webhook_path = staticmethod(PagesClient.webhook_path)
parse_webhook_path = staticmethod(PagesClient.parse_webhook_path)
common_billing_account_path = staticmethod(PagesClient.common_billing_account_path)
parse_common_billing_account_path = staticmethod(
PagesClient.parse_common_billing_account_path
)
common_folder_path = staticmethod(PagesClient.common_folder_path)
parse_common_folder_path = staticmethod(PagesClient.parse_common_folder_path)
common_organization_path = staticmethod(PagesClient.common_organization_path)
parse_common_organization_path = staticmethod(
PagesClient.parse_common_organization_path
)
common_project_path = staticmethod(PagesClient.common_project_path)
parse_common_project_path = staticmethod(PagesClient.parse_common_project_path)
common_location_path = staticmethod(PagesClient.common_location_path)
parse_common_location_path = staticmethod(PagesClient.parse_common_location_path)
@classmethod
def from_service_account_info(cls, info: dict, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
info.
Args:
info (dict): The service account private key info.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
PagesAsyncClient: The constructed client.
"""
return PagesClient.from_service_account_info.__func__(PagesAsyncClient, info, *args, **kwargs) # type: ignore
@classmethod
def from_service_account_file(cls, filename: str, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
file.
Args:
filename (str): The path to the service account private key json
file.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
PagesAsyncClient: The constructed client.
"""
return PagesClient.from_service_account_file.__func__(PagesAsyncClient, filename, *args, **kwargs) # type: ignore
from_service_account_json = from_service_account_file
@classmethod
def get_mtls_endpoint_and_cert_source(
cls, client_options: Optional[ClientOptions] = None
):
"""Return the API endpoint and client cert source for mutual TLS.
The client cert source is determined in the following order:
(1) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is not "true", the
client cert source is None.
(2) if `client_options.client_cert_source` is provided, use the provided one; if the
default client cert source exists, use the default one; otherwise the client cert
source is None.
The API endpoint is determined in the following order:
        (1) if `client_options.api_endpoint` is provided, use the provided one.
        (2) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is "always", use the
        default mTLS endpoint; if the environment variable is "never", use the default API
endpoint; otherwise if client cert source exists, use the default mTLS endpoint, otherwise
use the default API endpoint.
More details can be found at https://google.aip.dev/auth/4114.
Args:
client_options (google.api_core.client_options.ClientOptions): Custom options for the
client. Only the `api_endpoint` and `client_cert_source` properties may be used
in this method.
Returns:
Tuple[str, Callable[[], Tuple[bytes, bytes]]]: returns the API endpoint and the
client cert source to use.
Raises:
google.auth.exceptions.MutualTLSChannelError: If any errors happen.
"""
return PagesClient.get_mtls_endpoint_and_cert_source(client_options) # type: ignore
@property
def transport(self) -> PagesTransport:
"""Returns the transport used by the client instance.
Returns:
PagesTransport: The transport used by the client instance.
"""
return self._client.transport
get_transport_class = functools.partial(
        type(PagesClient).get_transport_class, type(PagesClient)
)
def __init__(
self,
*,
credentials: ga_credentials.Credentials = None,
transport: Union[str, PagesTransport] = "grpc_asyncio",
client_options: ClientOptions = None,
        client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
"""Instantiates the pages client.
Args:
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
transport (Union[str, ~.PagesTransport]): The
transport to use. If set to None, a transport is chosen
automatically.
client_options (ClientOptions): Custom options for the client. It
won't take effect if a ``transport`` instance is provided.
(1) The ``api_endpoint`` property can be u
| lumidify/fahrenheit451 | AICharacter.py | Python | gpl-2.0 | 3,797 | 0.003687 |
import os
import sys
import random
import pygame
from Engine import *
from Montag import *
from Character import Character
from pygame.locals import *
class AICharacter(Character):
def __init__(self, screen, **kwargs):
super().__init__(screen, **kwargs)
self.enemy = kwargs.get("enemy", None)
self.movement_state = kwargs.get("movement_state", None)
self.waypoints = kwargs.get("waypoints", None)
self.area = kwargs.get("random_walk_area", None)
self.obstaclemap = kwargs.get("obstaclemap", None)
self.pathfinding_grid = self.obstaclemap.grid
self.dialog = kwargs.get("dialog", None)
self.dialogmanager = kwargs.get("dialogmanager", None)
if self.waypoints:
self.remaining_waypoints = self.waypoints.copy()
self.grid_pos = self.remaining_waypoints[0].copy()
self.walk_to_points = [self.remaining_waypoints.pop(0)]
self.movement_state = "waypoints"
self.state = "walk"
elif self.area:
self.movement_state = "random_walk"
        self.pause_time = kwargs.get("pause_time", 1000)
self.pause_time_passed = 0
def click(self):
if self.dialog:
self.dialogmanager.start_dialog(self.dialog)
def hold_position(self):
self.movement_state = None
def update(self, current_time=None, event=None):
if not current_time:
current_time = pygame.time.get_ticks()
if self.state == "stand":
time_change = current_time - self.current_time
            self.pause_time_passed += time_change
else:
self.pause_time_passed = 0
if not self.dead:
if not self.movement_temporarily_suppressed:
if not self.walk_to_points and self.pause_time_passed >= self.pause_time:
if self.movement_state == "random_walk":
self.walk_to_points = self.pathfinding_grid.find_path(self.grid_pos, [
random.uniform(self.area[0], self.area[0] + self.area[2]),
random.uniform(self.area[1], self.area[1] + self.area[3])])
self.frame = 0
elif self.movement_state == "waypoints":
if len(self.remaining_waypoints) == 0:
self.remaining_waypoints = self.waypoints.copy()
self.walk_to_points = [self.remaining_waypoints.pop(0)]
super().update(current_time, event)
if __name__ == "__main__":
pygame.init()
clock = pygame.time.Clock()
screen_info = pygame.display.Info()
screen_size = [screen_info.current_w, screen_info.current_h]
screen = pygame.display.set_mode(screen_size, RESIZABLE)
chars = []
b = Engine(screen)
b.load_tilemap("TheMap/map.floor", 0)
b.load_obstaclemap("TheMap/map.obstacles", 0)
montag = AICharacter(screen, "graphics/droids/blue_guard/atlas.txt", "graphics/droids/red_guard/config.txt", pathfinding_grid=b.obstacles.grid, pos=[3, 0], movement_state="random_walk", area=[5, 0, 10, 5])
while True:
current_time = pygame.time.get_ticks()
clock.tick(60)
screen.fill((0, 0, 0))
for event in pygame.event.get():
if event.type == QUIT:
pygame.quit()
sys.exit()
elif event.type == VIDEORESIZE:
screen_size = event.dict["size"]
screen = pygame.display.set_mode(screen_size, RESIZABLE)
else:
montag.update(current_time, event)
b.update()
b.draw([0, 0])
#chars.sort(key=lambda x: (x.pos[1], x.pos[0]))
montag.update(current_time)
montag.draw()
pygame.display.update()
| justquick/google-chartwrapper | setup.py | Python | bsd-3-clause | 1,680 | 0.002976 |
from setuptools import setup, find_packages
CLASSIFIERS = (
('Development Status :: 4 - Beta'),
('Environment :: Console'),
('Environment :: Web Environment'),
('Framework :: Django'),
('Intended Audience :: Developers'),
('Intended Audience :: Science/Research'),
    ('Intended Audience :: System Administrators'),
('License :: OSI Approved :: BSD License'),
('Natural Language :: English'),
('Operating System :: OS Independent'),
('Topic :: Artistic Software'),
('Topic :: Internet :: WWW/HTTP'),
('Topic :: Internet :: WWW/HTTP :: Dynamic Content'),
('Topic :: Internet :: WWW/HTTP :: Dynamic Content :: CGI Tools/Libraries'),
('Topic :: Multimedia'),
('Topic :: Multimedia :: Graphics'),
('Topic :: Scientific/Engineering :: Visualization'),
('Topic :: Software Development :: Libraries :: Python Modules'),
('Topic :: Utilities'),
)
DESCRIPTION = """Second generation Python wrapper for the `Google Chart Image API <http://code.google.com/apis/chart/image/>`_.
Chart instances can render the URL of the actual Google chart and quickly insert into webpages on the fly or save images for later use.
Made for dynamic Python websites (Django, Zope, CGI, etc.) that need on the fly, dynamic chart image generation. Works for Python versions 2.3 to 3.2.
"""
setup(
name='google-chartwrapper',
version='1.0.0',
description='Python Google Chart Wrapper',
long_description=DESCRIPTION,
author="Justin Quick",
author_email='justquick@gmail.com',
url='https://github.com/justquick/google-chartwrapper',
classifiers=CLASSIFIERS,
packages=find_packages('.', ('examples',)),
)
| jmcarp/pycrawl | pycrawl/crawl.py | Python | bsd-3-clause | 3,966 | 0.000504 |
import re
import abc
import asyncio
import contextlib
import urllib.parse as urlparse
import aiohttp
import pyquery
from pycrawl.utils import Queue
from pycrawl.http import Request
from pycrawl.http import Response
from pycrawl.middleware import CrawlerMiddlewareManager
class Spider(metaclass=abc.ABCMeta):
def __init__(self, middlewares=None, loop=None, **config):
self.config = config
self._context = {}
self._loop = loop or asyncio.get_event_loop()
self._connector = aiohttp.TCPConnector(loop=self._loop)
self._middlewares = CrawlerMiddlewareManager(self, middlewares)
def enqueue_request(self, **kwargs):
        context = self._context[self._task]
max_depth = self.config.get('max_depth')
if max_depth and context['request'].depth > max_depth:
return
request = Request(referer=context['response'], **kwargs)
if request.url in self._seen:
return
if not self._url_allowed(request):
return
request.depth = context['response'].request.depth + 1
self._queue.put_nowait(request)
def _url_allowed(self, request):
return next(
(
True for domain in self.config['domains']
if request.furl.host.endswith(domain)
),
False,
)
@asyncio.coroutine
def start(self):
self._seen = set()
self._queue = Queue(loop=self._loop)
for url in self.config['urls']:
self._queue.put_nowait(Request(url))
workers = [asyncio.Task(self._work()) for _ in range(self.config['concurrency'])]
yield from self._queue.join()
for worker in workers:
worker.cancel()
@asyncio.coroutine
def _work(self):
while True:
request = yield from self._queue.get()
yield from self._fetch(request)
self._queue.task_done()
@asyncio.coroutine
def _fetch(self, request):
for callback in self._middlewares['before_request']:
request = callback(request)
        resp = yield from aiohttp.request('GET', request.url,
                                          loop=self._loop)
body = yield from resp.read_and_close()
response = Response(request, resp, body)
for callback in self._middlewares['after_response']:
response = callback(response)
        with self._request_context(request, response):
self.parse(response)
@property
def _task(self):
        return asyncio.Task.current_task(loop=self._loop)
@contextlib.contextmanager
def _request_context(self, request, response):
        self._context[self._task] = {'request': request, 'response': response}
try:
yield
finally:
            del self._context[self._task]
@abc.abstractmethod
def parse(self, response):
pass
class Route:
def __init__(self, pattern, callback):
self.pattern = re.compile(pattern)
self.callback = callback
def filter_urls(self, urls):
return (url for url in urls if self.pattern.search(url))
class RouteSpider(Spider):
def __init__(self, middlewares=None, routes=None, **config):
super().__init__(middlewares=middlewares, **config)
self._routes = routes or []
def route(self, pattern):
def wrapper(callback):
            self._routes.append(Route(pattern, callback))
return callback
return wrapper
def parse(self, response):
route = response.request.meta.get('route')
if route:
route.callback(self, response)
parsed = pyquery.PyQuery(response.content)
elms = parsed('a[href]')
hrefs = elms.map(lambda: urlparse.urljoin(response.request.url, pyquery.PyQuery(this).attr('href')))
for route in self._routes:
for url in route.filter_urls(hrefs):
self.enqueue_request(url=url, route=route)
| houssine78/addons | stock_picking_back2draft/models/__init__.py | Python | agpl-3.0 | 161 | 0 |
# -*- coding: utf-8 -*-
# © 2016 Lorenzo Battistini - Agile Business Group
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
from . import stock
| patrickwind/My_Blog | venv/lib/python2.7/site-packages/click/types.py | Python | gpl-2.0 | 15,176 | 0 |
import os
import sys
import stat
from ._compat import open_stream, text_type, filename_to_ui, get_streerror
from .exceptions import BadParameter
from .utils import safecall, LazyFile
class ParamType(object):
"""Helper for converting values through types. The following is
necessary for a valid type:
* it needs a name
* it needs to pass through None unchanged
* it needs to convert from a string
* it needs to convert its result type through unchanged
(eg: needs to be idempotent)
* it needs to be able to deal with param and context being `None`.
This can be the case when the object is used with prompt
inputs.
"""
#: the descriptive name of this type
name = None
#: if a list of this type is expected and the value is pulled from a
#: string environment variable, this is what splits it up. `None`
#: means any whitespace. For all parameters the general rule is that
#: whitespace splits them up. The exception are paths and files which
#: are split by ``os.path.pathsep`` by default (":" on Unix and ";" on
#: Windows).
envvar_list_splitter = None
def __call__(self, value, param=None, ctx=None):
if value is not None:
return self.convert(value, param, ctx)
def get_metavar(self, param):
"""Returns the metavar default for this param if it p
|
rovides one."""
def get_missing_message(self, param):
"""Optionally might return extra information about a missing
parameter.
.. versionadded:: 2.0
"""
def convert(self, value, param, ctx):
"""Converts the value. This is not invoked for values that are
`None` (the missing value).
"""
return value
def split_envvar_value(self, rv):
"""Given a value from an environment variable this splits it up
into small chunks depending on the defined envvar list splitter.
If the splitter is set to `None`, which means that whitespace splits,
then leading and trailing whitespace is ignored. Otherwise, leading
and trailing splitters usually lead to empty items being included.
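        Example: with the default splitter, ``"a  b "`` yields ``['a', 'b']``,
        while with ``':'`` as the splitter, ``"a:b:"`` yields ``['a', 'b', '']``.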
"""
return (rv or '').split(self.envvar_list_splitter)
def fail(self, message, param=None, ctx=None):
"""Helper method to fail with an invalid value message."""
raise BadParameter(message, ctx=ctx, param=param)
class FuncParamType(ParamType):
def __init__(self, func):
self.name = func.__name__
self.func = func
def convert(self, value, param, ctx):
try:
return self.func(value)
except ValueError:
try:
value = text_type(value)
except UnicodeError:
value = str(value).decode('utf-8', 'replace')
self.fail(value, param, ctx)
class StringParamType(ParamType):
name = 'text'
def convert(self, value, param, ctx):
if isinstance(value, bytes):
try:
enc = getattr(sys.stdin, 'encoding', None)
if enc is not None:
value = value.decode(enc)
except UnicodeError:
try:
value = value.decode(sys.getfilesystemencoding())
except UnicodeError:
value = value.decode('utf-8', 'replace')
return value
return value
def __repr__(self):
return 'STRING'
class Choice(ParamType):
"""The choice type allows a value to checked against a fixed set of
supported values. All of these values have to be strings.
See :ref:`choice-opts` for an example.
"""
name = 'choice'
def __init__(self, choices):
self.choices = choices
def get_metavar(self, param):
return '[%s]' % '|'.join(self.choices)
def get_missing_message(self, param):
return 'Choose from %s.' % ', '.join(self.choices)
def convert(self, value, param, ctx):
# Exact match
if value in self.choices:
return value
# Match through normalization
if ctx is not None and \
ctx.token_normalize_func is not None:
value = ctx.token_normalize_func(value)
for choice in self.choices:
if ctx.token_normalize_func(choice) == value:
return choice
self.fail('invalid choice: %s. (choose from %s)' %
(value, ', '.join(self.choices)), param, ctx)
def __repr__(self):
return 'Choice(%r)' % list(self.choices)
class IntParamType(ParamType):
name = 'integer'
def convert(self, value, param, ctx):
try:
return int(value)
except ValueError:
self.fail('%s is not a valid integer' % value, param, ctx)
def __repr__(self):
return 'INT'
class IntRange(IntParamType):
"""A parameter that works similar to :data:`click.INT` but restricts
the value to fit into a range. The default behavior is to fail if the
value falls outside the range, but it can also be silently clamped
between the two edges.
See :ref:`ranges` for an example.
"""
name = 'integer range'
def __init__(self, min=None, max=None, clamp=False):
self.min = min
self.max = max
self.clamp = clamp
def convert(self, value, param, ctx):
rv = IntParamType.convert(self, value, param, ctx)
if self.clamp:
if self.min is not None and rv < self.min:
return self.min
if self.max is not None and rv > self.max:
return self.max
if self.min is not None and rv < self.min or \
self.max is not None and rv > self.max:
if self.min is None:
self.fail('%s is bigger than the maximum valid value '
'%s.' % (rv, self.max), param, ctx)
elif self.max is None:
self.fail('%s is smaller than the minimum valid value '
'%s.' % (rv, self.min), param, ctx)
else:
self.fail('%s is not in the valid range of %s to %s.'
% (rv, self.min, self.max), param, ctx)
return rv
def __repr__(self):
return 'IntRange(%r, %r)' % (self.min, self.max)
class BoolParamType(ParamType):
name = 'boolean'
def convert(self, value, param, ctx):
if isinstance(value, bool):
return bool(value)
value = value.lower()
if value in ('true', '1', 'yes', 'y'):
return True
elif value in ('false', '0', 'no', 'n'):
return False
self.fail('%s is not a valid boolean' % value, param, ctx)
def __repr__(self):
return 'BOOL'
class FloatParamType(ParamType):
name = 'float'
def convert(self, value, param, ctx):
try:
return float(value)
except ValueError:
self.fail('%s is not a valid floating point value' %
value, param, ctx)
def __repr__(self):
return 'FLOAT'
class UUIDParameterType(ParamType):
name = 'uuid'
def convert(self, value, param, ctx):
import uuid
try:
return uuid.UUID(value)
except ValueError:
self.fail('%s is not a valid UUID value' % value, param, ctx)
def __repr__(self):
return 'UUID'
class File(ParamType):
"""Declares a parameter to be a file for reading or writing. The file
is automatically closed once the context tears down (after the command
finished working).
Files can be opened for reading or writing. The special value ``-``
indicates stdin or stdout depending on the mode.
By default, the file is opened for reading text data, but it can also be
opened in binary mode or for writing. The encoding parameter can be used
to force a specific encoding.
The `lazy` flag controls if the file should be opened immediately or
upon first IO. The default is to be non lazy for standard input and
output streams as well as files opened for reading, lazy otherwise.
| SkyTruth/pybossa_tools | drillpadcatimgtaskload.py | Python | agpl-3.0 | 1,281 | 0.003123 |
#! /usr/bin/env python
# -*- coding: utf-8 -*-
import os
import sys
import os.path
import createTasks
import csv
import json
SERVER = "http://crowdcrafting.org"
URL_ROOT = "https://s3-us-west-2.amazonaws.com/drillpadcat/"
def dictreader(rows):
rows = iter(rows)
header = rows.next()
for row in rows:
yield dict(zip(header, row))
if len(sys.argv) != 4:
print """Usage: drillpadcatimgtaskload.py frackfinder 00000000-0000-0000-0000-000000000000 somefile.csv
Replace the zeroes with your access key
The csv should contain at least the following columns:
latitude,longitude,path
Path is the path relative to the root of the drillpadcat s3 bucket.
"""
else:
app, accesskey, csvfile = sys.argv[1:]
with open(csvfile) as f:
for row in dictreader(csv.reader(f)):
row['url'] = URL_ROOT + row.pop("path")
class options:
api_url = SERVER
api_key = accesskey
create_app = False
update_template = False
update_tasks = False
app_root = app
create_task = json.dumps(row)
n_answers = 30
app_name = None
verbose = False
createTasks.CreateTasks(options)
| Abjad/abjad | abjad/_update.py | Python | gpl-3.0 | 15,897 | 0.000944 |
"""
Updates start offsets, stop offsets and indicators everywhere in score.
.. note:: This is probably the most important part of Abjad to optimize. Use the
profiler to figure out how many unnecessary updates are happening. Then reimplement.
As a hint, the update manager implements a weird version of the "observer pattern."
It may make sense to revisit a textbook example of the observer pattern and review
the implementation of the update manager.
"""
from . import duration as _duration
from . import indicators as _indicators
from . import iterate as iterate_
from . import math as _math
from . import obgc as _obgc
from . import parentage as _parentage
from . import score as _score
from . import sequence as _sequence
from . import timespan as _timespan
def _get_after_grace_leaf_offsets(leaf):
container = leaf._parent
main_leaf = container._main_leaf
main_leaf_stop_offset = main_leaf._stop_offset
assert main_leaf_stop_offset is not None
displacement = -leaf._get_duration()
sibling = leaf._sibling(1)
while sibling is not None and sibling._parent is container:
displacement -= sibling._get_duration()
sibling = sibling._sibling(1)
if leaf._parent is not None and leaf._parent._main_leaf is not None:
main_leaf = leaf._parent._main_leaf
sibling = main_leaf._sibling(1)
if (
sibling is not None
and hasattr(sibling, "_before_grace_container")
and sibling._before_grace_container is not None
):
before_grace_container = sibling._before_grace_container
duration = before_grace_container._get_duration()
displacement -= duration
start_offset = _duration.Offset(main_leaf_stop_offset, displacement=displacement)
displacement += leaf._get_duration()
stop_offset = _duration.Offset(main_leaf_stop_offset, displacement=displacement)
return start_offset, stop_offset
def _get_before_grace_leaf_offsets(leaf):
container = leaf._parent
main_leaf = container._main_leaf
main_leaf_start_offset = main_leaf._start_offset
assert main_leaf_start_offset is not None
displacement = -leaf._get_duration()
sibling = leaf._sibling(1)
while sibling is not None and sibling._parent is container:
displacement -= sibling._get_duration()
sibling = sibling._sibling(1)
start_offset = _duration.Offset(main_leaf_start_offset, displacement=displacement)
displacement += leaf._get_duration()
stop_offset = _duration.Offset(main_leaf_start_offset, displacement=displacement)
return start_offset, stop_offset
def _get_measure_start_offsets(component):
wrappers = []
prototype = _indicators.TimeSignature
root = _parentage.Parentage(component).root
for component_ in _iterate_entire_score(root):
wrappers_ = component_._get_indicators(prototype, unwrap=False)
wrappers.extend(wrappers_)
pairs = []
for wrapper in wrappers:
component = wrapper.component
start_offset = component._get_timespan().start_offset
time_signature = wrapper.indicator
pair = start_offset, time_signature
pairs.append(pair)
offset_zero = _duration.Offset(0)
default_time_signature = _indicators.TimeSignature((4, 4))
default_pair = (offset_zero, default_time_signature)
if pairs and not pairs[0] == offset_zero:
pairs.insert(0, default_pair)
elif not pairs:
pairs = [default_pair]
pairs.sort(key=lambda x: x[0])
score_stop_offset = root._get_timespan().stop_offset
dummy_last_pair = (score_stop_offset, None)
pairs.append(dummy_last_pair)
measure_start_offsets = []
at_first_measure = True
for current_pair, next_pair in _sequence.nwise(pairs):
current_start_offset, current_time_signature = current_pair
next_start_offset, next_time_signature = next_pair
measure_start_offset = current_start_offset
while measure_start_offset < next_start_offset:
measure_start_offsets.append(measure_start_offset)
partial = current_time_signature.partial
if at_first_measure and partial is not None:
measure_start_offset += partial
measure_start_offsets.append(measure_start_offset)
at_first_measure = False
measure_start_offset += current_time_signature.duration
return measure_start_offsets
def _get_on_beat_grace_leaf_offsets(leaf):
container = leaf._parent
anchor_leaf = container._get_on_beat_anchor_leaf()
anchor_leaf_start_offset = anchor_leaf._start_offset
assert anchor_leaf_start_offset is not None
anchor_leaf_start_offset = _duration.Offset(anchor_leaf_start_offset.pair)
start_displacement = _duration.Duration(0)
sibling = leaf._sibling(-1)
while sibling is not None and sibling._parent is container:
start_displacement += sibling._get_duration()
sibling = sibling._sibling(-1)
stop_displacement = start_displacement + leaf._get_duration()
if start_displacement == 0:
start_displacement = None
start_offset = _duration.Offset(
anchor_leaf_start_offset.pair, displacement=start_displacement
)
stop_offset = _duration.Offset(
anchor_leaf_start_offset.pair, displacement=stop_displacement
)
return start_offset, stop_offset
def _get_score_tree_state_flags(parentage):
offsets_are_current = True
indicators_are_current = True
offsets_in_seconds_are_current = True
for component in parentage:
if offsets_are_current:
if not component._offsets_are_current:
offsets_are_current = False
if indicators_are_current:
if not component._indicators_are_current:
indicators_are_current = False
if offsets_in_seconds_are_current:
if not component._offsets_in_seconds_are_current:
offsets_in_seconds_are_current = False
return (
offsets_are_current,
indicators_are_current,
offsets_in_seconds_are_current,
)
def _iterate_entire_score(root):
"""
NOTE: RETURNS GRACE NOTES LAST (AND OUT-OF-ORDER).
"""
components = list(iterate_.components(root, grace=False))
graces = iterate_.components(root, grace=True)
components.extend(graces)
return components
def _make_metronome_mark_map(root):
pairs = []
all_stop_offsets = set()
for component in _iterate_entire_score(root):
indicators = component._get_indicators(_indicators.MetronomeMark)
if len(indicators) == 1:
metronome_mark = indicators[0]
if not metronome_mark.is_imprecise:
pair = (component._start_offset, metronome_mark)
pairs.append(pair)
if component._stop_offset is not None:
all_stop_offsets.add(component._stop_offset)
pairs.sort(key=lambda _: _[0])
if not pairs:
return
if pairs[0][0] != 0:
return
score_stop_offset = max(all_stop_offsets)
timespans = _timespan.TimespanList()
clocktime_start_offset = _duration.Offset(0)
for left, right in _sequence.nwise(pairs, wrapped=True):
metronome_mark = left[-1]
start_offset = left[0]
stop_offset = right[0]
# last timespan
if stop_offset == 0:
stop_offset = score_stop_offset
duration = stop_offset - start_offset
        multiplier = _duration.Multiplier(60, metronome_mark.units_per_minute)
clocktime_duration = duration / metronome_mark.reference_duration
clocktime_duration *= multiplier
timespan = _timespan.Timespan(
start_offset=start_offset,
stop_offset=stop_offset,
annotation=(clocktime_start_offset,
clocktime_duration),
)
timespans.append(timespan)
clocktime_start_offset += clocktime_duration
return timespans
# TODO: reimplement with some type of bisection
def _to_measure_number(component, measure_start_offsets):
component_start_offset = component._get_timespan().start_offset
displacement = component
| marcus-nystrom/share-gaze | sync_clocks/test_clock_resolution.py | Python | mit | 1,930 | 0.020207 |
# -*- coding: utf-8 -*-
"""
Created on Wed Sep 09 13:04:53 2015
* If TimerTool.exe is running, kill the process.
* If input parameter is given, start TimerTool and set clock resolution
Starts TimerTool.exe and sets the clock resolution to argv[0] ms
Ex: python set_clock_resolution 0.5
@author: marcus
"""
import time, datetime
from socket import gethostname, gethostbyname
import os
import numpy as np
def main():
my_path = os.path.join('C:',os.sep,'Share','sync_clocks')
os.chdir(my_path)
# Initial timestamps
t1 = time.clock()
t2 = time.time()
t3 = datetime.datetime.now()
td1 = []
td2 = []
td3 = []
for i in xrange(100):
td1.append(time.clock()-t1)
td2.append(time.time() -t2)
        td3.append((datetime.datetime.now()-t3).total_seconds())
time.sleep(0.001)
# Create text file and write header
t = datetime.datetime.now()
ip = gethostbyname(gethostname()).split('.')[-1]
f_name = '_'.join([ip,'test_clock_res',str(t.year),str(t.month),str(t.day),
str(t.hour),str(t.minute),str(t.second)])
f = open(f_name+'.txt','w')
f.write('%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\n' %
('mean_clock','median_clock','sd_clock',
'mean_time','median_time','sd_time',
'mean_datetime','median_datetime','sd_datetime',))
# Write results to text file
f.write('%.2f\t%.2f\t%.2f\t%.2f\t%.2f\t%.2f\t%.2f\t%.2f\t%.2f\n' %
(np.mean(np.diff(td1))*1000, np.median(np.diff(td1))*1000,np.std(np.diff(td1))*1000,
np.mean(np.diff(td2))*1000, np.median(np.diff(td2))*1000,np.std(np.diff(td2))*1000,
np.mean(np.diff(td3))*1000, np.median(np.diff(td3))*1000,np.std(np.diff(td3))*1000))
f.close()
if __name__ == "__main__":
main()
| plotly/python-api | packages/python/plotly/plotly/validators/surface/_surfacecolor.py | Python | mit | 459 | 0.002179 |
import _plotly_utils.basevalidators
class SurfacecolorValidator(_plotly_utils.basevalidators.DataArrayValidator):
def __init__(self, plotly_name="surfacecolor", parent_name="surface", **kwargs):
super(SurfacecolorValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
role=kwargs.pop("role", "da
|
ta"),
**kwargs
)
| rosalindfdt/huzzahbadge | huzzah/register/setup.py | Python | artistic-2.0 | 1,740 | 0.000575 |
"""A setuptools based setup module.
See:
https://packaging.python.org/en/latest/distributing.html
https://github.com/pypa/sampleproject
"""
# Always prefer setuptools over distutils
from setuptools import setup, find_packages
# To use a consistent encoding
from codecs import open
from os import path
here = path.abspath(path.dirname(__file__))
# Get the long description from the README file
with open(path.join(here, 'README.rst'), encoding='utf-8') as f:
    long_description = f.read()
setup(
name='adafruit-circuitpython-register',
use_scm_version=True,
setup_requires=['setuptools_scm'],
description='CircuitPython data descriptor classes to represent hardware registers on I2C and SPI devices.',
long_description=long_description,
# The project's main homepage.
url='https://github.com/adafruit/Adafruit_CircuitPython_Register',
# Author details
author='Adafruit Industries',
author_email='support@adafruit.com',
# Choose your license
license='MIT',
# See https://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'Topic :: Software Development :: Libraries',
'Topic :: System :: Hardware',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
],
# What does your project relate to?
keywords='adafruit register micropython circuitpython',
# You can just specify the packages manually here if your project is
# simple. Or you can use find_packages().
packages=['adafruit_register'],
)
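# --- Added note (not part of the original file): typical usage of this
# setup script, assuming a standard packaging workflow --
#
#     pip install .            # install the package
#     pip install -e .         # editable install for development
#     python setup.py sdist    # build a source distribution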
|
TathagataChakraborti/resource-conflicts
|
PLANROB-2015/seq-sat-lama/Python-2.5.2/Doc/tools/toc2bkm.py
|
Python
|
mit
| 4,520
| 0.000442
|
#! /usr/bin/env python
"""Convert a LaTeX .toc file to some PDFTeX magic to create that neat outline.
The output file has an extension of '.bkm' instead of '.out', since hyperref
already uses that extension.
"""
import getopt
import os
import re
import string
import sys
# Each item in an entry is a tuple of:
#
# Section #, Title String, Page #, List of Sub-entries
#
# The return value of parse_toc() is such a tuple.
cline_re = r"""^
\\contentsline\ \{([a-z]*)} # type of section in $1
\{(?:\\numberline\ \{([0-9.A-Z]+)})? # section number
(.*)} # title string
\{(\d+)}$""" # page number
cline_rx = re.compile(cline_re, re.VERBOSE)
OUTER_TO_INNER = -1
_transition_map = {
('chapter', 'section'): OUTER_TO_INNER,
('section', 'subsection'): OUTER_TO_INNER,
('subsection', 'subsubsection'): OUTER_TO_INNER,
('subsubsection', 'subsection'): 1,
('subsection', 'section'): 1,
('section', 'chapter'): 1,
('subsection', 'chapter'): 2,
('subsubsection', 'section'): 2,
('subsubsection', 'chapter'): 3,
}
INCLUDED_LEVELS = ("chapter", "section", "subsection", "subsubsection")
class BadSectionNesting(Exception):
"""Raised for unsupported section level transitions."""
def __init__(self, level, newsection, path, lineno):
self.level = level
self.newsection = newsection
self.path = path
self.lineno = lineno
def __str__(self):
return ("illegal transition from %s to %s at %s (line %s)"
% (self.level, self.newsection, self.path, self.lineno))
def parse_toc(fp, bigpart=None):
toc = top = []
stack = [toc]
level = bigpart or 'chapter'
lineno = 0
while 1:
line = fp.readline()
if not line:
break
lineno = lineno + 1
m = cline_rx.match(line)
if m:
stype, snum, title, pageno = m.group(1, 2, 3, 4)
title = clean_title(title)
entry = (stype, snum, title, int(pageno), [])
if stype == level:
toc.append(entry)
else:
if stype not in INCLUDED_LEVELS:
# we don't want paragraphs & subparagraphs
continue
try:
direction = _transition_map[(level, stype)]
except KeyError:
raise BadSectionNesting(level, stype, fp.name, lineno)
                if direction == OUTER_TO_INNER:
toc = toc[-1][-1]
stack.insert(0, toc)
toc.append(entry)
else:
                    for i in range(direction):
                        del stack[0]
toc = stack[0]
toc.append(entry)
level = stype
else:
            sys.stderr.write("l.%s: %s" % (lineno, line))
return top
hackscore_rx = re.compile(r"\\hackscore\s*{[^}]*}")
raisebox_rx = re.compile(r"\\raisebox\s*{[^}]*}")
title_rx = re.compile(r"\\([a-zA-Z])+\s+")
title_trans = string.maketrans("", "")
def clean_title(title):
title = raisebox_rx.sub("", title)
title = hackscore_rx.sub(r"\\_", title)
pos = 0
while 1:
m = title_rx.search(title, pos)
if m:
start = m.start()
if title[start:start+15] != "\\textunderscore":
title = title[:start] + title[m.end():]
pos = start + 1
else:
break
title = title.translate(title_trans, "{}")
return title
def write_toc(toc, fp):
for entry in toc:
write_toc_entry(entry, fp, 0)
def write_toc_entry(entry, fp, layer):
stype, snum, title, pageno, toc = entry
s = "\\pdfoutline goto name{page%03d}" % pageno
if toc:
s = "%s count -%d" % (s, len(toc))
if snum:
title = "%s %s" % (snum, title)
s = "%s {%s}\n" % (s, title)
fp.write(s)
for entry in toc:
write_toc_entry(entry, fp, layer + 1)
def process(ifn, ofn, bigpart=None):
toc = parse_toc(open(ifn), bigpart)
write_toc(toc, open(ofn, "w"))
def main():
bigpart = None
opts, args = getopt.getopt(sys.argv[1:], "c:")
if opts:
bigpart = opts[0][1]
if not args:
usage()
sys.exit(2)
for filename in args:
base, ext = os.path.splitext(filename)
ext = ext or ".toc"
process(base + ext, base + ".bkm", bigpart)
if __name__ == "__main__":
main()
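# --- Added example (not part of the original file): what cline_rx matches.
# The \contentsline record below is an invented sample in the format
# parse_toc() expects.
#
#     >>> line = r"\contentsline {section}{\numberline {1.2}Lexical analysis}{7}"
#     >>> cline_rx.match(line).group(1, 2, 3, 4)
#     ('section', '1.2', 'Lexical analysis', '7')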
|
enthought/etsproxy
|
enthought/mayavi/core/api.py
|
Python
|
bsd-3-clause
| 84
| 0
|
# proxy module
from __future__ import absolute_import
from mayavi.core.api import *
|
janeen666/mi-instrument
|
mi/core/test/test_persistent_store.py
|
Python
|
bsd-2-clause
| 11,196
| 0.003483
|
#!/usr/bin/env python
"""
@package mi.core.test.test_persistent_store
@file <git-workspace>/ooi/edex/com.raytheon.uf.ooi.plugin.instrumentagent/utility/edex_static/base/ooi/instruments/mi-instrument/mi/core/test/test_persistent_store.py
@author Johnathon Rusk
@brief Unit tests for PersistentStoreDict module
"""
# Note: Execute via, "nosetests -a UNIT -v mi/core/test/test_persistent_store.py"
__author__ = 'Johnathon Rusk'
__license__ = 'Apache 2.0'
from nose.plugins.attrib import attr
from mi.core.unit_test import MiUnitTest
import sys
from mi.core.persistent_store import PersistentStoreDict
@attr('UNIT', group='mi')
class TestPersistentStoreDict(MiUnitTest):
def setUp(self):
self.UNICODE_KEY = "UNICODE_KEY" # Test 'str' type key
        self.UNICODE_VALUES = [u"this is a unicode string", u"this is another unicode string"]
self.INT_KEY = u"INT_KEY"
self.INT_VALUES = [1234, 5678]
self.LONG_KEY = "LONG_KEY" # Test 'str' type key
self.LONG_VALUES = [sys.maxint + 1, sys.maxint + 2]
self.FLOAT_KEY = u"FLOAT_KEY"
self.FLOAT_VALUES = [56.78, 12.34]
self.BOOL_KEY = "BOOL_KEY" # Test 'str' type key
self.BOOL_VALUES = [True, False]
self.DICT_KEY = u"DICT_KEY"
self.DICT_VALUES = [{u"KEY_1":1, u"KEY_2":2, u"KEY_3":3}, {u"KEY_4":4, u"KEY_5":5, u"KEY_6":6}]
self.LIST_KEY = "LIST_KEY" # Test 'str' type key
self.LIST_VALUES = [[1, 2, 3, 4, 5], [6, 7, 8, 9, 0]]
self.persistentStoreDict = PersistentStoreDict("unit_test", "GI01SUMO-00001")
def tearDown(self):
self.persistentStoreDict.clear() # NOTE: This technically assumes the delete functionality works.
def helper_get(self, key, expectedValue, expectedValueType):
self.assertIn(type(key), [str, unicode])
value = self.persistentStoreDict[key]
self.assertIs(type(value), expectedValueType)
self.assertEqual(value, expectedValue)
def helper_set(self, key, value, valueType, shouldAddKey):
self.assertIn(type(key), [str, unicode])
self.assertIs(type(value), valueType)
self.assertIs(type(shouldAddKey), bool)
initialKeyCount = len(self.persistentStoreDict.keys())
self.persistentStoreDict[key] = value
self.assertEqual(len(self.persistentStoreDict.keys()), (initialKeyCount + 1) if shouldAddKey else initialKeyCount)
def helper_del(self, key):
self.assertIn(type(key), [str, unicode])
initialKeyCount = len(self.persistentStoreDict.keys())
del self.persistentStoreDict[key]
self.assertEqual(len(self.persistentStoreDict.keys()), initialKeyCount - 1)
def test_createRecords_success_unicode(self):
self.helper_set(self.UNICODE_KEY, self.UNICODE_VALUES[0], unicode, True)
def test_createRecords_success_int(self):
self.helper_set(self.INT_KEY, self.INT_VALUES[0], int, True)
def test_createRecords_success_long(self):
self.helper_set(self.LONG_KEY, self.LONG_VALUES[0], long, True)
def test_createRecords_success_float(self):
self.helper_set(self.FLOAT_KEY, self.FLOAT_VALUES[0], float, True)
def test_createRecords_success_bool(self):
self.helper_set(self.BOOL_KEY, self.BOOL_VALUES[0], bool, True)
def test_createRecords_success_dict(self):
self.helper_set(self.DICT_KEY, self.DICT_VALUES[0], dict, True)
def test_createRecords_success_list(self):
self.helper_set(self.LIST_KEY, self.LIST_VALUES[0], list, True)
def test_createRecords_fail_badKeyType(self):
key = 0
value = u"this will fail"
self.assertNotIn(type(key), [str, unicode])
self.assertIn(type(value), [unicode, int, long, float, bool, dict, list])
with self.assertRaises(TypeError) as contextManager:
self.persistentStoreDict[key] = value
self.assertEqual(contextManager.exception.args[0], "Key must be of type 'str' or 'unicode'.")
def test_createRecords_fail_badItemType(self):
key = u"this will fail"
value = 2+3j
self.assertIn(type(key), [str, unicode])
self.assertNotIn(type(value), [unicode, int, long, float, bool, dict, list])
with self.assertRaises(TypeError) as contextManager:
self.persistentStoreDict[key] = value
self.assertEqual(contextManager.exception.args[0], "Value must be of type: 'unicode', 'int', 'long', 'float', 'bool', 'dict', or 'list'")
def test_createRecords_fail_badItemType_nested(self):
key = u"this will fail"
value = {u"KEY_1":[1, 2, 3], u"KEY_2":[1+2j, 3+4j, 5+6j]}
self.assertIn(type(key), [str, unicode])
self.assertIn(type(value), [unicode, int, long, float, bool, dict, list])
self.assertNotIn(type(value[u'KEY_2'][0]), [unicode, int, long, float, bool, dict, list])
with self.assertRaises(TypeError) as contextManager:
self.persistentStoreDict[key] = value
self.assertEqual(contextManager.exception.args[0], "Value must be of type: 'unicode', 'int', 'long', 'float', 'bool', 'dict', or 'list'")
def test_getRecords_success_unicode(self):
self.helper_set(self.UNICODE_KEY, self.UNICODE_VALUES[0], unicode, True)
self.helper_get(self.UNICODE_KEY, self.UNICODE_VALUES[0], unicode)
def test_getRecords_success_int(self):
self.helper_set(self.INT_KEY, self.INT_VALUES[0], int, True)
self.helper_get(self.INT_KEY, self.INT_VALUES[0], int)
def test_getRecords_success_long(self):
self.helper_set(self.LONG_KEY, self.LONG_VALUES[0], long, True)
self.helper_get(self.LONG_KEY, self.LONG_VALUES[0], long)
def test_getRecords_success_float(self):
self.helper_set(self.FLOAT_KEY, self.FLOAT_VALUES[0], float, True)
self.helper_get(self.FLOAT_KEY, self.FLOAT_VALUES[0], float)
def test_getRecords_success_bool(self):
self.helper_set(self.BOOL_KEY, self.BOOL_VALUES[0], bool, True)
self.helper_get(self.BOOL_KEY, self.BOOL_VALUES[0], bool)
def test_getRecords_success_dict(self):
self.helper_set(self.DICT_KEY, self.DICT_VALUES[0], dict, True)
self.helper_get(self.DICT_KEY, self.DICT_VALUES[0], dict)
def test_getRecords_success_list(self):
self.helper_set(self.LIST_KEY, self.LIST_VALUES[0], list, True)
self.helper_get(self.LIST_KEY, self.LIST_VALUES[0], list)
def test_getRecords_fail_badKeyType(self):
key = 0
self.assertNotIn(type(key), [str, unicode])
with self.assertRaises(TypeError) as contextManager:
value = self.persistentStoreDict[key]
self.assertEqual(contextManager.exception.args[0], "Key must be of type 'str' or 'unicode'.")
def test_getRecords_fail_keyNotFound(self):
key = u"this will fail"
self.assertIn(type(key), [str, unicode])
with self.assertRaises(KeyError) as contextManager:
value = self.persistentStoreDict[key]
self.assertEqual(contextManager.exception.args[0], "No item found with key: '{0}'".format(key))
def test_updateRecords_success_unicode(self):
self.helper_set(self.UNICODE_KEY, self.UNICODE_VALUES[0], unicode, True)
self.helper_get(self.UNICODE_KEY, self.UNICODE_VALUES[0], unicode)
self.helper_set(self.UNICODE_KEY, self.UNICODE_VALUES[1], unicode, False)
self.helper_get(self.UNICODE_KEY, self.UNICODE_VALUES[1], unicode)
def test_updateRecords_success_int(self):
self.helper_set(self.INT_KEY, self.INT_VALUES[0], int, True)
self.helper_get(self.INT_KEY, self.INT_VALUES[0], int)
self.helper_set(self.INT_KEY, self.INT_VALUES[1], int, False)
self.helper_get(self.INT_KEY, self.INT_VALUES[1], int)
def test_updateRecords_success_long(self):
self.helper_set(self.LONG_KEY, self.LONG_VALUES[0], long, True)
self.helper_get(self.LONG_KEY, self.LONG_VALUES[0], long)
self.helper_set(self.LONG_KEY, self.LONG_VALUES[1], long, False)
self.helper_get(self.LONG_KEY, self.LONG_VALUES[1], long)
def test_updateRecords_success_float(self):
        self.helper_set(self.FLOAT_KEY, self.FLOAT_VALUES[1], float, False)
        self.helper_get(self.FLOAT_KEY, self.FLOAT_VALUES[1], float)
|
wcmitchell/insights-core
|
insights/parsers/tests/test_hostname.py
|
Python
|
apache-2.0
| 685
| 0
|
from insights.parsers.hostname import Hostname
from insights.tests import context_wrap
HOSTNAME = "rhel7.example.com"
HOSTNAME_SHORT = "rhel7"
def test_hostname():
data = Hostname(context_wrap(HOSTNAME))
assert data.fqdn == "rhel7.example.com"
assert data.hostname == "rhel7"
assert data.domain == "example.com"
assert "{0}".format(data) == "<hostname: rhel7, domain: example.com>"
data = Hostname(context_wrap(HOSTNAME_SHORT))
assert data.fqdn == "rhel7"
assert data.hostname == "rhel7"
assert data.domain == ""
data = Hostname(context_wrap(""))
assert data.fqdn is None
assert data.hostname is None
    assert data.domain is None
|
xdevelsistemas/taiga-back-community
|
tests/integration/resources_permissions/test_application_tokens_resources.py
|
Python
|
agpl-3.0
| 4,691
| 0.000853
|
# -*- coding: utf-8 -*-
# Copyright (C) 2014-2016 Andrey Antukh <niwi@niwi.nz>
# Copyright (C) 2014-2016 Jesús Espino <jespinog@gmail.com>
# Copyright (C) 2014-2016 David Barragán <bameda@dbarragan.com>
# Copyright (C) 2014-2016 Alejandro Alonso <alejandro.alonso@kaleidos.net>
# Copyright (C) 2014-2016 Anler Hernández <hello@anler.me>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from django.core.urlresolvers import reverse
from taiga.base.utils import json
from tests import factories as f
from tests.utils import helper_test_http_method, disconnect_signals, reconnect_signals
from unittest import mock
import pytest
pytestmark = pytest.mark.django_db
def setup_module(module):
disconnect_signals()
def teardown_module(module):
reconnect_signals()
@pytest.fixture
def data():
m = type("Models", (object,), {})
m.registered_user = f.UserFactory.create()
m.token = f.ApplicationTokenFactory(state="random-state")
m.registered_user_with_token = m.token.user
return m
def test_application_tokens_create(client, data):
url = reverse('application-tokens-list')
users = [
None,
data.registered_user,
data.registered_user_with_token
]
data = json.dumps({"application": data.token.application.id})
results = helper_test_http_method(client, "post", url, data, users)
assert results == [405, 405, 405]
def test_applications_retrieve_token(client, data):
    url = reverse('applications-token', kwargs={"pk": data.token.application.id})
users = [
None,
data.registered_user,
data.registered_user_with_token
]
results = helper_test_http_method(client, "get", url, None, users)
assert results == [401, 200, 200]
def test_application_tokens_retrieve(client, data):
    url = reverse('application-tokens-detail', kwargs={"pk": data.token.id})
users = [
None,
data.registered_user,
data.registered_user_with_token
]
results = helper_test_http_method(client, "get", url, None, users)
assert results == [401, 404, 200]
def test_application_tokens_authorize(client, data):
    url = reverse('application-tokens-authorize')
users = [
None,
data.registered_user,
data.registered_user_with_token
]
data = json.dumps({
"application": data.token.application.id,
"state": "random-state-123123",
})
results = helper_test_http_method(client, "post", url, data, users)
assert results == [401, 200, 200]
def test_application_tokens_validate(client, data):
    url = reverse('application-tokens-validate')
users = [
None,
data.registered_user,
data.registered_user_with_token
]
data = json.dumps({
"application": data.token.application.id,
"auth_code": data.token.auth_code,
"state": data.token.state
})
results = helper_test_http_method(client, "post", url, data, users)
assert results == [200, 200, 200]
def test_application_tokens_update(client, data):
url = reverse('application-tokens-detail', kwargs={"pk": data.token.id})
users = [
None,
data.registered_user,
data.registered_user_with_token
]
patch_data = json.dumps({"application": data.token.application.id})
results = helper_test_http_method(client, "patch", url, patch_data, users)
assert results == [405, 405, 405]
def test_application_tokens_delete(client, data):
url = reverse('application-tokens-detail', kwargs={"pk": data.token.id})
users = [
None,
data.registered_user,
data.registered_user_with_token
]
results = helper_test_http_method(client, "delete", url, None, users)
assert results == [401, 403, 204]
def test_application_tokens_list(client, data):
url = reverse('application-tokens-list')
users = [
None,
data.registered_user,
data.registered_user_with_token
]
results = helper_test_http_method(client, "get", url, None, users)
assert results == [401, 200, 200]
|
kafana/ubik
|
lib/ubik/fab/tgz.py
|
Python
|
gpl-3.0
| 1,827
| 0.004926
|
# Copyright 2012 Lee Verberne <lee@blarg.org>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import ConfigParser
import logging
import os, os.path
from fabric.api import local, prompt
NAME = 'tgz'
log = logging.getLogger(NAME)
def _get_config(configfile='package.ini'):
config = ConfigParser.SafeConfigParser()
config.read(configfile)
return config
def untar(version, config, env):
'Downloads a file URI and untars to builddir'
if not version:
version = ''
if not config:
config = _get_config()
destdir = env.builddir
try:
if config.get(NAME, 'destination') == 'root':
destdir = env.rootdir
destdir = os.path.join(destdir, config.get(NAME, 'prefix').lstrip('/'))
except ConfigParser.NoOptionError:
pass
sourceurl = config.get(NAME, 'source', False,
{'version': version.split('-',1)[0],})
log.debug('Using source URL of %s' % sourceurl)
# For now just use system tools for this
if not os.path.exists('src.tgz'):
local("curl -f -o src.tgz " + sourceurl, capture=False)
if not os.path.exists(destdir):
os.makedirs(destdir)
local("tar -C %s -xvf src.tgz" % destdir, capture=False)
|
calfonso/python-citrination-client
|
citrination_client/search/pif/query/chemical/composition_query.py
|
Python
|
apache-2.0
| 4,113
| 0.004619
|
from citrination_client.search.pif.query.chemical.chemical_field_operation import ChemicalFieldOperation
from citrination_client.search.pif.query.core.base_object_query import BaseObjectQuery
from citrination_client.search.pif.query.core.field_operation import FieldOperation
class CompositionQuery(BaseObjectQuery):
"""
Class to query against a PIF Composition object.
"""
def __init__(self, element=None, actual_weight_percent=None, actual_atomic_percent=None,
ideal_weight_percent=None, ideal_atomic_percent=None, logic=None, tags=None,
length=None, offset=None):
"""
Constructor.
:param element: One or more :class:`ChemicalFieldOperation` operations against the element field.
:param actual_weight_percent: One or more :class:`FieldOperation` operations against the actual
weight percent field.
:param actual_atomic_percent: One or more :class:`FieldOperation` operations against the actual
atomic percent field.
:param ideal_weight_percent: One or more :class:`FieldOperation` operations against the ideal
weight percent field.
:param ideal_atomic_percent: One or more :class:`FieldOperation` operations against the ideal
atomic percent field.
:param logic: Logic for this filter. Must be equal to one of "MUST", "MUST_NOT", "SHOULD", or "OPTIONAL".
:param tags: One or more :class:`FieldOperation` operations against the tags field.
:param length: One or more :class:`FieldOperation` operations against the length field.
:param offset: One or more :class:`FieldOperation` operations against the offset field.
"""
super(CompositionQuery, self).__init__(logic=logic, tags=tags, length=length, offset=offset)
self._element = None
self.element = element
self._actual_weight_percent = None
self.actual_weight_percent = actual_weight_percent
self._actual_atomic_percent = None
self.actual_atomic_percent = actual_atomic_percent
self._ideal_weight_percent = None
self.ideal_weight_percent = ideal_weight_percent
self._ideal_atomic_percent = None
self.ideal_atomic_percent = ideal_atomic_percent
@property
def element(self):
return self._element
@element.setter
def element(self, element):
self._element = self._get_object(ChemicalFieldOperation, element)
@element.deleter
def element(self):
self._element = None
@property
def actual_weight_percent(self):
return self._actual_weight_percent
    @actual_weight_percent.setter
def actual_weight_percent(self, actual_weight_percent):
self._actual_weight_percent = self._get_object(FieldOperation, actual_weight_percent)
@actual_weight_percent.deleter
def actual_weight_percent(self):
self._actual_weight_percent = None
@property
def actual_atomic_percent(self):
return self._actual_atomic_percent
@actual_atomic_percent.setter
def actual_atomic_percent(self, actual_atomic_percent):
        self._actual_atomic_percent = self._get_object(FieldOperation, actual_atomic_percent)
@actual_atomic_percent.deleter
def actual_atomic_percent(self):
self._actual_atomic_percent = None
@property
def ideal_weight_percent(self):
return self._ideal_weight_percent
@ideal_weight_percent.setter
def ideal_weight_percent(self, ideal_weight_percent):
self._ideal_weight_percent = self._get_object(FieldOperation, ideal_weight_percent)
@ideal_weight_percent.deleter
def ideal_weight_percent(self):
self._ideal_weight_percent = None
@property
def ideal_atomic_percent(self):
return self._ideal_atomic_percent
@ideal_atomic_percent.setter
def ideal_atomic_percent(self, ideal_atomic_percent):
self._ideal_atomic_percent = self._get_object(FieldOperation, ideal_atomic_percent)
@ideal_atomic_percent.deleter
def ideal_atomic_percent(self):
self._ideal_atomic_percent = None
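# --- Added example (not part of the original file): a minimal, hedged
# construction sketch. Only keywords documented above are used; that the
# element setter accepts a ChemicalFieldOperation instance follows from
# the setter code, and the no-argument constructor is an assumption made
# for illustration.
#
#     query = CompositionQuery(logic="MUST")
#     query.element = ChemicalFieldOperation()
#     del query.element  # the deleter resets the field to None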
|
dilosung/ad-manage
|
config/settings/common.py
|
Python
|
mit
| 13,364
| 0.002469
|
# -*- coding: utf-8 -*-
"""
Django settings for ad-manage project.
For more information on this file, see
https://docs.djangoproject.com/en/dev/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/dev/ref/settings/
"""
from __future__ import absolute_import, unicode_literals
from oscar.defaults import *
from oscar import OSCAR_MAIN_TEMPLATE_DIR, get_core_apps
import os
import sys
import environ
from path import Path as path
ROOT_DIR = environ.Path(__file__) - 3 # (ad_manage/config/settings/common.py - 3 = ad_manage/)
PROJECT_ROOT = path(__file__).abspath().dirname().dirname().dirname() # Root del proyecto en str
APPS_DIR = ROOT_DIR.path('ad_manage')
APPS_DIR_STR = PROJECT_ROOT / 'ad_manage' / 'djangoapps' #Directorio de aplicaciones django
sys.path.append(APPS_DIR_STR) #Definicion de carpeta para encontrar las apps
env = environ.Env()
environ.Env.read_env()
# APP CONFIGURATION
# ------------------------------------------------------------------------------
DJANGO_APPS = (
# Default Django apps:
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.flatpages',
'rest_framework',
'paypal',
'django_extensions',
'django_messages',
# Useful template tags:
# 'django.contrib.humanize',
# Admin
'django.contrib.admin',
'jsonify',
)
THIRD_PARTY_APPS = (
'crispy_forms', # Form layouts
'compressor',
'widget_tweaks',
'social.apps.django_app.default',
'django_countries', # paises para db
'geopy',
'corsheaders',
)
OSCAR_APPS = tuple(get_core_apps(['catalogue', 'search', 'partner','customer']))
# Apps specific for this project go here.
LOCAL_APPS = (
# Your stuff: custom apps go here
'searchapi',
'course',
'document_manager',
'third_party_auth',
)
# See: https://docs.djangoproject.com/en/dev/ref/settings/#installed-apps
INSTALLED_APPS = DJANGO_APPS + THIRD_PARTY_APPS + LOCAL_APPS + OSCAR_APPS
# MIDDLEWARE CONFIGURATION
# ------------------------------------------------------------------------------
MIDDLEWARE_CLASSES = (
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'corsheaders.middleware.CorsMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'oscar.apps.basket.middleware.BasketMiddleware',
'django.contrib.flatpages.middleware.FlatpageFallbackMiddleware',
'social.apps.django_app.middleware.SocialAuthExceptionMiddleware',
)
# MIGRATIONS CONFIGURATION
# ------------------------------------------------------------------------------
# DEBUG
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#debug
DEBUG = env.bool('DJANGO_DEBUG', False)
# FIXTURE CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#std:setting-FIXTURE_DIRS
FIXTURE_DIRS = (
str(APPS_DIR.path('fixtures')),
)
# EMAIL CONFIGURATION
# ------------------------------------------------------------------------------
EMAIL_BACKEND = env('DJANGO_EMAIL_BACKEND', default='django.core.mail.backends.smtp.EmailBackend')
# MANAGER CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#admins
ADMINS = (
("""dilosung""", 'santosa@dilosung.com'),
)
# See: https://docs.djangoproject.com/en/dev/ref/settings/#managers
MANAGERS = ADMINS
# DATABASE CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#databases
DATABASES = {
# Raises ImproperlyConfigured exception if DATABASE_URL not in os.environ
'default': env.db('DATABASE_URL', default='postgres:///admanage'),
}
DATABASES['default']['ATOMIC_REQUESTS'] = True
# GENERAL CONFIGURATION
# ------------------------------------------------------------------------------
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
LANGUAGES = (
('en', ('English')),
('es', ('Spanish')),
)
TIME_ZONE = 'UTC'
# See: https://docs.djangoproject.com/en/dev/ref/settings/#language-code
LANGUAGE_CODE = 'es-mx'
# See: https://docs.djangoproject.com/en/dev/ref/settings/#site-id
SITE_ID = 1
# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-i18n
USE_I18N = True
# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-l10n
USE_L10N = True
# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-tz
USE_TZ = True
# TEMPLATE CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#templates
TEMPLATES = [
{
# See: https://docs.djangoproject.com/en/dev/ref/settings/#std:setting-TEMPLATES-BACKEND
'BACKEND': 'django.template.backends.django.DjangoTemplates',
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-dirs
'DIRS': [
str(APPS_DIR.path('templates')),
OSCAR_MAIN_TEMPLATE_DIR
],
'OPTIONS': {
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-debug
'debug': DEBUG,
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-loaders
# https://docs.djangoproject.com/en/dev/ref/templates/api/#loader-types
'loaders': [
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
],
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-context-processors
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.template.context_processors.i18n',
'django.template.context_processors.media',
'django.template.context_processors.static',
'django.template.context_processors.tz',
'django.contrib.messages.context_processors.messages',
# Your stuff: custom template context processors go here
'oscar.apps.search.context_processors.search_form',
'oscar.apps.promotions.context_processors.promotions',
                'oscar.apps.checkout.context_processors.checkout',
                'oscar.apps.customer.notifications.context_processors.notifications',
'oscar.core.context_processors.metadata',
# django-social-auth
'social.apps.django_app.context_processors.backends',
'social.apps.django_app.context_processors.login_redirect',
],
},
},
]
# See: http://django-crispy-forms.readthedocs.io/en/latest/install.html#template-packs
CRISPY_TEMPLATE_PACK = 'bootstrap3'
# STATIC FILE CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#static-root
STATIC_ROOT = str(ROOT_DIR('staticfiles'))
# See: https://docs.djangoproject.com/en/dev/ref/settings/#static-url
STATIC_URL = '/static/'
# See: https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#std:setting-STATICFILES_DIRS
STATICFILES_DIRS = (
str(APPS_DIR.path('static')),
)
# See: https://docs.djangoproject.com/en/dev/ref/contrib/sta
|
gencer/mwparserfromhell
|
mwparserfromhell/nodes/html_entity.py
|
Python
|
mit
| 6,771
| 0.000148
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2012-2016 Ben Kurtovic <ben.kurtovic@gmail.com>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from __future__ import unicode_literals
from . import Node
from ..compat import htmlentities, py3k, str
__all__ = ["HTMLEntity"]
class HTMLEntity(Node):
"""Represents an HTML entity, like `` ``, either named or unnamed."""
def __init__(self, value, named=None, hexadecimal=False, hex_char="x"):
super(HTMLEntity, self).__init__()
self._value = value
if named is None: # Try to guess whether or not the entity is named
try:
int(value)
self._named = False
self._hexadecimal = False
except ValueError:
try:
int(value, 16)
self._named = False
self._hexadecimal = True
except ValueError:
self._named = True
self._hexadecimal = False
else:
self._named = named
self._hexadecimal = hexadecimal
self._hex_char = hex_char
def __unicode__(self):
if self.named:
return "&{0};".format(self.value)
if self.hexadecimal:
return "&#{0}{1};".format(self.hex_char, self.value)
return "&#{0};".format(self.value)
def __strip__(self, **kwargs):
if kwargs.get("normalize"):
return self.normalize()
return self
if not py3k:
@staticmethod
def _unichr(value):
"""Implement builtin unichr() with support for non-BMP code points.
On wide Python builds, this functions like the normal unichr(). On
narrow builds, this returns the value's encoded surrogate pair.
"""
try:
return unichr(value)
except ValueError:
# Test whether we're on the wide or narrow Python build. Check
# the length of a non-BMP code point
# (U+1F64A, SPEAK-NO-EVIL MONKEY):
if len("\U0001F64A") == 1: # pragma: no cover
raise
# Ensure this is within the range we can encode:
if value > 0x10FFFF:
raise ValueError("unichr() arg not in range(0x110000)")
code = value - 0x10000
if value < 0: # Invalid code point
raise
lead = 0xD800 + (code >> 10)
trail = 0xDC00 + (code % (1 << 10))
return unichr(lead) + unichr(trail)
@property
def value(self):
"""The string value of the HTML entity."""
return self._value
@property
def named(self):
"""Whether the entity is a string name for a codepoint or an integer.
For example, ``Σ``, ``Σ``, and ``Σ`` refer to the same
character, but only the first is "named", while the others are integer
representations of the codepoint.
"""
return self._named
@property
def hexadecimal(self):
"""If unnamed, this is whether the value is hexadecimal or decimal."""
return self._hexadecimal
@property
def hex_char(self):
"""If the value is hexadecimal, this is the letter denoting that.
For example, the hex_char of ``"ሴ"`` is ``"x"``, whereas the
hex_char of ``"ሴ"`` is ``"X"``. Lowercase and uppercase ``x``
are the only values supported.
"""
return self._hex_char
@value.setter
def value(self, newval):
newval = str(newval)
try:
int(newval)
except ValueError:
try:
int(newval, 16)
except ValueError:
if newval not in htmlentities.entitydefs:
raise ValueError("entity value is not a valid name")
self._named = True
self._hexadecimal = False
else:
if int(newval, 16) < 0 or int(newval, 16) > 0x10FFFF:
raise ValueError("entity value is not in range(0x110000)")
self._named = False
self._hexadecimal = True
else:
test = int(newval, 16 if self.hexadecimal else 10)
if test < 0 or test > 0x10FFFF:
raise ValueError("entity value is not in range(0x110000)")
self._named = False
self._value = newval
@named.setter
def named(self, newval):
newval = bool(newval)
if newval and self.value not in htmlentities.entitydefs:
raise ValueError("entity value is not a valid name")
if not newval:
try:
int(self.value, 16)
except ValueError:
err = "current entity value is not a valid Unicode codepoint"
raise ValueError(err)
self._named = newval
@hexadecimal.setter
def hexadecimal(self, newval):
newval = bool(newval)
if newval and self.named:
raise ValueError("a named entity cannot be hexadecimal")
self._hexadecimal = newval
@hex_char.setter
def hex_char(self, newval):
newval = str(newval)
if newval not in ("x", "X"):
raise ValueError(newval)
self._hex_char = newval
def normalize(self):
"""Return the unicode character represented by the HTML entity."""
chrfunc = chr if py3k else HTMLEntity._unichr
if self.named:
return chrfunc(htmlentities.name2codepoint[self.value])
if self.hexadecimal:
return chrfunc(int(self.value, 16))
return chrfunc(int(self.value))
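# --- Added example (not part of the original file): a quick usage sketch
# of HTMLEntity under Python 2, matching the compat shims above; the
# entity values are arbitrary.
#
#     >>> print unicode(HTMLEntity("nbsp"))         # guessed as named
#     &nbsp;
#     >>> print unicode(HTMLEntity("107", named=False))
#     &#107;
#     >>> HTMLEntity("6b", named=False, hexadecimal=True).normalize()
#     u'k'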
|
mhaddy/FeedGoo
|
man_feedgoo.py
|
Python
|
gpl-3.0
| 2,896
| 0.020718
|
#!/usr/bin/python
# encoding: utf-8
# license: MIT
# Raspberry Pi-powered cat feeder
# Ryan Matthews
# mhaddy@gmail.com
#import schedule
import time
import datetime
import logging
import RPi.GPIO as GPIO
from twython import Twython
import configvars as cv
from random import randint
import pygame
import pygame.camera
from pygame.locals import *
import requests
# cronitor
requests.get(
'https://cronitor.link/{}/run'.format(cv.cronitor_hash),
timeout=10
)
logging.basicConfig(filename=cv.log_dir+cv.log_filename,format='%(asctime)s : %(levelname)s : %(message)s',level=logging.INFO)
# we're using randint() here to prevent Twitter deleting tweets it feels are duplicates
# Twython
APP_KEY = cv.APPKEY
APP_SECRET = cv.APPSECRET
OAUTH_TOKEN = cv.ACCESSTOKEN
OAUTH_TOKEN_SECRET = cv.ACCESSTOKENSECRET
twitter = Twython(APP_KEY, APP_SECRET, OAUTH_TOKEN, OAUTH_TOKEN_SECRET)
logging.info('----------------------------')
logging.info('Initiated FeedGoo routine for {}'.format(datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S %Z%z")))
GPIO.setmode(GPIO.BCM)
GPIO.setup(cv.servo_pin, GPIO.OUT)
GPIO.setup(cv.buzz_pin, GPIO.OUT)
GPIO.setup(cv.butt_switch_pin, GPIO.IN)
GPIO.setup(cv.butt_led_pin, GPIO.OUT)
GPIO.output(cv.buzz_pin, False)
GPIO.output(cv.butt_led_pin, False)
# Functions that make the world, err, feeder wheels go 'round
# Rotate feeder wheel clockwise
def servo_cw():
servo = GPIO.PWM(cv.servo_pin, 50)
try:
servo.start(cv.rotate_time_cw)
time.sleep(cv.sleep_time_servo)
servo.stop()
except KeyboardInterrupt:
logging.info("CTRL+C pressed, servo operation stopped")
except:
logging.info("Servo operation interrupted")
finally:
GPIO.cleanup()
# Call the appropriate servo_XXX function
def feed_goo():
#not yet implemented
#GPIO.output(butt_led_pin, False)
for x in range(0,10):
GPIO.output(cv.buzz_pin, True)
time.sleep(cv.sleep_time_buzz)
GPIO.output(cv.buzz_pin, False)
#not yet implemented
#GPIO.output(butt_led_pin, True)
logging.debug("Servo rotate CW start")
servo_cw()
logging.debug("Servo rotate CW finish")
# TODO: Hook this into IFTTT
def manual_feed():
feed_goo()
logging.info("Goo has been manually fed!")
# take a picture 2 seconds after servo stops
pygame.init()
pygame.camera.init()
cam = pygame.camera.Camera('/dev/video0',(640,480))
cam.start()
image = cam.get_image()
pygame.image.save(image,"{}/image.jpg".format(cv.img_path))
photo = open("{}/image.jpg".format(cv.img_path),"rb")
response = twitter.upload_media(media=photo)
twitter.update_status(status="Goo has been fed! /{}".format(randint(0,10000)), media_ids=[response['media_id']])
# call the feeding routine
# scheduled via cron (schedule was too unreliable)
manual_feed()
# cronitor
requests.get(
'https://cronitor.link/{}/complete'.format(cv.cronitor_hash),
timeout=10
)
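# --- Added note (not part of the original file): per the comment above,
# feeding is triggered from cron rather than the `schedule` module. A
# hedged crontab sketch -- times and paths are invented:
#
#     0 7,19 * * * /usr/bin/python /home/pi/feedgoo/man_feedgoo.py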
|
rwl/muntjac
|
muntjac/test/server/component/test_tab_sheet.py
|
Python
|
apache-2.0
| 3,921
| 0.001275
|
# @MUNTJAC_COPYRIGHT@
# @MUNTJAC_LICENSE@
from unittest import TestCase
from muntjac.ui.label import Label
from muntjac.ui.tab_sheet import TabSheet
class TestTabSheet(TestCase):
def testAddExistingComponent(self):
c = Label('abc')
tabSheet = TabSheet()
tabSheet.addComponent(c)
tabSheet.addComponent(c)
itr = tabSheet.getComponentIterator()
self.assertEquals(c, itr.next())
self.assertRaises(StopIteration, itr.next)
self.assertNotEquals(tabSheet.getTab(c), None)
def testGetComponentFromTab(self):
c = Label('abc')
tabSheet = TabSheet()
tab = tabSheet.addTab(c)
self.assertEquals(c, tab.getComponent())
def testAddTabWithComponentOnly(self):
tabSheet = TabSheet()
tab1 = tabSheet.addTab(Label('aaa'))
tab2 = tabSheet.addTab(Label('bbb'))
tab3 = tabSheet.addTab(Label('ccc'))
# Check right order of tabs
self.assertEquals(0, tabSheet.getTabPosition(tab1))
self.assertEquals(1, tabSheet.getTabPosition(tab2))
self.assertEquals(2, tabSheet.getTabPosition(tab3))
# Calling addTab with existing component does not move tab
tabSheet.addTab(tab1.getComponent())
# Check right order of tabs
self.assertEquals(0, tabSheet.getTabPosition(tab1))
self.assertEquals(1, tabSheet.getTabPosition(tab2))
self.assertEquals(2, tabSheet.getTabPosition(tab3))
def testAddTabWithComponentAndIndex(self):
tabSheet = TabSheet()
tab1 = tabSheet.addTab(Label('aaa'))
tab2 = tabSheet.addTab(Label('bbb'))
tab3 = tabSheet.addTab(Label('ccc'))
tab4 = tabSheet.addTab(Label('ddd'), 1)
tab5 = tabSheet.addTab(Label('eee'), 3)
self.assertEquals(0, tabSheet.getTabPosition(tab1))
self.assertEquals(1, tabSheet.getTabPosition(tab4))
self.assertEquals(2, tabSheet.getTabPosition(tab2))
self.assertEquals(3, tabSheet.getTabPosition(tab5))
self.assertEquals(4, tabSheet.getTabPosition(tab3))
# Calling addTab with existing component does not move tab
tabSheet.addTab(tab1.getComponent(), 3)
self.assertEquals(0, tabSheet.getTabPosition(tab1))
self.assertEquals(1, tabSheet.getTabPosition(tab4))
self.assertEquals(2, tabSheet.getTabPosition(tab2))
self.assertEquals(3, tabSheet.getTabPosition(tab5))
self.assertEquals(4, tabSheet.getTabPosition(tab3))
def testAddTabWithAllParameters(self):
tabSheet = TabSheet()
tab1 = tabSheet.addTab(Label('aaa'))
tab2 = tabSheet.addTab(Label('bbb'))
tab3 = tabSheet.addTab(Label('ccc'))
tab4 = tabSheet.addTab(Label('ddd'), 'ddd', None, 1)
tab5 = tabSheet.addTab(Label('eee'), 'eee', None, 3)
self.assertEquals(0, tabSheet.getTabPosition(tab1))
self.assertEquals(1, tabSheet.getTabPosition(tab4))
self.assertEquals(2, tabSheet.getTabPosition(tab2))
self.assertEquals(3, tabSheet.getTabPosition(tab5))
self.assertEquals(4, tabSheet.getTabPosition(tab3))
# Calling addTab with existing component does not move tab
tabSheet.addTab(tab1.getComponent(), 'xxx', None, 3)
self.assertEquals(0, tabSheet.getTabPosition(tab1))
self.assertEquals(1, tabSheet.getTabPosition(tab4))
self.assertEquals(2, tabSheet.getTabPosition(tab2))
self.assertEquals(3, tabSheet.getTabPosition(tab5))
self.assertEquals(4, tabSheet.getTabPosition(tab3))
def testGetTabByPosition(self):
tabSheet = TabSheet()
tab1 = tabSheet.addTab(Label('aaa'))
tab2 = tabSheet.addTab(Label('bbb'))
tab3 = tabSheet.addTab(Label('ccc'))
self.assertEquals(tab1, tabSheet.getTab(0))
self.assertEquals(tab2, tabSheet.getTab(1))
self.assertEquals(tab3, tabSheet.getTab(2))
|
twotymz/lucy
|
hue/lights.py
|
Python
|
mit
| 705
| 0.025532
|
import logging
import requests
HUE_IP = '192.168.86.32'
HUE_USERNAME = '7KcxItfntdF0DuWV9t0GPMeToEBlvHTgqWNZqxu6'
logger = logging.getLogger('hue')
def getLights():
url = 'http://{0}/api/{1}/lights'.format(HUE_IP, HUE_USERNAME)
try:
r = requests.get(url)
    except requests.RequestException:
logger.error('Failed getting status for all lights')
return
    if r.status_code == 200:
data = r.json()
return data
def getStatus(id):
url = 'http://{0}/api/{1}/lights/{2}'.format(HUE_IP, HUE_USERNAME, id)
try:
r = requests.get(url)
    except requests.RequestException:
        logger.error('Failed getting status for light {0}'.format(id))
return
if r.status_code == 200:
data = r.json()
return data
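# --- Added example (not part of the original file): a minimal, hedged
# usage sketch of the two helpers above. Light id 1 is arbitrary, and the
# ['state']['on'] path assumes the usual Hue bridge JSON shape.
if __name__ == '__main__':
    logging.basicConfig(level=logging.INFO)
    lights = getLights()
    if lights:
        print('bridge reports {0} lights'.format(len(lights)))
    status = getStatus(1)
    if status:
        print('light 1 is {0}'.format('on' if status['state']['on'] else 'off'))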
|
jamslevy/gsoc
|
app/soc/logic/models/timeline.py
|
Python
|
apache-2.0
| 1,237
| 0.003234
|
#!/usr/bin/python2.5
#
# Copyright 2009 the Melange authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Timeline (Model) query functions.
"""
__authors__ = [
'"Sverre Rabbelier" <sverre@rabbelier.nl>',
]
from soc.logic.models import base
from soc.logic.models import sponsor as sponsor_logic
import soc.models.timeline
class Logic(base.Logic):
"""Logic methods for the Timeline model.
"""
def __init__(self, model=soc.models.timeline.Timeline,
base_model=None, scope_logic=sponsor_logic):
"""Defines the name, key_name and model for this entity.
"""
super(Logic, self).__init__(model=model, base_model=base_model,
                                scope_logic=scope_logic)
logic = Logic()
|
tkaitchuck/nupic
|
external/linux64/lib/python2.6/site-packages/matplotlib/backend_bases.py
|
Python
|
gpl-3.0
| 69,740
| 0.003656
|
"""
Abstract base classes define the primitives that renderers and
graphics contexts must implement to serve as a matplotlib backend
:class:`RendererBase`
An abstract base class to handle drawing/rendering operations.
:class:`FigureCanvasBase`
The abstraction layer that separates the
:class:`matplotlib.figure.Figure` from the backend specific
details like a user interface drawing area
:class:`GraphicsContextBase`
An abstract base class that provides color, line styles, etc...
:class:`Event`
The base class for all of the matplotlib event
    handling. Derived classes such as :class:`KeyEvent` and
:class:`MouseEvent` store the meta data like keys and buttons
pressed, x and y locations in pixel and
:class:`~matplotlib.axes.Axes` coordinates.
"""
from __future__ import division
import os, warnings, time
import numpy as np
import matplotlib.cbook as cbook
import matplotlib.colors as colors
import matplotlib.transforms as transforms
import matplotlib.widgets as widgets
from matplotlib import rcParams
class RendererBase:
"""An abstract base class to handle drawing/rendering operations.
The following methods *must* be implemented in the backend:
* :meth:`draw_path`
* :meth:`draw_image`
* :meth:`draw_text`
* :meth:`get_text_width_height_descent`
The following methods *should* be implemented in the backend for
optimization reasons:
* :meth:`draw_markers`
* :meth:`draw_path_collection`
* :meth:`draw_quad_mesh`
"""
def __init__(self):
self._texmanager = None
def open_group(self, s):
"""
Open a grouping element with label *s*. Is only currently used by
:mod:`~matplotlib.backends.backend_svg`
"""
pass
def close_group(self, s):
"""
Close a grouping element with label *s*
Is only currently used by :mod:`~matplotlib.backends.backend_svg`
"""
pass
def draw_path(self, gc, path, transform, rgbFace=None):
"""
Draws a :class:`~matplotlib.path.Path` instance using the
given affine transform.
"""
raise NotImplementedError
def draw_markers(self, gc, marker_path, marker_trans, path, trans, rgbFace=None):
"""
Draws a marker at each of the vertices in path. This includes
all vertices, including control points on curves. To avoid
that behavior, those vertices should be removed before calling
this function.
*gc*
the :class:`GraphicsContextBase` instance
*marker_trans*
is an affine transform applied to the marker.
*trans*
is an affine transform applied to the path.
This provides a fallback implementation of draw_markers that
makes multiple calls to :meth:`draw_path`. Some backends may
want to override this method in order to draw the marker only
once and reuse it multiple times.
"""
tpath = trans.transform_path(path)
for vertices, codes in tpath.iter_segments():
if len(vertices):
x,y = vertices[-2:]
self.draw_path(gc, marker_path,
                               marker_trans + transforms.Affine2D().translate(x, y),
rgbFace)
def draw_path_collection(self, master_transform, cliprect, clippath,
clippath_trans, paths, all_transforms, offsets,
offsetTrans, facecolors, edgecolors, linewidths,
linestyles, antialiaseds, urls):
"""
        Draws a collection of paths, selecting drawing properties from
the lists *facecolors*, *edgecolors*, *linewidths*,
*linestyles* and *antialiaseds*. *offsets* is a list of
offsets to apply to each of the paths. The offsets in
*offsets* are first transformed by *offsetTrans* before
being applied.
This provides a fallback implementation of
:meth:`draw_path_collection` that makes multiple calls to
draw_path. Some backends may want to override this in order
to render each set of path data only once, and then reference
that path multiple times with the different offsets, colors,
styles etc. The generator methods
:meth:`_iter_collection_raw_paths` and
:meth:`_iter_collection` are provided to help with (and
standardize) the implementation across backends. It is highly
recommended to use those generators, so that changes to the
behavior of :meth:`draw_path_collection` can be made globally.
"""
path_ids = []
for path, transform in self._iter_collection_raw_paths(
master_transform, paths, all_transforms):
path_ids.append((path, transform))
for xo, yo, path_id, gc, rgbFace in self._iter_collection(
path_ids, cliprect, clippath, clippath_trans,
offsets, offsetTrans, facecolors, edgecolors,
linewidths, linestyles, antialiaseds, urls):
path, transform = path_id
transform = transforms.Affine2D(transform.get_matrix()).translate(xo, yo)
self.draw_path(gc, path, transform, rgbFace)
def draw_quad_mesh(self, master_transform, cliprect, clippath,
clippath_trans, meshWidth, meshHeight, coordinates,
offsets, offsetTrans, facecolors, antialiased,
showedges):
"""
This provides a fallback implementation of
:meth:`draw_quad_mesh` that generates paths and then calls
:meth:`draw_path_collection`.
"""
from matplotlib.collections import QuadMesh
paths = QuadMesh.convert_mesh_to_paths(
meshWidth, meshHeight, coordinates)
if showedges:
edgecolors = np.array([[0.0, 0.0, 0.0, 1.0]], np.float_)
linewidths = np.array([1.0], np.float_)
else:
edgecolors = facecolors
linewidths = np.array([0.0], np.float_)
return self.draw_path_collection(
master_transform, cliprect, clippath, clippath_trans,
paths, [], offsets, offsetTrans, facecolors, edgecolors,
linewidths, [], [antialiased], [None])
def _iter_collection_raw_paths(self, master_transform, paths, all_transforms):
"""
This is a helper method (along with :meth:`_iter_collection`) to make
        it easier to write a space-efficient :meth:`draw_path_collection`
implementation in a backend.
This method yields all of the base path/transform
combinations, given a master transform, a list of paths and
list of transforms.
The arguments should be exactly what is passed in to
:meth:`draw_path_collection`.
The backend should take each yielded path and transform and
create an object that can be referenced (reused) later.
"""
Npaths = len(paths)
Ntransforms = len(all_transforms)
N = max(Npaths, Ntransforms)
if Npaths == 0:
return
transform = transforms.IdentityTransform()
for i in xrange(N):
path = paths[i % Npaths]
if Ntransforms:
transform = all_transforms[i % Ntransforms]
yield path, transform + master_transform
def _iter_collection(self, path_ids, cliprect, clippath, clippath_trans,
offsets, offsetTrans, facecolors, edgecolors,
linewidths, linestyles, antialiaseds, urls):
"""
This is a helper method (along with
:meth:`_iter_collection_raw_paths`) to make it easier to write
        a space-efficient :meth:`draw_path_collection` implementation in a
backend.
This method yields all of the path, offset and graphics
context combinations to draw the path collection. The caller
should already have looped over the results of
:meth:`_iter_collection_raw_paths` to draw this collection.
The arguments should be the
|
ShaolongHu/Nitrate
|
tcms/testplans/forms.py
|
Python
|
gpl-2.0
| 20,537
| 0
|
# -*- coding: utf-8 -*-
from django import forms
from django.conf import settings
from django.contrib.auth.models import User
from odf.odf2xhtml import ODF2XHTML, load
from tcms.core.contrib.xml2dict.xml2dict import XML2Dict
from tcms.core.forms.fields import UserField, StripURLField
from tinymce.widgets import TinyMCE
from tcms.management.models import Component, Product, Version, TCMSEnvGroup, \
Priority, TestTag
from tcms.testcases.models import TestCaseStatus
from models import TestPlan, TestPlanType
# ===========Plan Fields==============
class PlanFileField(forms.FileField):
default_error_messages = {
        'invalid_file_type': 'The file you uploaded is not a correct '
                             'HTML/plain text/ODT file.',
        'unexcept_odf_error': 'Unable to analyse the file, or the file you '
                              'uploaded is not an Open Document.',
}
def clean(self, data, initial=None):
f = super(PlanFileField, self).clean(data, initial)
if f is None:
return None
elif not data and initial:
return initial
        # Determine the file type; raise an error if the file type is not correct
if not (data.content_type == 'text/html'
or data.content_type == 'text/plain'
or data.content_type == 'application/octet-stream'
or data.content_type ==
'application/vnd.oasis.opendocument.text'):
raise forms.ValidationError(
self.error_messages['invalid_file_type'])
# Process the ODF file
if data.content_type == 'application/octet-stream' \
or data.content_type == \
'application/vnd.oasis.opendocument.text':
generatecss = True
embedable = True
odhandler = ODF2XHTML(generatecss, embedable)
try:
doc = load(data)
plan_text = odhandler.odf2xhtml(doc)
except Exception:
raise forms.ValidationError(
self.error_messages['unexcept_odf_error'])
return plan_text
# We need to get a file object. We might have a path or we might
# have to read the data into memory.
if hasattr(data, 'temporary_file_path'):
plan_text = data.temporary_file_path()
elif hasattr(data, 'read'):
plan_text = data.read()
else:
plan_text = data['content']
return plan_text
class CasePlanXMLField(forms.FileField):
"""
Custom field for the XML file.
    Uses xml2dict to analyse the uploaded file.
    Based on Django's built-in ImageField.
"""
default_error_messages = {
'invalid_file': 'The file you uploaded is not a correct XML file.',
        'interpret_error': 'The file you uploaded could not be interpreted.',
        'root_element_is_needed': 'A root element named testopia is needed, '
'please use the xml exported by TCMS or '
'testopia.',
'test_case_element_is_needed': 'At least one test case is required '
                                       'in the XML file, please export the '
'plan with cases.',
'xml_version_is_incorrect': 'XML version is incorrect, please use '
'the xml exported by TCMS or testopia 3.',
        'element_could_not_found': 'The element \'%s\' value \'%s\' could '
                                   'not be found in the database.',
'element_is_required': 'The element \'%s\' is required in XML.'
}
xml_data = ''
def process_case(self, case):
# Check author
element = 'author'
if case.get(element, {}).get('value'):
try:
author = User.objects.get(email=case[element]['value'])
author_id = author.id
except User.DoesNotExist:
raise forms.ValidationError(
self.error_messages['element_could_not_found'] % (
element, case[element]['value']))
else:
raise forms.ValidationError(
self.error_messages['element_is_required'] % element)
# Check default tester
element = 'defaulttester'
if case.get(element, {}).get('value'):
try:
default_tester = User.objects.get(email=case[element]['value'])
default_tester_id = default_tester.id
except User.DoesNotExist:
raise forms.ValidationError(
self.error_messages['element_could_not_found'] % (
element, case[element]['value']))
else:
default_tester_id = None
# Check priority
element = 'priority'
if case.get(element, {}).get('value'):
try:
priority = Priority.objects.get(value=case[element]['value'])
priority_id = priority.id
except Priority.DoesNotExist:
raise forms.ValidationError(
self.error_messages['element_could_not_found'] % (
element, case[element]['value']))
else:
raise forms.ValidationError(
self.error_messages['element_is_required'] % element)
# Check automated status
element = 'automated'
if case.get(element, {}).get('value'):
            is_automated = case[element]['value'] == 'Automatic'
else:
is_automated = False
# Check status
element = 'status'
if case.get(element, {}).get('value'):
try:
case_status = TestCaseStatus.objects.get(
name=case[element]['value'])
case_status_id = case_status.id
except TestCaseStatus.DoesNotExist:
raise forms.ValidationError(
self.error_messages['element_could_not_found'] % (
element, case[element]['value']))
else:
raise forms.ValidationError(
self.error_messages['element_is_required'] % element)
# Check category
# *** Ugly code here ***
# There is a bug in the XML file, the category is related to product.
        # But unfortunately the product is not defined in the XML file.
        # So we have to define the category_name at the moment, then get the
        # product from the plan.
        # If we cannot find the category for the product, we will create one.
element = 'categoryname'
if case.get(element, {}).get('value'):
category_name = case[element]['value']
else:
raise forms.ValidationError(
self.error_messages['element_is_required'] % element)
# Check or create the tag
element = 'tag'
if case.get(element, {}):
tags = []
if isinstance(case[element], dict):
tag, create = TestTag.objects.get_or_create(
name=case[element]['value'])
tags.append(tag)
if isinstance(case[element], list):
for tag_name in case[element]:
tag, create = TestTag.objects.get_or_create(
name=tag_name['value'])
tags.append(tag)
else:
tags = None
new_case = {
'summary': case.get('summary', {}).get('value', ''),
'author_id': author_id,
'author': author,
'default_tester_id': default_tester_id,
'priority_id': priority_id,
'is_automated': is_automated,
'case_status_id': case_status_id,
'category_name': category_name,
'notes': case.get('notes', {}).get('value', ''),
'action': case.get('action', {}).get('value', ''),
'effect': case.get('expectedresults', {}).get('value', ''),
'setup': case.get('setup', {}).get('value', ''),
'breakdown': case.get('breakdown'
|
tv42/downburst
|
downburst/image.py
|
Python
|
mit
| 5,246
| 0.000191
|
import logging
import requests
import tarfile
from lxml import etree
from . import discover
from . import template
log = logging.getLogger(__name__)
URLPREFIX = 'https://cloud-images.ubuntu.com/precise/current/'
PREFIXES = dict(
server='{release}-server-cloudimg-amd64.',
desktop='{release}-desktop-cloudimg-amd64.',
)
SUFFIX = '.img'
def list_cloud_images(pool, release, flavor):
"""
    List all Ubuntu 12.04 Cloud images in the libvirt pool.
Return the keys.
"""
PREFIX = PREFIXES[flavor].format(release=release)
for name in pool.listVolumes():
log.debug('Considering image: %s', name)
        if not name.startswith(PREFIX):
continue
if not name.endswith(SUFFIX):
continue
if len(name) <= len(PREFIX) + len(SUFFIX):
# no serial number in the middle
continue
# found one!
log.debug('Saw image: %s', name)
yield name
def find_cloud_image(pool, release, flavor):
"""
Find an Ubuntu 12.04 Cloud image in the libvirt pool.
Return the name.
"""
names = list_cloud_images(pool, release=release, flavor=flavor)
# converting into a list because max([]) raises ValueError, and we
# really don't want to confuse that with exceptions from inside
# the generator
names = list(names)
if not names:
log.debug('No cloud images found.')
return None
# the build serial is zero-padded, hence alphabetically sortable;
# max is the latest image
return max(names)
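# Editor's note: a minimal sketch (file names assumed, not part of the
# module) of why max() yields the newest image -- zero-padded build serials
# sort lexically:
#   names = ['precise-server-cloudimg-amd64.20130101.img',
#            'precise-server-cloudimg-amd64.20130215.img']
#   max(names)  # -> the 20130215 entry, i.e. the latest build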
def upload_volume(vol, fp):
"""
Upload a volume into a libvirt pool.
"""
stream = vol.connect().newStream(flags=0)
vol.upload(stream=stream, offset=0, length=0, flags=0)
def handler(stream, nbytes, _):
data = fp.read(nbytes)
return data
stream.sendAll(handler, None)
stream.finish()
def make_volume(
pool,
fp,
release,
flavor,
serial,
suffix,
):
# volumes have no atomic completion marker; this will forever be
# racy!
name = '{prefix}{serial}{suffix}'.format(
prefix=PREFIXES[flavor].format(release=release),
serial=serial,
suffix=suffix,
)
log.debug('Creating libvirt volume %s ...', name)
volxml = template.volume(
name=name,
# TODO we really should feed in a capacity, but we don't know
# what it should be.. libvirt pool refresh figures it out, but
# that's probably expensive
# capacity=2*1024*1024,
)
# TODO this fails if the image exists already, which means
# there's no clean way to continue after errors, currently
vol = pool.createXML(etree.tostring(volxml), flags=0)
upload_volume(
vol=vol,
fp=fp,
)
return vol
def ensure_cloud_image(conn, release, flavor):
"""
Ensure that the Ubuntu 12.04 Cloud image is in the libvirt pool.
Returns the volume.
"""
log.debug('Opening libvirt pool...')
pool = conn.storagePoolLookupByName('default')
log.debug('Listing cloud image in libvirt...')
name = find_cloud_image(pool=pool, release=release, flavor=flavor)
if name is not None:
# all done
log.debug('Already have cloud image: %s', name)
vol = pool.storageVolLookupByName(name)
return vol
log.debug('Discovering cloud images...')
image = discover.get(release=release, flavor=flavor)
log.debug('Will fetch serial number: %s', image['serial'])
url = image['url']
log.info('Downloading image: %s', url)
r = requests.get(url, stream=True)
t = tarfile.open(fileobj=r.raw, mode='r|*', bufsize=1024*1024)
# reference to the main volume of this vm template
vol = None
for ti in t:
if not ti.isfile():
continue
if ti.name.startswith("README"):
continue
if ti.name.endswith("-root.tar.gz"):
continue
if ti.name.endswith("-loader"):
continue
if "-vmlinuz-" in ti.name:
continue
if "-initrd-" in ti.name:
continue
if ti.name.endswith("-root.tar.gz"):
continue
f = t.extractfile(ti)
if ti.name.endswith("-disk1.img"):
vol = make_volume(
pool=pool,
fp=f,
release=release,
flavor=flavor,
serial=image['serial'],
suffix="-disk1.img",
)
elif ti.name.endswith(".img"):
vol = make_volume(
pool=pool,
fp=f,
release=release,
flavor=flavor,
serial=image['serial'],
suffix=".img",
)
elif ti.name.endswith("-floppy"):
make_volume(
pool=pool,
fp=f,
release=release,
flavor=flavor,
serial=image['serial'],
suffix="-floppy.img",
)
else:
log.warn("Unknown file in cloud-image tarball: %s", ti.name)
continue
# TODO only here to autodetect capacity
pool.refresh(flags=0)
return vol
|
yast/yast-python-bindings
|
examples/Table-sorting.py
|
Python
|
gpl-2.0
| 1,203
| 0.004988
|
# encoding: utf-8
from yast import import_module
import_module('UI')
from yast import *
class TableSortingClient:
def main(self):
UI.OpenDialog(
VBox(
Label("Library"),
MinSize(
30,
10,
Table(
Header("Book Title", "Shelf"),
[
Item(Id(1), "3 Trees", " -6"),
Item(Id(2), "missing", None),
Item(Id(3), "just another book", " 8a"),
Item(Id(4), "Here comes Fred", 12),
Item(Id(5), "Zoo", 25),
Item(Id(6), "
|
Lions", "balbla"),
Item(Id(7), "Elephants ", "8b"),
Item(Id(8), "wild animals", "a7"),
Item(Id(9), "Weather forecast", "15yxc"),
Item(Id(10), "my first Book", 1),
Item(Id(11), "this is yours", 95),
Item(Id(12), "Terra X", " 34 sdf"),
Item(Id(13), "Programming", "dfsdf34"),
Item(Id(14), "More programming", 0)
]
)
),
PushButton("&OK")
)
)
UI.UserInput()
UI.CloseDialog()
TableSortingClient().main()
|
Microvellum/Fluid-Designer
|
win64-vc/2.78/python/lib/lib2to3/fixes/fix_unicode.py
|
Python
|
gpl-3.0
| 1,256
| 0.002389
|
r"""Fixer for unicode.
* Changes unicode to str and unichr to chr.
* If "...\u..." is not unicode literal change it into "...\\u...".
* Change u"..." into "...".
"""
from ..pgen2 import token
from .. import fixer_base
_mapping = {"unichr" : "chr", "unicode" : "str"}
class FixUnicode(fixer_base.BaseFix):
BM_compatible = True
PATTERN = "STRING | 'unicode' | 'unichr'"
def start_tree(self, tree, filename):
super(FixUnicode, self).start_tree(tree, filename)
self.unicode_literals = 'unicode_literals' in tree.future_features
def transform(self, node, results):
if node.type == token.NAME:
new = node.clone()
new.value = _mapping[node.value]
return new
elif node.type == token.STRING:
val = node.value
if not self.unicode_literals and val[0] in '\'"' and '\\' in val:
val = r'\\'.join([
v.replace('\\u', r'\\u').replace('\\U', r'\\U')
for v in val.split(r'\\')
])
if val[0] in 'uU':
val = val[1:]
if val == node.value:
return node
new = node.clone()
new.value = val
return new
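# Editor's note: illustrative before/after for this fixer (inputs assumed):
#   unicode(x)   -> str(x)
#   unichr(65)   -> chr(65)
#   u"abc"       -> "abc"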
|
kristinriebe/django-prov_vo
|
prov_vo/apps.py
|
Python
|
apache-2.0
| 129
| 0
|
from __future__ import unicode_literals
from django.apps import AppConfig
class ProvVoConfig(AppConfig):
name = 'prov_vo'
|
cXhristian/django-wiki
|
src/wiki/plugins/images/markdown_extensions.py
|
Python
|
gpl-3.0
| 4,717
| 0.000424
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import re
import markdown
from django.template.loader import render_to_string
from wiki.plugins.images import models, settings
IMAGE_RE = re.compile(
r'.*(\[image\:(?P<id>[0-9]+)(\s+align\:(?P<align>right|left))?(\s+size\:(?P<size>default|small|medium|large|orig))?\s*\]).*',
re.IGNORECASE)
class ImageExtension(markdown.Extension):
""" Images plugin markdown extension for django-wiki. """
def extendMarkdown(self, md, md_globals):
""" Insert ImagePreprocessor before ReferencePreprocessor. """
md.preprocessors.add('dw-images', ImagePreprocessor(md), '>html_block')
md.postprocessors.add('dw-images-cleanup', ImagePostprocessor(md), '>raw_html')
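# Editor's note: hedged wiring sketch (standard python-markdown usage; not
# part of this module -- django-wiki's own markdown wrapper also attaches
# the article object that ImagePreprocessor reads via self.markdown.article):
#   md = markdown.Markdown(extensions=[ImageExtension()])
#   html = md.convert(article_text)  # article_text is illustrative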
class ImagePreprocessor(markdown.preprocessors.Preprocessor):
"""
django-wiki image preprocessor
Parse text for [image:id align:left|right|center] references.
For instance:
[image:id align:left|right|center]
This is the caption text maybe with [a link](...)
So: Remember that the caption text is fully valid markdown!
"""
def run(self, lines): # NOQA
new_text = []
previous_line = ""
line_index = None
previous_line_was_image = False
image = None
image_id = None
alignment = None
size = settings.THUMBNAIL_SIZES['default']
caption_lines = []
for line in lines:
m = IMAGE_RE.match(line)
if m:
previous_line_was_image = True
image_id = m.group('id').strip()
alignment = m.group('align')
if m.group('size'):
size = settings.THUMBNAIL_SIZES[m.group('size')]
try:
image = models.Image.objects.get(
article=self.markdown.article,
id=image_id,
current_revision__deleted=False)
except models.Image.DoesNotExist:
pass
line_index = line.find(m.group(1))
line = line.replace(m.group(1), "")
previous_line = line
caption_lines = []
elif previous_line_was_image:
if line.startswith(" "):
caption_lines.append(line[4:])
line = None
else:
caption_placeholder = "{{{IMAGECAPTION}}}"
width = size.split("x")[0] if size else None
html = render_to_string(
"wiki/plugins/images/render.html",
context={
'image': image,
'caption': caption_placeholder,
'align': alignment,
'size': size,
'width': width
})
html_before, html_after = html.split(caption_placeholder)
placeholder_before = self.markdown.htmlStash.store(
html_before,
safe=True)
placeholder_after = self.markdown.htmlStash.store(
html_after,
safe=True)
new_line = placeholder_before + "\n".join(
caption_lines) + placeholder_after + "\n"
previous_line_was_image = False
if previous_line != "":
if previous_line[line_index:] != "":
new_line = new_line[0:-1]
new_text[-1] = (previous_line[0:line_index] +
new_line +
previous_line[line_index:] +
"\n" +
line)
line = None
else:
line = new_line + line
if line is not None:
new_text.append(line)
return new_text
class ImagePostprocessor(markdown.postprocessors.Postprocessor):
def run(self, text):
"""
This cleans up after Markdown's well-intended placing of image tags
inside <p> elements. The problem is that Markdown should put
<p> tags around images as they are inline elements. However, because
we wrap them in <figure>, we don't actually want it and have to
remove it again after.
"""
text = text.replace("<p><figure", "<figure")
text = text.replace("</figure>\n</p>", "</figure>")
return text
|
coxmediagroup/googleads-python-lib
|
examples/dfa/v1_20/get_advertisers.py
|
Python
|
apache-2.0
| 2,314
| 0.005618
|
#!/usr/bin/python
#
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Fetches all advertisers in a DFA account.
This example displays advertiser name, ID and spotlight configuration ID for
the given search criteria. Results are limited to first 10 records.
The LoadFromStorage method is pulling credentials and properties from a
"googleads.yaml" file. By default, it looks for this file in your home
directory. For more information, see the "Caching authentication information"
section of our README.
Tags: advertiser.getAdvertisers
"""
__author__ = 'Joseph DiLallo'
import googleads.dfa
def main(client):
# Initialize appropriate service.
advertiser_service = client.GetService(
'advertiser', 'v1.20', 'https://advertisersapitest.doubleclick.net')
# Create advertiser search criteria structure.
page_number = 1
advertiser_search_criteria = {
'pageSize': '100',
'pageNumber': str(page_number)
}
while True:
# Get advertiser record set.
results = advertiser_service.getAdvertisers(advertiser_search_criteria)
# Display advertiser names, IDs and spotlight configuration IDs.
if results['records']:
for advertiser in results['records']:
print ('Advertiser with name \'%s\', ID \'%s\', and spotlight '
'configuration id \'%s\' was found.'
% (advertiser['name'], advertiser['id'], advertiser['spotId']))
page_number += 1
advertiser_search_criteria['pageNumber'] = str(page_number)
if page_number > int(results['totalNumberOfPages']):
break
print 'Number of results found: %s' % results['totalNumberOfRecords']
if __name__ == '__main__':
# Initialize client object.
dfa_client = googleads.dfa.DfaClient.LoadFromStorage()
main(dfa_client)
|
google/uncertainty-baselines
|
baselines/mnist/utils.py
|
Python
|
apache-2.0
| 5,866
| 0.008012
|
# coding=utf-8
# Copyright 2022 The Uncertainty Baselines Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for (Fashion) MNIST."""
import numpy as np
import scipy
def one_hot(a, num_classes):
return np.squeeze(np.eye(num_classes)[a.reshape(-1)])
def brier_score(y, p):
"""Compute the Brier score.
Brier Score: see
https://www.stat.washington.edu/raftery/Research/PDF/Gneiting2007jasa.pdf,
page 363, Example 1
Args:
y: one-hot encoding of the true classes, size (?, num_classes)
p: numpy array, size (?, num_classes)
containing the output predicted probabilities
Returns:
bs: Brier score.
"""
return np.mean(np.power(p - y, 2))
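# Editor's note: a tiny worked example (values assumed, not from the module):
#   y = one_hot(np.array([0]), 2)   # [[1., 0.]]
#   p = np.array([[0.8, 0.2]])
#   brier_score(y, p)               # mean([0.04, 0.04]) == 0.04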
def calibration(y, p_mean, num_bins=10):
"""Compute the calibration.
References:
https://arxiv.org/abs/1706.04599
https://arxiv.org/abs/1807.00263
Args:
y: one-hot encoding of the true classes, size (?, num_classes)
p_mean: numpy array, size (?, num_classes)
containing the mean output predicted probabilities
num_bins: number of bins
Returns:
ece: Expected Calibration Error
mce: Maximum Calibration Error
"""
# Compute for every test sample x, the predicted class.
class_pred = np.argmax(p_mean, axis=1)
# and the confidence (probability) associated with it.
conf = np.max(p_mean, axis=1)
# Convert y from one-hot encoding to the number of the class
y = np.argmax(y, axis=1)
# Storage
acc_tab = np.zeros(num_bins) # empirical (true) confidence
mean_conf = np.zeros(num_bins) # predicted confidence
nb_items_bin = np.zeros(num_bins)  # number of items in the bins
tau_tab = np.linspace(0, 1, num_bins+1) # confidence bins
for i in np.arange(num_bins): # iterate over the bins
# select the items where the predicted max probability falls in the bin
# [tau_tab[i], tau_tab[i + 1])
sec = (tau_tab[i + 1] > conf) & (conf >= tau_tab[i])
nb_items_bin[i] = np.sum(sec) # Number of items in the bin
# select the predicted classes, and the true classes
class_pred_sec, y_sec = class_pred[sec], y[sec]
# average of the predicted max probabilities
mean_conf[i] = np.mean(conf[sec]) if nb_items_bin[i] > 0 else np.nan
# compute the empirical confidence
acc_tab[i] = np.mean(
class_pred_sec == y_sec) if nb_items_bin[i] > 0 else np.nan
# Cleaning
mean_conf = mean_conf[nb_items_bin > 0]
acc_tab = acc_tab[nb_items_bin > 0]
nb_items_bin = nb_items_bin[nb_items_bin > 0]
# Expected Calibration Error
ece = np.average(
np.absolute(mean_conf - acc_tab),
weights=nb_items_bin.astype(np.float) / np.sum(nb_items_bin))
# Maximum Calibration Error
mce = np.max(np.absolute(mean_conf - acc_tab))
return ece, mce
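# Editor's note: usage sketch with synthetic predictions (illustrative only):
#   probs = np.array([[0.9, 0.1], [0.6, 0.4], [0.2, 0.8]])
#   labels = one_hot(np.array([0, 1, 1]), 2)
#   ece, mce = calibration(labels, probs, num_bins=5)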
def ensemble_metrics(x,
y,
model,
log_likelihood_fn,
n_samples=1,
weight_files=None):
"""Evaluate metrics of an ensemble.
Args:
x: numpy array of inputs
y: numpy array of labels
model: tf.keras.Model.
log_likelihood_fn: keras function of log likelihood. For classification
tasks, log_likelihood_fn(...)[1] should return the logits
n_samples: number of Monte Carlo samples to draw per ensemble member (each
weight file).
weight_files: to draw samples from multiple weight sets, specify a list of
weight files to load. These files must have been generated through
keras's model.save_weights(...).
Returns:
metrics_dict: dictionary containing the metrics
"""
if weight_files is None:
ensemble_logprobs = [log_likelihood_fn([x, y])[0] for _ in range(n_samples)]
metric_values = [model.evaluate(x, y, verbose=0)
for _ in range(n_samples)]
ensemble_logits = [log_likelihood_fn([x, y])[1] for _ in range(n_samples)]
else:
ensemble_logprobs = []
metric_values = []
ensemble_logits = []
for filename in weight_files:
model.load_weights(filename)
ensemble_logprobs.extend([log_likelihood_fn([x, y])[0]
for _ in range(n_samples)])
ensemble_logits.extend([log_likelihood_fn([x, y])[1]
for _ in range(n_samples)])
metric_values.extend([model.evaluate(x, y, verbose=0)
for _ in range(n_samples)])
metric_values = np.mean(np.array(metric_values), axis=0)
results = {}
for m, name in zip(metric_values, model.metrics_names):
results[name] = m
ensemble_logprobs = np.array(ensemble_logprobs)
probabilistic_log_likelihood = np.mean(
scipy.special.logsumexp(
np.sum(ensemble_logprobs, axis=2)
if len(ensemble_logprobs.shape) > 2 else ensemble_logprobs,
b=1. / ensemble_logprobs.shape[0],
axis=0),
axis=0)
results['probabilistic_log_likelihood'] = probabilistic_log_likelihood
ensemble_logits = np.array(ensemble_logits)
probs = np.mean(scipy.special.softmax(ensemble_logits, axis=2), axis=0)
class_pred = np.argmax(probs, axis=1)
probabilistic_accuracy = np.mean(np.equal(y, class_pred))
results['probabilistic_accuracy'] = probabilistic_accuracy
results['ece'], results['mce'] = calibration(
one_hot(y, probs.shape[1]), probs)
results['brier_score'] = brier_score(one_hot(y, probs.shape[1]), probs)
return results
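# Editor's note: hypothetical call pattern (weight file names are
# illustrative):
#   results = ensemble_metrics(x_test, y_test, model, log_likelihood_fn,
#                              weight_files=['w0.h5', 'w1.h5'])
#   print(results['ece'], results['brier_score'])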
|
wxgeo/geophar
|
wxgeometrie/sympy/plotting/experimental_lambdify.py
|
Python
|
gpl-2.0
| 26,133
| 0.001378
|
""" rewrite of lambdify - This stuff is not stable at all.
It is for internal use in the new plotting module.
It may (will! see the Q'n'A in the source) be rewritten.
It's completely self contained. Especially it does not use lambdarepr.
It does not aim to replace the current lambdify. Most importantly it will never
ever support anything else than sympy expressions (no Matrices, dictionaries
and so on).
"""
from __future__ import print_function, division
import re
from sympy import Symbol, NumberSymbol, I, zoo, oo
from sympy.core.compatibility import exec_
from sympy.utilities.iterables import numbered_symbols
# We parse the expression string into a tree that identifies functions. Then
# we translate the names of the functions and we translate also some strings
# that are not names of functions (all this according to translation
# dictionaries).
# If the translation goes to another module (like numpy) the
# module is imported and 'func' is translated to 'module.func'.
# If a function can not be translated, the inner nodes of that part of the
# tree are not translated. So if we have Integral(sqrt(x)), sqrt is not
# translated to np.sqrt and the Integral does not crash.
# A namespace for all this is generated by crawling the (func, args) tree of
# the expression. The creation of this namespace involves many ugly
# workarounds.
# The namespace consists of all the names needed for the sympy expression and
# all the name of modules used for translation. Those modules are imported only
# as a name (import numpy as np) in order to keep the namespace small and
# manageable.
# Please, if there is a bug, do not try to fix it here! Rewrite this by using
# the method proposed in the last Q'n'A below. That way the new function will
# work just as well, be just as simple, but it wont need any new workarounds.
# If you insist on fixing it here, look at the workarounds in the function
# sympy_expression_namespace and in lambdify.
# Q: Why are you not using python abstract syntax tree?
# A: Because it is more complicated and not much more powerful in this case.
# Q: What if I have Symbol('sin') or g=Function('f')?
# A: You will break the algorithm. We should use srepr to defend against this?
# The problem with Symbol('sin') is that it will be printed as 'sin'. The
# parser will distinguish it from the function 'sin' because functions are
# detected thanks to the opening parenthesis, but the lambda expression won't
# understand the difference if we have also the sin function.
# The solution (complicated) is to use srepr and maybe ast.
# The problem with the g=Function('f') is that it will be printed as 'f' but in
# the global namespace we have only 'g'. But as the same printer is used in the
# constructor of the namespace there will be no problem.
# Q: What if some of the printers are not printing as expected?
# A: The algorithm wont work. You must use srepr for those cases. But even
# srepr may not print well. All problems with printers should be considered
# bugs.
# Q: What about _imp_ functions?
# A: Those are taken care for by evalf. A special case treatment will work
# faster but it's not worth the code complexity.
# Q: Will ast fix all possible problems?
# A: No. You will always have to use some printer. Even srepr may not work in
# some cases. But if the printer does not work, that should be considered a
# bug.
# Q: Is there same way to fix all possible problems?
# A: Probably by constructing our strings ourself by traversing the (func,
# args) tree and creating the namespace at the same time. That actually sounds
# good.
from sympy.external import import_module
import warnings
#TODO debugging output
class vectorized_lambdify(object):
""" Return a sufficiently smart, vectorized and lambdified function.
Returns only reals.
This function uses experimental_lambdify to created a lambdified
expression ready to be used with numpy. Many of the functions in sympy
are not implemented in numpy so in some cases we resort to python
|
cmath or
even to evalf.
The following translations are tried:
only numpy complex
- on errors raised by sympy trying to work with ndarray:
only python cmath and then vectorize complex128
When using python cmath there is no need for evalf or float/complex
because python cmath calls those.
This function never tries to mix numpy directly with evalf because numpy
does not understand sympy Float. If this is needed one can use the
float_wrap_evalf/complex_wrap_evalf options of experimental_lambdify or
better one can be explicit about the dtypes that numpy works with.
Check numpy bug http://projects.scipy.org/numpy/ticket/1013 to know what
types of errors to expect.
"""
def __init__(self, args, expr):
self.args = args
self.expr = expr
self.lambda_func = experimental_lambdify(args, expr, use_np=True)
self.vector_func = self.lambda_func
self.failure = False
def __call__(self, *args):
np = import_module('numpy')
np_old_err = np.seterr(invalid='raise')
try:
temp_args = (np.array(a, dtype=np.complex) for a in args)
results = self.vector_func(*temp_args)
results = np.ma.masked_where(
np.abs(results.imag) > 1e-7 * np.abs(results),
results.real, copy=False)
except Exception as e:
#DEBUG: print 'Error', type(e), e
if ((isinstance(e, TypeError)
and 'unhashable type: \'numpy.ndarray\'' in str(e))
or
(isinstance(e, ValueError)
and ('Invalid limits given:' in str(e)
or 'negative dimensions are not allowed' in str(e) # XXX
or 'sequence too large; must be smaller than 32' in str(e)))): # XXX
# Almost all functions were translated to numpy, but some were
# left as sympy functions. They received an ndarray as an
# argument and failed.
# sin(ndarray(...)) raises "unhashable type"
# Integral(x, (x, 0, ndarray(...))) raises "Invalid limits"
# other ugly exceptions that are not well understood (marked with XXX)
# TODO: Cleanup the ugly special cases marked with xxx above.
# Solution: use cmath and vectorize the final lambda.
self.lambda_func = experimental_lambdify(
self.args, self.expr, use_python_cmath=True)
self.vector_func = np.vectorize(
self.lambda_func, otypes=[np.complex])
results = self.vector_func(*args)
results = np.ma.masked_where(
np.abs(results.imag) > 1e-7 * np.abs(results),
results.real, copy=False)
else:
# Complete failure. One last try with no translations, only
# wrapping in complex((...).evalf()) and returning the real
# part.
if self.failure:
raise e
else:
self.failure = True
self.lambda_func = experimental_lambdify(
self.args, self.expr, use_evalf=True,
complex_wrap_evalf=True)
self.vector_func = np.vectorize(
self.lambda_func, otypes=[np.complex])
results = self.vector_func(*args)
results = np.ma.masked_where(
np.abs(results.imag) > 1e-7 * np.abs(results),
results.real, copy=False)
warnings.warn('The evaluation of the expression is'
' problematic. We are trying a failback method'
' that may still work. Please report this as a bug.')
finally:
np.seterr(**np_old_err)
return results
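# Editor's note: illustrative use of the class above (a sketch, symbols
# assumed):
#   from sympy.abc import x
#   f = vectorized_lambdify([x], x**2)
#   f(np.linspace(0, 1, 5))  # real values; stray imaginary parts are masked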
class lambdify(object):
"""Returns the lambdified function.
This function uses
|
sh4r3m4n/twitter-wikiquote-bot
|
bot.py
|
Python
|
gpl-3.0
| 1,879
| 0.009052
|
#-*- coding: utf-8 -*-
import os
import sys
import random
import time
import json
import wikiquote
import tuitear
from threading import Thread
CONFIG_JSON = 'bots.json'
# Local variable; to change the real interval, edit the configuration
INTERVALO = 1
stop = False
def start_bot(bot):
""" Hilo que inicia el bot pasado como argumento (diccionario) """
citas = []
for pagina in bot['paginas']:
print 'Loading', pagina
quotes = wikiquote.get_quotes(pagina.encode('utf8'))
quotes = [(q, pagina) for q in quotes]
citas += quotes
tiempo = 0
while not stop:
if tiempo >= bot['intervalo']:
quote, pagina = random.choice(citas)
tweet = bot['format'].encode('utf8') % dict(pagina = \
pagina.encode('utf8'), frase = quote.encode('utf8'))
if len(tweet) > 138:
#print 'tweet too long'
continue
print "%s: %s" % (bot['name'], tweet.decode('utf8'))
tuitear.tuitear(tweet, bot['consumer_key'], bot['consumer_secret'],
bot['access_token'], bot['access_token_secret'])
tiempo = 0
tiempo += INTERVALO
time.sleep(INTERVALO)
print 'Thread for', bot['name'], 'stopped'
def main():
path = os.path.dirname(__file__)
if len(sys.argv) == 2:
filename = sys.argv[1]
else:
filename = os.path.join(path, CONFIG_JSON)
print 'Loading bots from', filename
j = json.load(file(filename))
for bot in j['bots']:
if bot.get('disabled'):
continue
thread = Thread(target = start_bot, args=[bot])
thread.daemon = True
thread.start()
print 'Thread for', bot['name'], 'started'
while True:
# Keep the main thread alive so the daemon threads don't exit
pass
if __name__ == '__main__':
main()
|
Khilo84/PyQt4
|
examples/draganddrop/delayedencoding/delayedencoding.py
|
Python
|
gpl-2.0
| 4,777
| 0.007327
|
#!/usr/bin/env python
#############################################################################
##
## Copyright (C) 2010 Hans-Peter Jansen <hpj@urpla.net>.
## Copyright (C) 2010 Nokia Corporation and/or its subsidiary(-ies).
## All rights reserved.
##
## This file is part of the examples of PyQt.
##
## $QT_BEGIN_LICENSE:BSD$
## You may use this file under the terms of the BSD license as follows:
##
## "Redistribution and use in source and binary forms, with or without
## modification, are permitted provided that the following conditions are
## met:
## * Redistributions of source code must retain the above copyright
## notice, this list of conditions and the following disclaimer.
## * Redistributions in binary form must reproduce the above copyright
## notice, this list of conditions and the following disclaimer in
## the documentation and/or other materials provided with the
## distribution.
## * Neither the name of Nokia Corporation and its Subsidiary(-ies) nor
## the names of its contributors may be used to endorse or promote
## products derived from this software without specific prior written
## permission.
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
## "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
## LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
## A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
## OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
## SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
## LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
## DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
## THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
## (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
## OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE."
## $QT_END_LICENSE$
##
#############################################################################
# This is only needed for Python v2 but is harmless for Python v3.
import sip
sip.setapi('QString', 2)
from PyQt4 import QtCore, QtGui, QtSvg
import delayedencoding_rc
class MimeData(QtCore.QMimeData):
dataRequested = QtCore.pyqtSignal(str)
def formats(self):
formats = QtCore.QMimeData.formats(self)
formats.append('image/png')
return formats
def retrieveData(self, mimeType, qvtype):
self.dataRequested.emit(mimeType)
return QtCore.QMimeData.retrieveData(self, mimeType, qvtype)
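# Editor's note: the delayed-encoding idea in short -- formats() advertises
# 'image/png' up front, but the PNG bytes are only produced when a drop
# target calls retrieveData(), which emits dataRequested so SourceWidget can
# render the SVG to PNG on demand (see createData below).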
class SourceWidget(QtGui.QWidget):
def __init__(self, parent=None):
super(SourceWidget, self).__init__(parent)
self.mimeData = None
imageFile = QtCore.QFile(':/images/example.svg')
imageFile.open(QtCore.QIODevice.ReadOnly)
self.imageData = imageFile.readAll()
imageFile.close()
imageArea = QtGui.QScrollArea()
self.imageLabel = QtSvg.QSvgWidget()
self.imageLabel.renderer().load(self.imageData)
imageArea.setWidget(self.imageLabel)
instructTopLabel = QtGui.QLabel("This is an SVG drawing:")
instructBottomLabel = QtGui.QLabel("Drag the icon to copy the drawing as a PNG file:")
dragIcon = QtGui.QPushButton("Export")
dragIcon.setIcon(QtGui.QIcon(':/images/drag.png'))
dragIcon.pressed.connect(self.startDrag)
layout = QtGui.QGridLayout()
layout.addWidget(instructTopLabel, 0, 0, 1, 2)
layout.addWidget(imageArea, 1, 0, 2, 2)
layout.addWidget(instructBottomLabel, 3, 0)
layout.addWidget(dragIcon, 3, 1)
self.setLayout(layout)
self.setWindowTitle("Delayed Encoding")
def createData(self, mimeType):
if mimeType != 'image/png':
return
image = QtGui.QImage(self.imageLabel.size(), QtGui.QImage.Format_RGB32)
painter = QtGui.QPainter()
painter.begin(image)
self.imageLabel.renderer().render(painter)
painter.end()
data = QtCore.QByteArray()
buffer = QtCore.QBuffer(data)
buffer.open(QtCore.QIODevice.WriteOnly)
image.save(buffer, 'PNG')
buffer.close()
self.mimeData.setData('image/png', data)
def startDrag(self):
self.mimeData = MimeData()
self.mimeData.dataRequested.connect(self.createData, QtCore.Qt.DirectConnection)
drag = QtGui.QDrag(self)
drag.setMimeData(self.mimeData)
drag.setPixmap(QtGui.QPixmap(':/images/drag.png'))
drag.exec_(QtCore.Qt.CopyAction)
if __name__ == '__main__':
import sys
app = QtGui.QApplication(sys.argv)
window = SourceWidget()
window.show()
sys.exit(app.exec_())
|
eandersson/amqp-storm
|
amqpstorm/tests/functional/management/basic_tests.py
|
Python
|
mit
| 2,492
| 0
|
from amqpstorm.management import ManagementApi
from amqpstorm.message import Message
from amqpstorm.tests import HTTP_URL
from amqpstorm.tests import PASSWORD
from amqpstorm.tests import USERNAME
from amqpstorm.tests.utility import TestFunctionalFramework
from amqpstorm.tests.utility import setup
class ApiBasicFunctionalTests(TestFunctionalFramework):
@setup(queue=True)
def test_api_basic_publish(self):
api = ManagementApi(HTTP_URL, USERNAME, PASSWORD)
api.queue.declare(self.queue_name)
try:
self.assertEqual(api.basic.publish(self.message, self.queue_name),
{'routed': True})
finally:
api.queue.delete(self.queue_name)
@setup(queue=True)
def test_api_basic_get_message(self):
api = ManagementApi(HTTP_URL, USERNAME, PASSWORD)
api.queue.declare(self.queue_name)
self.assertEqual(api.basic.publish(self.message, self.queue_name),
{'routed': True})
result = api.basic.get(self.queue_name, requeue=False)
self.assertIsInstance(result, list)
self.assertIsInstance(result[0], Message)
self.assertEqual(result[0].body, self.message)
# Make sure the message wasn't re-queued.
self.assertFalse(api.basic.get(self.queue_name, requeue=False))
@setup(queue=True)
def test_api_basic_get_message_requeue(self):
api = ManagementApi(HTTP_URL, USERNAME, PASSWORD)
api.queue.declare(self.queue_name)
self.assertEqual(api.basic.publish(self.message, self.queue_name),
{'routed': True})
result = api.basic.get(self.queue_name, requeue=True)
self.assertIsInstance(result, list)
self.assertIsInstance(result[0], Message)
self.assertEqual(result[0].body, self.message)
# Make sure the message was re-queued.
self.assertTrue(api.basic.get(self.queue_name, requeue=False))
@setup(queue=True)
def test_api_basic_get_message_to_dict(self):
api = ManagementApi(HTTP_URL, USERNAME, PASSWORD)
api.queue.declare(self.queue_name)
self.assertEqual(api.basic.publish(self.message, self.queue_name),
{'routed': True})
result = api.basic.get(self.queue_name, requeue=False, to_dict=True)
self.assertIsInstance(result, list)
self.assertIsInstance(result[0], dict)
self.assertEqual(result[0]['payload'], self.message)
|
tensorflow/tensorflow
|
tensorflow/python/data/experimental/kernel_tests/optimization/shuffle_and_repeat_fusion_test.py
|
Python
|
apache-2.0
| 2,195
| 0.003645
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the `ShuffleAndRepeatFusion` optimization."""
from absl.testing import parameterized
from tensorflow.python.data.experimental.ops import testing
from tensorflow.python.data.kernel_tests import test_base
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.data.ops import options as options_lib
from tensorflow.python.framework import combinations
from tensorflow.python.framework import errors
from tensorflow.python.platform import test
class ShuffleAndRepeatFusionTest(test_base.DatasetTestBase,
parameterized.TestCase):
@combinations.generate(test_base.default_test_combinations())
def testShuffleAndRepeatFusion(self):
expected = "ShuffleAndRepeat"
dataset = dataset_ops.Dataset.range(10).apply(
testing.assert_next([expected])).shuffle(10).repeat(2)
options = options_lib.Options()
options.experimental_optimization.apply_default_optimizations = False
options.experimental_optimization.shuffle_and_repeat_fusion = True
dataset = dataset.with_options(options)
get_next = self.getNext(dataset)
for _ in range(2):
results = []
for _ in range(10):
results.append(self.evaluate(get_next()))
self.assertAllEqual([x for x in range(10)], sorted(results))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
if __name__ == "__main__":
test.main()
|
tensorflow/datasets
|
tensorflow_datasets/summarization/gigaword.py
|
Python
|
apache-2.0
| 4,196
| 0.003337
|
# coding=utf-8
# Copyright 2022 The TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Gigaword summarization dataset."""
import os
import tensorflow as tf
import tensorflow_datasets.public_api as tfds
_CITATION = """
@article{graff2003english,
title={English gigaword},
author={Graff, David and Kong, Junbo and Chen, Ke and Maeda, Kazuaki},
journal={Linguistic Data Consortium, Philadelphia},
volume={4},
number={1},
pages={34},
year={2003}
}
@article{Rush_2015,
title={A Neural Attention Model for Abstractive Sentence Summarization},
url={http://dx.doi.org/10.18653/v1/D15-1044},
DOI={10.18653/v1/d15-1044},
journal={Proceedings of the 2015 Conference on Empirical Methods in Natural Language Processing},
publisher={Association for Computational Linguistics},
author={Rush, Alexander M. and Chopra, Sumit and Weston, Jason},
year={2015}
}
"""
_DESCRIPTION = """
Headline-generation on a corpus of article pairs from Gigaword consisting of
around 4 million articles. Use the 'org_data' provided by
https://github.com/microsoft/unilm/ which is identical to
https://github.com/harvardnlp/sent-summary but with better format.
There are two features:
- document: article.
- summary: headline.
"""
_URL = "https://drive.google.com/uc?export=download&id=1USoQ8lJgN8kAWnUnRrupMGrPMLlDVqlV"
_DOCUMENT = "document"
_SUMMARY = "summary"
class Gigaword(tfds.core.GeneratorBasedBuilder):
"""Gigaword summarization dataset."""
# 1.0.0 contains a bug that uses validation data as training data.
# 1.1.0 Update to the correct train, validation and test data.
# 1.2.0 Replace <unk> with <UNK> in train/val to be consistent with test.
VERSION = tfds.core.Version("1.2.0")
def _info(self):
return tfds.core.DatasetInfo(
builder=self,
description=_DESCRIPTION,
features=tfds.features.FeaturesDict({
_DOCUMENT: tfds.features.Text(),
_SUMMARY: tfds.features.Text()
}),
supervised_keys=(_DOCUMENT, _SUMMARY),
homepage="https://github.com/harvardnlp/sent-summary",
citation=_CITATION,
)
def _split_generators(self, dl_manager):
"""Returns SplitGenerators."""
dl_path = dl_manager.download_and_extract(_URL)
pattern = os.path.join(dl_path, "org_data", "%s.%s.txt")
return [
tfds.core.SplitGenerator(
name=tfds.Split.TRAIN,
gen_kwargs={
"src_path": pattern % ("train", "src"),
"tgt_path": pattern % ("train", "tgt"),
"replace_unk": True,
},
),
tfds.core.SplitGenerator(
name=tfds.Split.VALIDATION,
gen_kwargs={
"src_path": pattern % ("dev", "src"),
"tgt_path": pattern % ("dev", "tgt"),
"replace_unk": True,
},
),
tfds.core.SplitGenerator(
name=tfds.Split.TEST,
gen_kwargs={
"src_path": pattern % ("test", "src"),
"tgt_path": pattern % ("test", "tgt"),
"replace_unk": False,
},
),
]
def _generate_examples(self, src_path=None, tgt_path=None, replace_unk=None):
"""Yields examples."""
with tf.io.gfile.GFile(src_path) as f_d, tf.io.gfile.GFile(tgt_path) as f_s:
for i, (doc_text, sum_text) in enumerate(zip(f_d, f_s)):
if replace_unk:
yield i, {
_DOCUMENT: doc_text.strip().replace("<unk>", "UNK"),
_SUMMARY: sum_text.strip().replace("<unk>", "UNK")
}
else:
yield i, {_DOCUMENT: doc_text.strip(), _SUMMARY: sum_text.strip()}
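# Editor's note: typical TFDS consumption of this builder (a hedged sketch;
# the dataset name follows TFDS's class-name convention):
#   ds = tfds.load('gigaword', split='train')
#   for ex in ds.take(1):
#       print(ex['document'], ex['summary'])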
|
networkpadwan/appliedpython
|
week1/parse2.py
|
Python
|
apache-2.0
| 435
| 0.009195
|
#!/usr/bin/env python
from ciscoconfparse import CiscoConfParse
print "We will use this program to parse a cisco config file"
filename = raw_input("Please enter the name of the file that needs to be parsed: ")
#print filename
input_file = CiscoConfParse(filename)
crypto_find = input_file.find_objects_w_child(parentspec=r"^crypto map CRYPTO", childspec=r"pfs group2")
#print crypto_find
for item in crypto_find:
print item.text
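# Editor's note (descriptive, not original code): find_objects_w_child
# returns the parent "crypto map CRYPTO ..." lines whose children match
# "pfs group2"; the loop above prints each matching parent's text.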
|
openstack/nomad
|
cyborg/common/exception.py
|
Python
|
apache-2.0
| 10,839
| 0
|
# Copyright 2017 Huawei Technologies Co.,LTD.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Cyborg base exception handling.
SHOULD include dedicated exception logging.
"""
from oslo_log import log
import six
from six.moves import http_client
from cyborg.common.i18n import _
from cyborg.conf import CONF
LOG = log.getLogger(__name__)
class CyborgException(Exception):
"""Base Cyborg Exception
To correctly use this class, inherit from it and
|
define
a '_msg_fmt' property. That message will get printf'd
with the keyword arguments provided to the constructor.
If you need to access the message from an exception you should use
six.text_type(exc)
"""
_msg_fmt = _("An unknown exception occurred.")
code = http_client.INTERNAL_SERVER_ERROR
headers = {}
safe = False
def __init__(self, message=None, **kwargs):
self.kwargs = kwargs
if 'code' not in self.kwargs:
try:
self.kwargs['code'] = self.code
except AttributeError:
pass
if not message:
try:
message = self._msg_fmt % kwargs
except Exception:
# kwargs doesn't match a variable in self._msg_fmt
# log the issue and the kwargs
LOG.exception('Exception in string format operation')
for name, value in kwargs.items():
LOG.error("%s: %s" % (name, value))
if CONF.fatal_exception_format_errors:
raise
else:
# at least get the core self._msg_fmt out if something
# happened
message = self._msg_fmt
super(CyborgException, self).__init__(message)
def __str__(self):
"""Encode to utf-8 then wsme api can consume it as well."""
if not six.PY3:
return unicode(self.args[0]).encode('utf-8')
return self.args[0]
def __unicode__(self):
"""Return a unicode representation of the exception message."""
return unicode(self.args[0])
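# Editor's note: illustrative subclass/usage (hypothetical names), following
# the docstring contract -- define '_msg_fmt' and pass printf-style kwargs:
#   class WidgetNotFound(NotFound):
#       _msg_fmt = _("Widget %(uuid)s could not be found.")
#   raise WidgetNotFound(uuid='1234')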
class AttachHandleAlreadyExists(CyborgException):
_msg_fmt = _("AttachHandle with uuid %(uuid)s already exists.")
class ControlpathIDAlreadyExists(CyborgException):
_msg_fmt = _("ControlpathID with uuid %(uuid)s already exists.")
class ConfigInvalid(CyborgException):
_msg_fmt = _("Invalid configuration file. %(error_msg)s")
class DeviceAlreadyExists(CyborgException):
_msg_fmt = _("Device with uuid %(uuid)s already exists.")
class DeviceProfileAlreadyExists(CyborgException):
_msg_fmt = _("DeviceProfile with uuid %(uuid)s already exists.")
class DeployableAlreadyExists(CyborgException):
_msg_fmt = _("Deployable with uuid %(uuid)s already exists.")
class ExtArqAlreadyExists(CyborgException):
_msg_fmt = _("ExtArq with uuid %(uuid)s already exists.")
class Invalid(CyborgException):
_msg_fmt = _("Invalid parameters.")
code = http_client.BAD_REQUEST
class InvalidIdentity(Invalid):
_msg_fmt = _("Expected a uuid/id but received %(identity)s.")
class InvalidUUID(Invalid):
_msg_fmt = _("Expected a uuid but received %(uuid)s.")
class InvalidJsonType(Invalid):
_msg_fmt = _("%(value)s is not JSON serializable.")
# Cannot be templated as the error syntax varies.
# msg needs to be constructed when raised.
class InvalidParameterValue(Invalid):
_msg_fmt = _("%(err)s")
class PatchError(Invalid):
_msg_fmt = _("Couldn't apply patch '%(patch)s'. Reason: %(reason)s")
class NotAuthorized(CyborgException):
_msg_fmt = _("Not authorized.")
code = http_client.FORBIDDEN
class HTTPForbidden(NotAuthorized):
_msg_fmt = _("Access was denied to the following resource: %(resource)s")
class NotFound(CyborgException):
_msg_fmt = _("Resource could not be found.")
code = http_client.NOT_FOUND
class ServiceNotFound(NotFound):
msg_fmt = _("Service %(service_id)s could not be found.")
class AttachHandleNotFound(NotFound):
_msg_fmt = _("AttachHandle %(uuid)s could not be found.")
class ControlpathIDNotFound(NotFound):
_msg_fmt = _("ControlpathID %(uuid)s could not be found.")
class ConfGroupForServiceTypeNotFound(ServiceNotFound):
msg_fmt = _("No conf group name could be found for service type "
"%(stype)s.")
class DeviceNotFound(NotFound):
_msg_fmt = _("Device %(uuid)s could not be found.")
class DeviceProfileNotFound(NotFound):
_msg_fmt = _("DeviceProfile %(uuid)s could not be found.")
class DeployableNotFound(NotFound):
_msg_fmt = _("Deployable %(uuid)s could not be found.")
class ExtArqNotFound(NotFound):
_msg_fmt = _("ExtArq %(uuid)s could not be found.")
class InvalidDeployType(CyborgException):
_msg_fmt = _("Deployable have an invalid type")
class Conflict(CyborgException):
_msg_fmt = _('Conflict.')
code = http_client.CONFLICT
class DuplicateDeviceName(Conflict):
_msg_fmt = _("A device with name %(name)s already exists.")
class DuplicateDeviceProfileName(Conflict):
_msg_fmt = _("A device_profile with name %(name)s already exists.")
class DuplicateDeployableName(Conflict):
_msg_fmt = _("A deployable with name %(name)s already exists.")
class PlacementEndpointNotFound(NotFound):
_msg_fmt = _("Placement API endpoint not found")
class PlacementResourceProviderNotFound(NotFound):
_msg_fmt = _("Placement resource provider not found %(resource_provider)s.")
class PlacementInventoryNotFound(NotFound):
_msg_fmt = _("Placement inventory not found for resource provider "
"%(resource_provider)s, resource class %(resource_class)s.")
class PlacementInventoryUpdateConflict(Conflict):
_msg_fmt = _("Placement inventory update conflict for resource provider "
"%(resource_provider)s, resource class %(resource_class)s.")
class ObjectActionError(CyborgException):
_msg_fmt = _('Object action %(action)s failed because: %(reason)s')
class AttributeNotFound(NotFound):
_msg_fmt = _("Attribute %(uuid)s could not be found.")
class AttributeInvalid(CyborgException):
_msg_fmt = _("Attribute is invalid")
class AttributeAlreadyExists(CyborgException):
_msg_fmt = _("Attribute with uuid %(uuid)s already exists.")
# An exception with this name is used on both sides of the placement/
# cyborg interaction.
class ResourceProviderInUse(CyborgException):
_msg_fmt = _("Resource provider has allocations.")
class ResourceProviderRetrievalFailed(CyborgException):
_msg_fmt = _("Failed to get resource provider with UUID %(uuid)s")
class ResourceProviderAggregateRetrievalFailed(CyborgException):
_msg_fmt = _("Failed to get aggregates for resource provider with UUID"
" %(uuid)s")
class ResourceProviderTraitRetrievalFailed(CyborgException):
_msg_fmt = _("Failed to get traits for resource provider with UUID"
" %(uuid)s")
class ResourceProviderCreationFailed(CyborgException):
_msg_fmt = _("Failed to create resource provider %(name)s")
class ResourceProviderDeletionFailed(CyborgException):
_msg_fmt = _("Failed to delete resource provider %(uuid)s")
class ResourceProviderUpdateFailed(CyborgException):
_msg_fmt = _("Failed to update resource provider via URL %(url)s: "
"%(error)s")
class ResourceProviderNotFound(NotFound):
_msg_fmt = _("No such resource provider %(name_or_uuid)s.")
class ResourceProviderSyncFailed(CyborgException):
_msg_fmt = _("Failed to synchronize the placement service with resource "
"prov
|
karllessard/tensorflow
|
tensorflow/python/platform/resource_loader.py
|
Python
|
apache-2.0
| 4,522
| 0.00774
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Resource management library."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os as _os
import sys as _sys
from tensorflow.python.util import tf_inspect as _inspect
from tensorflow.python.util.tf_export import tf_export
# pylint: disable=g-import-not-at-top
try:
from rules_python.python.runfiles import runfiles
except ImportError:
runfiles = None
# pylint: enable=g-import-not-at-top
@tf_export(v1=['resource_loader.load_resource'])
def load_resource(path):
"""Load the resource at given path, where path is relative to tensorflow/.
Args:
path: a string resource path relative to tensorflow/.
Returns:
The contents of that resource.
Raises:
IOError: If the path is not found, or the resource can't be opened.
"""
with open(get_path_to_datafile(path), 'rb') as f:
return f.read()
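# Editor's note: hedged usage sketch (the relative path is illustrative):
#   contents = load_resource('python/platform/resource_loader.py')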
# pylint: disable=protected-access
@tf_export(v1=['resource_loader.get_data_files_path'])
def get_data_files_path():
"""Get a direct path to the data files colocated with the script.
Returns:
The directory where files specified in data attribute of py_test
and py_binary are stored.
"""
return _os.path.dirname(_inspect.getfile(_sys._getframe(1)))
@tf_export(v1=['resource_loader.get_root_dir_with_all_resources'])
def get_root_dir_with_all_resources():
"""Get a root directory containing all the data attributes in the build rule.
Returns:
The path to the specified file present in the data attribute of py_test
or py_binary. Falls back to returning the same as get_data_files_path if it
fails to detect a bazel runfiles directory.
"""
script_dir = get_data_files_path()
# Create a history of the paths, because the data files are located relative
# to the repository root directory, which is directly under runfiles
# directory.
directories = [script_dir]
data_files_dir = ''
while True:
candidate_dir = directories[-1]
current_directory = _os.path.basename(candidate_dir)
if '.runfiles' in current_directory:
# Our file should never be directly under runfiles.
# If the history has only one item, it means we are directly inside the
# runfiles directory, something is wrong, fall back to the default return
# value, script directory.
if len(directories) > 1:
data_files_dir = directories[-2]
break
else:
new_candidate_dir = _os.path.dirname(candidate_dir)
# If we are at the root directory these two will be the same.
if new_candidate_dir == candidate_dir:
break
else:
directories.append(new_candidate_dir)
return data_files_dir or script_dir
@tf_export(v1=['resource_loader.get_path_to_datafile'])
def get_path_to_datafile(path):
"""Get the path to the specified file in the data dependencies.
The path is relative to tensorflow/
Args:
path: a string resource path relative to tensorflow/
Returns:
The path to the specified file present in the data attribute of py_test
or py_binary.
Raises:
IOError: If the path is not found, or the resource can't be opened.
"""
# First, try finding in the new path.
if runfiles:
r = runfiles.Create()
new_fpath = r.Rlocation(
_os.path.abspath(_os.path.join('tensorflow', path)))
if new_fpath is not None and _os.path.exists(new_fpath):
return new_fpath
# Then, the old style path, as people became dependent on this buggy call.
old_filepath = _os.path.join(
_os.path.dirname(_inspect.getfile(_sys._getframe(1))), path)
return old_filepath
@tf_export(v1=['resource_loader.readahead_file_path'])
def readahead_file_path(path, readahead='128M'): # pylint: disable=unused-argument
"""Readahead files not implemented; simply returns given path."""
return path
|
ravenac95/lxc4u
|
tests/test_meta.py
|
Python
|
mit
| 2,809
| 0.001068
|
from mock import Mock, patch, MagicMock
from lxc4u.meta import *
def test_initialize_lxc_meta():
meta1 = LXCMeta()
meta1['hello'] = 'hello'
meta2 = LXCMeta(initial=dict(hello=123))
assert meta1['hello'] == 'hello'
assert meta2['hello'] == 123
@patch('__builtin__.open')
@patch('json.loads')
@patch('os.path.exists')
def test_meta_load_from_file(mock_exists, mock_loads, mock_open):
# Setup Mocks
mock_exists.return_value = True
mock_loads.return_value = {}
fake_path = 'path'
# Run Test
meta = LXCMeta.load_from_file(fake_path)
# Assertions
assert isinstance(meta, LXCMeta) == True
@patch('__builtin__.open')
@patch('json.loads')
@patch('os.path.exists')
def test_meta_load_from_file_with_no_file(mock_exists, mock_loads, mock_open):
mock_exists.return_value = False
fake_path = 'path'
# Run Test
meta = LXCMeta.load_from_file(fake_path)
assert mock_loads.called == False, "Mock json was called for some reason"
class TestLXCMeta(object):
def setup(self):
metadata = dict(a=1, b=2, c=3, d='delta')
self.metadata = metadata
self.meta = LXCMeta(initial=metadata)
def test_as_dict(self):
assert self.meta.as_dict() == self.metadata
@patch('lxc4u.meta.BoundLXCMeta')
def test_bind(self, mock_bound_meta_cls):
mock_lxc = Mock()
self.meta.bind(mock_lxc)
mock_bound_meta_cls.bind_to_lxc.assert_called_with(mock_lxc, self.meta)
@patch('lxc4u.meta.BoundLXCMeta')
def test_bind_and_save(self, mock_bound_meta_cls):
self.meta.bind_and_save(None)
mock_bound_meta_cls.bind_to_lxc.return_value.save.assert_called_with()
def test_initialize_bound_lxc_meta():
fake_meta = dict(a=1, b=2, c=3)
mock_lxc = Mock()
bound_meta = BoundLXCMeta.bind_to_lxc(mock_lxc, fake_meta)
bound_meta['hello'] = 'world'
assert bound_meta['a'] == 1
assert bound_meta['hello'] == 'world'
class TestBoundLXCMeta(object):
def setup(self):
mock_meta = MagicMock()
mock_lxc = Mock()
self.bound_meta = BoundLXCMeta.bind_to_lxc(mock_lxc, mock_meta)
self.mock_meta = mock_meta
self.mock_lxc = mock_lxc
@patch('json.dumps', autospec=True)
@patch('__builtin__.open', autospec=True)
def test_save(self, mock_open, mock_dumps):
mock_file = mock_open.return_value
self.bound_meta.save()
mock_open.assert_called_with(self.mock_lxc.path.return_value, 'w')
mock_dumps.assert_called_with(self.mock_meta.as_dict.return_value)
mock_file.write.assert_called_with(mock_dumps.return_value)
mock_file.close.assert_called_with()
def test_as_dict(self):
self.bound_meta.as_dict()
self.mock_meta.as_dict.assert_called_with()
|
rlazojr/totalinstaller
|
plugin.program.totalinstaller/default.py
|
Python
|
gpl-2.0
| 104,757
| 0.029945
|
import xbmc, xbmcaddon, xbmcgui, xbmcplugin,os,sys
import shutil
import urllib2,urllib
import re
import extract
import time
import downloader
import plugintools
import weblogin
import zipfile
import ntpath
ARTPATH = 'http://totalxbmc.tv/totalrevolution/art/' + os.sep
FANART = 'http://totalxbmc.tv/totalrevolution/art/fanart.jpg'
ADDON = xbmcaddon.Addon(id='plugin.program.community.builds')
AddonID = 'plugin.program.community.builds'
AddonTitle = "[COLOR=blue][B]T[/COLOR][COLOR=dodgerblue]R[/COLOR] [COLOR=white]Community Builds[/COLOR][/B]"
zip = ADDON.getSetting('zip')
localcopy = ADDON.getSetting('localcopy')
dialog = xbmcgui.Dialog()
dp = xbmcgui.DialogProgress()
HOME = xbmc.translatePath('special://home/')
USERDATA = xbmc.translatePath(os.path.join('special://home/userdata',''))
MEDIA = xbmc.translatePath(os.path.join('special://home/media',''))
AUTOEXEC = xbmc.translatePath(os.path.join(USERDATA,'autoexec.py'))
AUTOEXECBAK = xbmc.translatePath(os.path.join(USERDATA,'autoexec_bak.py'))
ADDON_DATA = xbmc.translatePath(os.path.join(USERDATA,'addon_data'))
PLAYLISTS = xbmc.translatePath(os.path.join(USERDATA,'playlists'))
PROFILES = xbmc.translatePath(os.path.join(USERDATA,'profiles'))
DATABASE = xbmc.translatePath(os.path.join(USERDATA,'Database'))
ADDONS = xbmc.translatePath(os.path.join('special://home','addons',''))
CBADDONPATH = xbmc.translatePath(os.path.join(ADDONS,AddonID,'default.py'))
GUISETTINGS = os.path.join(USERDATA,'guisettings.xml')
GUI = xbmc.translatePath(os.path.join(USERDATA,'guisettings.xml'))
GUIFIX = xbmc.translatePath(os.path.join(USERDATA,'guifix.xml'))
INSTALL = xbmc.translatePath(os.path.join(USERDATA,'install.xml'))
FAVS = xbmc.translatePath(os.path.join(USERDATA,'favourites.xml'))
SOURCE = xbmc.translatePath(os.path.join(USERDATA,'sources.xml'))
ADVANCED = xbmc.translatePath(os.path.join(USERDATA,'advancedsettings.xml'))
RSS = xbmc.translatePath(os.path.join(USERDATA,'RssFeeds.xml'))
KEYMAPS = xbmc.translatePath(os.path.join(USERDATA,'keymaps','keyboard.xml'))
USB = xbmc.translatePath(os.path.join(zip))
CBPATH = xbmc.translatePath(os.path.join(USB,'Community Builds',''))
cookiepath = xbmc.translatePath(os.path.join(ADDON_DATA,AddonID,'cookiejar'))
startuppath = xbmc.translatePath(os.path.join(ADDON_DATA,AddonID,'startup.xml'))
tempfile = xbmc.translatePath(os.path.join(ADDON_DATA,AddonID,'temp.xml'))
idfile = xbmc.translatePath(os.path.join(ADDON_DATA,AddonID,'id.xml'))
idfiletemp = xbmc.translatePath(os.path.join(ADDON_DATA,AddonID,'idtemp.xml'))
notifyart = xbmc.translatePath(os.path.join(ADDONS,AddonID,'resources/'))
skin = xbmc.getSkinDir()
EXCLUDES = ['plugin.program.community.builds']
username = ADDON.getSetting('username')
password = ADDON.getSetting('password')
login = ADDON.getSetting('login')
userdatafolder = xbmc.translatePath(os.path.join(ADDON_DATA,AddonID))
GUINEW = xbmc.translatePath(os.path.join(userdatafolder,'guinew.xml'))
guitemp = xbmc.translatePath(os.path.join(userdatafolder,'guitemp',''))
factory = xbmc.translatePath(os.path.join(HOME,'..','factory','_DO_NOT_DELETE.txt'))
#-----------------------------------------------------------------------------------------------------------------
#Simple shortcut to create a notification
def Notify(title,message,times,icon):
icon = notifyart+icon
print "icon: "+str(icon)
xbmc.executebuiltin("XBMC.Notification("+title+","+message+","+times+","+icon+")")
|
#-----------------------------------------------------------------------------------------------------------------
#Popup class - thanks to whoever codes the help popup in TVAddons Maintenance for this section. Unfortunately there doesn't appear to be any author details in that code so unable to credit by name.
class SPLASH(xbmcgui.WindowXMLDialog):
def __init__(self,*args,**kwargs): self.shut=kwargs['close_time']; xbmc.executebuiltin("Skin.Reset(AnimeWindowXMLDialogClose)"); xbmc.executebuiltin("Skin.SetBool(AnimeWindowXMLDialogClose)")
def onFocus(self,controlID): pass
def onClick(self,controlID):
if controlID==12: xbmc.Player().stop(); self._close_dialog()
def onAction(self,action):
if action in [5,6,7,9,10,92,117] or action.getButtonCode() in [275,257,261]: xbmc.Player().stop(); self._close_dialog()
def _close_dialog(self):
xbmc.executebuiltin("Skin.Reset(AnimeWindowXMLDialogClose)"); time.sleep( .4 ); self.close()
#-----------------------------------------------------------------------------------------------------------------
#Set popup xml based on platform
def pop():
popup=SPLASH('totalxbmc.xml',ADDON.getAddonInfo('path'),'DefaultSkin',close_time=34)
popup.doModal()
del popup
#-----------------------------------------------------------------------------------------------------------------
#Initial online check for new video
def VideoCheck():
print skin
import yt
unlocked = 'no'
if not os.path.exists(userdatafolder):
os.makedirs(userdatafolder)
if not os.path.exists(startuppath):
localfile = open(startuppath, mode='w+')
localfile.write('date="01011001"\nversion="0.0"')
localfile.close()
if not os.path.exists(idfile):
localfile = open(idfile, mode='w+')
localfile.write('id="None"\nname="None"')
localfile.close()
BaseURL='http://totalxbmc.tv/totalrevolution/Community_Builds/update.txt'
link = OPEN_URL(BaseURL).replace('\n','').replace('\r','')
datecheckmatch = re.compile('date="(.+?)"').findall(link)
videomatch = re.compile('video="https://www.youtube.com/watch\?v=(.+?)"').findall(link)
# splashmatch = re.compile('splash="(.+?)"').findall(link)
# splashmatch2 = re.compile('splash2="(.+?)"').findall(link)
datecheck = datecheckmatch[0] if (len(datecheckmatch) > 0) else ''
videocheck = videomatch[0] if (len(videomatch) > 0) else ''
# splashcheck = splashmatch[0] if (len(splashmatch) > 0) else ''
# splashcheck2 = splashmatch2[0] if (len(splashmatch2) > 0) else ''
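    # The remote update.txt is expected to hold one-line fields that the
    # regexes above extract, e.g. (hypothetical values):
    #   date="01012016"
    #   video="https://www.youtube.com/watch?v=XXXXXXXXXXX"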
localfile = open(startuppath, mode='r')
    content = localfile.read()
    localfile.close()
localdatecheckmatch = re.compile('date="(.+?)"').findall(content)
localdatecheck = localdatecheckmatch[0] if (len(localdatecheckmatch) > 0) else ''
localversionmatch = re.compile('version="(.+?)"').findall(content)
localversioncheck = localversionmatch[0] if (len(localversionmatch) > 0) else ''
localfile2 = open(idfile, mode='r')
    content2 = localfile2.read()
    localfile2.close()
localidmatch = re.compile('id="(.+?)"').findall(content2)
localidcheck = localidmatch[0] if (len(localidmatch) > 0) else 'None'
localbuildmatch = re.compile('name="(.+?)"').findall(content2)
localbuildcheck = localbuildmatch[0] if (len(localbuildmatch) > 0) else ''
print "localbuildmatch: "+str(localbuildmatch)
print "localbuildcheck: "+str(localbuildcheck)
# if localidcheck == "None":
# if os.path.exists(INSTALL):
# os.remove(INSTALL)
if int(localdatecheck) < int(datecheck):
replacefile = content.replace(localdatecheck,datecheck)
writefile = open(startuppath, mode='w')
writefile.write(str(replacefile))
writefile.close()
yt.PlayVideo(videocheck, forcePlayer=True)
xbmc.sleep(500)
while xbmc.Player().isPlaying():
xbmc.sleep(500)
else:
pass
logged_in = weblogin.doLogin(cookiepath,username,password)
if login == 'true':
if logged_in == True:
unlocked = 'yes'
Notify('Login Successful', 'Welcome back '+username,'4000','tick.png')
elif logged_in == False:
dialog.ok('[COLOR=blue][B]T[/COLOR][COLOR=dodgerblue]R[/COLOR] [COLOR=white]Community Builds[/COLOR][/B]','There is an error with your login information, please check','your username and password, remember this i
|
Worldev/Mikicat-Antivirus
|
antivirus.py
|
Python
|
gpl-3.0
| 20,145
| 0.0076
|
#!/usr/bin/python3
#################################### BY MIKICAT ###############################################
###############################A WORLDEV AFFILIATE#############################################
###IF YOU DETECT BUGS, PLEASE OPEN AN ISSUE OR REPORT THEM TO http://mikicatantivirus.weebly.com/contact.html ##
import os
import platform
import time
import webbrowser
from tkinter import *
# Auto dir setup.
if platform.system() == "Windows":
linux = False
windows = True
elif platform.system() == "Linux":
linux = True
windows = False
else:
print("Mikicat Antivirus is not compatible with your operative system.")
os._exit(1)
home = os.path.expanduser('~')
extfiles = []
files = []
directory = ("{0}\AppData\Local".format(home) if windows else "{0}/.config".format(home))
directory2 = ("C:\Windows\system32" if windows else "/sbin")
directory3 = ("{0}\AppData\Roaming\Microsoft\Windows\Start Menu\Programs\Startup".format(home) if windows else "/etc/init.d")
directory4 = ("{0}\Downloads".format(home) if windows else "{0}/Downloads".format(home))
# Extensions
ext = '.bat'
ext2 = '.Gen'
ext3 = '.gen'
ext4 = '.vbs' # Do not scan it in system32
ext5 = '.inf' # Do not scan it in system32
ext6 = '.vbe' # Do not scan it in system32
ext7 = '.vb' # Do not scan it in system32
ext8 = '.gzquar'
ext9 = '.vexe'
ext10 = '.sys' # Important: Only detect this if found in Downloads. If it is in any other detection of any other part of the code, please delete the "or file.endswith(ext10)". If it is put in the system32, it will delete essential system files.
ext11 = '.aru'
ext12 = '.smtmp'
ext13 = '.ctbl'
ext14 = '.dxz'
ext15 = '.cih'
ext16 = '.kcd'
ext17 = '.sop'
ext18 = '.tsa'
ext19 = '.xir'
ext20 = '.fnr'
ext21 = '.dom'
ext22 = '.hlw'
ext23 = '.lik'
ext24 = '.s7p'
ext25 = '.rhk'
ext26 = '.dlb'
ext27 = '.bll'
ext28 = '.dyz'
ext29 = '.fag'
ext30 = '.xtbl'
ext31 = '.fjl'
ext32 = '.cryptolocker'
ext33 = '.mjz'
ext34 = '.osa'
ext35 = '.bxz'
ext36 = '.mfu'
ext37 = '.ezt'
ext38 = '.dyv'
ext39 = '.iws'
ext40 = '.xdu'
ext41 = '.dllx'
ext42 = '.uzy'
ext43 = '.ska'
ext44 = '.mjg'
ext45 = '.txs'
ext46 = '.upa'
ext47 = '.bls'
ext48 = '.cc'
ext49 = '.lkh'
ext50 = '.tko'
ext51 = '.tti'
ext52 = '.dli'
ext53 = '.ceo'
ext54 = '.rna'
ext55 = '.delf'
ext56 = '.spam'
ext57 = '.cxq'
ext58 = '.vzr'
ext59 = '.bmw'
ext60 = '.atm'
ext61 = '.fuj'
ext62 = '.ce0'
ext63 = '.lok'
ext64 = '.ssy'
ext65 = '.hts'
ext66 = '.hsq'
ext67 = '.qit'
ext68 = '.pid'
ext69 = '.aepl'
ext70 = '.xnt'
ext71 = '.aut'
ext72 = '.dx'
ext73 = '.zvz'
ext74 = '.bqf'
ext75 = '.iva'
ext76 = '.pr'
ext77 = '.let'
ext78 = '.cyw'
ext79 = '.bup'
ext80 = '.bps'
ext81 = '.epub.exe'
# Extensions
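# Editorial note: str.endswith() also accepts a tuple of suffixes, so the long
# endswith() chains in the detection functions below could be collapsed into a
# single check. A minimal sketch (SUSPICIOUS_EXTS is a hypothetical name, not
# used by the original code):
#   SUSPICIOUS_EXTS = (ext, ext2, ext3, ext4, ext5)  # ...through ext81
#   if file.endswith(SUSPICIOUS_EXTS):
#       extfiles.append(file)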
ideatxt = open("assets/idea.txt").read()
def idea():
print('opening')
tk = Tk()
tk.title("Idea --> Content from idea.txt")
tk.resizable(0, 0)
Label(tk, text=ideatxt).grid(row=1, sticky=W)
Button(tk, text="Quit", command=tk.destroy).grid(row=2, column=2, sticky=W)
def done4():
root = Tk()
root.title("Mikicat's Antivirus™: Finished")
root.resizable(0, 0)
Label(root, text="DONE: No virus found in %s." % directory4).grid(row=1, sticky=W)
Label(root, text="\n").grid(row=2, sticky=W)
Label(root, text="Thanks for using Miquel's Antivirus!").grid(row=3, sticky=W)
Button(root, text="Quit", command=root.destroy).grid(row=4, column=2, sticky=W)
Button(root, text="Idea", command=idea).grid(row=4, sticky=W)
print("4")
def finish():
root = Tk()
Label(root, text="Thanks for using Miquel's Antivirus!").grid(row=3, sticky=W)
Button(root, text="Quit", command=root.destroy).grid(row=4, column=2, sticky=W)
Button(root, text="Idea", command=idea).grid(row=4, sticky=W)
def yes4():
for item in extfiles:
os.remove(directory4 + ("/" if linux else "\\") + item)
del files[:]
del extfiles[:]
root = Tk()
root.title("Done")
root.resizable(0, 0)
Label(root, text="Done").grid(row=1, sticky=W)
Button(root, text="Finish", command=finish).grid(row=2, sticky=W)
print("Done")
def detection4():
del files[:]
del extfiles[:]
for file in os.listdir(directory4):
if file.endswith(ext) or file.endswith(ext2) or file.endswith(ext3) or file.endswith(ext4) or file.endswith(ext5)\
or file.endswith(ext6) or file.endswith(ext7) or file.endswith(ext8) or file.endswith(ext9) or file.endswith(ext10)\
or file.endswith(ext11) or file.endswith(ext12) or file.endswith(ext13) or file.endswith(ext14) or file.endswith(ext15)\
or file.endswith(ext16) or file.endswith(ext17) or file.endswith(ext18) or file.endswith(ext19) or file.endswith(ext20)\
or file.endswith(ext21) or file.endswith(ext22) or file.endswith(ext23) or file.endswith(ext24) or file.endswith(ext25)\
or file.endswith(ext26) or file.endswith(ext27) or file.endswith(ext28) or file.endswith(ext29) or file.endswith(ext30)\
or file.endswith(ext31) or file.endswith(ext32) or file.endswith(ext33) or file.endswith(ext34) or file.endswith(ext35)\
or file.endswith(ext36) or file.endswith(ext37) or file.endswith(ext38) or file.endswith(ext39) or file.endswith(ext40)\
or file.endswith(ext41) or file.endswith(ext42) or file.endswith(ext43) or file.endswith(ext44) or file.endswith(ext45)\
or file.endswith(ext46) or file.endswith(ext47) or file.endswith(ext48) or file.endswith(ext49) or file.endswith(ext50)\
or file.endswith(ext51) or file.endswith(ext52) or file.endswith(ext53) or file.endswith(ext54) or file.endswith(ext55)\
or file.endswith(ext56) or file.endswith(ext57) or file.endswith(ext58) or file.endswith(ext59) or file.endswith(ext60)\
or file.endswith(ext61) or file.endswith(ext62) or file.endswith(ext63) or file.endswith(ext64) or file.endswith(ext65)\
or file.endswith(ext66) or file.endswith(ext67) or file.endswith(ext68) or file.endswith(ext69) or file.endswith(ext70)\
or file.endswith(ext71) or file.endswith(ext72) or file.endswith(ext73) or file.endswith(ext74) or file.endswith(ext75)\
or file.endswith(ext76) or file.endswith(ext77) or file.endswith(ext78) or file.endswith(ext79) or file.endswith(ext80)\
or file.endswith(ext81):
extfiles.append(file)
for file in os.listdir(directory4):
files.append(file)
if extfiles != []:
tk = Tk()
tk.title("WARNING")
tk.resizable(0, 0)
Label(tk, text="WARNING: POSSIBLE VIRUS DETECTED").grid(row=1, sticky=W)
Label(tk, text="Possible virus: %s" % extfiles).grid(row=2, sticky=W)
Button(tk, text="Delete", command=yes4).grid(row=8, column=2, sticky=W)
Button(tk, text="Cancel", command=tk.destroy).grid(row=8, sticky=W)
if extfiles == []:
done4()
def done3():
root = Tk()
root.title("Mikicat's Antivirus™: Done")
root.resizable(0, 0)
Label(root, text="DONE: No virus found in %s" % directory3).grid(row=1, sticky=W)
Button(root, text="Continue", command=detection4).grid(row=3, column=2, sticky=W)
Button(root, text="Quit", command=root.destroy).grid(row=3, sticky=W)
print("3")
def yes3():
for item in extfiles:
os.remove(directory3 + ("/" if linux else "\\") + item)
del files[:]
del extfiles[:]
root = Tk()
root.title("Done")
root.resizable(0, 0)
Label(root, text="Done").grid(row=1, sticky=W)
Button(root, text="Continue", command=detection4).grid(row=2, column=2, sticky=W)
Button(root, text="Quit", command=root.destroy).grid(row=2, sticky=W)
print("Done")
def detection3():
del files[:]
del extfiles[:]
for file in os.listdir(directory3):
if file.endswith(ext) or file.endswith(ext2) or file.endswith(ext3) or file.endswith(ext8) or file.endswith(ext9)\
or file.endswith(ext11) or file.endswith(ext12) or file.endswith(ext13) or file.endswith(ext14) or file.endswith(ext15)\
or file.endswith(ext16) or file.endswith(ext17) or file.endswith(ext18) or file.endswith(ext19) or file.endswith(ext20)\
or file.endswith(ex
|
nuagenetworks/nuage-openstack-neutron
|
nuage_neutron/db/migration/alembic_migrations/versions/liberty_release.py
|
Python
|
apache-2.0
| 838
| 0
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""Liberty
Revision ID: liberty
Revises: None
Create Date: 2015-11-13 00:00:00.000000
"""
# revision identifiers, used by Alembic.
revision = 'liberty'
down_revision = None
def upgrade():
"""A no-op migration for marking th
|
e Liberty release."""
pass
|
sysadminmatmoz/odoo-clearcorp
|
purchase_prediction/__init__.py
|
Python
|
agpl-3.0
| 1,003
| 0.000997
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
#    License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from . import models
|
erudit/zenon
|
eruditorg/apps/userspace/library/authorization/apps.py
|
Python
|
gpl-3.0
| 196
| 0
|
# -*- coding: utf-8 -*-
from django.apps import AppConfig
class AuthorizationConfig(AppConfig):
label = 'userspace_library_authorizations'
    name = 'apps.userspace.library.authorization'
|
mdxs/gae-init
|
main/auth/twitter.py
|
Python
|
mit
| 1,532
| 0.00718
|
# coding: utf-8
from __future__ import absolute_import
import flask
import auth
import config
import model
import util
from main import app
twitter_config = dict(
access_token_url='https://api.twitter.com/oauth/access_token',
api_base_url='https://api.twitter.com/1.1/',
authorize_url='https://api.twitter.com/oauth/authenticate',
client_id=config.CONFIG_DB.twitter_consumer_key,
client_secret=config.CONFIG_DB.twitter_consumer_secret,
request_token_url='https://api.twitter.com/oauth/request_token',
signature_method='HMAC-SHA1',
    save_request_token=auth.save_oauth1_request_token,
fetch_request_token=auth.fetch_oauth1_request_token,
)
twitter = auth.create_oauth_app(twitter_config, 'twitter')
@app.route('/api/auth/callback/twitter/')
def twitter_authorized():
id_token = twitter.authorize_access_token()
if id_token is None:
flask.flash('You denied the request to sign in.')
return flask.redirect(util.get_next_url())
    response = twitter.get('account/verify_credentials.json')
user_db = retrieve_user_from_twitter(response.json())
return auth.signin_user_db(user_db)
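# Editorial summary of the sign-in flow: /signin/twitter/ starts the OAuth1
# handshake, Twitter redirects back to /api/auth/callback/twitter/, which
# exchanges the request token for an access token, fetches the profile via
# verify_credentials, and then signs in (or creates) the matching local user.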
@app.route('/signin/twitter/')
def signin_twitter():
return auth.signin_oauth(twitter)
def retrieve_user_from_twitter(response):
auth_id = 'twitter_%s' % response['id_str']
user_db = model.User.get_by('auth_ids', auth_id)
if user_db:
return user_db
return auth.create_user_db(
auth_id=auth_id,
name=response['name'] or response['screen_name'],
username=response['screen_name'],
)
|
jonathon-love/snapcraft
|
snapcraft/tests/test_plugin_maven.py
|
Python
|
gpl-3.0
| 19,419
| 0.000412
|
# -*- Mode:Python; indent-tabs-mode:nil; tab-width:4 -*-
#
# Copyright (C) 2016 Canonical Ltd
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import io
import os
from unittest import mock
from xml.etree import ElementTree
import fixtures
from testtools.matchers import HasLength
import snapcraft
from snapcraft import tests
from snapcraft.plugins import maven
class MavenPluginTestCase(tests.TestCase):
def setUp(self):
super().setUp()
class Options:
maven_options = []
maven_targets = ['']
self.options = Options()
self.project_options = snapcraft.ProjectOptions()
patcher = mock.patch('snapcraft.repo.Ubuntu')
self.ubuntu_mock = patcher.start()
self.addCleanup(patcher.stop)
@staticmethod
def _canonicalize_settings(settings):
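        # Clears whitespace-only text and tail nodes so that two settings
        # documents that differ only in indentation serialize identically
        # and can be compared as plain strings.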
with io.StringIO(settings) as f:
tree = ElementTree.parse(f)
for element in tree.iter():
if element.text is not None and element.text.isspace():
element.text = None
if element.tail is not None and element.tail.isspace():
element.tail = None
with io.StringIO() as f:
tree.write(
f, encoding='unicode',
default_namespace='http://maven.apache.org/SETTINGS/1.0.0')
return f.getvalue() + '\n'
def test_get_build_properties(self):
expected_build_properties = ['maven-options', 'maven-targets']
resulting_build_properties = maven.MavenPlugin.get_build_properties()
self.assertThat(resulting_build_properties,
HasLength(len(expected_build_properties)))
for property in expected_build_properties:
self.assertIn(property, resulting_build_properties)
def assertSettingsEqual(self, expected, observed):
print(repr(self._canonicalize_settings(expected)))
print(repr(self._canonicalize_settings(observed)))
self.assertEqual(
self._canonicalize_settings(expected),
self._canonicalize_settings(observed))
def test_schema(self):
schema = maven.MavenPlugin.schema()
properties = schema['properties']
self.assertTrue('maven-options' in properties,
'Expected "maven-options" to be included in '
'properties')
maven_options = properties['maven-options']
self.assertTrue(
'type' in maven_options,
'Expected "type" to be included in "maven-options"')
self.assertEqual(maven_options['type'], 'array',
'Expected "maven-options" "type" to be "array", but '
'it was "{}"'.format(maven_options['type']))
self.assertTrue(
'minitems' in maven_options,
'Expected "minitems" to be included in "maven-options"')
self.assertEqual(maven_options['minitems'], 1,
'Expected "maven-options" "minitems" to be 1, but '
'it was "{}"'.format(maven_options['minitems']))
self.assertTrue(
'uniqueItems' in maven_options,
'Expected "uniqueItems" to be included in "maven-options"')
self.assertTrue(
maven_options['uniqueItems'],
'Expected "maven-options" "uniqueItems" to be "True"')
maven_targets = properties['maven-targets']
self.assertTrue(
'type' in maven_targets,
'Expected "type" to be included in "maven-targets"')
self.assertEqual(maven_targets['type'], 'array',
'Expected "maven-targets" "type" to be "array", but '
'it was "{}"'.format(maven_targets['type']))
self.assertTrue(
'minitems' in maven_targets,
'Expected "minitems" to be included in "maven-targets"')
self.assertEqual(maven_targets['minitems'], 1,
'Expected "maven-targets" "minitems" to be 1, but '
'it was "{}"'.format(maven_targets['minitems']))
self.assertTrue(
'uniqueItems' in maven_targets,
'Expected "uniqueItems" to be included in "maven-targets"')
self.assertTrue(
maven_targets['uniqueItems'],
'Expected "maven-targets" "uniqueItems" to be "True"')
@mock.patch.object(maven.MavenPlugin, 'run')
def test_build(self, run_mock):
env_vars = (
('http_proxy', None),
('https_proxy', None),
)
for v in env_vars:
self.useFixture(fixtures.EnvironmentVariable(v[0], v[1]))
plugin = maven.MavenPlugin('test-part', self.options,
self.project_options)
def side(l):
os.makedirs(os.path.join(plugin.builddir, 'target'))
open(os.path.join(plugin.builddir,
'target', 'dummy.jar'), 'w').close()
run_mock.side_effect = side
os.makedirs(plugin.sourcedir)
plugin.build()
run_mock.assert_has_calls([
mock.call(['mvn', 'package']),
])
@mock.patch.object(maven.MavenPlugin, 'run')
def test_build_fail(self, run_mock):
env_vars = (
('http_proxy', None),
('https_proxy', None),
)
for v in env_vars:
self.useFixture(fixtures.EnvironmentVariable(v[0], v[1]))
plugin = maven.MavenPlugin('test-part', self.options,
self.project_options)
os.makedirs(plugin.sourcedir)
self.assertRaises(RuntimeError, plugin.build)
run_mock.assert_has_calls([
            mock.call(['mvn', 'package']),
])
@mock.patch.object(maven.MavenPlugin, 'run')
def test_build_war(self, run_mock):
env_vars = (
('http_proxy', None),
('https_proxy', None),
)
for v in env_vars:
self.useFixture(fixtures.EnvironmentVariable(v[0], v[1]))
        plugin = maven.MavenPlugin('test-part', self.options,
self.project_options)
def side(l):
os.makedirs(os.path.join(plugin.builddir, 'target'))
open(os.path.join(plugin.builddir,
'target', 'dummy.war'), 'w').close()
run_mock.side_effect = side
os.makedirs(plugin.sourcedir)
plugin.build()
run_mock.assert_has_calls([
mock.call(['mvn', 'package']),
])
@mock.patch.object(maven.MavenPlugin, 'run')
def test_build_with_targets(self, run_mock):
env_vars = (
('http_proxy', None),
('https_proxy', None),
)
for v in env_vars:
self.useFixture(fixtures.EnvironmentVariable(v[0], v[1]))
opts = self.options
opts.maven_targets = ['child1', 'child2']
plugin = maven.MavenPlugin('test-part', opts,
self.project_options)
def side(l):
os.makedirs(os.path.join(plugin.builddir,
'child1', 'target'))
os.makedirs(os.path.join(plugin.builddir,
'child2', 'target'))
open(os.path.join(plugin.builddir,
'child1', 'target', 'child1.jar'), 'w').close()
open(os.path.join(plugin.builddir,
'child2', 'target', 'child2.jar'), 'w').close()
run_mock.side_effect = side
os.makedirs(plugin.sourcedir)
plugin.build()
run_mock.assert_has_calls([
mock.call(['mvn', 'package']),
|
arubertoson/mayatest
|
mayatest/__main__.py
|
Python
|
mit
| 131
| 0
|
"""
Used as entry point for mayatest from commandline
"""
if __name__ == "__main__":
from mayatest.cli import main
    main()
|
RyanJenkins/ISS
|
ISS/migrations/0022_poster_posts_per_page.py
|
Python
|
gpl-3.0
| 420
| 0
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('ISS', '0021_poster_auto_subscribe'),
]
operations = [
        migrations.AddField(
model_name='poster',
name='posts_per_page',
field=models.PositiveSmallIntegerField(default=20),
),
]
|
nsdf/nsdf
|
benchmark/plot_profile_data.py
|
Python
|
gpl-3.0
| 6,981
| 0.008308
|
# plot_profile_data.py ---
#
# Filename: plot_profile_data.py
# Description:
# Author: Subhasis Ray
# Maintainer:
# Created: Sat Sep 6 11:19:21 2014 (+0530)
# Version:
# Last-Updated:
# By:
# Update #: 0
# URL:
# Keywords:
# Compatibility:
#
#
# Commentary:
#
#
#
#
# Change log:
#
#
#
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 3, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street, Fifth
# Floor, Boston, MA 02110-1301, USA.
#
#
# Code:
import sys
from collections import namedtuple, defaultdict
import csv
import numpy as np
import matplotlib as mpl
from matplotlib import pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages
sizeOfFont = 12
fontProperties = {'family':'sans-serif','sans-serif':['Arial'],
'weight' : 'normal', 'size' : sizeOfFont}
ticks_font = mpl.font_manager.FontProperties(family='Arial', style='normal',
size=sizeOfFont, weight='normal', stretch='normal')
mpl.rc('font',**fontProperties)
mpl.rc('figure', figsize=(17.35/(2.54*2), 23.35/2.54/3))
# mpl.rc('text', usetex=True)
# mpl.rc('text.latex', preamble=r'\usepackage{cmbright}')
# mpl.rcParams['text.latex.preamble']=[r"\usepackage{amsmath}"]
FIELDNAMES = ['dialect',
'compression',
'increment',
'mincol',
'maxcol',
'sampling',
'sources',
'variables',
'event_data',
'event_ds',
'nonuniform_data',
'nonuniform_ds',
'uniform_data',
'uniform_ds',
'write_data']
KEY_FIELDS = ['dialect',
'compression',
'increment',
# 'mincol',
# 'maxcol',
# 'sampling',
# 'sources',
# 'variables',
]
DATA_FIELDS = ['event_data',
'event_ds',
'nonuniform_data',
'nonuniform_ds',
'uniform_data',
'uniform_ds',
'write_data']
KeyTuple = namedtuple('BenchmarkCond', KEY_FIELDS)
COLORS = {'vlen': 'SteelBlue',
'oned': 'DarkRed',
'nan': 'Orange'}
POS = {'oned': 1,
'vlen': 2,
'nan': 3}
def plot_profile_data(filename):
"""Plot the processed profiling information for different dialect.
The profile data is processed into csv files containing the following
headers:
dialect: dialect of nsdf
compression: compression level. 0 is no compression 6 is medium
compression.
increment: number of columns written at each step for incremental
writing. 0 means fixed dataset.
mincol: minimum number of columns for generated event and
nonuniform data.
maxcol: maximum number of columns for generated event and
nonuniform data. This is also the number of columns for generated
nonuniform data.
sampling: kind of sampling. all means the benchmark script writes
all three kinds of data in a single run.
sources: number of data sources for each variable. This will be
the number of rows in the dataset.
variables: number of variables for each sampling type. Although
the variables could share the same sources, we create different
    source populations for benchmarking purposes.
All the times below are cumulative, i.e. summed over multiple
calls of the function as required to write the entire dataset.
event_data: time to write event data
event_ds: time to write event data sources (dimension scale)
nonuniform_data: time to write nonuniform data
nonuniform_ds: time to write nonuniform data sources (dimension
scale)
uniform_data: time to write uniform data
uniform_ds: time to write uniform data sources (dimension scale)
write_data: total time to write data file (close to the sum of the
above times).
"""
with open(filename, 'rb') as datafile:
reader = csv.DictReader(datafile)
data = defaultdict(dict)
for row in reader:
print row
# return
kdict = {field: row[field] for field in KEY_FIELDS}
key = KeyTuple(**kdict)
for field in DATA_FIELDS:
print field, row[field]
values = data[key].get(field, [])
values.append(float(row[field]))
data[key][field] = values
for field in DATA_FIELDS:
fig = plt.figure(field)
# fig.suptitle(field)
axes_list = []
ax = None
for ii in range(4):
ax = fig.add_subplot(2, 2, ii+1,sharex=ax, sharey=ax)
ax.get_xaxis().set_visible(False)
axes_list.append(ax)
if ii // 2 == 0:
                title = r'Compressed' if ii % 2 else r'Uncompressed'
ax.set_title(title, fontsize=12)
if ii % 2 == 0:
ylabel = '{}\nTime (s)'.format('Fixed' if ii // 2 == 0 else 'Incremental')
ax.set_ylabel(ylabel)
else:
ax.get_yaxis().set_visible(False)
plt.setp(ax, frame_on=False)
for iii, key in enumerate(data):
color = COLORS[key.dialect]
pos = POS[key.dialect]
col = 0 if key.compression == '0' else 1
row = 0 if key.increment == '0' else 1
ax = axes_list[row * 2 + col]
ax.bar([pos], np.mean(data[key][field]), yerr=np.std(data[key][field]),
color=color, ecolor='b', alpha=0.7,
label=key.dialect)
for ax in axes_list:
start, end = ax.get_ylim()
if end < 0.1:
step = 0.05
elif end < 1:
step = 0.5
elif end < 10:
step = 2
elif end < 50:
step = 10
elif end < 100:
step = 30
elif end < 200:
step = 50
else:
step = 100
ax.yaxis.set_ticks(np.arange(0, end + step/2, step))
fig.tight_layout()
fig.savefig('{}.svg'.format(field))
# pdfout = PdfPages('{}.pdf'.format(field))
# pdfout.savefig(fig)
# pdfout.close()
plt.show()
if __name__ == '__main__':
filename = sys.argv[1]
print 'Reading', filename
plot_profile_data(filename)
#
# plot_profile_data.py ends here
|
JessicaNgo/TeleGiphy
|
tele_giphy/tele_giphy/wsgi.py
|
Python
|
mit
| 426
| 0
|
"""
WSGI config for tele_giphy project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/howto/deployment/wsgi/
"""
# Standard Library
import os
# Django
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_
|
SETTINGS_MODULE", "tele_giphy.settings")
applica
|
tion = get_wsgi_application()
|
x86Labs/wal-e
|
wal_e/blobstore/s3/calling_format.py
|
Python
|
bsd-3-clause
| 10,819
| 0
|
import boto
import os
import re
import urlparse
from boto import s3
from boto.s3 import connection
from wal_e import log_help
from wal_e.exception import UserException
logger = log_help.WalELogger(__name__)
_S3_REGIONS = {
# A map like this is actually defined in boto.s3 in newer versions of boto
# but we reproduce it here for the folks (notably, Ubuntu 12.04) on older
# versions.
'ap-northeast-1': 's3-ap-northeast-1.amazonaws.com',
'ap-southeast-1': 's3-ap-southeast-1.amazonaws.com',
'ap-southeast-2': 's3-ap-southeast-2.amazonaws.com',
'eu-west-1': 's3-eu-west-1.amazonaws.com',
'sa-east-1': 's3-sa-east-1.amazonaws.com',
'us-standard': 's3.amazonaws.com',
'us-west-1': 's3-us-west-1.amazonaws.com',
'us-west-2': 's3-us-west-2.amazonaws.com',
}
try:
# Override the hard-coded region map with boto's mappings if
# available.
from boto.s3 import regions
_S3_REGIONS.update(dict((r.name, r.endpoint) for r in regions()))
except ImportError:
pass
def _is_ipv4_like(s):
"""Find if a string superficially looks like an IPv4 address.
AWS documentation plays it fast and loose with this; in other
regions, it seems like even non-valid IPv4 addresses (in
particular, ones that possess decimal numbers out of range for
IPv4) are rejected.
"""
parts = s.split('.')
if len(parts) != 4:
return False
for part in parts:
try:
int(part)
except ValueError:
return False
return True
def _is_mostly_subdomain_compatible(bucket_name):
"""Returns True if SubdomainCallingFormat can be used...mostly
This checks to make sure that putting aside certificate validation
issues that a bucket_name is able to use the
SubdomainCallingFormat.
"""
return (bucket_name.lower() == bucket_name and
len(bucket_name) >= 3 and
len(bucket_name) <= 63 and
'_' not in bucket_name and
'..' not in bucket_name and
'-.' not in bucket_name and
'.-' not in bucket_name and
not bucket_name.startswith('-') and
not bucket_name.endswith('-') and
not bucket_name.startswith('.') and
not bucket_name.endswith('.') and
not _is_ipv4_like(bucket_name))
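# For example (editorial note):
#   _is_mostly_subdomain_compatible('my-bucket') -> True
#   _is_mostly_subdomain_compatible('My_Bucket') -> False (uppercase, underscore)
#   _is_mostly_subdomain_compatible('10.0.0.1')  -> False (looks like IPv4)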
def _connect_secureish(*args, **kwargs):
"""Connect using the safest available options.
This turns on encryption (works in all supported boto versions)
and certificate validation (in the subset of supported boto
versions that can handle certificate validation, namely, those
after 2.6.0).
Versions below 2.6 don't support the validate_certs option to
S3Connection, and enable it via configuration option just seems to
cause an error.
"""
if tuple(int(x) for x in boto.__version__.split('.')) >= (2, 6, 0):
kwargs['validate_certs'] = True
kwargs['is_secure'] = True
return connection.S3Connection(*args, **kwargs)
def _s3connection_opts_from_uri(impl):
# 'impl' should look like:
#
# <protocol>+<calling_format>://[user:pass]@<host>[:port]
#
# A concrete example:
#
# https+virtualhost://user:pass@localhost:1235
o = urlparse.urlparse(impl, allow_fragments=False)
if o.scheme is not None:
proto_match = re.match(
r'(?P<protocol>http|https)\+'
r'(?P<format>virtualhost|path|subdomain)', o.scheme)
if proto_match is None:
raise UserException(
msg='WALE_S3_ENDPOINT URI scheme is invalid',
detail='The scheme defined is ' + repr(o.scheme),
hint='An example of a valid scheme is https+virtualhost.')
opts = {}
if proto_match.group('protocol') == 'http':
opts['is_secure'] = False
else:
# Constrained by prior regexp.
        assert proto_match.group('protocol') == 'https'
opts['is_secure'] = True
f = proto_match.group('format')
if f == 'virtualhost':
opts['calling_format'] = connection.VHostCallingFormat()
elif f == 'path':
opts['calling_format'] = connection.OrdinaryCallingFormat()
elif f == 'subdomain':
opts['calling_format'] = connection.SubdomainCallingFormat()
else:
# Constrained by prior regexp.
assert False
if o.username is not None or o.password is not None:
raise UserException(
msg='WALE_S3_ENDPOINT does not support username or password')
if o.hostname is not None:
opts['host'] = o.hostname
if o.port is not None:
opts['port'] = o.port
if o.path:
raise UserException(
msg='WALE_S3_ENDPOINT does not support a URI path',
detail='Path is {0!r}'.format(o.path))
if o.query:
raise UserException(
msg='WALE_S3_ENDPOINT does not support query parameters')
return opts
class CallingInfo(object):
"""Encapsulate information used to produce a S3Connection."""
def __init__(self, bucket_name=None, calling_format=None, region=None,
ordinary_endpoint=None):
self.bucket_name = bucket_name
self.calling_format = calling_format
self.region = region
self.ordinary_endpoint = ordinary_endpoint
def __repr__(self):
return ('CallingInfo({bucket_name}, {calling_format!r}, {region!r}, '
'{ordinary_endpoint!r})'.format(**self.__dict__))
def __str__(self):
return repr(self)
def connect(self, creds):
"""Return a boto S3Connection set up with great care.
This includes TLS settings, calling format selection, and
region detection.
The credentials are applied by the caller because in many
cases (instance-profile IAM) it is possible for those
credentials to fluctuate rapidly. By comparison, region
fluctuations of a bucket name are not nearly so likely versus
the gains of not looking up a bucket's region over and over.
"""
def _conn_help(*args, **kwargs):
return _connect_secureish(
*args,
provider=creds,
calling_format=self.calling_format(),
**kwargs)
# If WALE_S3_ENDPOINT is set, do not attempt to guess
# the right calling conventions and instead honor the explicit
# settings within WALE_S3_ENDPOINT.
impl = os.getenv('WALE_S3_ENDPOINT')
if impl:
return connection.S3Connection(**_s3connection_opts_from_uri(impl))
# Check if subdomain format compatible; no need to go through
# any region detection mumbo-jumbo of any kind.
if self.calling_format is connection.SubdomainCallingFormat:
return _conn_help()
# Check if OrdinaryCallingFormat compatible, but also see if
# the endpoint has already been set, in which case only
# setting the host= flag is necessary.
assert self.calling_format is connection.OrdinaryCallingFormat
if self.ordinary_endpoint is not None:
return _conn_help(host=self.ordinary_endpoint)
# By this point, this is an OrdinaryCallingFormat bucket that
# has never had its region detected in this CallingInfo
        # instance. So, detect its region (this can happen without
# knowing the right regional endpoint) and store it to speed
# future calls.
assert self.calling_format is connection.OrdinaryCallingFormat
assert self.region is None
assert self.ordinary_endpoint is None
conn = _conn_help()
bucket = s3.bucket.Bucket(connection=conn,
name=self.bucket_name)
try:
loc = bucket.get_location()
except boto.exception.S3ResponseError, e:
if e.status == 403:
# A 403 can be caused by IAM keys that do not permit
# GetBucketLocation. To not change behavior for
# environments that do not have GetBucketLocation
# allowed, fall back to the default endpoint,
# preser
|
littlevgl/lvgl
|
scripts/release/main.py
|
Python
|
mit
| 1,494
| 0.005355
|
#!/usr/bin/env python
import os.path
from os import path
from datetime import date
import sys
import com
import release
import dev
import proj
upstream_org_url = "https://github.com/lvgl/"
workdir = "./release_tmp"
proj_list = [ "lv_sim_eclipse_sdl", "lv_sim_emscripten"]
def upstream(repo):
return upstream_org_url + repo + ".git"
def clone(repo):
com.cmd("git clone --recurse-submodules " + upstream(repo))
os.chdir("./" + repo)
com.cmd("git checkout master")
com.cmd("git remote update origin --prune")
com.cmd("git pull origin --tags")
os.chdir("..")
def clone_repos():
com.cmd("rm -fr " + workdir)
com.cmd("mkdir " + workdir)
os.chdir(workdir)
clone("lvgl")
clone("lv_examples")
clone("lv_drivers")
clone("docs")
clone("blog")
for p in proj_list:
clone(p)
def cleanup():
os.chdir("../")
com.cmd("rm -fr " + workdir)
if __name__ == '__main__':
prepare_type = ['major', 'minor', 'bugfix']
dev_prepare = 'minor'
# if(len(sys.argv) != 2):
# print("Missing argument. Usage ./release.py bugfix | minor | major")
# print("Use minor by default")
# else:
# dev_prepare = sys.argv[1]
if not (dev_prepare in prepare_type):
print("Invalid argument. Usage ./release.py bugfix | minor | ma
|
jor")
|
exit(1)
#os.chdir(workdir)
clone_repos()
release.make()
for p in proj_list:
proj.make(p, True)
dev.make(dev_prepare)
#cleanup()
|
google/fhir
|
py/google/fhir/utils/fhir_types_test.py
|
Python
|
apache-2.0
| 6,987
| 0.00458
|
#
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test fhir_types functionality."""
from absl.testing import absltest
from proto.google.fhir.proto.r4 import fhirproto_extensions_pb2
from proto.google.fhir.proto.r4 import uscore_pb2
from proto.google.fhir.proto.r4.core import datatypes_pb2
from proto.google.fhir.proto.r4.core.resources import patient_pb2
from google.fhir.utils import fhir_types
class FhirTypesTest(absltest.TestCase):
"""Tests functionality provided by the fhir_types module."""
def testIsCode_withCode_returnsTrue(self):
"""Tests that is_code returns True when given a Code."""
self.assertTrue(fhir_types.is_code(datatypes_pb2.Code()))
def testIsCode_withProfileOfCode_returnsFalse(self):
"""Tests that is_code returns False when given a profile of Code."""
self.assertFalse(fhir_types.is_code(datatypes_pb2.Address.UseCode()))
def testIsProfileOfCode_withProfileOfCode_returnsTrue(self):
"""Tests that is_profile_of_code returns True for a profile of Code."""
self.assertTrue(
fhir_types.is_profile_of_code(datatypes_pb2.Address.UseCode()))
def testIsProfileOfCode_withCode_returnsFalse(self):
"""Tests that is_profile_of_code returns False for a base Code."""
self.assertFalse(fhir_types.is_profile_of_code(datatypes_pb2.Code()))
def testIsTypeOrProfileOfCode_withProfileOfCode_returnsTrue(self):
"""Tests that is_type_or_profile_of_code returns True for a profile."""
self.assertTrue(
fhir_types.is_type_or_profile_of_code(datatypes_pb2.Address.UseCode()))
def testIsTypeOrProfileOfCode_withCode_returnsTrue(self):
"""Tests that is_type_or_profile_of_code returns True for a base Code."""
self.assertTrue(fhir_types.is_type_or_profile_of_code(datatypes_pb2.Code()))
def testIsTypeOrProfileOfCode_withNonCode_returnsFalse(self):
"""Tests that is_type_or_profile_of_code returns False for a non-Code."""
self.assertFalse(
fhir_types.is_type_or_profile_of_code(patient_pb2.Patient()))
def testIsCoding_withCoding_returnsTrue(self):
"""Tests that is_coding returns True when given a Coding instance."""
self.assertTrue(fhir_types.is_coding(datatypes_pb2.Coding()))
def testIsCoding_withProfileOfCoding_returnsFalse(self):
"""Tests that is_coding returns False when given a profile."""
self.assertFalse(fhir_types.is_coding(datatypes_pb2.CodingWithFixedCode()))
def testIsProfileOfCoding_withCoding_returnsTrue(self):
"""Tests that is_profile_of_coding returns True for a profile."""
self.assertTrue(
fhir_types.is_profile_of_coding(datatypes_pb2.CodingWithFixedCode()))
def testIsProfileOfCoding_withCoding_returnsFalse(self):
"""Tests that is_profile_of_coding returns False for a base Coding type."""
self.assertFalse(fhir_types.is_profile_of_coding(datatypes_pb2.Coding()))
def testIsTypeOrProfileOfCoding_withCoding_returnsTrue(self):
"""Tests that is_type_or_profile_of_coding returns True for profile."""
self.assertTrue(
fhir_types.is_type_or_profile_of_coding(
datatypes_pb2.CodingWithFixedCode()))
def testIsTypeOrProfileOfCoding_withNonCoding_returnsFalse(self):
"""Tests that is_type_or_profile_of_coding returns False for non-Coding."""
self.assertFalse(
fhir_types.is_type_or_profile_of_coding(patient_pb2.Patient()))
def testIsPeriod_withPeriod_returnsTrue(self):
"""Tests that is_period returns True when given a Period instance."""
self.assertTrue(fhir_types.is_period(datatypes_pb2.Period()))
def testIsPeriod_withCoding_returnsFalse(self):
"""Tests that is_period returns False when given a profile of Coding."""
self.assertFalse(fhir_types.is_period(datatypes_pb2.Coding()))
def testIsDateTime_withDateTime_returnsTrue(self):
"""Tests that is_date_time returns True when given a DateTime instance."""
self.assertTrue(fhir_types.is_date_time(datatypes_pb2.DateTime()))
def testIsDateTime_withCoding_returnsFalse(self):
"""Tests that is_date_time returns False when given a profile of Coding."""
self.assertFalse(fhir_types.is_date_time(datatypes_pb2.Coding()))
def testIsExtension_withExtension_returnsTrue(self):
"""Tests that is_extension returns True when given an Extension."""
self.assertTrue(fhir_types.is_extension(datatypes_pb2.Extension()))
def testIsExtension_withDateTime_returnsFalse(self):
"""Tests that is_extension returns False when given a DateTime."""
self.assertFalse(fhir_types.is_extension(datatypes_pb2.DateTime()))
def testIsProfileOfExtension_withBase64BinarySeparatorStride_returnsTrue(
self):
"""Tests that is_profile_of_extension returns True for valid profile."""
self.assertTrue(
fhir_types.is_profile_of_extension(
fhirproto_extensions_pb2.Base64BinarySeparatorStride()))
def testIsTypeOrProfileOfExtension_withExtension_returnsTrue(self):
"""Tests that is_type_or_profile_of_extension returns True for Extension."""
self.assertTrue(
fhir_types.is_type_or_profile_of_extension(datatypes_pb2.Extension()))
def testIsTypeOrProfileOfExtension_withExtensionProfile_returnsTrue(self):
"""Tests that is_type_or_profile_of_extension returns True for profile."""
self.assertTrue(
        fhir_types.is_type_or_profile_of_extension(
fhirproto_extensions_pb2.Base64BinarySeparatorStride()))
def testIsTypeOrProfileOfExtensions_withDateTime_returnsFalse(self):
"""Tests that is_type_or_profile_of_extension returns False for DateTime."""
self.assertFalse(
fhir_types.is_type_or_profile_of_extension(datatypes_pb2.DateTime()))
def testIsTypeOrProfileOfPatient_withPatient_returnsTrue(self):
"""Tests that IsTypeOfProfileOfPatient returns True for a Patient type."""
self.assertTrue(
fhir_types.is_type_or_profile_of_patient(patient_pb2.Patient()))
def testIsTypeOrProfileOfPatient_withCoding_returnsFalse(self):
"""Tests that IsTypeOfProfileOfPatient returns False for a Coding type."""
self.assertFalse(
fhir_types.is_type_or_profile_of_patient(datatypes_pb2.Coding()))
def testIsTypeOrProfileOfPatient_withPatientProfile_returnsTrue(self):
"""Tests that IsTypeOfProfileOfPatient returns True for Patient profile."""
self.assertTrue(
fhir_types.is_type_or_profile_of_patient(
uscore_pb2.USCorePatientProfile()))
if __name__ == '__main__':
absltest.main()
|
brainwane/zulip
|
zerver/management/commands/generate_multiuse_invite_link.py
|
Python
|
apache-2.0
| 1,624
| 0.001847
|
from argparse import ArgumentParser
from typing import Any, List
from zerver.lib.actions import do_create_multiuse_invite_link, ensure_stream
from zerver.lib.management import ZulipBaseCommand
from zerver.models import PreregistrationUser, Stream
class Command(ZulipBaseCommand):
help = "Generates invite link that can be used for inviting multiple users"
def add_arguments(self, parser: ArgumentParser) -> None:
self.add_realm_args(parser, True)
parser.add_argument(
'-s', '--streams',
dest='streams',
type=str,
help='A comma-separated list of stream names.')
parser.add_argument(
'--referred-by',
dest='referred_by',
type=str,
help='Email of referrer',
required=True,
)
def handle(self, *args: Any, **options: Any) -> None:
realm = self.get_realm(options)
        assert realm is not None # Should be ensured by parser
streams: List[Stream] = []
if options["streams"]:
stream_names = {stream.strip() for stream in options["streams"].split(",")}
for stream_name in set(stream_names):
stream = ensure_stream(realm, stream_name, acting_user=None)
streams.append(stream)
referred_by = self.get_user(options['referred_by'], realm)
invite_as = PreregistrationUser.INVITE_AS['MEMBER']
invite_link = do_create_multiuse_invite_link(referred_by, invite_as, streams)
print(f"You can use {invite_link} to invite as many number of people to the organization.")
|
juanma1980/lliurex-scheduler
|
client-scheduler.install/usr/share/n4d/python-plugins/SchedulerClient.py
|
Python
|
gpl-3.0
| 4,262
| 0.056546
|
#!/usr/bin/env python
###
#
###
# -*- coding: utf-8 -*-
import os,socket,signal
import threading
import time
from datetime import date
import xmlrpclib as xmlrpc
class SchedulerClient():
def __init__(self):
self.cron_dir='/etc/cron.d'
self.task_prefix='remote-' #Temp workaround->Must be declared on a n4d var
self.count=0
self.dbg=0
self.holidays_shell="/usr/bin/check_holidays.py"
self.pidfile="/tmp/taskscheduler.pid"
def startup(self,options):
t=threading.Thread(target=self._main_thread)
t.daemon=True
t.start()
def _debug(self,msg):
if self.dbg:
print("%s"%msg)
def _main_thread(self):
objects["VariablesManager"].register_trigger("SCHEDULED_TASKS","SchedulerClient",self.process_tasks)
tries=10
for x in range (0,tries):
self.scheduler_var=objects["VariablesManager"].get_variable("SCHEDULED_TASKS")
if self.scheduler_var!=self.count:
self.count=self.scheduler_var
self.process_tasks()
break
else:
time.sleep(1)
def process_tasks(self,data=None):
self._debug("Scheduling tasks")
today=date.today()
prefixes={'remote':True,'local':False}
tasks={}
try:
socket.gethostbyname('server')
except:
prefixes={'local':False}
for prefix,sw_remote in prefixes.iteritems():
if prefix=='remote':
n4d=xmlrpc.ServerProxy("https://server:9779")
tasks=n4d.get_remote_tasks("","SchedulerServer")['data'].copy()
else:
n4d=xmlrpc.ServerProxy("https://localhost:9779")
tasks=n4d.get_local_tasks("","SchedulerServer")['data'].copy()
#Delete files
for f in os.listdir(self.cron_dir):
if f.startswith(prefix):
os.remove(self.cron_dir+'/'+f)
#Create the cron files
for name in tasks.keys():
task_names={}
self._debug("Processing task: %s"%name)
for serial in tasks[name].keys():
self._debug("Item %s"%serial)
sw_pass=False
			if 'autoremove' in tasks[name][serial]:
if (tasks[name][serial]['mon'].isdigit()):
mon=int(tasks[name][serial]['mon'])
if mon<today.month:
sw_pass=True
				if sw_pass==False:
if (tasks[name][serial]['dom'].isdigit()):
dom=int(tasks[name][serial]['dom'])
if dom<today.day:
sw_pass=True
if sw_pass:
continue
self._debug("Scheduling %s"%name)
fname=name.replace(' ','_')
task_names[fname]=tasks[name][serial].copy()
self._write_crontab_for_task(task_names,prefix)
#Launch refresh signal to gui
if os.path.isfile(self.pidfile):
with open(self.pidfile,'r') as p_file:
pid=p_file.read()
try:
os.kill(int(pid),signal.SIGUSR1)
except:
pass
#def process_tasks
def _write_crontab_for_task(self,ftask,prefix):
cron_array=[]
for task_name,task_data in ftask.iteritems():
self._debug("Writing data %s: %s"%(task_name,task_data))
fname=self.cron_dir+'/'+prefix+task_name.replace(' ','_')
m=task_data['m']
h=task_data['h']
dom=task_data['dom']
mon=task_data['mon']
if '/' in m:
m=m.replace('0/','*/')
if '/' in h:
h=h.replace('0/','*/')
if '/' in dom:
dom=dom.replace('1/','*/')
if '/' in mon:
mon=mon.replace('1/','*/')
cron_task=("%s %s %s %s %s root %s"%(m,h,dom,mon,task_data['dow'],u""+task_data['cmd']))
if 'holidays' in task_data.keys():
if task_data['holidays']:
cron_task=("%s %s %s %s %s root %s && %s"%(m,h,dom,mon,task_data['dow'],self.holidays_shell,u""+task_data['cmd']))
cron_array.append(cron_task)
if task_data:
if os.path.isfile(fname):
mode='a'
else:
mode='w'
with open(fname,mode) as data:
if mode=='w':
data.write('#Scheduler tasks\n')
data.write('SHELL=/bin/bash\n')
data.write('PATH=/usr/local/sbin:/usr/local/bin:/sbin:/bin:/usr/sbin:/usr/bin\n')
data.write('DISPLAY=:0\n')
data.write('XAUTHORITY=/var/run/lightdm/root/:0\n')
if 'https_proxy' in os.environ.keys():
https_proxy=os.environ['https_proxy']
data.write('https_proxy=%s\n'%https_proxy)
if 'http_proxy' in os.environ.keys():
http_proxy=os.environ['http_proxy']
data.write('http_proxy=%s\n'%http_proxy)
for cron_line in cron_array:
data.write(cron_line.encode('utf8')+"\n")
#def _write_crontab_for_task
|
ricardosiri68/patchcap
|
cam/wsse.py
|
Python
|
gpl-2.0
| 2,243
| 0.001783
|
from base64 import b64encode
from suds.wsse import UsernameToken, Token
try:
from hashlib import sha1, md5
except:
from sha import new as sha1
class UsernameDigestToken(UsernameToken):
"""
Represents a basic I{UsernameToken} WS-Security token with password digest
@ivar username: A username.
@type username: str
@ivar password: A password.
@type password: str
    @ivar nonce: A set of bytes to prevent replay attacks.
@type nonce: str
@ivar created: The token created.
@type created: L{datetime}
@doc: http://docs.oasis-open.org/wss/2004/01/oasis-200401-wss-username-token-profile-1.0.pdf
"""
def __init__(self, username=None, password=None):
UsernameToken.__init__(self, username, password)
self.setcreated()
self.setnonce()
def setnonce(self, text=None):
"""
        Set I{nonce}, which is an arbitrary set of bytes used to prevent
        replay attacks.
@param text: The nonce text value.
Generated when I{None}.
@type text: str
@override: Nonce save binary string to build digest password
"""
if text is None:
s = []
s.append(self.username)
s.append(self.password)
s.append(Token.sysdate())
m = md5()
m.update(':'.join(s))
self.raw_nonce = m.digest()
self.nonce = b64encode(self.raw_nonce)
else:
self.nonce = text
def xml(self):
usernametoken = UsernameToken.xml(self)
password = usernametoken.getChild('Password')
nonce = usernametoken.getChild('Nonce')
created = usernametoken.getChild('Created')
        password.set('Type', 'http://docs.oasis-open.org/wss/2004/01/'
'oasis-200401-wss-username-token-profile-1.0'
'#PasswordDigest')
s = sha1()
s.update(self.raw_nonce)
s.update(created.getText())
        s.update(password.getText())
password.setText(b64encode(s.digest()))
nonce.set('EncodingType', 'http://docs.oasis-open.org/wss/2004'
'/01/oasis-200401-wss-soap-message-security-1.0#Base64Binary')
return usernametoken
|
medo/Pandas-Farm
|
master/server.py
|
Python
|
mit
| 761
| 0.00657
|
import os, logging
from xmlrpc.server import SimpleXMLRPCServer
from xmlrpc.server import SimpleXMLRPCRequestHandler
from master.server_handler import ServerHandler
from xmlrpc.client import Binary
HOST = "0.0.0.0"
PORT = int(os.getenv('PORT', 5555))
ENDPOINT = 'RPC2'
logging.basicConfig(level=logging.INFO)
class RequestHandler(SimpleXMLRPCRequestHandler):
# rpc_paths = ('RPC2',)
def log_message(self, format, *args):
logging.debug(format)
def start():
server = SimpleXMLRPCServer((HOST, PORT), requestHandler=RequestHandler,
allow_none=True, use_builtin_types=True)
server.register_instance(ServerHandler())
logging.info("Server is listening on " + HOST + ":" + str(PORT) + "/" + ENDPOINT)
server.serve_forever()
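# A client could reach this server with something like (hypothetical method
# name; anything exposed by ServerHandler works):
#   from xmlrpc.client import ServerProxy
#   proxy = ServerProxy("http://localhost:5555/RPC2", use_builtin_types=True)
#   proxy.some_handler_method()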
|
smartyrad/Python-scripts-for-web-scraping
|
beauty.py
|
Python
|
gpl-3.0
| 314
| 0.012739
|
import urllib
import lxml.html
connection = urllib.urlopen('http://www.amazon.in/s/ref=nb_sb_noss?url=search-alias%3Daps&field-keywords=iphone')
dom = lxml.html.fromstring(connection.read())
for link in dom.xpath('//li[@id="result_0"]/@data-asin'): # select the data-asin attribute of the first result item
print link
|
tomislacker/python-iproute2
|
pyroute2/ipdb/common.py
|
Python
|
apache-2.0
| 272
| 0
|
# How long should we wait on EACH commit() checkpoint: for ipaddr,
# ports etc. That's not total commit() timeout.
SYNC_TIMEOUT = 5
class DeprecationException(Exception):
pass
class CommitException(Exception):
pass
class CreateException(Exception):
pass
|
rkunze/ft-robo-snap
|
ftrobopy/examples/ftDigiCam.py
|
Python
|
agpl-3.0
| 6,070
| 0.020428
|
######################################
# Example: ftDigiCam.py
# Digital Camera with live
# video stream to TXT display
# and autofocus functionality
# (c) 2016 by Torsten Stuehn
# version 0.8 from 2016-02-12
######################################
# Python2/3 'print' compatibility
from __future__ import print_function
import ftrobopy
import ftrobopytools
from os import system
import time
txt = ftrobopy.ftrobopy(host ='127.0.0.1',
port = 65000,
update_interval = 0.01,
keep_connection_interval = 1.0)
run_ave_contrast = 8
hist_minlength = 10
hist_maxlength = 20
fname_prefix = 'PICT'
displayLiveStream = 1 # live video on TXT display, 0=no 1=yes
# definition of TXT outputs and inputs
FocusMotor = txt.motor(1) # the focus motor is connected to M1
Switch = txt.input(1) # the switch is connected to I1
if displayLiveStream:
# make backup copy of TXT display contents
with open('/dev/fb0', 'rb') as f:
framebuffer_backup=f.read()
try:
# initialize camera (/dev/video0) and
fps = 15 # frames per second
width = 320 # width of camera image
height = 240 # height of camera image
videv = ftrobopytools.camInit(fps, width, height, 0, 0)
if displayLiveStream:
# initialize Standard Display Library (SDL)
# for access to display of TXT
ftrobopytools.sdlInit()
# reset text/cmd console (compromized by SDL)
system('reset')
contrast = [0]
hist_contrast = [0]
hist_counter = [0]
contrast_counter_shift = 3
ave_contrast = 0
xtopleft = 10 # width / 2 - width / 4
ytopleft = 10 # height / 2 - height / 8
xbottomright = 310 # width / 2 + width / 8
ybottomright = 120 # height / 2 + height / 8
state = 0
dir = -1
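  # Autofocus state machine driving the loop below (editorial summary):
  #   0 - idle, wait for the switch on I1
  #   1 - start sweeping the focus motor in one direction
  #   2 - sweep until the running-average contrast drops, then reverse
  #   3 - reverse sweep until contrast drops again
  #   4 - step back towards the best-contrast position and take the picture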
for i in range(1000):
contr = ftrobopytools.measureContrast(videv,
width, height,
xtopleft, ytopleft, xbottomright, ybottomright,
displayLiveStream)
if contr:
contrast.append(contr)
if len(contrast) > run_ave_contrast:
contrast = contrast[1:]
ave_contrast = sum(contrast)/len(contrast)
motor_counter = FocusMotor.getCurrentDistance()
contrast_variation = 0
for i in contrast:
if i != ave_contrast:
contrast_variation = 1
hist_contrast.append(ave_contrast)
if len(hist_contrast) > hist_maxlength:
hist_contrast = hist_contrast[1:]
hist_counter.append(motor_counter)
if len(hist_counter) > hist_maxlength:
hist_counter = hist_counter[1:]
#if state == 2 or state == 3 or state == 4:
if True:
print(hist_contrast)
#print(hist_counter)
if state == 0:
if Switch.state() != 0:
# dir = -dir
state = 1
if state == 1:
print("state 1: start focus motor")
# start increasing focus
FocusMotor.setDistance(3000)
FocusMotor.setSpeed(512*dir)
hist_contrast = [0]
hist_counter = [0]
state = 2
if state == 2:
if len(hist_contrast) > hist_minlength and ( hist_contrast[-1] < hist_contrast[-2] or contrast_variation == 0 ):
print("state 2: contrast_variation",contrast_variation)
hist_contrast = [0]
hist_counter = [0]
FocusMotor.stop()
# start decreasing focus
FocusMotor.setDistance(3000)
FocusMotor.setSpeed(-512*dir)
state = 3
if state == 3:
if len(hist_contrast) > hist_minlength and ( hist_contrast[-1] < hist_contrast[-2] or contrast_variation == 0 ):
print("state 3: contrast_variation",contrast_variation)
FocusMotor.stop()
# increase focus to maximum contrast
idx = hist_contrast.index(max(hist_contrast))
bestfocus_counter = hist_counter[idx]
#FocusMotor.setDistance(hist_counter[-(1+contrast_counter_shift)] - bestfocus_counter)
FocusMotor.setDistance(300)
FocusMotor.setSpeed(512*dir)
state = 4
if state == 4:
if FocusMotor.finished():
# save jpeg in high resolution (1280x720)
print("taking snapshot at high resolution ...")
# close (low resolution) camera device
ftrobopytools.camClose(videv, 0)
# open (high resolution) camera device
high_fps = 5 # 5 is the lowest possible framerate of the TXT camera
high_width = 1280 # 1280 is the maximum horizontal resolution of the TXT camera
high_height = 720 # 720 is the maximum vertical resolution of the TXT camera
videv = ftrobopytools.camInit(high_fps, high_width, high_height, 0, 0)
# get high resolution snapshot as jpg image
jpg = ftrobopytools.getJPGImage(videv)
# close (high resolution) camera device
ftrobopytools.camClose(videv, 0)
# restore resolution for liveStreaming
videv = ftrobopytools.camInit(fps, width, height, 0, 0)
# save jpeg to file and increment picture count index
try:
with open(fname_prefix+'IDX','r') as f:
pict_number = int(f.read())
except:
pict_number = 0
with open(fname_prefix + '%04i' % pict_number +'.JPG', 'wb') as f:
f.write(jpg)
with open(fname_prefix+'IDX','w') as f:
f.write(str(pict_number + 1))
# ready for the next picture
hist_contrast = [0]
hist_counter = [0]
state = 0
except Exception as error:
print(error)
finally:
# close camera device
ftrobopytools.camClose(videv, 0)
if displayLiveStream:
# close Standard Display Library
ftrobopytools.sdlClose()
# restore TXT display
with open('/dev/fb0', 'wb') as f:
f.write(framebuffer_backup)
|
beernarrd/gramps
|
gramps/plugins/export/exportvcard.py
|
Python
|
gpl-2.0
| 12,904
| 0.00279
|
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2004 Martin Hawlisch
# Copyright (C) 2005-2008 Donald N. Allingham
# Copyright (C) 2008 Brian G. Matherly
# Copyright (C) 2010 Jakim Friant
# Copyright (C) 2011 Michiel D. Nauta
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
"Export Persons to vCard (RFC 2426)."
#-------------------------------------------------------------------------
#
# Standard Python Modules
#
#-------------------------------------------------------------------------
import sys
from textwrap import TextWrapper
#------------------------------------------------------------------------
#
# Set up logging
#
#------------------------------------------------------------------------
import logging
import collections
log = logging.getLogger(".ExportVCard")
#-------------------------------------------------------------------------
#
# Gramps modules
#
#-------------------------------------------------------------------------
from gramps.gen.const import GRAMPS_LOCALE as glocale
_ = glocale.translation.gettext
from gramps.gen.const import PROGRAM_NAME
from gramps.version import VERSION
from gramps.gen.lib import Date, Person
from gramps.gen.lib.urltype import UrlType
from gramps.gui.plug.export import WriterOptionBox
from gramps.gen.lib.eventtype import EventType
from gramps.gen.display.name import displayer as _nd
from gramps.gen.plug.utils import OpenFileOrStdout
#-------------------------------------------------------------------------
#
# Support Functions
#
#-------------------------------------------------------------------------
def exportData(database, filename, user, option_box=None):
"""Function called by Gramps to export data on persons in VCard format."""
cardw = VCardWriter(database, filename, option_box, user)
try:
cardw.export_data()
except EnvironmentError as msg:
user.notify_error(_("Could not create %s") % filename, str(msg))
return False
except:
# Export shouldn't bring Gramps down.
user.notify_error(_("Could not create %s") % filename)
return False
return True
#-------------------------------------------------------------------------
#
# VCardWriter class
#
#-------------------------------------------------------------------------
class VCardWriter:
"""Class to create a file with data in VCard format."""
LINELENGTH = 73 # unclear if the 75 chars of spec includes \r\n.
ESCAPE_CHAR = '\\'
TOBE_ESCAPED = ['\\', ',', ';'] # order is important
LINE_CONTINUATION = [' ', '\t']
@staticmethod
def esc(data):
"""Escape the special chars of the VCard protocol."""
if isinstance(data, str):
for char in VCardWriter.TOBE_ESCAPED:
data = data.replace(char, VCardWriter.ESCAPE_CHAR + char)
return data
elif type(data) == type([]):
return list(map(VCardWriter.esc, data))
elif type(data) == type(()):
return tuple(map(VCardWriter.esc, data))
else:
raise TypeError("VCard escaping is not implemented for "
"data type %s." % str(type(data)))
def __init__(self, database, filename, option_box=None, user=None):
self.db = database
self.filename = filename
self.user = user
self.filehandle = None
self.option_box = option_box
if isinstance(self.user.callback, collections.Callable): # callback is really callable
self.update = self.update_real
else:
self.update = self.update_empty
if option_box:
self.option_box.parse_options()
self.db = option_box.get_filtered_database(self.db)
self.txtwrp = TextWrapper(width=self.LINELENGTH,
expand_tabs=False,
replace_whitespace=False,
drop_whitespace=False,
subsequent_indent=self.LINE_CONTINUATION[0])
self.count = 0
self.total = 0
def update_empty(self):
"""Progress can't be reported."""
pass
def update_real(self):
"""Report progress."""
self.count += 1
newval = int(100*self.count/self.total)
if newval != self.oldval:
self.user.callback(newval)
self.oldval = newval
def writeln(self, text):
"""
Write a property of the VCard to file.
Can't cope with nested VCards, section 2.4.2 of RFC 2426.
"""
self.filehandle.write('%s\r\n' % '\r\n'.join(
[line for line in self.txtwrp.wrap(text)]))
def export_data(self):
"""Open the file and loop over everyone too write their VCards."""
with OpenFileOrStdout(self.filename, encoding='utf-8',
errors='strict', newline='') as self.filehandle:
if self.filehandle:
self.count = 0
self.oldval = 0
self.total = self.db.get_number_of_people()
for key in sorted(list(self.db.iter_person_handles())):
self.write_person(key)
self.update()
return True
def write_person(self, person_handle):
"""Create a VCard for the specified person."""
person = self.db.get_person_from_handle(person_handle)
if person:
self.write_header()
prname = person.get_primary_name()
self.write_formatted_name(prname)
self.write_name(prname)
self.write_sortstring(prname)
self.write_nicknames(person, prname)
self.write_gender(person)
self.write_birthdate(person)
self.write_addresses(person)
self.write_urls(person)
self.write_occupation(person)
self.write_footer()
def write_header(self):
"""Write the opening lines of a VCard."""
self.writeln("BEGIN:VCARD")
self.writeln("VERSION:3.0")
self.writeln("PRODID:-//Gramps//NONSGML %s %s//EN" %
(PROGRAM_NAME, VERSION))
def write_footer(self):
"""Write the closing lines of a VCard."""
self.writeln("END:VCARD")
self.writeln("")
def write_formatted_name(self, prname):
"""Write the compulsory FN property of VCard."""
regular_name = prname.get_regular_name().strip()
title = prname.get_title()
if title:
regular_name = "%s %s" % (title, regular_name)
self.writeln("FN:%s" % self.esc(regular_name))
def write_name(self, prname):
"""Write the compulsory N property of a VCard."""
family_name = ''
given_name = ''
additional_names = ''
hon_prefix = ''
suffix = ''
primary_surname = prname.get_primary_surname()
surname_list = prname.get_surname_list()
if not surname_list[0].get_primary():
surname_list.remove(primary_surname)
surname_list.insert(0, primary_surname)
family_name = ','.join(self.esc([("%s %s %s" % (surname.get_prefix(),
surname.get_surname(), surname.get_connector())).strip()
for surname in surname_list]))
call_name = prname.get_call_name()
if call_name:
given_name = self.esc(call_name)
additional_name_list = prname.get_first_name().split()
            if call_name in additional_name_list:
                additional_name_list.remove(call_name)
priya-pp/Tacker | tacker/tests/unit/test_auth.py | Python | apache-2.0 | 4,418 | 0
# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_middleware import request_id
import webob
from tacker import auth
from tacker.tests import base
class TackerKeystoneContextTestCase(base.BaseTestCase):
def setUp(self):
super(TackerKeystoneContextTestCase, self).setUp()
@webob.dec.wsgify
def fake_app(req):
self.context = req.environ['tacker.context']
return webob.Response()
self.context = None
self.middleware = auth.TackerKeystoneContext(fake_app)
self.request = webob.Request.blank('/')
self.request.headers['X_AUTH_TOKEN'] = 'testauthtoken'
def test_no_user_id(self):
self.request.headers['X_PROJECT_ID'] = 'testtenantid'
response = self.request.get_response(self.middleware)
self.assertEqual(response.status, '401 Unauthorized')
def test_with_user_id(self):
self.request.headers['X_PROJECT_ID'] = 'testtenantid'
self.request.headers['X_USER_ID'] = 'testuserid'
response = self.request.get_response(self.middleware)
self.assertEqual(response.status, '200 OK')
self.assertEqual(self.context.user_id, 'testuserid')
self.assertEqual(self.context.user, 'testuserid')
def test_with_tenant_id(self):
self.request.headers['X_PROJECT_ID'] = 'testtenantid'
self.request.headers['X_USER_ID'] = 'test_user_id'
response = self.request.get_response(self.middleware)
self.assertEqual(response.status, '200 OK')
self.assertEqual(self.context.tenant_id, 'testtenantid')
self.assertEqual(self.context.tenant, 'testtenantid')
def test_roles_no_admin(self):
self.request.headers['X_PROJECT_ID'] = 'testtenantid'
self.request.headers['X_USER_ID'] = 'testuserid'
        self.request.headers['X_ROLES'] = 'role1, role2 , role3,role4,role5'
response = self.request.get_response(self.middleware)
self.assertEqual(response.status, '200 OK')
self.assertEqual(self.context.roles, ['role1', 'role2', 'role3',
'role4', 'role5'])
self.assertEqual(self.context.is_admin, False)
def test_roles_with_admin(self):
self.request.headers['X_PROJECT_ID'] = 'testtenantid'
self.request.headers['X_USER_ID'] = 'testuserid'
self.request.headers['X_ROLES'] = ('role1, role2 , role3,role4,role5,'
'AdMiN')
response = self.request.get_response(self.middleware)
self.assertEqual(response.status, '200 OK')
self.assertEqual(self.context.roles, ['role1', 'role2', 'role3',
'role4', 'role5', 'AdMiN'])
self.assertEqual(self.context.is_admin, True)
def test_with_user_tenant_name(self):
self.request.headers['X_PROJECT_ID'] = 'testtenantid'
self.request.headers['X_USER_ID'] = 'testuserid'
self.request.headers['X_PROJECT_NAME'] = 'testtenantname'
self.request.headers['X_USER_NAME'] = 'testusername'
response = self.request.get_response(self.middleware)
self.assertEqual(response.status, '200 OK')
self.assertEqual(self.context.user_id, 'testuserid')
self.assertEqual(self.context.user_name, 'testusername')
self.assertEqual(self.context.tenant_id, 'testtenantid')
self.assertEqual(self.context.tenant_name, 'testtenantname')
def test_request_id_extracted_from_env(self):
req_id = 'dummy-request-id'
self.request.headers['X_PROJECT_ID'] = 'testtenantid'
self.request.headers['X_USER_ID'] = 'testuserid'
self.request.environ[request_id.ENV_REQUEST_ID] = req_id
self.request.get_response(self.middleware)
self.assertEqual(req_id, self.context.request_id)
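
# Sketch of the header parsing the two role tests above exercise (assumption:
# this mirrors what TackerKeystoneContext does internally; the real logic
# lives in tacker/auth.py, which is not shown here):
#
#     roles = [r.strip() for r in headers['X_ROLES'].split(',')]
#     is_admin = any(r.lower() == 'admin' for r in roles)
#
# which turns 'role1, role2 , role3,role4,role5,AdMiN' into the stripped list
# asserted above, with is_admin set because the comparison is case-insensitive.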
sonofeft/DigiPlot | digiplot/sample_img.py | Python | gpl-3.0 | 72,038 | 0.000028
#!/usr/bin/env python
# -*- coding: ascii -*-
from __future__ import absolute_import
from __future__ import print_function
import sys
import base64
# use a base64 image as default/test image
TEST_IMAGE = """iVBORw0KGgoAAAANSUhEUgAAA8AAAALQCAYAAABfdxm0AAAABHNCSVQICAgIfAhkiAAAAAlwSFlz
AAASdAAAEnQB3mYfeAAAIABJREFUeJzs3XdUVFf3P/73OfTeEWwgNsTesZcnElusT2JDij1qTDQa
NfGJIaaoMbboJ2qMiIomlhRjiy1iNAajYoktgrEkoKh0lDbs3x/+5n4ZZ4YmzCDs11qshfeUu8+Z
OyN77r3nCiIiMMYYY4wxxhhjlZw0dgCMMcYYY4wxxpghcALMGGOMMcYYY6xK4ASYMcYYY4wxxliV
wAkwY4wxxhhjjLEqgRNgxhhjjDHGGGNVAifAjDHGGGOMMcaqBE6AGWOMMcYYY4xVCZwAM8YYY4wx
xhirEjgBZowxxhhjjDFWJXACzBhjjDHGGGOsSuAEmDHGGGOMMcZYlcAJMGOMMcYYY4yxKoETYMYY
Y4wxxhhjVQInwIwxxhhjjDHGqgROgBljjDHGGGOMVQmcADPGGGOMMcYYqxI4AWaMsSoiKioKUkr4
+PiUab8RERGQUqJnz55Gj+v27duQUsLExOS5+2LG4+3tDSkljh8/buxQivS8xz9jjDHD4gSYMcYq
uNDQUEgpNX7Mzc3h6uqK+vXrY/Dgwfj0009x69YtY4eqJSIiAmFhYbh48aKxQ6nwjh49ilGjRqFu
3bqwtraGra0t6tWrh+7du2Pu3Ln4+eefkZeXZ+wwDUIIASGEQffZvXt3rfeZiYkJnJyc0L59eyxY
sADJyckGi+eDDz5AWFgY0tLSDLZPxhirCkyNHQBjjLHiMTc3h7OzMwCAiJCWlobk5GTcvHkTP/74
I+bNm4f//ve/+L//+z+4uLhotbe2toavry9q1qxZpnE5ODjA19cXXl5eWmUbN27E8ePHUadOHTRr
1kxn+7KMy8zMDL6+vgZPnp5Hfn4+xo8fj/DwcCVuU1NT2NjY4M6dO/j7779x/PhxLF68GDExMXrn
kT0fddJtaWkJBwcHAIBKpcLDhw9x5swZ/PHHH1i3bh2OHj2K+vXrl3s8H374IYQQCA0Nhb29fbnv
jzHGqgo+A8wYYy+Ijh07Ij4+HvHx8UhISEBmZiaSk5Oxf/9+DB8+HFJK7NixAy1atEB8fLxW+7Zt
2+LKlSs4ePBgmcY1aNAgXLlyBeHh4aVqX5ZxVa9eHVeuXMHly5efuy9DWbRokZL8Tp48GVevXkV2
djYePHiAJ0+e4I8//kBYWBi8vb2NHWqVMGzYMOV9dv/+faSmpmLZsmWwtLREfHw8hg8fbuwQGWOM
PQdOgBlj7AVmb2+PgIAAbN26FXv37lX+SP/vf/9r7NBYMa1atQpCCEydOhVffPEFGjRooJSZmJig
VatW+N///oe4uDj4+fkZMdKqydbWFtOmTcO7774LIsL58+cRHR1t7LAYY4yVEifAjDFWSQQEBGDJ
kiUgIkRHR2Pv3r0a5cVZbCoiIgL+/v6wtbWFi4sLevbsqfSjb2EiXYsAqbdFRUWBiBASEqJxb2XB
GPTF9WwbfT9jxoxR2hS2CJa6vw8//BD5+flYvnw5mjdvDhsbG7i4uOCVV17B2bNnC53jkydPol+/
fnBxcYGtrS1atGiBFStWaIzxww8/LLSPgh4+fIiEhAQAQL9+/Yqsb2qqfedSTEwM5syZgy5dusDL
ywuWlpZwdXVFjx498PXXXyM/P19nX2FhYRrzt3r1arRs2RJ2dnaoXr06QkJC8O+//yr1Y2NjERwc
jFq1asHKygpNmzbF+vXrdfb97Gv6008/oUePHnB2doadnR06duyIbdu2FTlefXJzc7Fq1Sp07doV
Li4usLS0hLe3N8aOHYtr166Vut/CjBgxQvm9qOPkWd999x169+4Nd3d3WFpaolatWggMDERMTIxW
XfVxJIQAESnvO13HO2OMsZLje4AZY6wSGT9+PBYsWIDExERs3bq1WElVwbZff/01hBCQUsLCwgLH
jx9HVFQUli1bVqKFiaysrODh4YGkpCTk5ubC3t4eVlZWSrm7u3uRfTg6OsLDw0Nv+cOHD6FSqYoV
D/D/7vHMy8tD3759cfDgQZibm8PCwgIpKSnYu3cvjh49iqNHj6J9+/Za7Tdt2oQxY8aAiJT4rl69
ihkzZuD48eOwt7d/rnuPCyabJREQEICkpCQAT++ntrGxQXJysvLa/fDDD/jxxx8hpfZ33up4R4wY
gW+//RYWFhYwMzPD/fv3sWnTJpw4cQLR0dG4ceMG+vbti9TUVDg4OCA3NxdXrlzBhAkTkJqairff
fltvfCtWrMD06dMhpYSDgwOysrIQHR2N33//HadOncLKlStLNN579+6hd+/euHjxonKs2tjY4O7d
uwgPD8e2bdsQGRmJwYMHl6jfotSoUUP5vbgLU6m/GNm8eTOEEDAxMYGdnR3i4+OxdetWfPPNN1i1
ahUmTZqktFEf9/fu3YMQAq6urhpf6Dg6OpbdoBhjrCoixhhjFVpISAgJIahHjx7Fqj9y5EgSQlCt
WrU0th87doyEEFSnTh2tNhs2bCAhBEkpad68eZSWlkZERA8ePKDx48eTubk52djYkJSSoqKiNNpu
3LhRb3zdu3cnKSVFRETojbewuPTZt28fmZiYkJSStm/frmy/deuWMo5nqefRycmJXF1daefOnZSb
m0tERJcuXaJmzZqRlJLat2+v1fbatWtkYWFBUkp65ZVX6M6dO0RElJWVRatWrSIzMzNycnIiKSWF
hYUVexxERN7e3sr4L126VKK2RESjRo2i7du30/3795Vtjx8/psjISKpevTpJKWnJkiVa7T744AMS
QpCjoyPZ29vTtm3blPk4ceIEeXp6kpSSJk2aRN7e3jRw4EC6desWERGlp6fT5MmTSQhB1tbWlJSU
pNG3+jW1sbEhc3NzCg0NpcTERCIiSklJoVmzZimv07Zt23TOia5jLTc3l9q2bUtSSgoICKDo6GjK
y8sjIqJ79+7RjBkzSAhBtra2dPPmzRLNo/pYDQ0N1Vl+/fp1JeYvv/xS2V7Y8f/pp5+SEIJMTEzo
k08+oYyMDCIiio+Pp2HDhpEQgkxNTenXX3/Vaqvel/pYY4wxVjY4AWaMsQqupAmw+o9uKaWSHBAV
nmjWqVNHSXZ06devn9KnsRPga9eukYODA0kpae7cuRplxUmApZT022+/aZWfPXtWKb97965GWVBQ
EAkhqHnz5kqSWNBnn32mtC1pAhweHk5SShJCkBCCWrduTW+++SZt2bKFYmNjS9TXs06cOEFCCPLx
8dEqUyfAUkravHmzVvnmzZuVmHx9fUmlUmmU5+fnU/369XW2V7+mUkrq3bu3ztjUr0eDBg20yvQl
wF999RUJIah79+5a8ahNmjSJpJT0xhtv6CzXp6gE+O2331bGdO7cOWW7vuM/IyNDOU7fe+89rf5U
KhV16dKFpJTUrVs3rXL1vm7fvl2icTDGGCsc3wPMGGOVjPpRSQCUS2MLc+7cOeUZwrNmzdJZZ/bs
2WUS2/NKS0vDgAEDkJ6ejr59++KTTz4pcR9dunRBhw4dtLa3atVKeRTTn3/+qWwnIvz4448QQuCt
t97SeR/ulClTYGNjU+JYgKf3fH799ddwd3eHEAIxMTFYuXIlRo8ejfr168PHxweffPIJHj9+XOK+
O3XqBEdHR9y6dQv37t3TWadmzZoIDAzU2v7SSy8BeHqZ9MyZM7UuoRZCoEePHgA05+tZc+fO1bn9
vffeA/D03uLiPic6IiICQghMmzZN5yXdA
|
DBq1CgQEQ4dOlSsPguTn5+P2NhYzJ07FytWrIAQAh07
dkTLli2LbHvo0CGkpaXB3Nxc5/tKSon//e9/ICL8+uuvSExMfO54G
|
WOMFY0TYMYYq2To/79HtbjU
C/F4eHjoXSDL398fZmZmzx3b8yAiDB8+HDdu3ICvry+2bt1a4j6EEGjbtq3ecvV9nsnJycq2mzdv
Kvd8durUSWc7KysrtG7dusTxqIWEhOD27dvYsWMHJk2ahFatWsHCwgJCCNy+fRvz5s1D27Zt8eDB
A53td+zYgcGDB8PLywvW1tYaiyalpKQAgM5HYwHQu7J0wfu0mzRporNOtWrVQEQa81WQmZkZOnbs
qLOsXr168PT0BPD0S5iiqFQq/PHHHwCACRMmwNPTU+fPkCFDAAB3794tss9nERE2btyozJ2pqSka
NGiARYsWIT8/H76+vsVevEs9pubNmyvPFX5W165dlft7izMHjDHGnh8vgsUYY5VMwWSk4NlgfR4+
fAgASjKii5mZGVxcXHD//v3nD7CUZs+ejQMHDsDZ2Rm7d++GnZ1dqfoprJ2lpSWAp6sMq6nnByh8
jqpXr16qeNQsLCwwZMgQJYHLysrCkSNHsHDhQvz222+4du0aJk2ahF27diltVCoVXn31Vfzwww/K
glYWFhZwc3NTEqvExEQQETIzM3XuV9+YCp5h1VdHvY+C81WQq6urzjPmajVq1MC9e/f0JvYFJSUl
IScnB0KIIq9sEEIgKyuryD51sbKyUhJWKSXs7OxQv3599O/fH0FBQcoxUhT1mAounvUsCwsLuLq6
IjExsVhzwBhj7PlxAswYY5WM+nLSmjVr6nwc0Ito69atWLJkCUxNTfHNN9+gbt26xg6p3FlaWqJf
v37o168fXn75ZRw6dAg//vgjkpOT4eTkBABYt24dfvjhB9jY2GDRokUYNGiQViJeu3Zt/PvvvyW+
MqCiKfg4p/Pnz6Np06Zlvg8hBIYNG4YNGzaUWZ+lTcQZY4yVD74EmjHGKpHc3FwcPXoUQgh06dKl
WG1cXV0BQHkerb5+Hz16VCYxltTZs2cxfvx4CCGwePFi5d5UQ1HPD1D4HBVW9rxCQ0MBPL1ENzY2
Vtm+c+dOCCHw/vvvY/LkyVrJb35+vsYZbEN7+PAh8vLy9JarL8t2c3Mrsi8XFxflC53bt2+XTYDl
SD2mO3fu6K2TnZ2tvK+KMweMMcaeHyfAjDFWiaxbt05ZTGfUqFHFaqNe0OfevXv4+++/ddaJjo7W
e5lrYdSX0Zb27GNiYiIGDRqErKwsBAUF4a233ipVP8/Dx8cH9vb2AIATJ07orJOVlYWzZ8+WWwwF
F9gyNzdXfv/nn38AAC1atNDZ7sSJE0Y9A5mbm4tTp07pLIuLi1MS4FatWhXZl6mpKdq0aQMA2L9/
f9kFWU7UY7px44beL0eioqKULwienQP1Je0v+pl7xhiraDgBZoyxSuLnn3/GO++8o6xU26dPn2K1
a9myJby8vAAAS5Ys0Vln0aJFpYpJnTiqF2IqidzcXAwZMgTx8fFo37491q5dW6oYnpcQAgMHDgQR
YcWKFVCpVFp1Vq9ejYyMjBL3nZubi+PHjxdZLzIyEsDT+1MbNmyobFffq3rp0iWtNiqVCvPmzStx
TGXt008/1bldvYJ3gwYN0KxZs2L1FRISoixUpWvMBZXmmCtLAQEBsLe3R25uLj777DOt8vz8fCxY
sADA08WwCi46Bjzfe4cxxph+nAAzxtgLLC0tDQcPHsSIESPQr18/ZGVloXbt2tixY0ex+xBCKI9j
WbNmDd5//32kp6cDeHoJ64QJE3Do0CFYW1uXOL7GjRuDiPDdd98pKykX1xtvvIHffvsN1atXx3ff
fadx5tPQ5s6dC3Nzc1y6dAlDhgxRLmvNzs7G6tWrMXfuXOW+3JLIyclB9+7d0bFjR3z55Ze4ceOG
UpaXl4ezZ8/i1Vdfxfbt2yGEwPjx4zUWYerVqxeICAsWLMDu3buV+2SvXbuG/v3748yZM6V+PFNZ
sLa2xpEjRzB27FhlkafU1FTMnj0b4eHhEELggw8+KHZ/Y8eOhb+/P548eYIePXpg/fr1yrEKPL0M
PSIiAl27dsXKlSvLejglYm1tjXfffRdEhJUrV+KTTz5RFiKLj4/H8OHDcfLkSZiYmOCjjz7Sat+4
cWMAwKZNmzTuf2aMMfZ8OAFmjLEXxMmTJ5VHvXh4eMDGxgaOjo7o3bs3tm/fDgAYNmwYzp49Cw8P
jxL1PWbMGIwZMwYA8NFHH8HZ2RkuLi6oVq0avv76ayxZskS5F9bCwqLY/Y4ePRrm5uY4ceIEXF1d
UbNmTdSpU6dY9yf//PPPAJ6uat2yZUu9j72ZPn16seMp7eWkvr6+WLNmDYQQ2LNnD7y9veHi4gJ7
e3tMmzYNAwcOxCuvvAKgZPOjftROdHQ0pkyZgoYNGyorA1tYWKBt27bYtWsXhBAYMmSI1pn4mTNn
ol69ekhLS8OgQYNgZWUFR0dH+Pn54ciRI1izZo3GPcyG5ubmhs8++wzh4eHw8PCAi4sLXFxc8Nln
n0EIgalTp2LYsGHF7s/U1BS7d+9G586dkZycjAkTJsDJyQmurq6wtbVFjRo1EBoaipMnTyqXEJdE
WV9uPHPmTAQHB4OIMG/ePDg6OsLFxQW1atXCzp07YWJiglWrVul8vNa4ceNARFi2bBlsbW3h7e2N
OnXq4J133inTGBljrKrhBJgxxl4AQgjk5eUhMTERiYmJSEpKgpWVFerWrYuBAwfi448/xs2bN7F1
69ZCH30khNCbGKxfvx4bNmxAu3btlLOMPXr0wJ49ezB16lTlDK6jo2Ox+23YsCEOHz6M3r17w9HR
Effv38edO3e0nkmrr70QAk+ePFHGretH15llfWMsTVKkFhISguPHjytjycnJgZ+fH5YtW4YdO3Yg
NTUVgO750cfKygoJCQnYsGEDgoOD0axZM1hbWyMtLQ22trbw9fVFUFAQDhw4gB07dmidBXdycsLv
v/+O119/HbVq1YIQAtbW1hgyZAiOHz+OoKCgQsdd2PFQsE5R5YXVmTZtGn766Sd0794dRAQrKyt0
7NgRkZGRWLFiRYn36+rqiqioKERGRqJfv35wd3dHRkYGpJRo1KgRgoODsX37dsyZM6fQuEszlpK2
k1IiPDwcO3fuxMsvvwwnJydkZmaievXqGDVqFE6fPo2JEyfq7DMkJATr169H+/btYWZmhn/++Qd3
7twx2mJ0jDFWWQji1RUYY4wV4ebNm6hXrx4sLCyQnp5e6LNdqyovLy/8888/+OWXX9C1a1djh2NU
UVFR6NGjB7y9vXHz5k1jh8MYY4wp+AwwY4yxIqkvve3WrRsnvzps27YNd+/ehb29Pdq3b2/scBhj
jDGmByfAjDHGADy9D3jXrl1ISkpStt26dQuTJ0/GV199BSEE3n77bSNGaFyffvopVq1ahX/++Ue5
VzQlJQUrVqzAuHHjIITAlClTSnQPMGOMMcYMi7/GZ4wxBgA4dOgQNm7cCODpc2ellMoKu+qVonv1
6mXECI3rypUriIyMxLRp02Bubg4bGxukpKSAiCCEQK9evfD+++8bO0zGGGOMFYITYMYYYwCePgP4
xx9/RExMDO7fv4/Hjx+jRo0a6NixIyZPnoxu3boZO0SjmjJlChwcHHDixAkkJCQgJSUFLi4uaNas
GUaPHo3Ro0dDSr6wSq20C0oxxhhj5YkXwWKMMcYYY4wxViXwV9WMMcYYY4wxxqoEToAZY4wxxhhj
jFUJnAAzxhhjjDHGGKsSOAFmjDEdOnfuDCkltm7dqrM8Ojoa/fr1g6urK0xMTCClxCeffGLgKCsv
Dw8PSClx+vRpY4dS5rKzsyGlhImJCRITEw3engH+/v6QUmL79u3GDuWFsWvXLkgp0a5dO62yJk2a
QEqJffv2aWw/e/YspJRwd3c3VJiMMVYkToAZYwYTGhoKKSWaNGlS7DarV6+GlBLW1tZIS0srx+g0
FbaC7fXr19GzZ08cOHAAqampcHNzg4eHB2xtbcs1JnVSXvDHxMQEjo6OaN26NebMmYP4+PhyjcFQ
9M1/XFwcwsLCsHr1aiNEVf7Onj2LsLAwREZGGmX
enzzzy/netmiko | netmiko/cisco/cisco_wlc_ssh.py | Python | mit | 3,082 | 0.004218
from __future__ import print_function
from __future__ import unicode_literals
import time
from netmiko.ssh_connection import BaseSSHConnection
from netmiko.netmiko_globals import MAX_BUFFER
from netmiko.ssh_exception import NetMikoTimeoutException, NetMikoAuthenticationException
import paramiko
import socket
class CiscoWlcSSH(BaseSSHConnection):
def establish_connection(self, sleep_time=3, verbose=True, timeout=8, use_keys=False):
'''
Establish SSH connection to the network device
Timeout will generate a NetmikoTimeoutException
Authentication failure will generate a NetmikoAuthenticationException
WLC presents with the following on login
login as: user
(Cisco Controller)
User: user
Password:****
Manually send username/password to work around this.
'''
        # Create instance of SSHClient object
self.remote_conn_pre = paramiko.SSHClient()
# Automatically add untrusted hosts (make sure appropriate for your environment)
self.remote_conn_pre.set_missing_host_key_policy(paramiko.AutoAddPolicy())
        # initiate SSH connection
        if verbose:
            print("Establishing SSH connection to {0}:{1} ...".format(self.ip, self.port))
try:
            self.remote_conn_pre.connect(hostname=self.ip, port=self.port,
username=self.username, password=self.password,
look_for_keys=use_keys, allow_agent=False,
timeout=timeout)
except socket.error as e:
msg = "Connection to device timed-out: {device_type} {ip}:{port}".format(
device_type=self.device_type, ip=self.ip, port=self.port)
raise NetMikoTimeoutException(msg)
except paramiko.ssh_exception.AuthenticationException as e:
msg = "Authentication failure: unable to connect {device_type} {ip}:{port}".format(
device_type=self.device_type, ip=self.ip, port=self.port)
msg += '\n' + str(e)
raise NetMikoAuthenticationException(msg)
# Use invoke_shell to establish an 'interactive session'
self.remote_conn = self.remote_conn_pre.invoke_shell()
# Handle WLCs extra
self.remote_conn.send(self.username + '\n')
time.sleep(.2)
self.remote_conn.send(self.password + '\n')
if verbose:
print("Interactive SSH session established")
# Strip the initial router prompt
time.sleep(sleep_time)
return self.remote_conn.recv(MAX_BUFFER)
def session_preparation(self):
'''
Prepare the session after the connection has been established
Cisco WLC uses "config paging disable" to disable paging
'''
self.disable_paging(command="config paging disable\n")
self.set_base_prompt()
def cleanup(self):
'''
Reset WLC back to normal paging
'''
self.send_command("config paging enable\n")
|
PuZheng/lejian-backend | lejian/apis/tag.py | Python | mit | 229 | 0
# -*- coding: UTF-8 -*-
from .model_wrapper import ModelWrapper
class TagWrapper(ModelWrapper):
@property
def spu(self):
return self.sku.spu
@property
def vendor(self):
        return self.spu.vendor
donpiekarz/Stocker | stocker/SEP/processor.py | Python | gpl-3.0 | 3,868 | 0.002844
import csv
import decimal
import os
import datetime
from stocker.common.events import EventStreamNew, EventStockOpen, EventStockClose
from stocker.common.orders import OrderBuy, OrderSell
from stocker.common.utils import Stream
class CompanyProcessor(object):
def __init__(self, dirname, company_id):
        self.dirname = os.path.join(dirname, company_id)
self.company_id = company_id
def get_dates(self):
files = [os.path.splitext(fi)[0] for fi in os.walk(self.dirname).next()[2]]
return files
def get_row(self, date):
filename = os.path.join(self.dirname, date) + ".csv"
try:
with open(filename, 'r') as f:
for row in reversed(list(csv.reader(f, delimiter=';'))):
try:
desc = row[5]
if desc.startswith('TRANSAKCJA'):
yield (row, self.company_id)
except IndexError:
pass
except IOError as e:
return
class Processor(object):
def build_stream(self, dirname_in, filename_out):
self.stream = Stream()
self.stream.begin(filename_out)
self.__process_companies(dirname_in)
self.stream.end()
def __process_companies(self, dirname):
companies = []
for company in os.walk(dirname).next()[1]:
companies.append(CompanyProcessor(dirname, company))
dates_set = set()
for company in companies:
dates_set.update(company.get_dates())
dates_ordered = sorted(dates_set, key=lambda date: datetime.datetime.strptime(date, "%Y-%m-%d"))
for date in dates_ordered:
self.__process_date(date, companies)
def __process_date(self, date, companies):
rows = []
correct_generators = []
correct_day = False
generators = [company.get_row(date) for company in companies]
for generator in generators:
try:
row, company_id = generator.next()
row = (company_id, row, generator)
rows.append(row)
correct_generators.append(generator)
except StopIteration as e:
pass
if correct_generators:
# correct day (have transactions)
correct_day = True
if correct_day:
self.stream.add_event(EventStockOpen(
datetime.datetime.combine(datetime.datetime.strptime(date, "%Y-%m-%d"), datetime.time(9, 0))))
# main loop, multiplexing rows
while correct_generators:
row_data = min(rows, key=lambda row: datetime.datetime.strptime(row[1][0], "%H:%M:%S"))
rows.remove(row_data)
company_id, row, generator = row_data
self.__process_row(row, date, company_id)
try:
row, company_id = generator.next()
row = (company_id, row, generator)
rows.append(row)
except StopIteration as e:
correct_generators.remove(generator)
if correct_day:
self.stream.add_event(EventStockClose(
datetime.datetime.combine(datetime.datetime.strptime(date, "%Y-%m-%d"), datetime.time(18, 0))))
def __process_row(self, row, date, company_id):
amount = int(row[3])
limit_price = decimal.Decimal(row[1].replace(',', '.'))
timestamp = datetime.datetime.strptime("%s %s" % (date, row[0]), "%Y-%m-%d %H:%M:%S")
expiration_date = timestamp + datetime.timedelta(days=1)
self.stream.add_event(
EventStreamNew(timestamp, OrderBuy(company_id, amount, limit_price, expiration_date)))
self.stream.add_event(
EventStreamNew(timestamp, OrderSell(company_id, amount, limit_price, expiration_date)))
|
jhogsett/linkit | python/gaydar4.py | Python | mit | 3,902 | 0.027166
#!/usr/bin/python
import serial
import time
import random
import sys
s = None
num_leds = 93
ticks = 96
sleep_time = 0.0
def flush_input():
s.flushInput()
def wait_for_ack():
while s.inWaiting() <= 0:
pass
s.read(s.inWaiting())
def command(cmd_text):
s.write((cmd_text + ':').encode())
wait_for_ack()
def setup():
global s, sleep_time
s = serial.Serial("/dev/ttyS0", 115200)
flush_input()
choose_colors()
command(":::pau:clr:pau")
command("6:zon:red:8:cpy")
command("5:zon:org:6:cpy")
command("4:zon:yel:4:cpy")
command("3:zon:grn:3:cpy")
command("2:zon:blu:2:cpy")
command("1:zon:pur")
command("flu")
if len(sys.argv) > 1:
command(sys.argv[1])
if len(sys.argv) > 2:
sleep_time = float(sys.argv[2])
num_colors = 12
colors = [ "red", "org", "yel", "lgr", "grn", "sea", "cyn", "lbl", "blu", "pur", "mag", "pnk", "blk", "rnd" ]
effects = ['blink1','blink2','blink3','blink4','blink5','blink6']
effect_index = 0
chosen_colors = [0,1,2,3,4,5]
def random_color():
r = random.randrange(0, num_colors)
return colors[r]
def choose_colors():
global chosen_colors
for i in range(0, 6):
chosen_colors[i] = random_color()
def shift_colors():
global chosen_colors
for i in xrange(5, 0, -1):
chosen_colors[i] = chosen_colors[i-1]
def clear_colors():
for j in range(0,6):
chosen_colors[j] = "black"
def place_color(zone, color):
command(str(zone) + ":zone:" + color + ":flood")
def place_colors():
place_color(6, chosen_colors[0])
place_color(5, chosen_colors[1])
place_color(4, chosen_colors[2])
place_color(3, chosen_colors[3])
place_color(2, chosen_colors[4])
place_color(1, chosen_colors[5])
def display():
place_colors()
command("flush")
global idx
idx = -1
def loop():
global idx
idx = idx + 1
do_flush = False
if (idx % 3 == 0):
command("6:zon:rot")
do_flush = True
if (idx % 4 == 0):
command("5:zon:rot")
do_flush = True
if (idx % 6 == 0):
command("4:zon:rot")
do_flush = True
if (idx % 8 == 0):
command("3:zon:rot")
do_flush = True
if (idx % 12 == 0):
command("2:zon:rot")
do_flush = True
if do_flush == True:
command("flu")
time.sleep(sleep_time)
if __name__ == '__main__':
setup()
while True:
loop()
|
google/grumpy | third_party/stdlib/json/encoder.py | Python | apache-2.0 | 16,692 | 0.001737
"""Implementation of JSONEncoder
"""
import re
# try:
# from _json import encode_basestring_ascii as c_encode_basestring_ascii
# except ImportError:
# c_encode_basestring_ascii = None
c_encode_basestring_ascii = None
# try:
# from _json import make_encoder as c_make_encoder
# except ImportError:
# c_make_encoder = None
c_make_encoder = None
def x4(i):
return ("000%x" % i)[-4:]
ESCAPE = re.compile(r'[\x00-\x1f\\"\b\f\n\r\t]')
ESCAPE_ASCII = re.compile(r'([\\"]|[^\ -~])')
HAS_UTF8 = re.compile(r'[\x80-\xff]')
ESCAPE_DCT = {
'\\': '\\\\',
'"': '\\"',
'\b': '\\b',
'\f': '\\f',
'\n': '\\n',
'\r': '\\r',
'\t': '\\t',
}
for i in range(0x20):
# ESCAPE_DCT.setdefault(chr(i), '\\u{0:04x}'.format(i))
# ESCAPE_DCT.setdefault(chr(i), '\\u%04x' % (i,))
# ESCAPE_DCT.setdefault(chr(i), '\\u' + x4(i))
ESCAPE_DCT[chr(i)] = '\\u' + x4(i)
INFINITY = float('inf')
FLOAT_REPR = repr
def encode_basestring(s):
"""Return a JSON representation of a Python string
"""
def replace(match):
return ESCAPE_DCT[match.group(0)]
return '"' + ESCAPE.sub(replace, s) + '"'
def py_encode_basestring_ascii(s):
"""Return an ASCII-only JSON representation of a Python string
"""
if isinstance(s, str) and HAS_UTF8.search(s) is not None:
s = s.decode('utf-8')
def replace(match):
s = match.group(0)
try:
return ESCAPE_DCT[s]
|
except Key
|
Error:
n = ord(s)
if n < 0x10000:
# return '\\u{0:04x}'.format(n)
#return '\\u%04x' % (n,)
return '\\u' + x4(n)
else:
# surrogate pair
n -= 0x10000
s1 = 0xd800 | ((n >> 10) & 0x3ff)
s2 = 0xdc00 | (n & 0x3ff)
# return '\\u{0:04x}\\u{1:04x}'.format(s1, s2)
#return '\\u%04x\\u%04x' % (s1, s2)
return '\\u' + x4(s1) + '\\u' + x4(s2)
return '"' + str(ESCAPE_ASCII.sub(replace, s)) + '"'
encode_basestring_ascii = (
c_encode_basestring_ascii or py_encode_basestring_ascii)
class JSONEncoder(object):
"""Extensible JSON <http://json.org> encoder for Python data structures.
Supports the following objects and types by default:
+-------------------+---------------+
| Python | JSON |
+===================+===============+
| dict | object |
+-------------------+---------------+
| list, tuple | array |
+-------------------+---------------+
| str, unicode | string |
+-------------------+---------------+
| int, long, float | number |
+-------------------+---------------+
| True | true |
+-------------------+---------------+
| False | false |
+-------------------+---------------+
| None | null |
+-------------------+---------------+
To extend this to recognize other objects, subclass and implement a
``.default()`` method with another method that returns a serializable
object for ``o`` if possible, otherwise it should call the superclass
implementation (to raise ``TypeError``).
"""
item_separator = ', '
key_separator = ': '
def __init__(self, skipkeys=False, ensure_ascii=True,
check_circular=True, allow_nan=True, sort_keys=False,
indent=None, separators=None, encoding='utf-8', default=None):
"""Constructor for JSONEncoder, with sensible defaults.
If skipkeys is false, then it is a TypeError to attempt
encoding of keys that are not str, int, long, float or None. If
skipkeys is True, such items are simply skipped.
If *ensure_ascii* is true (the default), all non-ASCII
characters in the output are escaped with \uXXXX sequences,
and the results are str instances consisting of ASCII
characters only. If ensure_ascii is False, a result may be a
unicode instance. This usually happens if the input contains
unicode strings or the *encoding* parameter is used.
If check_circular is true, then lists, dicts, and custom encoded
objects will be checked for circular references during encoding to
prevent an infinite recursion (which would cause an OverflowError).
Otherwise, no such check takes place.
If allow_nan is true, then NaN, Infinity, and -Infinity will be
encoded as such. This behavior is not JSON specification compliant,
but is consistent with most JavaScript based encoders and decoders.
Otherwise, it will be a ValueError to encode such floats.
If sort_keys is true, then the output of dictionaries will be
sorted by key; this is useful for regression tests to ensure
that JSON serializations can be compared on a day-to-day basis.
If indent is a non-negative integer, then JSON array
elements and object members will be pretty-printed with that
indent level. An indent level of 0 will only insert newlines.
None is the most compact representation. Since the default
item separator is ', ', the output might include trailing
whitespace when indent is specified. You can use
separators=(',', ': ') to avoid this.
If specified, separators should be a (item_separator, key_separator)
tuple. The default is (', ', ': '). To get the most compact JSON
representation you should specify (',', ':') to eliminate whitespace.
If specified, default is a function that gets called for objects
that can't otherwise be serialized. It should return a JSON encodable
version of the object or raise a ``TypeError``.
If encoding is not None, then all input strings will be
transformed into unicode using that encoding prior to JSON-encoding.
The default is UTF-8.
"""
self.skipkeys = skipkeys
self.ensure_ascii = ensure_ascii
self.check_circular = check_circular
self.allow_nan = allow_nan
self.sort_keys = sort_keys
self.indent = indent
if separators is not None:
self.item_separator, self.key_separator = separators
if default is not None:
self.default = default
self.encoding = encoding
def default(self, o):
"""Implement this method in a subclass such that it returns
a serializable object for ``o``, or calls the base implementation
(to raise a ``TypeError``).
For example, to support arbitrary iterators, you could
implement default like this::
def default(self, o):
try:
iterable = iter(o)
except TypeError:
pass
else:
return list(iterable)
# Let the base class default method raise the TypeError
return JSONEncoder.default(self, o)
"""
raise TypeError(repr(o) + " is not JSON serializable")
def encode(self, o):
"""Return a JSON string representation of a Python data structure.
>>> JSONEncoder().encode({"foo": ["bar", "baz"]})
'{"foo": ["bar", "baz"]}'
"""
# This is for extremely simple cases and benchmarks.
if isinstance(o, basestring):
if isinstance(o, str):
_encoding = self.encoding
if (_encoding is not None
and not (_encoding == 'utf-8')):
o = o.decode(_encoding)
if self.ensure_ascii:
return encode_basestring_ascii(o)
else:
return encode_basestring(o)
# This doesn't pass the iterator directly to ''.join() because the
# exceptions aren't as detailed. The list call should be roughly
# equivalent to the PySequence_Fast that ''.join() would do.
chunks = self.iterencode(o, _one_shot=True)
if not isinstance(chunks, (list, tuple)):
|
fredrik-johansson/mpmath | mpmath/tests/test_quad.py | Python | bsd-3-clause | 3,893 | 0.008477
import pytest
from mpmath import *
def ae(a, b):
return abs(a-b) < 10**(-mp.dps+5)
def test_basic_integrals():
for prec in [15, 30, 100]:
mp.dps = prec
assert ae(quadts(lambda x: x**3 - 3*x**2, [-2, 4]), -12)
assert ae(quadgl(lambda x: x**3 - 3*x**2, [-2, 4]), -12)
assert ae(quadts(sin, [0, pi]), 2)
assert ae(quadts(sin, [0, 2*pi]), 0)
assert ae(quadts(exp, [-inf, -1]), 1/e)
assert ae(quadts(lambda x: exp(-x), [0, inf]), 1)
assert ae(quadts(lambda x: exp(-x*x), [-inf, inf]), sqrt(pi))
assert ae(quadts(lambda x: 1/(1+x*x), [-1, 1]), pi/2)
assert ae(quadts(lambda x: 1/(1+x*x), [-inf, inf]), pi)
assert ae(quadts(lambda x: 2*sqrt(1-x*x), [-1, 1]), pi)
mp.dps = 15
def test_multiple_intervals():
y,err = quad(lambda x: sign(x), [-0.5, 0.9, 1], maxdegree=2, error=True)
assert abs(y-0.5) < 2*err
def test_quad_symmetry():
assert quadts(sin, [-1, 1]) == 0
assert quadgl(sin, [-1, 1]) == 0
def test_quad_infinite_mirror():
# Check mirrored infinite interval
assert ae(quad(lambda x: exp(-x*x), [inf,-inf]), -sqrt(pi))
assert ae(quad(lambda x: exp(x), [0,-inf]), -1)
def test_quadgl_linear():
assert quadgl(lambda x: x, [0, 1], maxdegree=1).ae(0.5)
def test_complex_integration():
assert quadts(lambda x: x, [0, 1+j]).ae(j)
def test_quadosc():
mp.dps = 15
assert quadosc(lambda x: sin(x)/x, [0, inf], period=2*pi).ae(pi/2)
# Double integrals
def test_double_trivial():
assert ae(quadts(lambda x, y: x, [0, 1], [0, 1]), 0.5)
assert ae(quadts(lambda x, y: x, [-1, 1], [-1, 1]), 0.0)
def test_double_1():
assert ae(quadts(lambda x, y: cos(x+y/2), [-pi/2, pi/2], [0, pi]), 4)
def test_double_2():
assert ae(quadts(lambda x, y: (x-1)/((1-x*y)*log(x*y)), [0, 1], [0, 1]), euler)
def test_double_3():
assert ae(quadts(lambda x, y: 1/sqrt(1+x*x+y*y), [-1, 1], [-1, 1]), 4*log(2+sqrt(3))-2*pi/3)
def test_double_4():
assert ae(quadts(lambda x, y: 1/(1-x*x * y*y), [0, 1], [0, 1]), pi**2 / 8)
def test_double_5():
assert ae(quadts(lambda x, y: 1/(1-x*y), [0, 1], [0, 1]), pi**2 / 6)
def test_double_6():
assert ae(quadts(lambda x, y: exp(-(x+y)), [0, inf], [0, inf]), 1)
def test_double_7():
assert ae(quadts(lambda x, y: exp(-x*x-y*y), [-inf, inf], [-inf, inf]), pi)
# Test integrals from "Experimentation in Mathematics" by Borwein,
# Bailey & Girgensohn
def test_expmath_integrals():
for prec in [15, 30, 50]:
mp.dps = prec
assert ae(quadts(lambda x: x/sinh(x), [0, inf]), pi**2 / 4)
assert ae(quadts(lambda x: log(x)**2 / (1+x**2), [0, inf]), pi**3 / 8)
assert ae(quadts(lambda x: (1+x**2)/(1+x**4), [0, inf]), pi/sqrt(2))
assert ae(quadts(lambda x: log(x)/cosh(x)**2, [0, inf]), log(pi)-2*log(2)-euler)
assert ae(quadts(lambda x: log(1+x**3)/(1-x+x**2), [0, inf]), 2*pi*log(3)/sqrt(3))
assert ae(quadts(lambda x: log(x)**2 / (x**2+x+1), [0, 1]), 8*pi**3 / (81*sqrt(3)))
assert ae(quadts(lambda x: log(cos(x))**2, [0, pi/2]), pi/2 * (log(2)**2+pi**2/12))
assert ae(quadts(lambda x: x**2 / sin(x)**2, [0, pi/2]), pi*log(2))
assert ae(quadts(lambda x: x**2/sqrt(exp(x)-1), [0, inf]), 4*pi*(log(2)**2 + pi**2/12))
        assert ae(quadts(lambda x: x*exp(-x)*sqrt(1-exp(-2*x)), [0, inf]), pi*(1+2*log(2))/8)
mp.dps = 15
# Do not reach full accuracy
@pytest.mark.xfail
def test_expmath_fail():
    assert ae(quadts(lambda x: sqrt(tan(x)), [0, pi/2]), pi*sqrt(2)/2)
assert ae(quadts(lambda x: atan(x)/(x*sqrt(1-x**2)), [0, 1]), pi*log(1+sqrt(2))/2)
assert ae(quadts(lambda x: log(1+x**2)/x**2, [0, 1]), pi/2-log(2))
assert ae(quadts(lambda x: x**2/((1+x**4)*sqrt(1-x**4)), [0, 1]), pi/8)
|
mfherbst/spack | var/spack/repos/builtin/packages/slepc/package.py | Python | lgpl-2.1 | 4,929 | 0.002435
##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
import os
import sys
from spack import *
class Slepc(Package):
"""Scalable Library for Eigenvalue Problem Computations."""
homepage = "http://www.grycap.upv.es/slepc"
url = "http://slepc.upv.es/download/distrib/slepc-3.6.2.tar.gz"
git = "https://bitbucket.org/slepc/slepc.git"
version('develop', branch='master')
version('3.9.1', 'e174ea7c127d9161eef976b0288f0c56d443a58d6ab2dc8af1e8bd66f156ce17')
version('3.9.0', '1f3930db56b4065aaf214ea758ddff1a70bf19d45544cbdfd19d2787db4bfe0b')
version('3.8.2', '1e7d20d20eb26da307d36017461fe4a55f40e947e232739179dbe6412e22ed13')
version('3.8.0', 'c58ccc4e852d1da01112466c48efa41f0839649f3a265925788237d76cd3d963')
version('3.7.4', '2fb782844e3bc265a8d181c3c3e2632a4ca073111c874c654f1365d33ca2eb8a')
version('3.7.3', '3ef9bcc645a10c1779d56b3500472ceb66df692e389d635087d30e7c46424df9')
version('3.7.1', '670216f263e3074b21e0623c01bc0f562fdc0bffcd7bd42dd5d8edbe73a532c2')
version('3.6.3', '384939d009546db37bc05ed81260c8b5ba451093bf891391d32eb7109ccff876')
version('3.6.2', '2ab4311bed26ccf7771818665991b2ea3a9b15f97e29fd13911ab1293e8e65df')
variant('arpack', default=True, description='Enables Arpack wrappers')
variant('blopex', default=False, description='Enables BLOPEX wrappers')
# NOTE: make sure PETSc and SLEPc use the same python.
depends_on('python@2.6:2.8', type='build')
# Cannot mix release and development versions of SLEPc and PETSc:
depends_on('petsc@develop', when='@develop')
depends_on('petsc@3.9:3.9.99', when='@3.9:3.9.99')
depends_on('petsc@3.8:3.8.99', when='@3.8:3.8.99')
depends_on('petsc@3.7:3.7.7', when='@3.7.1:3.7.4')
depends_on('petsc@3.6.3:3.6.4', when='@3.6.2:3.6.3')
depends_on('arpack-ng~mpi', when='+arpack^petsc~mpi~int64')
depends_on('arpack-ng+mpi', when='+arpack^petsc+mpi~int64')
patch('install_name_371.patch', when='@3.7.1')
# Arpack can not be used with 64bit integers.
conflicts('+arpack', when='^petsc+int64')
resource(name='blopex',
             url='http://slepc.upv.es/download/external/blopex-1.1.2.tar.gz',
sha256='0081ee4c4242e635a8113b32f655910ada057c59043f29af4b613508a762f3ac',
destination=join_path('installed-arch-' + sys.platform + '-c-opt',
                                   'externalpackages'),
when='+blopex')
def install(self, spec, prefix):
# set SLEPC_DIR for installation
# Note that one should set the current (temporary) directory instead
# its symlink in spack/stage/ !
os.environ['SLEPC_DIR'] = os.getcwd()
options = []
if '+arpack' in spec:
options.extend([
'--with-arpack-dir=%s' % spec['arpack-ng'].prefix.lib,
])
if 'arpack-ng~mpi' in spec:
options.extend([
'--with-arpack-flags=-larpack'
])
else:
options.extend([
'--with-arpack-flags=-lparpack,-larpack'
])
# It isn't possible to install BLOPEX separately and link to it;
# BLOPEX has to be downloaded with SLEPc at configure time
if '+blopex' in spec:
options.append('--download-blopex')
configure('--prefix=%s' % prefix, *options)
make('MAKE_NP=%s' % make_jobs, parallel=False)
if self.run_tests:
make('test', parallel=False)
make('install', parallel=False)
def setup_dependent_environment(self, spack_env, run_env, dependent_spec):
# set up SLEPC_DIR for everyone using SLEPc package
spack_env.set('SLEPC_DIR', self.prefix)
|
Eveler/libs | __Python__/edv/edv/imagescanner/backends/test/__init__.py | Python | gpl-3.0 | 751 | 0.007989
"""Test backend
$Id$"""
import os
import Image
from imagescanner.backends import base
class ScannerManager(base.ScannerManager):
def _refresh(self):
self._devices = []
scanner = Scanner('test-0', "Pyscan", "Test Device")
self._devices.append(scanner)
class Scanner(base.Scanner):
def __init__(self, scanner_id, manufacturer, name):
self.id = scanner_id
self.manufacturer = manufacturer
self.name = name
def __repr__(self):
return "<%s: %s - %s>" % (self.id, self.manufacturer, self.name)
def scan(self, dpi=200):
        imgpath = os.path.join(os.path.dirname(__file__), 'data', 'img1.tiff')
return Image.open(imgpath)
def status(self):
pass
|
ubik2/PEGAS-kRPC | kRPC/plane_error.py | Python | mit | 987 | 0.00304
import numpy as np
def plane_error(results, target):
"""
Computes angle between target orbital plane and actually achieved plane.
:param results: Results struct as output by flight_manager (NOT flight_sim_3d).
:param target: Target struct as output by launch_targeting.
    :return: Angle between the two orbital planes.
"""
inc = results.powered[results.n-1].orbit.inc
lan = results.powered[results.n-1].orbit.lan
Rx = np.array([[1, 0, 0],
[0, np.cos(np.deg2rad(inc)), -np.sin(np.deg2rad(inc))],
[0, np.sin(np.deg2rad(inc)), np.cos(np.deg2rad(inc))]])
Rz = np.array([[np.cos(np.deg2rad(lan)), -np.sin(np.deg2rad(lan)), 0],
                   [np.sin(np.deg2rad(lan)), np.cos(np.deg2rad(lan)), 0],
[0, 0, 1]])
reached = np.matmul(Rz, np.matmul(Rx, np.array([0, 0, -1])))
error = np.rad2deg(np.arccos(np.vdot(target.normal, reached)))
return error
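
# Standalone sanity check (illustrative, not part of the original module):
# rebuilding the same rotation and comparing the plane against its own normal
# must give an error of zero. np.clip guards arccos against values that float
# rounding pushes a hair above 1.0.
if __name__ == '__main__':
    inc, lan = 30.0, 45.0
    Rx = np.array([[1, 0, 0],
                   [0, np.cos(np.deg2rad(inc)), -np.sin(np.deg2rad(inc))],
                   [0, np.sin(np.deg2rad(inc)), np.cos(np.deg2rad(inc))]])
    Rz = np.array([[np.cos(np.deg2rad(lan)), -np.sin(np.deg2rad(lan)), 0],
                   [np.sin(np.deg2rad(lan)), np.cos(np.deg2rad(lan)), 0],
                   [0, 0, 1]])
    normal = np.matmul(Rz, np.matmul(Rx, np.array([0, 0, -1])))
    angle = np.rad2deg(np.arccos(np.clip(np.vdot(normal, normal), -1.0, 1.0)))
    assert abs(angle) < 1e-9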
verma-varsha/zulip | zerver/webhooks/github_webhook/tests.py | Python | apache-2.0 | 22,570 | 0.004386
import ujson
from mock import patch, MagicMock
from typing import Dict, Optional, Text
from zerver.models import Message
from zerver.lib.webhooks.git import COMMITS_LIMIT
from zerver.lib.test_classes import WebhookTestCase
class GithubWebhookTest(WebhookTestCase):
STREAM_NAME = 'github'
URL_TEMPLATE = "/api/v1/external/github?stream={stream}&api_key={api_key}"
FIXTURE_DIR_NAME = 'github_webhook'
EXPECTED_SUBJECT_REPO_EVENTS = u"public-repo"
EXPECTED_SUBJECT_ISSUE_EVENTS = u"public-repo / Issue #2 Spelling error in the README file"
EXPECTED_SUBJECT_PR_EVENTS = u"public-repo / PR #1 Update the README with new information"
EXPECTED_SUBJECT_DEPLOYMENT_EVENTS = u"public-repo / Deployment on production"
EXPECTED_SUBJECT_ORGANIZATION_EVENTS = u"baxterandthehackers organization"
EXPECTED_SUBJECT_BRANCH_EVENTS = u"public-repo / changes"
EXPECTED_SUBJECT_WIKI_EVENTS = u"public-repo / Wiki Pages"
def test_ping_event(self):
# type: () -> None
expected_message = u"GitHub webhook has been successfully configured by TomaszKolek"
self.send_and_test_stream_message('ping', self.EXPECTED_SUBJECT_REPO_EVENTS, expected_message, HTTP_X_GITHUB_EVENT='ping')
def test_ping_organization_event(self):
# type: () -> None
expected_message = u"GitHub webhook has been successfully configured by eeshangarg"
self.send_and_test_stream_message('ping_organization', 'zulip-test-org', expected_message, HTTP_X_GITHUB_EVENT='ping')
def test_push_delete_branch(self):
# type: () -> None
expected_message = u"eeshangarg [deleted](https://github.com/eeshangarg/public-repo/compare/2e8cf535fb38...000000000000) the branch feature."
self.send_and_test_stream_message('push_delete_branch', u"public-repo / feature", expected_message, HTTP_X_GITHUB_EVENT='push')
def test_push_local_branch_without_commits(self):
# type: () -> None
expected_message = u"eeshangarg [pushed](https://github.com/eeshangarg/public-repo/compare/feature) the branch feature."
self.send_and_test_stream_message('push_local_branch_without_commits', u"public-repo / feature", expected_message, HTTP_X_GITHUB_EVENT='push')
def test_push_1_commit(self):
# type: () -> None
expected_message = u"baxterthehacker [pushed](https://github.com/baxterthehacker/public-repo/compare/9049f1265b7d...0d1a26e67d8f) 1 commit to branch changes.\n\n* Update README.md ([0d1a26e](https://github.com/baxterthehacker/public-repo/commit/0d1a26e67d8f5eaf1f6ba5c57fc3c7d91ac0fd1c))"
self.send_and_test_stream_message('push_1_commit', self.EXPECTED_SUBJECT_BRANCH_EVENTS, expected_message, HTTP_X_GITHUB_EVENT='push')
def test_push_1_commit_without_username(self):
# type: () -> None
expected_message = u"eeshangarg [pushed](https://github.com/eeshangarg/public-repo/compare/0383613da871...2e8cf535fb38) 1 commit to branch changes. Commits by John Snow (1).\n\n* Update the README ([2e8cf53](https://github.com/eeshangarg/public-repo/commit/2e8cf535fb38a3dab2476cdf856efda904ad4c94))"
self.send_and_test_stream_message('push_1_commit_without_username', self.EXPECTED_SUBJECT_BRANCH_EVENTS, expected_message, HTTP_X_GITHUB_EVENT='push')
def test_push_1_commit_filtered_by_branches(self):
# type: () -> None
self.url = self.build_webhook_url('master,changes')
expected_message = u"baxterthehacker [pushed](https://github.com/baxterthehacker/public-repo/compare/9049f1265b7d...0d1a26e67d8f) 1 commit to branch changes.\n\n* Update README.md ([0d1a26e](https://github.com/baxterthehacker/public-repo/commit/0d1a26e67d8f5eaf1f6ba5c57fc3c7d91ac0fd1c))"
self.send_and_test_stream_message('push_1_commit', self.EXPECTED_SUBJECT_BRANCH_EVENTS, expected_message, HTTP_X_GITHUB_EVENT='push')
def test_push_multiple_comitters(self):
# type: () -> None
commits_info = u'* Update README.md ([0d1a26e](https://github.com/baxterthehacker/public-repo/commit/0d1a26e67d8f5eaf1f6ba5c57fc3c7d91ac0fd1c))\n'
expected_message = u"""baxterthehacker [pushed](https://github.com/baxterthehacker/public-repo/compare/9049f1265b7d...0d1a26e67d8f) 6 commits to branch changes. Commits by Tomasz (3), Ben (2) and baxterthehacker (1).\n\n{}* Update README.md ([0d1a26e](https://github.com/baxterthehacker/public-repo/commit/0d1a26e67d8f5eaf1f6ba5c57fc3c7d91ac0fd1c))""".format(commits_info * 5)
self.send_and_test_stream_message('push_multiple_committers', self.EXPECTED_SUBJECT_BRANCH_EVENTS, expected_message, HTTP_X_GITHUB_EVENT='push')
def test_push_multiple_comitters_with_others(self):
# type: () -> None
commits_info = u'* Update README.md ([0d1a26e](https://github.com/baxterthehacker/public-repo/commit/0d1a26e67d8f5eaf1f6ba5c57fc3c7d91ac0fd1c))\n'
expected_message = u"""baxterthehacker [pushed](https://github.com/baxterthehacker/public-repo/compare/9049f1265b7d...0d1a26e67d8f) 10 commits to branch changes. Commits by Tomasz (4), Ben (3), James (2) and others (1).\n\n{}* Update README.md ([0d1a26e](https://github.com/baxterthehacker/public-repo/commit/0d1a26e67d8f5eaf1f6ba5c57fc3c7d91ac0fd1c))""".format(commits_info * 9)
self.send_and_test_stream_message('push_multiple_committers_with_others', self.EXPECTED_SUBJECT_BRANCH_EVENTS, expected_message, HTTP_X_GITHUB_EVENT='push')
def test_push_multiple_comitters_filtered_by_branches(self):
# type: () -> None
self.url = self.build_webhook_url('master,changes')
commits_info = u'* Update README.md ([0d1a26e](https://github.com/baxterthehacker/public-repo/commit/0d1a26e67d8f5eaf1f6ba5c57fc3c7d91ac0fd1c))\n'
expected_message = u"""baxterthehacker [pushed](https://github.com/baxterthehacker/public-repo/compare/9049f1265b7d...0d1a26e67d8f) 6 commits to branch changes. Commits by T
|
omasz (3), Ben (2) and baxterthehacker (1).\n\n{}* Update README.md ([0d1a26e](https://github.com/baxterthehacker/public-repo/commit/0d1a26e67d8f5eaf1f6
|
ba5c57fc3c7d91ac0fd1c))""".format(commits_info * 5)
self.send_and_test_stream_message('push_multiple_committers', self.EXPECTED_SUBJECT_BRANCH_EVENTS, expected_message, HTTP_X_GITHUB_EVENT='push')
def test_push_multiple_comitters_with_others_filtered_by_branches(self):
# type: () -> None
self.url = self.build_webhook_url('master,changes')
commits_info = u'* Update README.md ([0d1a26e](https://github.com/baxterthehacker/public-repo/commit/0d1a26e67d8f5eaf1f6ba5c57fc3c7d91ac0fd1c))\n'
expected_message = u"""baxterthehacker [pushed](https://github.com/baxterthehacker/public-repo/compare/9049f1265b7d...0d1a26e67d8f) 10 commits to branch changes. Commits by Tomasz (4), Ben (3), James (2) and others (1).\n\n{}* Update README.md ([0d1a26e](https://github.com/baxterthehacker/public-repo/commit/0d1a26e67d8f5eaf1f6ba5c57fc3c7d91ac0fd1c))""".format(commits_info * 9)
self.send_and_test_stream_message('push_multiple_committers_with_others', self.EXPECTED_SUBJECT_BRANCH_EVENTS, expected_message, HTTP_X_GITHUB_EVENT='push')
def test_push_50_commits(self):
# type: () -> None
commit_info = "* Update README.md ([0d1a26e](https://github.com/baxterthehacker/public-repo/commit/0d1a26e67d8f5eaf1f6ba5c57fc3c7d91ac0fd1c))\n"
expected_message = u"baxterthehacker [pushed](https://github.com/baxterthehacker/public-repo/compare/9049f1265b7d...0d1a26e67d8f) 50 commits to branch changes.\n\n{}[and 30 more commit(s)]".format(
commit_info * COMMITS_LIMIT
)
self.send_and_test_stream_message('push_50_commits', self.EXPECTED_SUBJECT_BRANCH_EVENTS, expected_message, HTTP_X_GITHUB_EVENT='push')
def test_push_50_commits_filtered_by_branches(self):
# type: () -> None
self.url = self.build_webhook_url(branches='master,changes')
commit_info = "* Update README.md ([0d1a26e](https://github.com/baxterthehacker/public-repo/commit/0d1a26e67d8f5eaf1f6ba5c57fc3c7d91ac0fd1c))\n"
expected_message = u"baxterthehacker [pushed](https://github.com/baxterthehacker/public-repo/compare/9049f12
|
gannicus-yu/pyutils | setup.py | Python | apache-2.0 | 546 | 0.001832
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Created by heyu on 17/3/1
"""
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
setup(
name="pyutils",
version="0.0.1",
    author="heyu",
    author_email="gannicus_yu@163.com",
description="easy and convenient tools written in Python",
long_description=__doc__,
install_requires=["MySQL-python", "docopt"],
url="https://github.com/gannicus-yu/pyutils",
packages=["myutils"],
platforms=['all'],
# test_suite="tests"
)
|
TurkuNLP/CAFA3
|
sequence_features/process_NCBI_Taxonomy.py
|
Python
|
lgpl-3.0
| 3,092
| 0.005498
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import argparse
from common_processing import *
import tarfile
import sys
import glob
def untar(ftp_link, out_folder):
tar = tarfile.open(out_folder + ftp_link.split("/")[-1])
tar.extractall(path=out_folder)
tar.close()
def process_nodes_dmp(out_folder):
"""
extract data from file:nodes.dmp
create 2 map_tables:
map_organism2organism
map_organism2rank
"""
map_organism2organism = ""
map_organism2rank = ""
parent_tax_dict = dict()
tax_tree_dict = dict()
with open(out_folder + 'nodes.dmp', 'rb') as f:
for line in f:
tax_id, parent_tax_id, rank, embl_code, division_id, inherited_div_flag, genetic_code_id, inherited_gc_flag, mitochondrial_genetic_code_id, inherited_mgc_flag, genbank_hidden_flag, hidden_subtree_root_flag, comments = line.split("\t|\t")
map_organism2rank += str(tax_id) + "\t" + rank + "\n"
parent_tax_dict.setdefault(tax_id, parent_tax_id)
for tax_id, parent_tax_id in parent_tax_dict.iteritems():
tax_tree_dict.setdefault(tax_id, []).append(parent_tax_id)
while parent_tax_dict[tax_tree_dict[tax_id][-1]] != tax_tree_dict[tax_id][-1]:
tax_tree_dict[tax_id].append(parent_tax_dict[tax_tree_dict[tax_id][-1]])
for tax_id, parent_tax_ids in tax_tree_dict.iteritems():
map_organism2organism += '{}\t{}\t{}\n'.format(tax_id, tax_id, 0)
for level, parent_tax_id in enumerate(parent_tax_ids):
map_organism2organism += '{}\t{}\t{}\n'.format(tax_id, parent_tax_id, level+1)
with open(out_folder + "map_organism2organism.tsv", "wb") as f:
f.write(map_organism2organism)
with open(out_folder + "map_organism2rank.tsv", "wb") as f:
f.write(map_organism2rank)
def process_names_dmp(out_folder):
"""
extract data from file:names.dmp
map_symbol2organism
name type included: scientific name, synonym, acronym, anamorph, misspelling, misnomer, common name,
"""
map_symbol2organism = ''
non_unique_name = set()
with open(out_folder + "names.dmp", "rb") as f:
for line in f:
tax_id, name_txt, unique_name, name_class = line.split("\t|\t")
map_symbol2organism += "
|
{}\t{}\t{}".format(tax_id, name_txt, name_class.split("|")[0].replace("\t", "\n"))
with open(out_folder + "map_symbol2organism.tsv", "wb") as f:
f.write(map_symbol2organism)
def argument_parser():
parser = argparse.ArgumentParser(description="download the Taxonomy PubMed from ftp")
parser.add_argument("-f", "--ftp_link", type=str, help="ftp url link to the file")
parser.add_argument("
|
-o", "--out_folder", type=str, help="target folder of downloaded file")
args = parser.parse_args()
return args
if __name__ == "__main__":
args = argument_parser()
print "processing Taxonomy data"
ftp_download(args.ftp_link, args.out_folder)
untar(args.ftp_link, args.out_folder)
process_nodes_dmp(args.out_folder)
process_names_dmp(args.out_folder)
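
# Toy illustration of the ancestor-chain walk in process_nodes_dmp
# (hypothetical parent pointers; in nodes.dmp the root, tax_id 1, is its own
# parent, which is what terminates the while loop):
#
#     parent = {'9606': '9605', '9605': '207598', '207598': '1', '1': '1'}
#     chain = ['9605']
#     while parent[chain[-1]] != chain[-1]:
#         chain.append(parent[chain[-1]])
#     # chain == ['9605', '207598', '1'] -> written out as levels 1, 2, 3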
|
fergalmoran/Chrome2Kindle
|
server/reportlab/platypus/tableofcontents.py
|
Python
|
mit
| 19,645
| 0.006058
|
#Copyright ReportLab Europe Ltd. 2000-2004
#see license.txt for license details
#history http://www.reportlab.co.uk/cgi-bin/viewcvs.cgi/public/reportlab/trunk/reportlab/platypus/tableofcontents.py
__version__=''' $Id: tableofcontents.py 3627 2010-01-06 14:06:36Z rgbecker $ '''
__doc__="""Experimental class to generate Tables of Contents easily
This module defines a single TableOfContents() class that can be used to
automatically create a table of contents for Platypus documents like
this:
story = []
toc = TableOfContents()
story.append(toc)
# some heading paragraphs here...
doc = MyTemplate(path)
doc.multiBuild(story)
The data needed to create the table is a list of (level, text, pageNum)
triplets, plus some paragraph styles for each level of the table itself.
The triplets will usually be created in a document template's method
like afterFlowable(), making notification calls using the notify()
method with appropriate data like this:
(level, text, pageNum) = ...
self.notify('TOCEntry', (level, text, pageNum))
Optionally the list can contain four items in which case the last item
is a destination key which the entry should point to. A bookmark
with this key needs to be created first like this:
key = 'ch%s' % self.seq.nextf('chapter')
self.canv.bookmarkPage(key)
self.notify('TOCEntry', (level, text, pageNum, key))
As the table of contents needs at least two passes over the Platypus
story, the multiBuild() method must be called.
The level<NUMBER>ParaStyle variables are the paragraph styles used
to format the entries in the table of contents. Their indentation
is calculated like this: each entry starts at a multiple of some
constant named delta. If one entry spans more than one line, all
lines after the first are indented by the same constant named
epsilon.
"""
from reportlab.lib import enums
from reportlab.lib.units import cm
from reportlab.lib.utils import commasplit
from reportlab.lib.styles import ParagraphStyle
from reportlab.platypus.paragraph import Paragraph
from reportlab.platypus.doctemplate import IndexingFlowable
from reportlab.platypus.tables import TableStyle, Table
from reportlab.platypus.flowables import Spacer, Flowable
from reportlab.pdfbase.pdfmetrics import stringWidth
from reportlab.pdfgen import canvas
from base64 import encodestring, decodestring
try:
import cPickle as pickle
except ImportError:
import pickle
dumps = pickle.dumps
loads = pickle.loads
def unquote(txt):
from xml.sax.saxutils import unescape
return unescape(txt, {"&apos;": "'", "&quot;": '"'})
try:
set
except:
class set(list):
def add(self,x):
if x not in self:
list.append(self,x)
def drawPageNumbers(canvas, style, pages, availWidth, availHeight, dot=' . '):
'''
Draws pagestr on the canvas using the given style.
If dot is None, pagestr is drawn at the current position in the canvas.
If dot is a string, pagestr is drawn right-aligned. If the string is not empty,
the gap is filled with it.
'''
pages.sort()
pagestr = ', '.join([str(p) for p, _ in pages])
x, y = canvas._curr_tx_info['cur_x'], canvas._curr_tx_info['cur_y']
pagestrw = stringWidth(pagestr, style.fontName, style.fontSize)
if isinstance(dot, basestring):
if dot:
dotw = stringWidth(dot, style.fontName, style.fontSize)
dotsn = int((availWidth-x-pagestrw)/dotw)
else:
dotsn = dotw = 0
text = '%s%s' % (dotsn * dot, pagestr)
newx = availWidth - dotsn*dotw - pagestrw
pagex = availWidth - pagestrw
elif dot is None:
text = ', ' + pagestr
newx = x
pagex = newx
else:
raise TypeError('Argument dot should either be None or an instance of basestring.')
tx = canvas.beginText(newx, y)
tx.setFont(style.fontName, style.fontSize)
tx.setFillColor(style.textColor)
tx.textLine(text)
canvas.drawText(tx)
commaw = stringWidth(', ', style.fontName, style.fontSize)
for p, key in pages:
if not key:
continue
w = stringWidth(str(p), style.fontName, style.fontSize)
canvas.linkRect('', key, (pagex, y, pagex+w, y+style.leading), relative=1)
pagex += w + commaw
# Default paragraph styles for tables of contents.
# (This could also be generated automatically or even
# on-demand if it is not known how many levels the
# TOC will finally need to display...)
delta = 1*cm
epsilon = 0.5*cm
defaultLevelStyles = [
ParagraphStyle(
name='Level 0',
fontName='Times-Roman',
fontSize=10,
leading=11,
firstLineIndent = 0,
leftIndent = epsilon)]
defaultTableStyle = \
TableStyle([
('VALIGN', (0,0), (-1,-1), 'TOP'),
('RIGHTPADDING', (0,0), (-1,-1), 0),
('LEFTPADDING', (0,0), (-1,-1), 0),
])
class TableOfContents(IndexingFlowable):
"""This creates a formatted table of contents.
It presumes a correct block of data is passed in.
The data block contains a list of (level, text, pageNumber)
triplets. You can supply a paragraph style for each level
(starting at zero).
Set dotsMinLevel to determine from which level on a line of
dots should be drawn between the text and the page number.
If dotsMinLevel is set to a negative value, no dotted lines are drawn.
"""
def __init__(self):
self.rightColumnWidth = 72
self.levelStyles = defaultLevelStyles
self.tableStyle = defaultTableStyle
self.dotsMinLevel = 1
self._table = None
self._entries = []
self._lastEntries = []
def beforeBuild(self):
# keep track of the last run
self._lastEntries = self._entries[:]
self.clearEntries()
def isIndexing(self):
return 1
def isSatisfied(self):
return (self._entries == self._lastEntries)
def notify(self, kind, stuff):
"""The notification hook called to register all kinds of events.
Here we are interested in 'TOCEntry' events only.
"""
if kind == 'TOCEntry':
self.addEntry(*stuff)
def clearEntries(self):
self._entries = []
def getLevelStyle(self, n):
'''Returns the style for level n, generating and caching styles on demand if not present.'''
try:
return self.levelStyles[n]
except IndexError:
prevstyle = self.getLevelStyle(n-1)
self.levelStyles.append(ParagraphStyle(
name='%s-%d-indented' % (prevstyle.name, n),
parent=prevstyle,
firstLineIndent = prevstyle.firstLineIndent+delta,
leftIndent = prevstyle.leftIndent+delta))
return self.levelStyles[n]
def addEntry(self, level, text, pageNum, key=None):
"""Adds one entry to the table of contents.
This allows incremental buildup by a doctemplate.
Requires that enough styles are defined."""
assert type(level) == type(1), "Level must be an integer"
self._entries.append((level, text, pageNum, key))
def addEntries(self, listOfEntries):
"""Bulk creation of entries in the table of contents.
If you knew the titles but not the page numbers, you could
supply them to get sensible output on the first run."""
for entryargs in listOfEntries:
self.addEntry(*entryargs)
def wrap(self, availWidth, availHeight):
"All table properties should be known by now."
# makes an internal table which does all the work.
# we draw the LAST RUN's entries! If there are
# none, we make some dummy data to keep the table
# from complaining
if len(self._lastEntries) == 0:
_tempEntries = [(0,'Placeholder for table of contents',0,None)]
else:
_tempEntries = self._lastEntries
def drawTOCEntryEnd(canvas, kind, label):
'''Callback to draw dots and page numbers after each entry.'''
label = label.split(',')
page, level, key = int(label[
|
quchunguang/test
|
testpy3/pyqt5_tetris.py
|
Python
|
mit
| 10,805
| 0.000278
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
import sys
import random
from PyQt5.QtWidgets import QMainWindow, QFrame, QDesktopWidget, QApplication
from PyQt5.QtCore import Qt, QBasicTimer, pyqtSignal
from PyQt5.QtGui import QPainter, QColor
class Tetris(QMainWindow):
def __init__(self):
super().__init__()
self.initUI()
def initUI(self):
self.tboard = Board(self)
self.setCentralWidget(self.tboard)
self.statusbar = self.statusBar()
self.tboard.msg2Statusbar[str].connect(self.statusbar.showMessage)
self.tboard.start()
self.resize(180, 380)
self.center()
self.setWindowTitle('Tetris')
self.show()
def center(self):
screen = QDesktopWidget().screenGeometry()
size = self.geometry()
self.move((screen.width() - size.width()) / 2,
(screen.height() - size.height()) / 2)
class Board(QFrame):
msg2Statusbar = pyqtSignal(str)
BoardWidth = 10
BoardHeight = 22
Speed = 300
def __init__(self, parent):
super().__init__(parent)
self.initBoard()
def initBoard(self):
self.timer = QBasicTimer()
self.isWaitingAfterLine = False
self.curX = 0
self.curY = 0
self.numLinesRemoved = 0
self.board = []
self.setFocusPolicy(Qt.StrongFocus)
self.isStarted = False
self.isPaused = False
self.clearBoard()
def shapeAt(self, x, y):
return self.board[(y * Board.BoardWidth) + x]
def setShapeAt(self, x, y, shape):
self.board[(y * Board.BoardWidth) + x] = shape
def squareWidth(self):
return self.contentsRect().width() // Board.BoardWidth
def squareHeight(self):
return self.contentsRect().height() // Board.BoardHeight
def start(self):
if self.isPaused:
return
self.isStarted = True
self.isWaitingAfterLine = False
self.numLinesRemoved = 0
self.clearBoard()
self.msg2Statusbar.emit(str(self.numLinesRemoved))
self.newPiece()
self.timer.start(Board.Speed, self)
def pause(self):
if not self.isStarted:
return
self.isPaused = not self.isPaused
if self.isPaused:
self.timer.stop()
self.msg2Statusbar.emit("paused")
else:
self.timer.start(Board.Speed, self)
self.msg2Statusbar.emit(str(self.numLinesRemoved))
self.update()
def paintEvent(self, event):
painter = QPainter(self)
rect = self.contentsRect()
boardTop = rect.bottom() - Board.BoardHeight * self.squareHeight()
for i in range(Board.BoardHeight):
for j in range(Board.BoardWidth):
shape = self.shapeAt(j, Board.BoardHeight - i - 1)
if shape != Tetrominoe.NoShape:
self.drawSquare(painter,
rect.left() + j * self.squareWidth(),
boardTop + i * self.squareHeight(), shape)
if self.curPiece.shape() != Tetrominoe.NoShape:
for i in range(4):
x = self.curX + self.curPiece.x(i)
y = self.curY - self.curPiece.y(i)
self.drawSquare(
painter, rect.left() + x * self.squareWidth(),
boardTop + (Board.BoardHeight - y - 1) * self.squareHeight(),
self.curPiece.shape())
def keyPressEvent(self, event):
if not self.isStarted or self.curPiece.shape() == Tetrominoe.NoShape:
super(Board, self).keyPressEvent(event)
return
key = event.key()
if key == Qt.Key_P:
self.pause()
return
if self.isPaused:
return
elif key == Qt.Key_Left:
self.tryMove(self.curPiece, self.curX - 1, self.curY)
elif key == Qt.Key_Right:
self.tryMove(self.curPiece, self.curX + 1, self.curY)
elif key == Qt.Key_Down:
self.tryMove(self.curPiece.rotateRight(), self.curX, self.curY)
elif key == Qt.Key_Up:
self.tryMove(self.curPiece.rotateLeft(), self.curX, self.curY)
elif key == Qt.Key_Space:
self.dropDown()
elif key == Qt.Key_D:
self.oneLineDown()
else:
super(Board, self).keyPressEvent(event)
def timerEvent(self, event):
if event.timerId() == self.timer.timerId():
if self.isWaitingAfterLine:
self.isWaitingAfterLine = False
self.newPiece()
else:
self.oneLineDown()
else:
super(Board, self).timerEvent(event)
def clearBoard(self):
for i in range(Board.BoardHeight * Board.BoardWidth):
self.board.append(Tetrominoe.NoShape)
def dropDown(self):
newY = self.curY
while newY > 0:
if not self.tryMove(self.curPiece, self.curX, newY - 1):
break
newY -= 1
self.pieceDropped()
def oneLineDown(self):
if not self.tryMove(self.curPiece, self.curX, self.curY - 1):
self.pieceDropped()
def pieceDropped(self):
for i in range(4):
x = self.curX + self.curPiece.x(i)
y = self.curY - self.curPiece.y(i)
self.setShapeAt(x, y, self.curPiece.shape())
self.removeFullLines()
if not self.isWaitingAfterLine:
self.newPiece()
def removeFullLines(self):
numFullLines = 0
rowsToRemove = []
for i in range(Board.BoardHeight):
n = 0
for j in range(Board.BoardWidth):
if not self.shapeAt(j, i) == Tetrominoe.NoShape:
n = n + 1
if n == 10:
rowsToRemove.append(i)
rowsToRemove.reverse()
for m in rowsToRemove:
for k in range(m, Board.BoardHeight):
for l in range(Board.BoardWidth):
self.setShapeAt(l, k, self.shapeAt(l, k + 1))
numFullLines = numFullLines + len(rowsToRemove)
if numFullLines > 0:
self.numLinesRemoved = self.numLinesRemoved + numFullLines
self.msg2Statusbar.emit(str(self.numLinesRemoved))
self.isWaitingAfterLine = True
self.curPiece.setShape(Tetrominoe.NoShape)
self.update()
def newPiece(self):
self.curPiece = Shape()
self.curPiece.setRandomShape()
self.curX = Board.BoardWidth // 2 + 1
self.curY = Board.BoardHeight - 1 + self.curPiece.minY()
if not self.tryMove(self.curPiece, self.curX, self.curY):
self.curPiece.setShape(Tetrominoe.NoShape)
self.timer.stop()
self.isStarted = False
self.msg2Statusbar.emit("Game over")
def tryMove(self, newPiece, newX, newY):
for i in range(4):
x = newX + newPiece.x(i)
y = newY - newPiece.y(i)
if x < 0 or x >= Board.BoardWidth or \
y < 0 or y >= Board.BoardHeight:
return False
if self.shapeAt(x, y) != Tetrominoe.NoShape:
return False
self.curPiece = newPiece
self.curX = newX
self.curY = newY
self.update()
return True
def drawSquare(self, painter, x, y, shape):
colorTable = [0x000000, 0xCC6666, 0x66CC66, 0x6666CC,
0xCCCC66, 0xCC66CC, 0x66CCCC, 0xDAAA00]
color = QColor(colorTable[shape])
painter.fillRect(x + 1, y + 1, self.squareWidth() - 2,
self.squareHeight() - 2, color)
painter.setPen(color.lighter())
painter.drawLine(x, y + self.squareHeight() - 1, x, y)
painter.drawLine(x, y, x + self.squareWidth() - 1, y)
painter.setPen(color.darker())
painter.drawLine(
x + 1, y + self.squareHeight() - 1,
x + self.squareWidth() - 1, y + self.squareHeight() - 1)
painter.drawLine(
x + self.squareWidth() - 1,
y + self
|
mjg/PyX
|
test/unit/test_data.py
|
Python
|
gpl-2.0
| 3,991
| 0.002255
|
import sys
if sys.path[0] != "../..":
sys.path.insert(0, "../..")
import unittest
import io
from pyx.graph import data
class DataTestCase(unittest.TestCase):
def testPoints(self):
mydata = data.points([[1, 2, 3], [4, 5, 6]], a=1, b=2)
self.assertEqual(mydata.columndata[0], [1, 2])
self.assertEqual(mydata.columns["a"], [1, 4])
self.assertEqual(mydata.columndata[2], [2, 5])
self.assertEqual("c" in list(mydata.columns.keys()), 0)
def testValues(self):
mydata = data.values(a=[1, 4])
self.assertEqual(mydata.columns["a"], [1, 4])
self.assertEqual("c" in list(mydata.columns.keys()), 0)
def testData(self):
mydata = data.points([[1], [2]], a=1)
mydata2 = data.data(mydata, a="2*a", b="2*$1*a", c="4*$(i)*a*$(-1)", context={"i":1})
self.assertEqual(mydata.columns["a"], [1, 2])
self.assertAlmostEqual(mydata2.columns["a"][0], 2.0)
self.assertAlmostEqual(mydata2.columns["a"][1], 4.0)
self.assertAlmostEqual(mydata2.columns["b"][0], 2.0)
self.assertAlmostEqual(mydata2.columns["b"][1], 8.0)
self.assertAlmostEqual(mydata2.columns["c"][0], 4.0)
self.assertAlmostEqual(mydata2.columns["c"][1], 32.0)
mydata3 = data.data(mydata2, a="b", b="2*c")
self.assertEqual(mydata3.columns["a"], mydata2.columns["b"])
self.assertAlmostEqual(mydata3.columns["b"][0], 2*mydata2.columns["c"][0])
self.assertAlmostEqual(mydata3.columns["b"][1], 2*mydata2.columns["c"][1])
a = "nothing"
two = 2
f = lambda x: x*x
mydata = data.points([[1], [2]], a=1)
mydata2 = data.data(mydata, b="two*a", c="two*$1*a", d="f($1)", context=locals())
self.assertEqual(mydata.columndata[0], [1, 2])
self.assertAlmostEqual(mydata2.columns["b"][0], 2.0)
self.assertAlmostEqual(mydata2.columns["b"][1], 4.0)
self.assertAlmostEqual(mydata2.columns["c"][0], 2.0)
self.assertAlmostEqual(mydata2.columns["c"][1], 8.0)
self.assertAlmostEqual(mydata2.columns["d"][0], 1.0)
self.assertAlmostEqual(mydata2.columns["d"][1], 4.0)
def testFile(self):
testfile = io.StringIO("""#a
0
1 eins
2 "2"
3 x"x""")
mydata = data.file(testfile, row=0, a="a", b=2)
self.assertEqual(mydata.columns["row"], [1, 2, 3, 4])
self.assertAlmostEqual(mydata.columns["a"][0], 0.0)
self.assertAlmostEqual(mydata.columns["a"][1], 1.0)
self.assertAlmostEqual(mydata.columns["a"][2], 2.0)
self.assertAlmostEqual(mydata.columns["a"][3], 3.0)
self.assertEqual(mydata.columns["b"][0], None)
self.assertEqual(mydata.columns["b"][1], "eins")
self.assertEqual(mydata.columns["b"][2], "2")
self.assertEqual(mydata.columns["b"][3], "x\"x")
testfile = io.StringIO("""#a
0
1
2
3
4
5
6
7
8
9""")
mydata = data.file(testfile, title="title", skiphead=3, skiptail=2, every=2, row=0)
self.assertEqual(mydata.columns["row"], [4, 6, 8])
self.assertEqual(mydata.title, "title")
def testSec(self):
testfile = io.StringIO("""[sec1]
opt1=a1
opt2=a2
val=1
val=2
[sec2]
opt1=a4
opt2=a5
val=2
val=1
[sec1]
opt3=a3""")
mydata = data.conffile(testfile, sec=0, a="opt1", b="opt2", c="opt3", d="val")
self.assertEqual(mydata.columns["sec"], ["sec1", "sec2"])
self.assertEqual(mydata.columns["a"], ["a1", "a4"])
self.assertEqual(mydata.columns["b"], ["a2", "a5"])
self.assertEqual(mydata.columns["c"], ["a3", None])
self.assertAlmostEqual(mydata.columns["d"][0], 2.0)
self.assertAlmostEqual(mydata.columns["d"][1], 1.0)
def testParamfunction(self):
mydata = data.paramfunction("k", 0, 9, "x, y = k, -k", points=10)
for i in range(10):
self.assertEqual(mydata.columns["x"][i], i)
self.assertEqual(mydata.columns["y"][i], -i)
if __name__ == "__main__":
unittest.main()
|
magne-max/zipline-ja
|
zipline/examples/buy_test.py
|
Python
|
apache-2.0
| 888
| 0
|
from zipline.api import sid, symbol, order, record, get_datetime
import logbook
import pandas as pd
log = logbook.Logger("ZiplineLog")
def initialize(context):
context.set_benchmark(symbol('TOPIX'))
context.assets = [
symbol(sym_str)
for sym_str in [
'2121',
'4689',
'7162',
]
]
def handle_data(context, data):
# log.info(data.history(context.assets, "price", 20, "1d"))
# log.info(data.current(context.assets, "volume"))
# exchange_ts = pd.Timestamp(get_datetime()).tz_convert('Asia/Tokyo')
# exchange_ts = pd.Timestamp(get_datetime())
log.info(pd.Timestamp(get_datetime()).tz_convert('Asia/Tokyo'))
log.info(str(data[symbol('TOPIX')].price))
order(symbol('4689'), -10)
record(Yahoo=data[symbol('4689')].price)
def analyze(context, perf):
pass # print(perf.iloc[-1].T)
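# Hedged note (not in the original file): this example is executed by
# zipline's runner, e.g. the CLI or run_algorithm(); the symbols above are
# Tokyo Stock Exchange codes, and the exact invocation (bundle, calendar,
# date range) is deployment-specific.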
|
tzpBingo/github-trending
|
codespace/python/tencentcloud/iottid/v20190411/models.py
|
Python
|
mit
| 15,254
| 0.002742
|
# -*- coding: utf8 -*-
# Copyright (c) 2017-2021 THL A29 Limited, a Tencent company. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
from tencentcloud.common.abstract_model import AbstractModel
class AuthTestTidRequest(AbstractModel):
"""AuthTestTid请求参数结构体
"""
def __init__(self):
r"""
:param Data: 设备端SDK填入测试TID参数后生成的加密数据串
:type Data: str
"""
self.Data = None
def _deserialize(self, params):
self.Data = params.get("Data")
member_set = set(params.keys())
for name, value in vars(self).items():
if name in member_set:
member_set.remove(name)
if len(member_set) > 0:
warnings.warn("%s fields are useless." % ",".join(member_set))
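# Note on the pattern above (and repeated below): _deserialize() warns,
# rather than fails, when the API response carries fields that this SDK
# version does not model yet.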
class AuthTestTidResponse(AbstractModel):
"""AuthTestTid返回参数结构体
"""
def __init__(self):
r"""
:param Pass: 认证结果
:type Pass: bool
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.Pass = None
self.RequestId = None
def _deserialize(self, params):
self.Pass = params.get("Pass")
self.RequestId = params.get("RequestId")
class BurnTidNotifyRequest(AbstractModel):
"""BurnTidNotify请求参数结构体
"""
def __init__(self):
r"""
:param OrderId: 订单编号
:type OrderId: str
:param Tid: TID编号
:type Tid: str
"""
self.OrderId = None
self.Tid = None
def _deserialize(self, params):
self.OrderId = params.get("OrderId")
self.Tid = params.get("Tid")
member_set = set(params.keys())
for name, value in vars(self).items():
if name in member_set:
member_set.remove(name)
if len(member_set) > 0:
warnings.warn("%s fields are useless." % ",".join(member_set))
class BurnTidNotifyResponse(AbstractModel):
"""BurnTidNotify返回参数结构体
"""
def __init__(self):
r"""
:param Tid: 接收回执成功的TID
:type Tid: str
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.Tid = None
self.RequestId = None
def _deserialize(self, params):
self.Tid = params.get("Tid")
self.RequestId = params.get("RequestId")
class DeliverTidNotifyRequest(AbstractModel):
"""DeliverTidNotify请求参数结构体
"""
def __init__(self):
r"""
:param OrderId: 订单编号
:type OrderId: str
:param Tid: TID编号
:type Tid: str
"""
self.OrderId = None
self.Tid = None
def _deserialize(self, params):
self.OrderId = params.get("OrderId")
self.Tid = params.get("Tid")
member_set = set(params.keys())
for name, value in vars(self).items():
if name in member_set:
member_set.remove(name)
if len(member_set) > 0:
warnings.warn("%s fields are useless." % ",".join(member_set))
class DeliverTidNotifyResponse(AbstractModel):
"""DeliverTidNotify返回参数结构体
"""
def __init__(self):
r"""
:param RemaindCount: 剩余空发数量
:type RemaindCount: int
:param Tid: 已回执的TID编码
:type Tid: str
:param ProductKey: 产品公钥
:type ProductKey: str
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.RemaindCount = None
self.Tid = None
self.ProductKey = None
self.RequestId = None
def _deserialize(self, params):
self.RemaindCount = params.get("RemaindCount")
self.Tid = params.get("Tid")
self.ProductKey = params.get("ProductKey")
self.RequestId = params.get("RequestId")
class DeliverTidsRequest(AbstractModel):
"""DeliverTids请求参数结构体
"""
def __init__(self):
r"""
:param OrderId: 订单ID
:type OrderId: str
:param Quantity: 数量,1~100
:type Quantity: int
"""
self.OrderId = None
self.Quantity = None
def _deserialize(self, params):
self.OrderId = params.get("OrderId")
self.Quantity = params.get("Quantity")
member_set = set(params.keys())
for name, value in vars(self).items():
if name in member_set:
member_set.remove(name)
if len(member_set) > 0:
warnings.warn("%s fields are useless." % ",".join(member_set))
class DeliverTidsResponse(AbstractModel):
"""DeliverTids返回参数结构体
"""
def __init__(self):
r"""
:param TidSet: 空发的TID信息
注意:此字段可能返回 null,表示取不到有效值。
:type TidSet: list of TidKeysInfo
:param ProductKey: 产品公钥
:type ProductKey: str
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.TidSet = None
self.ProductKey = None
self.RequestId = None
def _deserialize(self, params):
if params.get("TidSet") is not None:
self.TidSet = []
for item in params.get("TidSet"):
obj = TidKeysInfo()
obj._deserialize(item)
self.TidSet.append(obj)
self.ProductKey = params.get("ProductKey")
self.RequestId = params.get("RequestId")
class DescribeAvailableLibCountRequest(AbstractModel):
"""DescribeAvailableLibCount请求参数结构体
"""
def __init__(self):
r"""
:param OrderId: 订单编号
:type OrderId: str
"""
self.OrderId = None
def _deserialize(self, params):
self.OrderId = params.get("OrderId")
member_set = set(params.keys())
for name, value in vars(self).items():
if name in member_set:
member_set.remove(name)
if len(member_set) > 0:
warnings.warn("%s fields are useless." % ",".join(member_set))
class DescribeAvailableLibCountResponse(AbstractModel):
"""DescribeAvailableLibCount返回参数结构体
"""
def __init__(self):
r"""
:param Quantity: 可空发的白盒密钥数量
:type Quantity: int
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.Quantity = None
self.RequestId = None
def _deserialize(self, params):
self.Quantity = params.get("Quantity")
self.RequestId = params.get("RequestId")
class DescribePermissionRequest(AbstractModel):
"""DescribePermission请求参数结构体
"""
class DescribePermissionResponse(AbstractModel):
"""DescribePermission返回参数结构体
"""
def __init__(self):
r"""
:param EnterpriseUser: 企业用户
:type EnterpriseUser: bool
:param DownloadPermission: 下载控制台权限
:type DownloadPermission: str
:param UsePermission: 使用控制台权限
:type UsePermission: str
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.EnterpriseUser = None
self.DownloadPermission = None
self.UsePermission = None
self.RequestId = None
def _deserialize(self, params):
self.EnterpriseUser = params.get("EnterpriseUser")
self.DownloadPermission = params.get("DownloadPermission")
self.UsePermission = params.get("UsePermission")
self.RequestId = params.get("RequestId")
class DownloadTidsRequest(AbstractModel):
"""DownloadTids请求参数结构体
"""
def __init__(self):
r"""
:param OrderI
|
WGBH/FixIt
|
mla_game/apps/transcript/tests.py
|
Python
|
mit
| 2,490
| 0
|
from datetime import time
from django.test import TestCase
from django.core.files.uploadedfile import SimpleUploadedFile
from .models import Transcript, TranscriptPhrase
class TranscriptTestCase(TestCase):
def setUp(self):
fake_file = SimpleUploadedFile(
'not-really-a-file.txt',
'this is what\'s in the file that isn\'t a file'.encode()
)
Transcript.objects.create(
name='test transcript',
original_transcript=fake_file
)
def test_transcript_file(self):
fake = Transcript.objects.get(name='test transcript')
self.assertEqual(
fake.name,
'test transcript'
)
self.assertEqual(
fake.original_transcript.name,
'original_transcripts/not-really-a-file.txt',
)
self.assertEqual(
fake.original_transcript.read(),
'this is what\'s in the file that isn\'t a file'.encode()
)
def tearDown(self):
fake = Transcript.objects.get(name='test transcript')
fake.original_transcript.delete(False)
class TranscriptPhraseTestCase(TestCase):
def setUp(self):
fake_file = SimpleUploadedFile(
'not-really-a-file2.txt',
'this is what\'s in the file that isn\'t a file'.encode()
)
Transcript.objects.create(
name='test transcript',
original_transcript=fake_file
)
TranscriptPhrase.objects.create(
original_phrase='old and wrong',
time_begin=time(0, 1, 0),
time_end=time(0, 2, 10),
transcript=Transcript.objects.get(
name='test transcript'
)
)
def test_transcript_phrase(self):
fake_transcript = Transcript.objects.get(name='test transcript')
fake_phrase = TranscriptPhrase.objects.get(
original_phrase='old and wrong'
)
self.assertEqual(
fake_phrase.original_phrase,
'old and wrong'
)
self.assertEqual(
fake_phrase.time_begin,
time(0, 1, 0)
)
self.assertEqual(
fake_phrase.time_end,
time(0, 2, 10)
)
self.assertEqual(
fake_phrase.transcript,
fake_transcript
)
def tearDown(self):
fake = Transcript.objects.get(name='test transcript')
fake.original_transcript.delete(False)
|
miptliot/edx-platform
|
lms/djangoapps/shoppingcart/management/tests/test_retire_order.py
|
Python
|
agpl-3.0
| 2,853
| 0
|
"""Tests for the retire_order command"""
from tempfile import NamedTemporaryFile
from django.core.management import call_command
from course_modes.models import CourseMode
from shoppingcart.models import CertificateItem, Order
from student.tests.factories import UserFactory
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory
class TestRetireOrder(ModuleStoreTestCase):
"""Test the retire_order command"""
def setUp(self):
super(TestRetireOrder, self).setUp()
course = CourseFactory.create()
self.course_key = course.id
CourseMode.objects.create(
course_id=self.course_key,
mode_slug=CourseMode.HONOR,
mode_display_name=CourseMode.HONOR
)
# set up test carts
self.cart, __ = self._create_cart()
self.paying, __ = self._create_cart()
self.paying.start_purchase()
self.already_defunct_cart, __ = self._create_cart()
self.already_defunct_cart.retire()
self.purchased, self.purchased_item = self._create_cart()
self.purchased.status = "purchased"
self.purchased.save()
self.purchased_item.status = "purchased"
self.purchased_item.save()
def test_retire_order(self):
"""Test the retire_order command"""
nonexistent_id = max(order.id for order in Order.objects.all()) + 1
order_ids = [
self.cart.id,
self.paying.id,
self.already_defunct_cart.id,
self.purchased.id,
nonexistent_id
]
self._create_tempfile_and_call_command(order_ids)
self.assertEqual(
Order.objects.get(id=self.cart.id).status, "defunct-cart"
)
self.assertEqual(
Order.objects.get(id=self.paying.id).status, "defunct-paying"
)
self.assertEqual(
Order.objects.get(id=self.already_defunct_cart.id).status,
"defunct-cart"
)
self.assertEqual(
Order.objects.get(id=self.purchased.id).status, "purchased"
)
def _create_tempfile_and_call_command(self, order_ids):
"""
Takes a list of order_ids, writes them to a tempfile, and then runs the
"retire_order" command on the tempfile
"""
with NamedTemporaryFile() as temp:
temp.write("\n".join(str(order_id) for order_id in order_ids))
temp.seek(0)
call_command('retire_order', temp.name)
def _create_cart(self):
"""Creates a cart and adds a CertificateItem to it"""
cart = Order.get_cart_for_user(UserFactory.create())
item = CertificateItem.add_to_order(
cart, self.course_key, 10, 'honor', currency='usd'
)
return cart, item
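# Hedged usage note (not in the original file): the command under test reads
# one order id per line, so outside the tests it would be invoked as e.g.
#     ./manage.py retire_order order_ids.txt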
|
paltman/django-configurations
|
configurations/__init__.py
|
Python
|
bsd-3-clause
| 175
| 0
|
# flake8: noqa
from .base import Settings, Configuration
from .decorators import pristinemethod
__version__ = '0.5'
__all__ = ['Configuration', 'pristinemethod', 'Settings']
|
jalr/privacyidea
|
privacyidea/lib/policy.py
|
Python
|
agpl-3.0
| 57,193
| 0.000385
|
# -*- coding: utf-8 -*-
#
# 2016-05-07 Cornelius Kölbel <cornelius.koelbel@netknights.it>
# Add realm dropdown
# 2016-04-06 Cornelius Kölbel <cornelius.koelbel@netknights.it>
# Add time dependency in policy
# 2016-02-22 Cornelius Kölbel <cornelius.koelbel@netknights.it>
# Add RADIUS passthru policy
# 2016-02-05 Cornelius Kölbel <cornelius.koelbel@netknights.it>
# Add tokenwizard in scope UI
# 2015-12-30 Cornelius Kölbel <cornelius.koelbel@netknights.it>
# Add password reset policy
# 2015-12-28 Cornelius Kölbel <cornelius.koelbel@netknights.it>
# Add registration policy
# 2015-12-16 Cornelius Kölbel <cornelius.koelbel@netknights.it>
# Add tokenissuer policy
# 2015-11-29 Cornelius Kölbel <cornelius.koelbel@netknights.it>
# Add getchallenges policy
# 2015-10-31 Cornelius Kölbel <cornelius.koelbel@netknights.it>
# Add last_auth policy.
# 2015-10-30 Cornelius Kölbel <cornelius.koelbel@netknights.it>
# Display user details in token list
# 2015-10-26 Cornelius Kölbel <cornelius.koelbel@netknights.it>
# Add default token type for enrollment
# 2015-10-14 Cornelius Kölbel <cornelius.koelbel@netknights.it>
# Add auth_max_success and auth_max_fail actions to
# scope authorization
# 2015-10-09 Cornelius Kölbel <cornelius.koelbel@netknights.it>
# Add token_page_size and user_page_size policy
# 2015-09-06 Cornelius Kölbel <cornelius.koelbel@netknights.it>
# Add challenge_response authentication policy
# 2015-06-30 Cornelius Kölbel <cornelius.koelbel@netknights.it>
# Add the OTP PIN handling
# 2015-06-29 Cornelius Kölbel <cornelius.koelbel@netknights.it>
# Add the mangle policy
# 2015-04-03 Cornelius Kölbel <cornelius.koelbel@netknights.it>
# Add WebUI logout time.
# 2015-03-27 Cornelius Kölbel <cornelius.koelbel@netknights.it>
# Add PIN policies in USER scope
# 2015-02-06 Cornelius Kölbel <cornelius@privacyidea.org>
# Rewrite for flask migration.
# Policies are not handled by decorators as
# 1. precondition for API calls
# 2. internal modifications of LIB-functions
# 3. postcondition for API calls
#
# Jul 07, 2014 add check_machine_policy, Cornelius Kölbel
# May 08, 2014 Cornelius Kölbel
#
# License: AGPLv3
# contact: http://www.privacyidea.org
#
# privacyIDEA is a fork of LinOTP
# Copyright (C) 2010 - 2014 LSE Leading Security Experts GmbH
# License: AGPLv3
# contact: http://www.linotp.org
# http://www.lsexperts.de
# linotp@lsexperts.de
#
# This code is free software; you can redistribute it and/or
# modify it under the terms of the GNU AFFERO GENERAL PUBLIC LICENSE
# License as published by the Free Software Foundation; either
# version 3 of the License, or any later version.
#
# This code is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU AFFERO GENERAL PUBLIC LICENSE for more details.
#
# You should have received a copy of the GNU Affero General Public
# License along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""
Base function to handle the policy entries in the database.
This module only depends on the db/models.py
The functions of this module are tested in tests/test_lib_policy.py
A policy has the attributes
* name
* scope
* action
* realm
* resolver
* user
* client
* active
``name`` is the unique identifier of a policy. ``scope`` is the area,
where this policy is meant for. This can be values like admin, selfservice,
authentication...
``scope`` takes only one value.
``active`` is bool and indicates, whether a policy is active or not.
``action``, ``realm``, ``resolver``, ``user`` and ``client`` can take a comma
separated list of values.
realm and resolver
------------------
If these are empty or '*', this policy matches each requested realm.
user
----
If the user is empty or '*', this policy matches each user.
You can exclude users from matching this policy, by prepending a '-' or a '!'.
``*, -admin`` will match for all users except the admin.
client
------
The client is identified by its IP address. A policy can contain a list of
IP addresses or subnets.
You can exclude clients from subnets by prepending the client with a '-' or
a '!'.
``172.16.0.0/24, -172.16.0.17`` will match each client in the subnet except
the 172.16.0.17.
time
----
You can specify a time in which the policy should be active.
Time formats are
<dow>-<dow>:<hh>:<mm>-<hh>:<mm>, ...
<dow>:<hh>:<mm>-<hh>:<mm>
<dow>:<hh>-<hh>
and any combination of it. "dow" being day of week Mon, Tue, Wed, Thu, Fri,
Sat, Sun.
"""
from .log import log_with
from configobj import ConfigObj
from netaddr import IPAddress
from netaddr import IPNetwork
from gettext import gettext as _
import logging
from ..models import (Policy, db)
from privacyidea.lib.config import (get_token_classes, get_token_types)
from privacyidea.lib.error import ParameterError, PolicyError
from privacyidea.lib.realm import get_realms
from privacyidea.lib.resolver import get_resolver_list
from privacyidea.lib.smtpserver import get_smtpservers
from privacyidea.lib.radiusserver import get_radiusservers
from privacyidea.lib.utils import check_time_in_range
log = logging.getLogger(__name__)
optional = True
required = False
class SCOPE(object):
__doc__ = """This is the list of the allowed scopes that can be used in
policy definitions.
"""
AUTHZ = "authorization"
ADMIN = "admin"
AUTH = "authentication"
AUDIT = "audit"
USER = "user" # was selfservice
ENROLL = "enrollment"
GETTOKEN = "gettoken"
WEBUI = "webui"
REGISTER = "register"
class ACTION(object):
__doc__ = """This is the list of usual actions."""
ASSIGN = "assign"
AUDIT = "auditlog"
AUTHITEMS = "fetch_authentication_items"
AUTHMAXSUCCESS = "auth_max_success"
AUTHMAXFAIL = "auth_max_fail"
AUTOASSIGN = "autoassignment"
CACONNECTORREAD = "caconnectorread"
CACONNECTORWRITE = "caconnectorwrite"
CACONNECTORDELETE = "caconnectordelete"
CHALLENGERESPONSE = "challenge_response"
GETCHALLENGES = "getchallenges"
COPYTOKENPIN = "copytokenpin"
COPYTOKENUSER = "copytokenuser"
DEFAULT_TOKENTYPE = "default_tokentype"
DELETE = "delete"
DISABLE = "disable"
EMAILCONFIG = "smtpconfig"
ENABLE = "enable"
ENCRYPTPIN = "encrypt_pin"
GETSERIAL = "getserial"
GETRANDOM = "getrandom"
IMPORT = "importtokens"
LASTAUTH = "last_auth"
LOGINMODE = "login_mode"
LOGOUTTIME = "logout_time"
LOSTTOKEN = 'losttoken'
LOSTTOKENPWLEN = "losttoken_PW_length"
LOSTTOKENPWCONTENTS = "losttoken_PW_contents"
LOSTTOKENVALID = "losttoken_valid"
MACHINERESOLVERWRITE = "mresolverwrite"
MACHINERESOLVERDELETE = "mresolverdelete"
MACHINELIST = "machinelist"
MACHINETOKENS = "manage_machine_tokens"
MANGLE = "mangle"
MAXTOKENREALM = "max_token_per_realm"
MAXTOKENUSER = "max_token_per_user"
NODETAILSUCCESS = "no_detail_on_success"
NODETAILFAIL = "no_detail_on_fail"
OTPPIN = "otppin"
OTPPINRANDOM = "otp_pin_random"
OTPPINMAXLEN = 'otp_pin_maxlength'
OTPPINMINLEN = 'otp_pin_minlength'
OTPPINCONTENTS = 'otp_pin_contents'
PASSNOTOKEN = "passOnNoToken"
PASSNOUSER = "passOnNoUser"
PASSTHRU = "passthru"
PASSWORDRESET = "password_reset"
PINHANDLING = "pinhandling"
POLICYDELETE = "policydelete"
POLICYWRITE = "policywrite"
POLICYTEMPLATEURL = "policy_template_url"
REALM = "realm"
REMOTE_USER = "remote_user"
REQUIREDEMAIL = "requiredemail"
RESET = "reset"
RESOLVERDELETE = "resolverdelete"
RESOLVERWRITE = "resolverwrite"
RESOLVER = "resolver"
RESYNC = "resync"
REVOKE = "revoke"
SET = "set"
SETPIN = "setpin"
SETREALM = "setrealm"
SERIAL = "serial"
SYSTEMDELETE = "configdelete"
SYSTEMWRITE = "configwrite"
CONFI
|
DisposaBoy/GoSublime-next
|
gosubl/nineo_builtins.py
|
Python
|
mit
| 3,455
| 0.041389
|
from . import about
from . import gs
from . import gsq
from . import mg9
from . import nineo
from . import vu
import os
import pprint
import sublime
def gs_init(_={}):
g = globals()
p = 'bi_'
l = len(p)
for nm in list(g.keys()):
if nm.startswith(p):
k = nm[l:].replace('__', '.').replace('_', '-')
nineo.builtin(k, g[nm])
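# e.g. (illustrative): the renaming above registers bi_gs__cmdump as the
# builtin 'gs.cmdump' and bi_gs__build_margo as 'gs.build-margo'
# ('__' becomes '.', then any remaining '_' becomes '-').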
def _do_cl(c, k):
if c.args:
c.ok = k != 'any'
for cn in c.args:
c.cl[k].append(c.sess.cmd(cn, set_stream=c.set_stream))
else:
c.ok = True
c.resume()
def bi_all(c):
_do_cl(c, 'all')
def bi_any(c):
_do_cl(c, 'any')
def bi_each(c):
_do_cl(c, 'each')
def bi_version(c):
c.done(about.VERSION)
def bi_true(c):
c.done()
def bi_false(c):
c.fail()
def bi_confirm(c):
if c.args:
c.resume(sublime.ok_cancel_dialog(' '.join(c.args)))
else:
c.fail('Usage: confirm <message>')
def _dbg_c(c, keys):
d = c.__dict__
if keys:
v = {}
for k in keys:
v[k] = d.get(k)
else:
v = d
return pprint.pformat(v)
def bi_gs__cmdump(c):
if len(c.args) == 0 or not gs.is_a(c.args[0], []):
c.fail('Usage: gs.cmdump <keys-list> [cmd [args...]]')
return
keys = c.args[0]
args = c.args[1:]
s = _dbg_c(c, keys)
print('gs.cmdump: %s' % s)
c.done(s)
def bi_gs__cmdebug(c):
if len(c.args) == 0 or not gs.is_a(c.args[0], []):
c.fail('Usage: gs.cmdebug <keys-list> [cmd [args...]]')
return
keys = c.args[0]
args = c.args[1:]
def cb(x):
print('gs.cmdebug: %s' % _dbg_c(x, keys))
x.resume()
c.resume(x.ok)
c.sess.cmd(args, cb=cb, set_stream=c.set_stream).start()
def bi_echo(c, ok=True):
c.sess.write(' '.join(c.args))
c.resume(ok)
def bi_fail(c):
bi_echo(c, False)
def bi_gs__synchk(c):
def f(res, err):
errs = res.get('Errors', [])
if errs:
for e in errs:
c.attrs.append({
'fn': e.get('Fn', ''),
'message': e.get('Message', ''),
'pos': '%s:%s' % (e.get('Line', -1), e.get('Column', 0)),
})
c.fail()
else:
c.done()
if c.args:
files = [{'Fn': fn} for fn in c.args]
else:
vv = c.sess.vv
fn = vv.fn()
if fn and not vv.view().is_dirty():
files = [{'Fn': fn}]
else:
files = [{'Src': vv.src()}]
if not c.hl:
c.hl = {
'ctx': 'gs.synchk:%s' % vv.vfn(),
}
mg9.acall('synchk', {'Files': files}, f)
def bi_go(c):
if c.args and c.args[0] in ('build', 'install', 'run', 'test', 'vet'):
c.sess.save_all(c.wd)
if not c.hl.get('ctx'):
s = 'compile'
if c.args[0] == 'vet':
s = 'vet'
c.hl['ctx'] = ' '.join(('go', s, c.env.get('_wd_or_vfn', '')))
# note: do *not* resume c, we're *switching* to exec_c, not *starting* a new command
nineo.exec_c(c)
def bi_cd(c):
try:
wd = gs.abspath(' '.join(c.args), dir=c.wd)
os.chdir(wd)
c.sess.wr.vv.view().run_command('gs9o_init', {'wd': wd})
c.done()
except Exception as ex:
c.fail('Cannot chdir: %s' % ex)
def bi_help(c):
vu.open(gs.dist_path('9o.md'))
c.done()
def bi_share(c):
vv = vu.active()
view = vv.view()
if view is None or view.score_selector(0, 'source.go') <= 0:
c.fail('not sharing non-go src')
return
def f(res, err):
if err:
c.fail(err)
else:
s = res.get('Url', '').strip()
if s:
sublime.set_clipboard(s)
c.done(s + ' (url copied to the clipboard)')
else:
c.fail('no url received')
mg9.share(vv.src(), f)
def bi_gs__build_margo(c):
def f():
out = mg9.build_mg()
if out == 'ok':
mg9.killSrv()
c.done('ok')
else:
c.fail(out)
gsq.do('GoSublime', f, msg='Rebuilding MarGo')
|
thor/django-localflavor
|
localflavor/au/models.py
|
Python
|
bsd-3-clause
| 5,128
| 0.001755
|
from django.db.models import CharField
from django.utils.translation import ugettext_lazy as _
from localflavor.deprecation import DeprecatedPhoneNumberField
from . import forms
from .au_states import STATE_CHOICES
from .validators import AUBusinessNumberFieldValidator, AUCompanyNumberFieldValidator, AUTaxFileNumberFieldValidator
class AUStateField(CharField):
"""
A model field that stores the three-letter Australian state abbreviation in the database.
It is represented with :data:`~localflavor.au.au_states.STATE_CHOICES`` choices.
"""
description = _("Australian State")
def __init__(self, *args, **kwargs):
kwargs['choices'] = STATE_CHOICES
kwargs['max_length'] = 3
super(AUStateField, self).__init__(*args, **kwargs)
def deconstruct(self):
name, path, args, kwargs = super(AUStateField, self).deconstruct()
del kwargs['choices']
return name, path, args, kwargs
class AUPostCodeField(CharField):
"""
A model field that stores the four-digit Australian postcode in the database.
This field is represented by forms as a :class:`~localflavor.au.forms.AUPostCodeField` field.
"""
description = _("Australian Postcode")
def __init__(self, *args, **kwargs):
kwargs['max_length'] = 4
super(AUPostCodeField, self).__init__(*args, **kwargs)
def formfield(self, **kwargs):
defaults = {'form_class': forms.AUPostCodeField}
defaults.update(kwargs)
return super(AUPostCodeField, self).formfield(**defaults)
class AUPhoneNumberField(CharField, DeprecatedPhoneNumberField):
"""
A model field that checks that the value is a valid Australian phone number (ten digits).
.. deprecated:: 1.4
Use the django-phonenumber-field_ library instead.
.. _django-phonenumber-field: https://github.com/stefanfoulis/django-phonenumber-field
"""
description = _("Australian Phone number")
def __init__(self, *args, **kwargs):
kwargs['max_length'] = 20
super(AUPhoneNumberField, self).__init__(*args, **kwargs)
def formfield(self, **kwargs):
defaults = {'form_class': forms.AUPhoneNumberField}
defaults.update(kwargs)
return super(AUPhoneNumberField, self).formfield(**defaults)
class AUBusinessNumberField(CharField):
"""
A model field that checks that the value is a valid Australian Business Number (ABN).
.. versionadded:: 1.3
"""
description = _("Australian Business Number")
validators = [AUBusinessNumberFieldValidator()]
def __init__(self, *args, **kwargs):
kwargs['max_length'] = 11
super(AUBusinessNumberField, self).__init__(*args, **kwargs)
def formfield(self, **kwargs):
defaults = {'form_class': forms.AUBusinessNumberField}
defaults.update(kwargs)
return super(AUBusinessNumberField, self).formfield(**defaults)
def to_python(self, value):
"""Ensure the ABN is stored without spaces."""
value = super(AUBusinessNumberField, self).to_python(value)
if value is not None:
return ''.join(value.split())
return value
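# Hedged usage sketch (not in the original file): to_python() normalises
# whitespace, so an ABN entered as '51 824 753 556' (an illustrative number)
# is stored as '51824753556'.
# AUBusinessNumberField().to_python('51 824 753 556') == '51824753556'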
class AUCompanyNumberField(CharField):
"""
A model field that checks that the value is a valid Australian Company Number (ACN).
.. versionadded:: 1.5
"""
description = _("Australian Company Number")
validators = [AUCompanyNumberFieldValidator()]
def __init__(self, *args, **kwargs):
kwargs['max_length'] = 9
super(AUCompanyNumberField, self).__init__(*args, **kwargs)
def formfield(self, **kwargs):
defaults = {'form_class': forms.AUCompanyNumberField}
defaults.update(kwargs)
return super(AUCompanyNumberField, self).formfield(**defaults)
def to_python(self, value):
"""Ensure the ACN is stored without spaces."""
value = super(AUCompanyNumberField, self).to_python(value)
if value is not None:
return ''.join(value.split())
return value
class AUTaxFileNumberField(CharField):
"""
A model field that checks that the value is a valid Tax File Number (TFN).
A TFN is a number issued to a person by the Commissioner of Taxation and
is used to verify client identity and establish their income levels.
It is an eight- or nine-digit number without any embedded meaning.
.. versionadded:: 1.4
"""
description = _("Australian Tax File Number")
validators = [AUTaxFileNumberFieldValidator()]
def __init__(self, *args, **kwargs):
kwargs['max_length'] = 11
super(AUTaxFileNumberField, self).__init__(*args, **kwargs)
def formfield(self, **kwargs):
defaults = {'form_class': forms.AUTaxFileNumberField}
defaults.update(kwargs)
return super(AUTaxFileNumberField, self).formfield(**defaults)
def to_python(self, value):
"""Ensure the TFN is stored without spaces."""
value = super(AUTaxFileNumberField, self).to_python(value)
if value is not None:
return ''.join(value.split())
return value
|
Crystal-SDS/filter-middleware
|
crystal_filter_middleware/filters/storlet.py
|
Python
|
gpl-3.0
| 6,161
| 0.000812
|
'''
A Mini-implementation of the Storlet middleware filter.
@author: josep sampe
'''
from swift.common.utils import get_logger
from swift.common.utils import register_swift_info
from swift.common.swob import Request
from swift.common.utils import config_true_value
from storlets.swift_middleware.handlers.base import SwiftFileManager
from swift.common.swob import wsgify
class StorletFilter(object):
def __init__(self, app, conf):
self.app = app
self.conf = conf
self.exec_server = self.conf.get('execution_server')
self.logger = get_logger(self.conf, log_route='storlet_filter')
self.filter_data = self.conf['filter_data']
self.parameters = self.filter_data['params']
self.gateway_class = self.conf['storlets_gateway_module']
self.sreq_class = self.gateway_class.request_class
self.storlet_container = conf.get('storlet_container')
self.storlet_dependency = conf.get('storlet_dependency')
self.log_container = conf.get('storlet_logcontainer')
self.client_conf_file = '/etc/swift/storlet-proxy-server.conf'
self.register_info()
def register_info(self):
register_swift_info('storlet_filter')
def _setup_gateway(self):
"""
Setup gateway instance
"""
self.gateway = self.gateway_class(self.conf, self.logger, self.scope)
def _augment_storlet_request(self, req):
"""
Add to request the storlet parameters to be used in case the request
is forwarded to the data node (GET case)
:param params: parameters to be augmented to request
"""
req.headers['X-Storlet-Language'] = self.filter_data['language']
req.headers['X-Storlet-Main'] = self.filter_data['main']
req.headers['X-Storlet-Dependency'] = self.filter_data['dependencies']
req.headers['X-Storlet-Content-Length'] = self.filter_data['size']
req.headers['X-Storlet-Generate-Log'] = False
req.headers['X-Storlet-X-Timestamp'] = 0
def _get_storlet_invocation_options(self, req):
options = dict()
filtered_key = ['X-Storlet-Range', 'X-Storlet-Generate-Log']
for key in req.headers:
prefix = 'X-Storlet-'
if key.startswith(prefix) and key not in filtered_key:
new_key = 'storlet_' + \
key[len(prefix):].lower().replace('-', '_')
options[new_key] = req.headers.get(key)
generate_log = req.headers.get('X-Storlet-Generate-Log')
options['generate_log'] = config_true_value(generate_log)
options['scope'] = self.scope
options['file_manager'] = \
SwiftFileManager(self.account, self.storlet_container,
self.storlet_dependency, self.log_container,
self.client_conf_file, self.logger)
return options
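# e.g. (illustrative): an incoming 'X-Storlet-Extra-Resources: obj1' header
# becomes options['storlet_extra_resources'] = 'obj1' via the renaming above.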
def _build_storlet_request(self, req_resp, params, data_iter):
storlet_id = self.storlet_name
new_env = dict(req_resp.environ)
req = Request.blank(new_env['PATH_INFO'], new_env)
req.headers['X-Run-Storlet'] = self.storlet_name
self._augment_storlet_request(req)
options = self._get_storlet_invocation_options(req)
if hasattr(data_iter, '_fp'):
sreq = self.sreq_class(storlet_id, params, dict(),
data_fd=data_iter._fp.fileno(),
options=options)
else:
sreq = self.sreq_class(storlet_id, params, dict(),
data_iter, options=options)
return sreq
def _call_gateway(self, req_resp, params, crystal_iter):
sreq = self._build_storlet_request(req_resp, params, crystal_iter)
sresp = self.gateway.invocation_flow(sreq)
return sresp.data_iter
@wsgify
def __call__(self, req):
if req.method in ('GET', 'PUT'):
storlet = self.filter_data.pop('name')
params = self.parameters
self.storlet_name = storlet
etag = None
try:
if self.exec_server == 'proxy':
_, self.account, _, _ = req.split_path(4, 4, rest_with_last=True)
elif self.exec_server == 'object':
_, _, self.account, _, _ = req.split_path(5, 5, rest_with_last=True)
except:
# No object Request
return req.get_response(self.app)
self.scope = self.account[5:18]
self.logger.info('Go to execute ' + storlet +
' storlet with parameters "' + str(params) + '"')
self._setup_gateway()
if 'Etag' in req.headers.keys():
etag = req.headers.pop('Etag')
if req.method == 'GET':
response = req.get_response(self.app)
data_iter = response.app_iter
response.app_iter = self._call_gateway(response, params, data_iter)
if 'Content-Length' in response.headers:
response.headers.pop('Content-Length')
if 'Transfer-Encoding' in response.headers:
response.headers.pop('Transfer-Encoding')
elif req.method == 'PUT':
reader = req.environ['wsgi.input'].read
data_iter = iter(lambda: reader(65536), '')
req.environ['wsgi.input'] = self._call_gateway(req, params, data_iter)
if 'CONTENT_LENGTH' in req.environ:
req.environ.pop('CONTENT_LENGTH')
req.headers['Transfer-Encoding'] = 'chunked'
response = req.get_response(self.app)
if etag:
response.headers['etag'] = etag
else:
response.headers['etag'] = ''
return response
return req.get_response(self.app)
def filter_factory(global_conf, **local_conf):
conf = global_conf.copy()
conf.update(local_conf)
def storlet_filter(app):
return StorletFilter(app, conf)
return storlet_filter
|
a358003542/expython
|
expython/pattern/__init__.py
|
Python
|
mit
| 1,439
| 0.002429
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from collections import UserList
import logging
logger = logging.getLogger(__name__)
class CycleList(UserList):
"""
An iterable that cycles through its elements endlessly. In on-the-fly
mode (chiefly, removing elements inside a for loop), prefer the
`remove_item` method.
"""
def __init__(self, data):
super().__init__(data)
def __iter__(self):
self.index = 0
while True:
if self.index == len(self.data):
self.index = 0
yield self.data[self.index]
self.index += 1
def remove_item(self, item):
"""
Adjusts the iteration index after removing an item in on-the-fly mode;
outside that mode, just use the list's built-in remove method.
To stay consistent with remove, no exception is caught here.
"""
self.data.remove(item)
self.index -= 1
def last_out_game(data, number):
test = CycleList(data)
count = 1
for i in test:
logger.debug('testing %s', i)
if len(test.data) <= 1:
break
if count == number:
try:
test.remove_item(i)
logger.debug('removing %s', i)
except ValueError:
pass
count = 0
count += 1
return test.data[0]
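# Hedged usage sketch (not in the original file): this is a Josephus-style
# count-off; eliminating every third element of [1, 2, 3, 4, 5] leaves 4.
# last_out_game([1, 2, 3, 4, 5], 3)  # -> 4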
|