code stringlengths 3 1.05M | repo_name stringlengths 5 104 | path stringlengths 4 251 | language stringclasses 1 value | license stringclasses 15 values | size int64 3 1.05M |
|---|---|---|---|---|---|
"""
This module offers one templatetag called ``include_snippet``.
``include_snippet`` acts like an ``{% include %}``, that loads a template
and renders it with the current context, but the template content
comes from database.
It accepts 2 parameters:
slug
The slug/key of the text (for example 'comment_list'). There
are two ways you can pass the slug to the templatetag: (1) by
its name or (2) as a variable.
If you want to pass it by name, you have to use quotes on it.
Otherwise just use the variable name.
cache_time
The number of seconds that text should get cached after it has
been fetched from the database.
    This field is optional and defaults to no caching.
Syntax::
{% include_snippet [name] ([cache]) %}
Example usage::
{% load snippets %}
...
{% include_snippet "comment_list" %}
{% include_snippet name_in_variable 3600 %}
.. note::
If you use a snippet that doesn't exist, ``include_snippet`` will insert
the value of the SNIPPET_STRING_IF_INVALID setting, which is '' (the empty
string) by default.
"""
from django import template
from django.conf import settings
from django.db import models
from django.core.cache import cache

register = template.Library()

# NOTE(review): models.get_model was removed in Django 1.9; modern code would
# use django.apps.apps.get_model -- confirm the Django version this targets.
Snippet = models.get_model('snippets', 'snippet')

# Namespace prefix so cached snippets do not collide with other cache entries.
CACHE_PREFIX = "snippet_"
class SnippetNode(template.Node):
    """Template node that renders a database-stored snippet.

    The snippet body is itself treated as a Django template and rendered
    with the current context.  Fetched snippets may be cached under
    ``CACHE_PREFIX + slug`` for ``cache_time`` seconds.
    """

    def __init__(self, slug, is_variable, cache_time=0):
        self.slug = slug
        self.is_variable = is_variable
        self.cache_time = cache_time

    def render(self, context):
        # Resolve the slug from the context, or use the literal value.
        if self.is_variable:
            slug = template.Variable(self.slug).resolve(context)
        else:
            slug = self.slug
        key = CACHE_PREFIX + slug
        snippet = cache.get(key)
        if snippet is None:
            try:
                snippet = Snippet.objects.get(slug=slug)
            except Snippet.DoesNotExist:
                # Unknown snippet: emit the configured placeholder string.
                return getattr(settings, 'SNIPPET_STRING_IF_INVALID', '')
            cache.set(key, snippet, int(self.cache_time))
        return template.Template(snippet.content).render(context)
class BasicSnippetWrapper(object):
    """Compiler callable for the ``include_snippet`` template tag.

    Parses the tag token into a slug (a quoted literal or a context
    variable name) plus an optional cache time, and builds the
    corresponding SnippetNode.
    """

    def prepare(self, parser, token):
        """Parse *token*, storing slug/is_variable/cache_time on self.

        Raises template.TemplateSyntaxError if the tag does not have
        exactly one or two arguments after the tag name.
        """
        tokens = token.split_contents()
        self.is_variable = False
        self.slug = None
        if len(tokens) < 2 or len(tokens) > 3:
            # Bug fix: the original used the Python-2-only
            # "raise Exception, msg" statement form; the call form below
            # is equivalent and valid on both Python 2 and 3.
            raise template.TemplateSyntaxError(
                "%r tag should have either 2 or 3 arguments" % (tokens[0],))
        if len(tokens) == 2:
            tag_name, slug = tokens
            self.cache_time = 0
        if len(tokens) == 3:
            tag_name, slug, self.cache_time = tokens
        # Check to see if the slug is properly double/single quoted;
        # an unquoted slug is treated as a template variable name.
        if not (slug[0] == slug[-1] and slug[0] in ('"', "'")):
            self.is_variable = True
            self.slug = slug
        else:
            self.slug = slug[1:-1]

    def __call__(self, parser, token):
        self.prepare(parser, token)
        return SnippetNode(self.slug, self.is_variable, self.cache_time)
# Register a single shared wrapper instance as the ``include_snippet`` tag.
do_include_snippet = BasicSnippetWrapper()
register.tag('include_snippet', do_include_snippet)
| semente/django-snippets | snippets/templatetags/snippets.py | Python | bsd-3-clause | 3,246 |
# Copyright 2020 Makani Technologies LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""C code generation backend."""
import re
import textwrap
from makani.lib.python import string_util
from makani.lib.python.pack2 import backend
class BackendC(backend.Backend):
  """C code generation backend.

  Accumulates generated C text for pack2 type definitions (enums,
  structs, params) in self.header_string / self.source_string and
  exposes the result through GetSourceString('header'/'source').
  """

  # Maps pack2 primitive type names to the corresponding C types.
  _primary_type_map = {
      'uint8': 'uint8_t',
      'int8': 'int8_t',
      'uint16': 'uint16_t',
      'int16': 'int16_t',
      'uint32': 'uint32_t',
      'int32': 'int32_t',
      'float32': 'float',
      'date': 'uint32_t',
  }

  # Maps a param kind to the linker-script section holding its data.
  _param_section_map = {
      'Config': '&ldscript_config_param_data',
      'Calib': '&ldscript_calib_param_data',
      'Serial': '&ldscript_serial_param_data',
  }

  def __init__(self, header_path):
    # Bug fix: the original called super(self.__class__, self), which
    # recurses infinitely if this class is ever subclassed; name the
    # class explicitly instead.
    super(BackendC, self).__init__()
    self.header_path = header_path
    self._StartSource()
    self._StartHeader()

  def _StartSource(self):
    """Begins the .c source file with its auto-generated preamble."""
    self.source_string = textwrap.dedent("""\
        // This file is automatically generated. Do not edit.
        #include "{header_path}"
        #include <stdint.h>
        #ifdef PACK2_FLASH_POINTERS
        #include "avionics/firmware/startup/ldscript.h"
        #endif
        """).format(header_path=self.header_path)

  def _FinalizeSource(self):
    # Nothing to append; the source file needs no trailer.
    pass

  def _StartHeader(self):
    """Begins the .h file with its include guard and preamble."""
    self.header_string = textwrap.dedent("""\
        // This file is automatically generated. Do not edit.
        #ifndef {guard}
        #define {guard}
        #include <stdint.h>
        """).format(guard=self._HeaderGuard())

  def _FinalizeHeader(self):
    """Closes the header's include guard."""
    self.header_string += textwrap.dedent("""\
        #endif // {guard}
        """).format(guard=self._HeaderGuard())

  def _HeaderGuard(self):
    # Derive the guard macro from the header path, e.g. a/b.h -> A_B_H_.
    return re.sub('[/.]', '_', self.header_path).upper() + '_'

  def AddInclude(self, path):
    """Adds an #include for another generated header."""
    self.header_string += '#include "%s.h"\n' % path

  def AddBitfield(self, bitfield):
    raise NotImplementedError('Bitfields not implemented for %s'
                              % self.__class__.__name__)

  def AddEnum(self, enum):
    """Emits a packed C enum typedef for *enum*."""
    header = 'typedef enum {\n'
    values = sorted(enum.body.value_map.keys())
    # With no negative values the C compiler may pick an unsigned
    # underlying type; add a -1 member to force a signed type.
    needs_sign_force = not [v for v in values if v < 0]
    if needs_sign_force:
      header += ' k{name}ForceSigned = -1,\n'.format(name=enum.name)
    for value in values:
      # C enum value names are of the form kEnumNameValueName.
      value_name = enum.body.value_map[value]
      name = 'k' + enum.name + value_name
      header += ' {name} = {value},\n'.format(name=name, value=value)
    # Largest positive value representable in the declared width
    # (width presumed to be in bytes; one bit reserved for the sign).
    bits = enum.width * 8 - 1
    max_value = (1 << bits) - 1
    if values[0] == 0 and values[-1] + 1 == len(values):
      # Contiguous enums starting at zero get a kNum<Plural> member.
      plural_name = string_util.GetPlural(enum.name)
      header += ' kNum{name} = {num},\n'.format(name=plural_name,
                                                num=len(values))
    if max_value not in values:
      # Pin the enum's storage size by including its maximum value.
      header += ' k{name}ForceSize = 0x{val:x},\n'.format(name=enum.name,
                                                          val=max_value)
    header += textwrap.dedent("""\
        }} __attribute__((packed)) {name};
        """).format(name=enum.name)
    self.header_string += header

  def AddStruct(self, struct):
    """Emits a C struct typedef for *struct*."""
    self.header_string += 'typedef struct {\n'
    for field in struct.body.fields:
      type_name = field.type_obj.name
      if type_name in self._primary_type_map:
        type_name = self._primary_type_map[type_name]
      if type_name == 'string':
        # Strings become fixed-size char arrays.
        self.header_string += ' char {name}[{size}];\n'.format(
            name=field.name, size=field.type_obj.width)
      elif field.extent == 1:
        self.header_string += ' {type_name} {name};\n'.format(
            type_name=type_name, name=field.name)
      else:
        self.header_string += ' {type_name} {name}[{extent}];\n'.format(
            type_name=type_name, name=field.name, extent=field.extent)
    self.header_string += '}} {type_name};\n\n'.format(type_name=struct.name)

  def AddScaled(self, bitfield):
    raise NotImplementedError('Scaleds not implemented for %s'
                              % self.__class__.__name__)

  def AddHeader(self, header):
    # Headers are emitted exactly like structs.
    self.AddStruct(header)

  def AddParam(self, param):
    """Emits the struct, CRC accessor, and flash pointer for a param."""
    match = re.search(r'(Config|Calib|Serial)Params(V[0-9]+)?$', param.name)
    if not match:
      raise ValueError("Can't determine params type from name %s" % param.name)
    param_type = match.group(1)
    self.AddStruct(param)
    self.header_string += textwrap.dedent("""\
        #ifdef PACK2_FLASH_POINTERS
        extern const {type_name} *k{type_name};
        #endif
        static const uint32_t k{type_name}Crc = 0x{crc:08x};
        static inline uint32_t {type_name}GetTypeVersion(void) {{
          return k{type_name}Crc;
        }}
        """).format(type_name=param.name,
                    crc=param.Crc32())
    # Make sure we don't redefine the base param symbols.
    # Bug fix: the original list contained the misspelling 'CalibPrams',
    # so the CalibParams base symbol was wrongly emitted here.
    if param.name not in ['ConfigParams', 'CalibParams', 'SerialParams']:
      self.source_string += textwrap.dedent("""\
          #ifdef PACK2_FLASH_POINTERS
          const {type_name} *k{type_name} = {section};
          #endif
          """).format(type_name=param.name,
                      section=self._param_section_map[param_type])

  def Finalize(self):
    """Finishes both output strings; call once after all Add* calls."""
    self._FinalizeSource()
    self._FinalizeHeader()

  def GetSourceString(self, name):
    """Returns the generated text for 'header' or 'source'."""
    if name == 'header':
      return self.header_string
    elif name == 'source':
      return self.source_string
    else:
      raise ValueError('Unknown source %s.' % name)
| google/makani | lib/python/pack2/backend_c.py | Python | apache-2.0 | 6,023 |
from django.conf.urls import url
from rest_framework import routers
from rest_framework.response import Response
from rest_framework.reverse import reverse
from rest_framework.urlpatterns import format_suffix_patterns
from rest_framework.views import APIView
from api import views as api_views
class MultiLookupRouter(routers.DefaultRouter):
    """DRF router supporting viewsets keyed by more than one lookup field.

    A viewset may declare ``lookup_fields`` (a sequence of field names);
    this router then generates URL patterns with one named regex group per
    lookup field (e.g. ``/owner/pk``) in addition to the standard
    single-lookup routes provided by DefaultRouter.
    """

    def __init__(self, *args, **kwargs):
        super(MultiLookupRouter, self).__init__(*args, **kwargs)
        # Route templates used only for viewsets that define `lookup_fields`.
        self.lookups_routes = []
        # Detail route: the full chain of lookup fields appears in the URL.
        self.lookups_routes.append(routers.Route(
            url=r'^{prefix}/{lookups}{trailing_slash}$',
            mapping={
                'get': 'retrieve',
                'put': 'update',
                'patch': 'partial_update',
                'delete': 'destroy'
            },
            name='{basename}-detail',
            initkwargs={'suffix': 'Instance'}
        )
        )
        # List route addressed by the single default lookup.
        self.lookups_routes.append(routers.Route(
            url=r'^{prefix}/{lookup}{trailing_slash}$',
            mapping={
                'get': 'list',
                'post': 'create'
            },
            name='{basename}-list',
            initkwargs={'suffix': 'List'}
        ))
        # List route addressed by a partial chain of lookup fields.
        self.lookups_routes.append(routers.Route(
            url=r'^{prefix}/{lookups}{trailing_slash}$',
            mapping={
                'get': 'list',
                'post': 'create'
            },
            name='{basename}-list',
            initkwargs={'suffix': 'List'}
        ))
        # Dynamically generated routes.
        # Generated using @action or @link decorators on methods of the viewset
        self.lookups_routes.append(routers.Route(
            url=[
                r'^{prefix}/{lookups}/{methodname}{trailing_slash}$',
                r'^{prefix}/{lookups}/{methodname}/{extra}{trailing_slash}$'],
            mapping={
                '{httpmethod}': '{methodname}',
            },
            name='{basename}-{methodnamehyphen}',
            initkwargs={}
        ))

    def get_extra_lookup_regexes(self, route):
        # Build the '/'-joined named-group regex for any extra_lookup_fields
        # declared in the route's initkwargs (fills the {extra} placeholder).
        ret = []
        base_regex = '(?P<{lookup_field}>[^/]+)'
        if 'extra_lookup_fields' in route.initkwargs:
            for lookup_field in route.initkwargs['extra_lookup_fields']:
                ret.append(base_regex.format(lookup_field=lookup_field))
        return '/'.join(ret)

    def get_lookup_regexes(self, viewset):
        # Return one regex-group chain per prefix of lookup_fields beyond the
        # first, e.g. [[g0, g1], [g0, g1, g2]].  The final group in each
        # chain also excludes '.' so format suffixes can still match.
        ret = []
        lookup_fields = getattr(viewset, 'lookup_fields', None)
        if lookup_fields:
            for i in range(1, len(lookup_fields)):
                tmp = []
                for lookup_field in lookup_fields[:i + 1]:
                    if lookup_field == lookup_fields[i]:
                        base_regex = '(?P<{lookup_field}>[^/.]+)'
                    else:
                        base_regex = '(?P<{lookup_field}>[^/]+)'
                    tmp.append(base_regex.format(lookup_field=lookup_field))
                ret.append(tmp)
        return ret

    def get_lookup_routes(self, viewset):
        # Combine the default list route with the multi-lookup templates,
        # expanding the dynamic template once per @action/@link method.
        ret = [self.routes[0]]
        # Determine any `@action` or `@link` decorated methods on the viewset
        dynamic_routes = []
        for methodname in dir(viewset):
            attr = getattr(viewset, methodname)
            httpmethods = getattr(attr, 'bind_to_methods', None)
            if httpmethods:
                httpmethods = [method.lower() for method in httpmethods]
                dynamic_routes.append((httpmethods, methodname))
        for route in self.lookups_routes:
            if route.mapping == {'{httpmethod}': '{methodname}'}:
                # Dynamic routes (@link or @action decorator)
                for httpmethods, methodname in dynamic_routes:
                    initkwargs = route.initkwargs.copy()
                    initkwargs.update(getattr(viewset, methodname).kwargs)
                    mapping = dict(
                        (httpmethod, methodname) for httpmethod in httpmethods)
                    name = routers.replace_methodname(route.name, methodname)
                    if 'extra_lookup_fields' in initkwargs:
                        # route.url is a pair here; index 1 is the variant
                        # carrying the {extra} placeholder.
                        uri = route.url[1]
                        uri = routers.replace_methodname(uri, methodname)
                        ret.append(routers.Route(
                            url=uri, mapping=mapping, name='%s-extra' % name,
                            initkwargs=initkwargs,
                        ))
                    uri = routers.replace_methodname(route.url[0], methodname)
                    ret.append(routers.Route(
                        url=uri, mapping=mapping, name=name,
                        initkwargs=initkwargs,
                    ))
            else:
                # Standard route
                ret.append(route)
        return ret

    def get_routes(self, viewset):
        # Viewsets declaring `lookup_fields` get the multi-lookup route set;
        # everything else falls back to stock DefaultRouter behavior.
        ret = []
        lookup_fields = getattr(viewset, 'lookup_fields', None)
        if lookup_fields:
            ret = self.get_lookup_routes(viewset)
        else:
            ret = super(MultiLookupRouter, self).get_routes(viewset)
        return ret

    def get_api_root_view(self):
        """
        Return a view to use as the API root.
        """
        # Map each registered prefix to its list view name so the root
        # view can reverse them per-request.
        api_root_dict = {}
        list_name = self.routes[0].name
        for prefix, viewset, basename in self.registry:
            api_root_dict[prefix] = list_name.format(basename=basename)

        class FormhubApi(APIView):
            """
            ## JSON Rest API
            Formhub provides the following JSON api endpoints:
            * [/api/v1/users](/api/v1/users) - List, Retrieve username, first
            and last name
            * [/api/v1/profiles](/api/v1/profiles) - List, Create,
            Update, user information
            * [/api/v1/orgs](/api/v1/orgs) - List, Retrieve, Create,
            Update organization and organization info
            * [/api/v1/projects](/api/v1/projects) - List, Retrieve, Create,
            Update organization projects, forms
            * [/api/v1/teams](/api/v1/teams) - List, Retrieve, Create,
            Update teams
            * [/api/v1/forms](/api/v1/forms) - List, Retrieve
            xlsforms information
            * [/api/v1/data](/api/v1/data) - List, Retrieve submission data
            ## Status Codes
            * **200** - Successful [`GET`, `PATCH`, `PUT`]
            * **201** - Resource successfully created [`POST`]
            * **204** - Resource successfully deleted [`DELETE`]
            * **403** - Permission denied to resource
            * **404** - Resource was not found
            ## Authentication
            Formhub JSON API endpoints support both Basic authentication
            and API Token Authentication through the `Authorization` header.
            ### Basic Authentication
            Example using curl:
                curl -X GET https://formhub.org/api/v1 -u username:password
            ### Token Authentication
            Example using curl:
                curl -X GET https://formhub.org/api/v1 -H "Authorization: Token TOKEN_KEY"
            ### Formhub Tagging API
            * [Filter form list by
            * tags.](/api/v1/forms#get-list-of-forms-with-specific-tags)
            * [List Tags for a specific form.](/api/v1/forms#get-list-of-tags-for-a-specific-form)
            * [Tag Forms.](/api/v1/forms#tag-forms)
            * [Delete a specific tag.](/api/v1/forms#delete-a-specific-tag)
            * [List form data by tag.](/api/v1/data#query-submitted-data-of-a-specific-form-using-tags)
            * [Tag a specific submission](/api/v1/data#tag-a-submission-data-point)
            ## Using Oauth2 with formhub API
            You can learn more about oauth2 from [http://tools.ietf.org/html/rfc6749](http://tools.ietf.org/html/rfc6749).
            ### 1. Register your client application with formhub - [register](/o/applications/register/)
            - `name` - name of your application
            - `client_type` - Client Type: select confidential
            - `authorization_grant_type` - Authorization grant type: Authorization code
            - `redirect_uri` - Redirect urls: redirection endpoint
            Keep note of the `client_id` and the `client_secret`, it is required when
            requesting for an `access_token`.
            ### 2. Authorize client application.
            The authorization url is of the form:
            `GET` /o/authorize?client_id=XXXXXX&response_type=code&state=abc
            example:
                http://localhost:8000/o/authorize?client_id=e8x4zzJJIyOikDqjPcsCJrmnU22QbpfHQo4HhRnv&response_type=code&state=xyz
            Note: Providing the url to any user will prompt for a password and
            request for read and write permission for the application whose `client_id` is specified.
            Where:
            - `client_id` - is the client application id - ensure its urlencoded
            - `response_type` - should be code
            - `state` - a random state string that you client application will get when redirection happens
            What happens:
            1. a login page is presented, the username used to login determines the account that provides access.
            2. redirection to the client application occurs, the url is of the form:
                REDIRECT_URI/?state=abc&code=YYYYYYYYY
            example redirect uri
                http://localhost:30000/?state=xyz&code=SWWk2PN6NdCwfpqiDiPRcLmvkw2uWd
            - `code` - is the code to use to request for `access_token`
            - `state` - same state string used during authorization request
            Your client application should use the `code` to request for an access_token.
            ### 3. Request for access token.
            Request:
            `POST` /o/token
            Payload:
                grant_type=authorization_code&code=YYYYYYYYY&client_id=XXXXXX&redirect_uri=http://redirect/uri/path
            curl example:
                curl -X POST -d "grant_type=authorization_code&code=PSwrMilnJESZVFfFsyEmEukNv0sGZ8&client_id=e8x4zzJJIyOikDqjPcsCJrmnU22QbpfHQo4HhRnv&redirect_uri=http://localhost:30000" "http://localhost:8000/o/token/" --user "e8x4zzJJIyOikDqjPcsCJrmnU22QbpfHQo4HhRnv:xo7i4LNpMjH2lqHQQnBrLOVh7dZkK8qffn68dwtwd4iAq6uSwKA8d5u2YSqD3N7AFivV3cGAkJyQiAneu6BgCdG9YjEdYi0eA0O9KhLtjtPIYOjVCTGeCgbfNQaeuxe1"
            Response:
                {
                    "access_token": "Q6dJBs9Vkf7a2lVI7NKLT8F7c6DfLD",
                    "token_type": "Bearer", "expires_in": 36000,
                    "refresh_token": "53yF3uz79K1fif2TPtNBUFJSFhgnpE",
                    "scope": "read write groups"
                }
            Where:
            - `access_token` - access token - expires
            - `refresh_token` - token to use to request a new `access_token` in case it has expired.
            Now that you have an `access_token` you can make API calls.
            ### 4. Accessing the Formhub API using the `access_token`.
            Example using curl:
                curl -X GET https://formhub.org/api/v1 -H "Authorization: Bearer ACCESS_TOKEN"
            """
            _ignore_model_permissions = True

            def get(self, request, format=None):
                # Resolve each registered list view name to a full URL
                # for the current request/format.
                ret = {}
                for key, url_name in api_root_dict.items():
                    ret[key] = reverse(
                        url_name, request=request, format=format)
                return Response(ret)
        return FormhubApi.as_view()

    def get_urls(self):
        # Build the final urlpatterns: the optional root view, then every
        # route of every registered viewset, expanded once per lookup
        # chain produced by get_lookup_regexes().
        ret = []
        if self.include_root_view:
            root_url = url(r'^$', self.get_api_root_view(),
                           name=self.root_view_name)
            ret.append(root_url)
        for prefix, viewset, basename in self.registry:
            lookup = self.get_lookup_regex(viewset)
            lookup_list = self.get_lookup_regexes(viewset)
            if lookup_list:
                # lookup = lookups[0]
                lookup_list = [u'/'.join(k) for k in lookup_list]
            else:
                # No multi-field lookups: expand each route exactly once.
                lookup_list = [u'']
            routes = self.get_routes(viewset)
            for route in routes:
                mapping = self.get_method_map(viewset, route.mapping)
                if not mapping:
                    continue
                for lookups in lookup_list:
                    regex = route.url.format(
                        prefix=prefix,
                        lookup=lookup,
                        lookups=lookups,
                        trailing_slash=self.trailing_slash,
                        extra=self.get_extra_lookup_regexes(route)
                    )
                    view = viewset.as_view(mapping, **route.initkwargs)
                    name = route.name.format(basename=basename)
                    ret.append(url(regex, view, name=name))
        if self.include_format_suffixes:
            ret = format_suffix_patterns(ret)
        return ret
# Instantiate the router (URLs without trailing slashes) and register all
# API viewsets against it.
router = MultiLookupRouter(trailing_slash=False)
router.register(r'users', api_views.UserViewSet)
router.register(r'profiles', api_views.UserProfileViewSet)
router.register(r'orgs', api_views.OrgProfileViewSet)
router.register(r'forms', api_views.XFormViewSet)
router.register(r'projects', api_views.ProjectViewSet)
router.register(r'teams', api_views.TeamViewSet)
# DataViewSet presumably has no queryset to derive a basename from, so the
# basename is supplied explicitly -- confirm against api.views.
router.register(r'data', api_views.DataViewSet, base_name='data')
| eHealthAfrica/formhub | api/urls.py | Python | bsd-2-clause | 13,371 |
#!/usr/bin/env python
'''
===============================================================================
Script: xyz2aac.py
Author: Sebastian Weigand
Email: sab@sab.systems
Current: June 2018
Copyright: 2012-2018, Sebastian Weigand
License: MIT
Description: A script which converts various audio files to MPEG4/AAC files and
copies over song metadata, utilizing Apple's CoreAudio
framework for better AAC quality and speed.
Version: 2.3.1 (Multiple Input Processing)
Requirements:
OS: Mac OS X, v10.5+ [afconvert]
Platform: Python 2.6+ [multiprocessing]
Binaries: flac [decoding]
Python Lib: mutagen [metadata]
===============================================================================
'''
import logging
import os
from shutil import rmtree
# Module-wide logger with a timestamped, column-aligned console format.
logger = logging.getLogger('2aac')
console = logging.StreamHandler()
formatter = logging.Formatter(
    '[%(asctime)s] | %(levelname)-8s | %(funcName)20s() | %(message)s')
console.setFormatter(formatter)
logger.addHandler(console)
logger.setLevel(logging.INFO)
# =============================================================================
# Sanity Checking:
# =============================================================================
# NOTE(review): "except ImportError, e" is Python-2-only syntax; this script
# will not parse under Python 3.
try:
    import mutagen
    import argparse
    from subprocess import call, Popen, PIPE
    from multiprocessing import Pool
    from scandir import walk
except ImportError, e:
    exit('Error: Unable to import requisite modules: %s' % e)
def is_progam_valid(program):
    """Return True if *program* is an executable reachable via PATH."""
    search_dirs = os.environ["PATH"].split(os.pathsep)
    return any(
        os.access(os.path.join(d, program), os.X_OK) for d in search_dirs)
# Abort early unless the required external binaries are on the PATH.
for program in ['flac', 'afconvert']:
    if not is_progam_valid(program):
        exit('Error: Unable to execute/find "%s" from your PATH.' % program)
# Probe afconvert's help output (written to stderr) to discover which AAC
# data formats this platform supports.
afconvert_help_formats = Popen(
    ['afconvert', '-hf'], stderr=PIPE).communicate()[1]
# NOTE(review): the comprehension variable shadows the builtin `format`.
data_formats = [
    format for format in ['aac', 'aace', 'aacf', 'aach', 'aacl', 'aacp']
    if format in afconvert_help_formats
]
def fix_path(path):
    """Expand a leading ~ and resolve symlinks to an absolute path."""
    expanded = os.path.expanduser(path)
    return os.path.realpath(expanded)
# =============================================================================
# Argument Parsing:
# =============================================================================
parser = argparse.ArgumentParser(
    description=
    'Converts FLAC to MPEG4/AAC via CoreAudio and transfers metadata using Mutagen.',
    epilog=
    'Note: Mac OS X v10.5+ is required for HE AAC (aach), and 10.7 is required for HE AAC v2 (aacp).'
)
parser.add_argument(
    'location',
    nargs='?',
    default='.',
    type=str,
    help='the location to search for media files [.]')
parser.add_argument(
    '-q',
    '--quality',
    type=int,
    default=75,
    help='VBR quality, in percent [75]')
parser.add_argument(
    '-b',
    '--bitrate',
    type=int,
    default=128,
    help='average bitrate, in KB/s [128]')
parser.add_argument(
    '-l',
    '--lossless',
    action="store_true",
    default=False,
    help='encode in Apple Lossless (ALAC), overrides codec options [no]')
parser.add_argument(
    '-c',
    '--codec',
    choices=data_formats,
    default='aac',
    help='codec to use, if available on your platform [aac]')
parser.add_argument(
    '--debug', action="store_true", default=False, help='enable debug logging')
args = parser.parse_args()
if args.debug:
    logger.setLevel(logging.DEBUG)
    logger.debug('Debug mode enabled.')
# Fix up the arguments:
bitrate = args.bitrate * 1000            # KB/s -> B/s for afconvert
quality = str(int(args.quality / 100.0 * 127))  # percent -> 0-127 scale
location = fix_path(args.location)
codec = args.codec
# Make sure we've got good paths:
if os.path.isdir(location):
    # Bug fix: the original used `os.W_OK and os.R_OK`, which evaluates to
    # just os.R_OK, so write access was never actually checked.  Use a
    # bitwise OR to test both, matching the error message below.
    if not os.access(location, os.W_OK | os.R_OK):
        exit('Cannot read/write: %s' % location)
else:
    exit('Requested location does not exist: %s' % location)
if args.lossless:
    logger.debug(
        'We will be transcoding into Apple Lossless, matching the quality and sample rate.'
    )
else:
    logger.debug(
        'The following variables have been translated: "bitrate": "%s"; "quality": "%s"; "location": "%s".'
        % (bitrate, quality, location))
# =============================================================================
# Transcoding:
# =============================================================================
# First create our output directory:
output_location = os.path.join(location, 'converted_audio')
tmp_location = '/tmp/intermediate_audio/'
logger.debug('Will place converted files into: "%s".' % output_location)
for loc in (output_location, tmp_location):
    if not os.path.exists(loc):
        os.mkdir(loc)
        logger.debug('Created "%s".' % loc)
def get_audio_files(location):
    """Yield paths of supported files (.m4a/.mp3/.flac) under *location*."""
    for path, dirs, files in walk(location):
        for f in files:
            # Skip hidden files (e.g. macOS "._" AppleDouble companions).
            if (f.endswith('.m4a') or f.endswith('.mp3')
                    or f.endswith('.flac')) and not f.startswith('.'):
                print 'Got audio file:', f
                yield os.path.join(path, f)
def convert_flac_to_wav(flac_file, wav_file):
    """Decode *flac_file* into *wav_file* using the flac binary."""
    logger.debug('Converting FLAC file to intermediate WAV file: "%s".' %
                 os.path.basename(flac_file))
    # -s silent, -f overwrite existing output, -d decode
    decode_cmd = ['flac', '-s', '-f', '-d', flac_file, '-o', wav_file]
    call(decode_cmd)
def convert_wav_to_aac(wav_file, m4a_file, lossless=args.lossless):
    """Encode *wav_file* into *m4a_file* via afconvert.

    Bug fix: the body previously tested the module-global args.lossless
    and ignored the *lossless* parameter entirely; it now honors the
    parameter (whose default preserves the old behavior for existing
    callers).
    """
    wav_file_name = os.path.basename(wav_file)
    if lossless:
        # TODO: Fix multi-channel audio issues, enable proper conversion:
        # For 5.1 channel FLAC, cannot use soundcheck, must specify chanel layout:
        # call(['afconvert', '-f', 'm4af', '-d', 'alac', '-l', 'MPEG_5_1_A', wav_file, m4a_file])
        logger.debug('Converting "%s" to Apple-lossless M4A.' % wav_file_name)
        call([
            'afconvert', '-f', 'm4af', '-d', 'alac', '--soundcheck-generate',
            wav_file, m4a_file
        ])
    else:
        logger.debug('Converting "%s" to a "%s"-B/s, "%s"%% quality "%s"-M4A.'
                     % (wav_file_name, bitrate, quality, codec))
        call([
            'afconvert', '-f', 'm4af', '-d', codec, '-b',
            str(bitrate), '--src-complexity', 'bats', '-u', 'vbrq', quality,
            '--soundcheck-generate', wav_file, m4a_file
        ])
def convert_audio_to_aac(audio_file, m4a_file):
    """Transcode an MP3/M4A file directly to an AAC M4A via afconvert."""
    logger.debug('Converting "%s" to a "%s"-B/s, "%s"%% quality "%s"-M4A.' %
                 (os.path.basename(audio_file), bitrate, quality, codec))
    encode_cmd = [
        'afconvert', '-f', 'm4af', '-d', codec, '-b',
        str(bitrate), '--soundcheck-generate', audio_file, m4a_file
    ]
    call(encode_cmd)
def transfer_metadata(source_file, target_file):
    """Copy tag metadata and cover art from *source_file* onto *target_file*.

    *target_file* must be an MP4/M4A file.  Plain tags are copied through
    mutagen's "easy" interface; cover art is then transferred using the
    source container's native representation (FLAC picture blocks, ID3
    APIC frames, or MP4 covr atoms).
    """
    target_file_name = os.path.basename(target_file)
    # Open the file with "easy tags" to standardize tagging:
    metadata = mutagen.File(source_file, easy=True)
    logger.debug('Read metadata from: "%s".' % os.path.basename(source_file))
    # Whitelist of tag keys worth carrying over.  Bug fix: the original
    # list was missing the comma after 'APIC:', which silently
    # concatenated it with 'comment' into the single bogus key
    # 'APIC:comment' -- so comment tags were stripped instead of kept.
    valid_keys = [
        'album', 'albumartist', 'albumartistsort', 'albumsort', 'artist',
        'artistsort', 'APIC:',
        'comment', 'composersort', 'covr', 'copyright', 'date', 'description',
        'discnumber', 'genre', 'grouping', 'musicbrainz_albumartistid',
        'musicbrainz_albumid', 'musicbrainz_albumstatus',
        'musicbrainz_albumtype', 'musicbrainz_artistid', 'musicbrainz_trackid',
        'pictures', 'title', 'titlesort', 'tracknumber'
    ]
    # Drop everything not whitelisted.  (Python 2's keys() returns a list,
    # so deleting while looping over it is safe.)
    for key in metadata.keys():
        if key not in valid_keys:
            del metadata[key]
    m4a_data = mutagen.File(target_file, easy=True)
    m4a_data.update(metadata)
    m4a_data.save()
    logger.debug('Saved initial metadata for "%s".' % target_file_name)
    # Open the file again with extended metadata for album art:
    additional_metadata = mutagen.File(source_file, easy=False)
    m4a_data = mutagen.File(target_file, easy=False)
    if type(additional_metadata) == mutagen.flac.FLAC:
        logger.debug('Examining FLAC metadata for cover art...')
        if hasattr(additional_metadata, 'pictures'):
            logger.debug(
                'Converting FLAC cover art for: "%s"' % target_file_name)
            # Re-wrap each embedded picture as an MP4 cover atom, keeping
            # JPEG/PNG typing based on the source MIME type.
            m4a_data['covr'] = [
                mutagen.mp4.MP4Cover(pic.data,
                                     mutagen.mp4.MP4Cover.FORMAT_JPEG)
                if 'jpeg' in pic.mime else mutagen.mp4.MP4Cover(
                    pic.data, mutagen.mp4.MP4Cover.FORMAT_PNG)
                for pic in additional_metadata.pictures
            ]
    elif type(additional_metadata) == mutagen.mp3.MP3:
        logger.debug('Examining MP3/ID3 metadata for cover art...')
        if 'APIC:' in additional_metadata:
            logger.debug(
                'Converting MP3/ID3 cover art for: "%s"' % target_file_name)
            m4a_data['covr'] = [
                mutagen.mp4.MP4Cover(
                    additional_metadata['APIC:'].data,
                    mutagen.mp4.MP4Cover.FORMAT_JPEG
                    if 'jpeg' in additional_metadata['APIC:'].mime else
                    mutagen.mp4.MP4Cover.FORMAT_PNG)
            ]
    elif type(additional_metadata) == mutagen.mp4.MP4:
        logger.debug('Examining MP4 metadata for cover art...')
        if 'covr' in additional_metadata:
            logger.debug(
                'Converting MP4 cover art for: "%s"' % target_file_name)
            m4a_data['covr'] = additional_metadata['covr']
    m4a_data.save()
    logger.debug('Finalized metadata for: "%s"' % target_file_name)
def process_audio_file(audio_file):
    """Convert one audio file to M4A in output_location and copy its tags.

    FLAC input is first decoded to a temporary WAV (afconvert cannot read
    FLAC); MP3/M4A input is transcoded directly.
    """
    logger.debug('Began processing: "%s"' % os.path.basename(audio_file))
    if audio_file.endswith('.flac'):
        # [:-5] strips the 5-character '.flac' suffix.
        m4a_file = os.path.join(output_location,
                                os.path.basename(audio_file[:-5])) + '.m4a'
        wav_file = os.path.join(tmp_location, os.path.basename(
            audio_file[:-5])) + '.wav'
        convert_flac_to_wav(audio_file, wav_file)
        convert_wav_to_aac(wav_file, m4a_file)
        os.remove(wav_file)
        logger.debug('Removed intermediate WAV file: "%s".' %
                     os.path.basename(wav_file))
    else:
        # [:-4] strips the 4-character '.mp3'/'.m4a' suffix.
        m4a_file = os.path.join(output_location,
                                os.path.basename(audio_file[:-4])) + '.m4a'
        convert_audio_to_aac(audio_file, m4a_file)
    transfer_metadata(audio_file, m4a_file)
    print 'Finished file:', m4a_file
# =============================================================================
# Main:
# =============================================================================
print 'Processing: %s...' % location
try:
    # Fan files out to a worker pool (one worker per CPU by default).
    pool = Pool()
    p = pool.map_async(process_audio_file, get_audio_files(location))
    # Large timeout presumably keeps the main thread interruptible so
    # Ctrl-C reaches us instead of being swallowed -- TODO confirm.
    p.get(0xFFFF)
except KeyboardInterrupt:
    exit('Aborting.')
# Clean-up:
rmtree(tmp_location, ignore_errors=True)
logger.debug('Recursively removed temporary directory: "%s".' % tmp_location)
print 'Done.'
# EOF
| swdd/multimedia | xyz2aac.py | Python | mit | 10,963 |
#!/usr/bin/env python
"""List every file owned by every installed RPM as 'path<TAB>package'."""
import rpm
ts = rpm.TransactionSet()
# dbMatch() with no criteria iterates over all installed package headers.
mi = ts.dbMatch()
for h in mi:
    # fiFromHeader() yields per-file tuples; f[0] is the file path.
    fi = h.fiFromHeader()
    for f in fi:
        print '%s\t%s' % (f[0], h['NAME'])
| redhat-cip/pitstop | pitstop/rpmqal.py | Python | apache-2.0 | 178 |
# Copyright 2015 The Tornado Authors
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Basic routing implementation.
Tornado routes HTTP requests to appropriate handlers using `Router` class implementations.
`Router` interface extends `~.httputil.HTTPServerConnectionDelegate` to provide additional
routing capabilities. This also means that any `Router` implementation can be used directly
as a ``request_callback`` for `~.httpserver.HTTPServer` constructor.
`Router` subclass must implement a ``find_handler`` method to provide a suitable
`~.httputil.HTTPMessageDelegate` instance to handle the request:
.. code-block:: python
class CustomRouter(Router):
def find_handler(self, request, **kwargs):
# some routing logic providing a suitable HTTPMessageDelegate instance
return MessageDelegate(request.connection)
class MessageDelegate(HTTPMessageDelegate):
def __init__(self, connection):
self.connection = connection
def finish(self):
self.connection.write_headers(
ResponseStartLine("HTTP/1.1", 200, "OK"),
HTTPHeaders({"Content-Length": "2"}),
b"OK")
self.connection.finish()
router = CustomRouter()
server = HTTPServer(router)
The main responsibility of `Router` implementation is to provide a mapping from a request
to `~.httputil.HTTPMessageDelegate` instance that will handle this request. In the example above
we can see that routing is possible even without instantiating an `~.web.Application`.
For routing to `~.web.RequestHandler` implementations we need an `~.web.Application` instance.
`~.web.Application.get_handler_delegate` provides a convenient way to create
`~.httputil.HTTPMessageDelegate` for a given request and `~.web.RequestHandler`.
Here is a simple example of how we can we route to `~.web.RequestHandler` subclasses
by HTTP method:
.. code-block:: python
resources = {}
class GetResource(RequestHandler):
def get(self, path):
if path not in resources:
raise HTTPError(404)
self.finish(resources[path])
class PostResource(RequestHandler):
def post(self, path):
resources[path] = self.request.body
class HTTPMethodRouter(Router):
def __init__(self, app):
self.app = app
def find_handler(self, request, **kwargs):
handler = GetResource if request.method == "GET" else PostResource
return self.app.get_handler_delegate(request, handler, path_args=[request.path])
router = HTTPMethodRouter(Application())
server = HTTPServer(router)
`ReversibleRouter` interface adds the ability to distinguish between the routes and
reverse them to the original urls using route's name and additional arguments.
`~.web.Application` is itself an implementation of `ReversibleRouter` class.
`RuleRouter` and `ReversibleRuleRouter` are implementations of `Router` and `ReversibleRouter`
interfaces and can be used for creating rule-based routing configurations.
Rules are instances of `Rule` class. They contain a `Matcher`, which provides the logic for
determining whether the rule is a match for a particular request and a target, which can be
one of the following.
1) An instance of `~.httputil.HTTPServerConnectionDelegate`:
.. code-block:: python
router = RuleRouter([
Rule(PathMatches("/handler"), ConnectionDelegate()),
# ... more rules
])
class ConnectionDelegate(HTTPServerConnectionDelegate):
def start_request(self, server_conn, request_conn):
return MessageDelegate(request_conn)
2) A callable accepting a single argument of `~.httputil.HTTPServerRequest` type:
.. code-block:: python
router = RuleRouter([
Rule(PathMatches("/callable"), request_callable)
])
def request_callable(request):
request.write(b"HTTP/1.1 200 OK\\r\\nContent-Length: 2\\r\\n\\r\\nOK")
request.finish()
3) Another `Router` instance:
.. code-block:: python
router = RuleRouter([
Rule(PathMatches("/router.*"), CustomRouter())
])
Of course a nested `RuleRouter` or a `~.web.Application` is allowed:
.. code-block:: python
router = RuleRouter([
Rule(HostMatches("example.com"), RuleRouter([
Rule(PathMatches("/app1/.*"), Application([(r"/app1/handler", Handler)])),
]))
])
server = HTTPServer(router)
In the example below `RuleRouter` is used to route between applications:
.. code-block:: python
app1 = Application([
(r"/app1/handler", Handler1),
# other handlers ...
])
app2 = Application([
(r"/app2/handler", Handler2),
# other handlers ...
])
router = RuleRouter([
Rule(PathMatches("/app1.*"), app1),
Rule(PathMatches("/app2.*"), app2)
])
server = HTTPServer(router)
For more information on application-level routing see docs for `~.web.Application`.
"""
from __future__ import absolute_import, division, print_function, with_statement
import re
from functools import partial
from tornado import httputil
from tornado.httpserver import _CallableAdapter
from tornado.escape import url_escape, url_unescape, utf8
from tornado.log import app_log
from tornado.util import basestring_type, import_object, re_unescape, unicode_type
try:
import typing # noqa
except ImportError:
pass
class Router(httputil.HTTPServerConnectionDelegate):
    """Abstract router interface.

    Concrete routers override ``find_handler`` to map an incoming request
    to a message delegate; ``start_request`` wires the router into the
    HTTP server's connection machinery.
    """

    def find_handler(self, request, **kwargs):
        # type: (httputil.HTTPServerRequest, typing.Any)->httputil.HTTPMessageDelegate
        """Return an `~.httputil.HTTPMessageDelegate` able to serve ``request``.

        Subclasses must implement this. Routing implementations may pass
        additional kwargs to extend the routing logic.

        :arg httputil.HTTPServerRequest request: current HTTP request.
        :arg kwargs: additional keyword arguments passed by the routing
            implementation.
        :returns: an instance of `~.httputil.HTTPMessageDelegate` that will
            be used to process the request.
        """
        raise NotImplementedError()

    def start_request(self, server_conn, request_conn):
        # Every new request on this connection is routed through self.
        delegate = _RoutingDelegate(self, server_conn, request_conn)
        return delegate
class ReversibleRouter(Router):
    """Abstract interface for routers that know their routes by name and
    can rebuild the original url for a named route.
    """

    def reverse_url(self, name, *args):
        """Return the url string for route ``name`` parametrized with
        ``args``, or ``None`` when no route with that name is known.

        :arg str name: route name.
        :arg args: url parameters.
        :returns: parametrized url string for the given route name
            (or ``None``).
        """
        raise NotImplementedError()
class _RoutingDelegate(httputil.HTTPMessageDelegate):
    """Adapter that defers delegate selection until request headers arrive.

    Wraps a `Router`: once headers are received, the router is asked for
    the real `~.httputil.HTTPMessageDelegate`, and every subsequent event
    (body data, finish, close) is forwarded to it.
    """
    def __init__(self, router, server_conn, request_conn):
        self.server_conn = server_conn
        self.request_conn = request_conn
        # Resolved lazily in headers_received(), once the request is known.
        self.delegate = None
        self.router = router  # type: Router
    def headers_received(self, start_line, headers):
        # Build the request object so the router can inspect it.
        request = httputil.HTTPServerRequest(
            connection=self.request_conn,
            server_connection=self.server_conn,
            start_line=start_line, headers=headers)
        # NOTE(review): find_handler() may return None (e.g. a RuleRouter
        # with no matching rule); the forwarding calls below would then
        # raise AttributeError. Confirm routers used here always return a
        # delegate.
        self.delegate = self.router.find_handler(request)
        return self.delegate.headers_received(start_line, headers)
    def data_received(self, chunk):
        # Forward body chunks to the resolved delegate.
        return self.delegate.data_received(chunk)
    def finish(self):
        self.delegate.finish()
    def on_connection_close(self):
        self.delegate.on_connection_close()
class RuleRouter(Router):
    """Rule-based router implementation."""

    def __init__(self, rules=None):
        """Construct a router from an ordered list of rules::

            RuleRouter([
                Rule(PathMatches("/handler"), Target),
                # ... more rules
            ])

        Tuples of `Rule` constructor arguments may be used instead of
        explicit `Rule` instances::

            RuleRouter([
                (PathMatches("/handler"), Target),
            ])

        Since `PathMatches` is the default matcher, a plain path string
        also works::

            RuleRouter([
                ("/handler", Target),
            ])

        ``Target`` may be a nested `Router` instance, an
        `~.httputil.HTTPServerConnectionDelegate`, or an old-style callable
        accepting a request argument.

        :arg rules: a list of `Rule` instances or tuples of `Rule`
            constructor arguments.
        """
        self.rules = []  # type: typing.List[Rule]
        if rules:
            self.add_rules(rules)

    def add_rules(self, rules):
        """Append new rules to the router.

        :arg rules: a list of Rule instances (or tuples of arguments,
            which are passed to the Rule constructor).
        """
        for spec in rules:
            if isinstance(spec, (tuple, list)):
                assert len(spec) in (2, 3, 4)
                first = spec[0]
                if isinstance(first, basestring_type):
                    # Bare strings are treated as path patterns.
                    spec = Rule(PathMatches(first), *spec[1:])
                else:
                    spec = Rule(*spec)
            self.rules.append(self.process_rule(spec))

    def process_rule(self, rule):
        """Hook for additional preprocessing of each rule.

        :arg Rule rule: a rule to be processed.
        :returns: the same or modified Rule instance.
        """
        return rule

    def find_handler(self, request, **kwargs):
        """Return a delegate from the first rule matching ``request``."""
        for rule in self.rules:
            match_params = rule.matcher.match(request)
            if match_params is None:
                continue
            if rule.target_kwargs:
                match_params['target_kwargs'] = rule.target_kwargs
            delegate = self.get_target_delegate(
                rule.target, request, **match_params)
            if delegate is not None:
                return delegate
        return None

    def get_target_delegate(self, target, request, **target_params):
        """Return an `~.httputil.HTTPMessageDelegate` for a rule's target.

        Called by `~.find_handler`; override to support additional target
        types.

        :arg target: a Rule's target.
        :arg httputil.HTTPServerRequest request: current request.
        :arg target_params: additional parameters that can be useful
            for `~.httputil.HTTPMessageDelegate` creation.
        """
        if isinstance(target, Router):
            return target.find_handler(request, **target_params)
        if isinstance(target, httputil.HTTPServerConnectionDelegate):
            return target.start_request(
                request.server_connection, request.connection)
        if callable(target):
            adapter = partial(target, **target_params)
            return _CallableAdapter(adapter, request.connection)
        return None
class ReversibleRuleRouter(ReversibleRouter, RuleRouter):
    """A rule-based router that implements the ``reverse_url`` method.

    Rules added to this router may carry a ``name`` attribute that can be
    used to reconstruct an original uri; the actual reconstruction happens
    in the rule's matcher (see `Matcher.reverse`).
    """

    def __init__(self, rules=None):
        self.named_rules = {}  # type: typing.Dict[str]
        super(ReversibleRuleRouter, self).__init__(rules)

    def process_rule(self, rule):
        rule = super(ReversibleRuleRouter, self).process_rule(rule)
        name = rule.name
        if name:
            if name in self.named_rules:
                # Later rules win; warn so duplicates are noticed.
                app_log.warning(
                    "Multiple handlers named %s; replacing previous value",
                    name)
            self.named_rules[name] = rule
        return rule

    def reverse_url(self, name, *args):
        named = self.named_rules.get(name)
        if named is not None:
            return named.matcher.reverse(*args)
        # Fall back to nested reversible routers.
        for rule in self.rules:
            target = rule.target
            if isinstance(target, ReversibleRouter):
                reversed_url = target.reverse_url(name, *args)
                if reversed_url is not None:
                    return reversed_url
        return None
class Rule(object):
    """A routing rule: a matcher plus a target."""

    def __init__(self, matcher, target, target_kwargs=None, name=None):
        """Construct a Rule instance.

        :arg Matcher matcher: a `Matcher` instance deciding whether this
            rule applies to a particular request.
        :arg target: the rule's target (typically a ``RequestHandler`` or
            `~.httputil.HTTPServerConnectionDelegate` subclass, or even a
            nested `Router`, depending on the routing implementation). A
            string is treated as a fully qualified name
            (module.ClassName) and imported.
        :arg dict target_kwargs: parameters useful at target instantiation
            time (for example ``status_code`` for a ``RequestHandler``
            subclass); they end up in
            ``target_params['target_kwargs']`` of
            `RuleRouter.get_target_delegate`.
        :arg str name: rule name, used to find the rule in
            `ReversibleRouter.reverse_url` implementations.
        """
        if isinstance(target, str):
            # A fully qualified name (module.ClassName): import the module
            # and pick the class.
            target = import_object(target)
        self.matcher = matcher  # type: Matcher
        self.target = target
        self.target_kwargs = target_kwargs if target_kwargs else {}
        self.name = name

    def reverse(self, *args):
        """Delegate url reconstruction to this rule's matcher."""
        return self.matcher.reverse(*args)

    def __repr__(self):
        return '%s(%r, %s, kwargs=%r, name=%r)' % \
            (self.__class__.__name__, self.matcher,
             self.target, self.target_kwargs, self.name)
class Matcher(object):
    """Base class for request matchers."""

    def match(self, request):
        """Match this instance against ``request``.

        :arg httputil.HTTPServerRequest request: current HTTP request
        :returns: a dict of parameters to be passed to the target handler
            (for example, ``handler_kwargs``, ``path_args``, ``path_kwargs``
            can be passed for proper `~.web.RequestHandler` instantiation).
            An empty dict is a valid (and common) return value indicating a
            match when the argument-passing features are not used.
            ``None`` must be returned to indicate there is no match.
        """
        raise NotImplementedError()

    def reverse(self, *args):
        """Reconstruct a full url from this matcher and ``args``.

        The base implementation supports no reversing and returns ``None``.
        """
        return None
class AnyMatches(Matcher):
    """Matcher that accepts every request."""

    def match(self, request):
        # Unconditional match, no captured parameters.
        return {}
class HostMatches(Matcher):
    """Matches requests whose host satisfies the ``host_pattern`` regex."""

    def __init__(self, host_pattern):
        if isinstance(host_pattern, basestring_type):
            # Anchor the pattern at the end so prefixes don't match.
            if not host_pattern.endswith("$"):
                host_pattern += "$"
            self.host_pattern = re.compile(host_pattern)
        else:
            # Assume a pre-compiled pattern object.
            self.host_pattern = host_pattern

    def match(self, request):
        # Empty dict signals "match, no parameters"; None signals no match.
        return {} if self.host_pattern.match(request.host_name) else None
class DefaultHostMatches(Matcher):
    """Matches requests whose host equals the application's default_host.

    Never matches when an ``X-Real-Ip`` header is present.
    """

    def __init__(self, application, host_pattern):
        self.application = application
        self.host_pattern = host_pattern

    def match(self, request):
        # Only consider the default host when not behind a load balancer
        # (useful for debugging).
        if "X-Real-Ip" in request.headers:
            return None
        if self.host_pattern.match(self.application.default_host):
            return {}
        return None
class PathMatches(Matcher):
    """Matches requests whose path satisfies the ``path_pattern`` regex."""

    def __init__(self, path_pattern):
        if isinstance(path_pattern, basestring_type):
            # Anchor the pattern at the end so prefixes don't match.
            if not path_pattern.endswith('$'):
                path_pattern += '$'
            self.regex = re.compile(path_pattern)
        else:
            self.regex = path_pattern
        # Mixing named and positional groups would make it ambiguous
        # whether to pass path_args or path_kwargs to the handler.
        assert len(self.regex.groupindex) in (0, self.regex.groups), \
            ("groups in url regexes must either be all named or all "
             "positional: %r" % self.regex.pattern)
        self._path, self._group_count = self._find_groups()

    def match(self, request):
        """Return captured path args/kwargs for ``request``, or ``None``."""
        match = self.regex.match(request.path)
        if match is None:
            return None
        if not self.regex.groups:
            return {}
        path_args, path_kwargs = [], {}
        # Pass matched groups to the handler. Since match.groups() includes
        # both named and unnamed groups, we want to use either groups
        # or groupdict but not both.
        if self.regex.groupindex:
            path_kwargs = dict(
                (str(k), _unquote_or_none(v))
                for (k, v) in match.groupdict().items())
        else:
            path_args = [_unquote_or_none(s) for s in match.groups()]
        return dict(path_args=path_args, path_kwargs=path_kwargs)

    def reverse(self, *args):
        """Rebuild a url from the pattern and positional ``args``."""
        if self._path is None:
            raise ValueError("Cannot reverse url regex " + self.regex.pattern)
        assert len(args) == self._group_count, "required number of arguments " \
            "not found"
        if not len(args):
            return self._path
        converted_args = []
        for a in args:
            if not isinstance(a, (unicode_type, bytes)):
                a = str(a)
            converted_args.append(url_escape(utf8(a), plus=False))
        return self._path % tuple(converted_args)

    def _find_groups(self):
        """Return a tuple (reverse string, group count) for a url.

        For example: given the url pattern /([0-9]{4})/([a-z-]+)/, this
        method would return ('/%s/%s/', 2). Returns (None, None) when the
        pattern is too complicated to reverse.
        """
        pattern = self.regex.pattern
        if pattern.startswith('^'):
            pattern = pattern[1:]
        if pattern.endswith('$'):
            pattern = pattern[:-1]
        if self.regex.groups != pattern.count('('):
            # The pattern is too complicated for our simplistic matching,
            # so we can't support reversing it.
            return None, None
        pieces = []
        for fragment in pattern.split('('):
            if ')' in fragment:
                # Fix: str.index never returns a negative value (it raises
                # ValueError when the substring is absent), and the ')'
                # membership test above guarantees it succeeds here, so the
                # old `if paren_loc >= 0` guard was dead code.
                paren_loc = fragment.index(')')
                pieces.append('%s' + fragment[paren_loc + 1:])
            else:
                try:
                    unescaped_fragment = re_unescape(fragment)
                except ValueError:
                    # If we can't unescape part of it, we can't
                    # reverse this url.
                    return (None, None)
                pieces.append(unescaped_fragment)
        return ''.join(pieces), self.regex.groups
class URLSpec(Rule):
    """Specifies mappings between URLs and handlers.

    .. versionchanged: 4.5
       `URLSpec` is now a subclass of a `Rule` with a `PathMatches` matcher
       and is preserved for backwards compatibility.
    """

    def __init__(self, pattern, handler, kwargs=None, name=None):
        """Parameters:

        * ``pattern``: Regular expression to be matched. Any capturing
          groups in the regex will be passed in to the handler's
          get/post/etc methods as arguments (by keyword if named, by
          position if unnamed; named and unnamed capturing groups may
          not be mixed in the same rule).
        * ``handler``: `~.web.RequestHandler` subclass to be invoked.
        * ``kwargs`` (optional): A dictionary of additional arguments
          to be passed to the handler's constructor.
        * ``name`` (optional): A name for this handler. Used by
          `~.web.Application.reverse_url`.
        """
        matcher = PathMatches(pattern)
        super(URLSpec, self).__init__(matcher, handler, kwargs, name)
        # Legacy attribute aliases kept for backwards compatibility.
        self.regex = matcher.regex
        self.handler_class = self.target
        self.kwargs = kwargs

    def __repr__(self):
        return '%s(%r, %s, kwargs=%r, name=%r)' % \
            (self.__class__.__name__, self.regex.pattern,
             self.handler_class, self.kwargs, self.name)
def _unquote_or_none(s):
    """None-safe wrapper around url_unescape to handle unmatched optional
    groups correctly.

    Note that args are passed as bytes so the handler can decide what
    encoding to use.
    """
    return None if s is None else url_unescape(s, encoding=None, plus=False)
| mr-ping/tornado | tornado/routing.py | Python | apache-2.0 | 21,421 |
# Configuration for a SciWonc dataflow job computing a CPU/memory ratio
# from Google cluster task events stored in MongoDB.

# Replica-set member hosts (host:port pairs, comma separated).
HOST = "ip-172-31-29-102.us-west-2.compute.internal:27017,ip-172-31-29-103.us-west-2.compute.internal:27017,ip-172-31-29-104.us-west-2.compute.internal:27017,ip-172-31-29-105.us-west-2.compute.internal:27017,ip-172-31-29-101.us-west-2.compute.internal:27017,ip-172-31-29-106.us-west-2.compute.internal:27017,ip-172-31-29-107.us-west-2.compute.internal:27017,ip-172-31-29-108.us-west-2.compute.internal:27017,ip-172-31-29-109.us-west-2.compute.internal:27017"
# Empty: ports are embedded in HOST and no authentication is configured.
PORT = ""
USER = ""
PASSWORD = ""
# Database and collections used as input/output.
DATABASE = "googleh"
READ_PREFERENCE = "secondary"
WRITE_CONCERN = "majority"
COLLECTION_INPUT = "task_events"
COLLECTION_OUTPUT = "ratio"
# Prefix applied to column names (presumably to namespace Google-trace fields).
PREFIX_COLUMN = "g_"
# First/last documents of the data slice handled by this job instance,
# identified by source file and line number — presumably inclusive bounds;
# confirm against the consuming script.
FIRST_ITEM = {'numline':128578,'filepath':'part-00004-of-00500.csv.gz'}
LAST_ITEM = {'numline':160721,'filepath':'part-00005-of-00500.csv.gz'}
# Attributes read from the task_events collection and the sort order.
ATTRIBUTES = ["job ID", "task index", "event type","time", "memory request","CPU request"]
SORT = ["_id.filepath", "_id.numline"]
OPERATION_TYPE = "UNIT"
INPUT_FILE = "task_googleh.csv"
OUTPUT_FILE = "ratio_cpu_memory_4.csv"
| elainenaomi/sciwonc-dataflow-examples | dissertation2017/Experiment 1A/instances/11_2_workflow_full_10files_secondary_wmj_3sh_3rs_with_annot_with_proj_3s_hash/work/ubuntu/pegasus/example_workflow/20161109T024901+0000/ConfigDB_Calc_TaskEvent_4.py | Python | gpl-3.0 | 1,023 |
import sys
sys.path.insert(1, "../../../")
import h2o, tests
import numpy as np
from sklearn.cluster import KMeans
from sklearn.preprocessing import Imputer
def get_modelKmeans():
    """Compare H2O and scikit-learn k-means on the benign dataset.

    For k in 2..6, fits an H2O k-means model and a scikit-learn KMeans
    model on the same data and prints the scikit-learn cluster centers.
    Assumes a pre-existing H2O cluster connection (localhost:54321).
    """
    benign_h2o = h2o.import_file(path=h2o.locate("smalldata/logreg/benign.csv"))
    benign_sci = np.genfromtxt(h2o.locate("smalldata/logreg/benign.csv"), delimiter=",")
    # Impute missing values with column mean so scikit-learn can fit.
    imp = Imputer(missing_values='NaN', strategy='mean', axis=0)
    benign_sci = imp.fit_transform(benign_sci)
    for i in range(2, 7):
        # H2O k-means
        km_h2o = h2o.kmeans(x=benign_h2o, k=i)
        km_h2o.show()
        model = h2o.get_model(km_h2o._id)
        model.show()
        # scikit-learn k-means on the same (imputed) data
        km_sci = KMeans(n_clusters=i, init='k-means++', n_init=1)
        km_sci.fit(benign_sci)
        # Fix: use print() call form (valid on both Python 2 and 3; the
        # original Python-2-only print statements break under Python 3),
        # and correct the "sckit" typo in the printed label.
        print("scikit centers")
        print(km_sci.cluster_centers_)
if __name__ == "__main__":
    # Run through the shared h2o pyunit test harness.
    tests.run_test(sys.argv, get_modelKmeans)
| printedheart/h2o-3 | h2o-py/tests/testdir_algos/kmeans/pyunit_get_modelKmeans.py | Python | apache-2.0 | 1,092 |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.2 on 2017-06-13 06:54
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    # Renames Summoner_V3.account_id to accountId (camelCase, presumably to
    # mirror the Riot API field naming — confirm against the model).

    dependencies = [
        # NOTE(review): Django app labels normally contain no dots; the
        # leading '.league.' prefix looks suspect — confirm this matches
        # the app label Django actually generated (likely 'champ_chooser').
        ('.league.champ_chooser', '0001_initial'),
    ]
    operations = [
        migrations.RenameField(
            model_name='summoner_v3',
            old_name='account_id',
            new_name='accountId',
        ),
    ]
| belleandindygames/league | league/champ_chooser/bkp/0002_auto_20170613_0654.py | Python | mit | 443 |
from collections import namedtuple
from logging import WARNING
from uuid import uuid4
import ddt
from django.test import TestCase
from django.urls import reverse
from rest_framework.exceptions import ValidationError
from rest_framework.settings import api_settings
from rest_framework.test import APIRequestFactory
from credentials.apps.api.v2.serializers import (
CourseCertificateSerializer,
CredentialField,
UserCredentialAttributeSerializer,
UserCredentialCreationSerializer,
UserCredentialSerializer,
UserGradeSerializer,
)
from credentials.apps.catalog.tests.factories import CourseFactory, CourseRunFactory
from credentials.apps.core.tests.mixins import SiteMixin
from credentials.apps.credentials.models import CourseCertificate
from credentials.apps.credentials.tests.factories import (
CourseCertificateFactory,
ProgramCertificateFactory,
UserCredentialAttributeFactory,
UserCredentialFactory,
)
from credentials.apps.records.tests.factories import UserGradeFactory
@ddt.ddt
class CredentialFieldTests(SiteMixin, TestCase):
    """Validation and serialization tests for `CredentialField`."""

    def setUp(self):
        super().setUp()
        self.program_certificate = ProgramCertificateFactory(site=self.site)
        self.course_certificate = CourseCertificateFactory(site=self.site, certificate_type="verified")
        self.field_instance = CredentialField()
        # see: https://github.com/encode/django-rest-framework/blob/3.9.x/rest_framework/fields.py#L610
        # pylint: disable=protected-access
        self.field_instance._context = {
            "request": namedtuple("HttpRequest", ["site"])(self.site),
        }

    def assert_program_uuid_validation_error_raised(self, program_uuid):
        """Assert to_internal_value rejects the given program UUID.

        Fix: the previous try/except form passed silently when no
        ValidationError was raised at all; assertRaises makes a missing
        error an explicit test failure.
        """
        with self.assertRaises(ValidationError) as ctx:
            self.field_instance.to_internal_value({"program_uuid": program_uuid})
        expected = {"program_uuid": f"No active ProgramCertificate exists for program [{program_uuid}]"}
        self.assertEqual(ctx.exception.detail, expected)

    def assert_course_run_key_validation_error_raised(self, course_run_key):
        """Assert to_internal_value rejects the given course run key.

        Same fix as above: a missing ValidationError now fails the test.
        """
        with self.assertRaises(ValidationError) as ctx:
            self.field_instance.to_internal_value({"course_run_key": course_run_key, "mode": "verified"})
        expected = {"course_run_key": f"No active CourseCertificate exists for course run [{course_run_key}]"}
        self.assertEqual(ctx.exception.detail, expected)

    def test_to_internal_value_with_empty_program_uuid(self):
        """Verify an error is raised if no program UUID is provided."""
        with self.assertRaisesMessage(ValidationError, "Credential identifier is missing"):
            self.field_instance.to_internal_value({"program_uuid": ""})

    def test_to_internal_value_with_invalid_program_uuid(self):
        """Verify the method raises a ValidationError if the passed program UUID does not
        correspond to a ProgramCertificate.
        """
        self.assert_program_uuid_validation_error_raised(uuid4())

    def test_to_internal_value_with_invalid_site(self):
        """Verify the method raises a ValidationError if the passed program UUID belongs to a different site."""
        certificate = ProgramCertificateFactory()  # without setting site=self.site
        self.assert_program_uuid_validation_error_raised(certificate.program_uuid)

    def test_to_internal_value_with_inactive_program_certificate(self):
        """Verify the method raises a ValidationError if the ProgramCertificate is NOT active."""
        self.program_certificate.is_active = False
        self.program_certificate.save()
        self.assert_program_uuid_validation_error_raised(self.program_certificate.program_uuid)

    def test_to_internal_value_with_valid_program_credential(self):
        """Verify the method returns the ProgramCertificate corresponding to the specified UUID."""
        self.assertEqual(
            self.field_instance.to_internal_value({"program_uuid": self.program_certificate.program_uuid}),
            self.program_certificate,
        )

    def test_to_internal_value_with_created_course_credential(self):
        """Verify the method creates a course credential if needed."""
        credential = self.field_instance.to_internal_value({"course_run_key": "create-me", "mode": "verified"})
        self.assertEqual(credential, CourseCertificate.objects.get(course_id="create-me"))

    def test_to_internal_value_with_created_course_credential_read_only(self):
        """Verify the method refuses to create a course credential when read-only."""
        self.field_instance.read_only = True
        self.assert_course_run_key_validation_error_raised("create-me")

    def test_to_internal_value_with_created_course_credential_no_type_change(self):
        """Verify the method won't update cert information when creating a course credential."""
        credential = self.field_instance.to_internal_value(
            {"course_run_key": self.course_certificate.course_id, "mode": "honor"}
        )
        credential.refresh_from_db()  # just in case
        self.assertEqual(credential.certificate_type, "verified")

    def test_to_internal_value_with_inactive_course_credential(self):
        """Verify the method raises a ValidationError if the CourseCertificate is NOT active."""
        self.course_certificate.is_active = False
        self.course_certificate.save()
        self.assert_course_run_key_validation_error_raised(self.course_certificate.course_id)

    def test_to_internal_value_with_valid_course_credential(self):
        """Verify the method serializes the course credential details to a dict."""
        self.assertEqual(
            self.field_instance.to_internal_value(
                {"course_run_key": self.course_certificate.course_id, "mode": "verified"}
            ),
            self.course_certificate,
        )

    def test_to_representation_data_with_program(self):
        """Verify the method serializes the credential details to a dict."""
        expected = {
            "type": "program",
            "program_uuid": self.program_certificate.program_uuid,
            "credential_id": self.program_certificate.id,
        }
        self.assertEqual(self.field_instance.to_representation(self.program_certificate), expected)

    def test_to_representation_data_with_course(self):
        """Verify the method serializes the credential details to a dict."""
        expected = {
            "type": "course-run",
            "course_run_key": self.course_certificate.course_id,
            "mode": self.course_certificate.certificate_type,
        }
        self.assertEqual(self.field_instance.to_representation(self.course_certificate), expected)
class UserGradeSerializerTests(SiteMixin, TestCase):
    """Round-trip tests for UserGradeSerializer."""
    def test_to_representation(self):
        """Serializing a grade exposes all persisted fields."""
        grade = UserGradeFactory()
        expected = {
            "id": grade.id,
            "username": grade.username,
            "course_run": grade.course_run.key,
            "letter_grade": grade.letter_grade,
            "percent_grade": str(grade.percent_grade),
            "verified": grade.verified,
            "created": grade.created.strftime(api_settings.DATETIME_FORMAT),
            "modified": grade.modified.strftime(api_settings.DATETIME_FORMAT),
        }
        actual = UserGradeSerializer(grade).data
        self.assertDictEqual(actual, expected)
    def test_to_internal_value(self):
        """Deserialization rejects unknown course runs and resolves known ones."""
        Request = namedtuple("Request", ["site"])
        serializer = UserGradeSerializer(context={"request": Request(site=self.site)})
        data = {
            "username": "alice",
            "course_run": "nope",
            "letter_grade": "A",
            "percent_grade": 0.9,
            "verified": True,
        }
        # An unknown course-run key must be rejected.
        with self.assertRaisesMessage(ValidationError, "No CourseRun exists for key [nope]"):
            serializer.to_internal_value(data)
        course = CourseFactory(site=self.site)
        course_run = CourseRunFactory(course=course)
        data["course_run"] = course_run.key
        grade = serializer.to_internal_value(data)
        self.assertEqual(grade["username"], "alice")
        # The course-run key is resolved to the model instance.
        self.assertEqual(grade["course_run"], course_run)
        self.assertEqual(grade["verified"], True)
        self.assertEqual(grade["letter_grade"], "A")
        # percent_grade is coerced to a 4-decimal value by the serializer.
        self.assertEqual(str(grade["percent_grade"]), "0.9000")
class UserCredentialAttributeSerializerTests(TestCase):
    """Serialization tests for UserCredentialAttributeSerializer."""

    def test_data(self):
        """The serializer should emit the attribute's name/value pair."""
        certificate = ProgramCertificateFactory()
        credential = UserCredentialFactory(credential=certificate)
        attribute = UserCredentialAttributeFactory(user_credential=credential)
        serialized = UserCredentialAttributeSerializer(attribute).data
        self.assertEqual(serialized, {"name": attribute.name, "value": attribute.value})
class UserCredentialCreationSerializerTests(TestCase):
    """Tests for UserCredentialCreationSerializer."""

    def test_data(self):
        """Verify the serializer serializes a UserCredential exactly as
        UserCredentialSerializer does."""
        request = APIRequestFactory().get("/")
        credential = UserCredentialFactory()
        context = {"request": request}
        creation_data = UserCredentialCreationSerializer(credential, context=context).data
        reference_data = UserCredentialSerializer(credential, context=context).data
        self.assertEqual(creation_data, reference_data)

    def test_validate_attributes(self):
        """Verify the method prevents attributes with duplicate names from being created."""
        serializer = UserCredentialCreationSerializer()
        # Empty and single-attribute lists pass through unchanged.
        for attrs in ([], [{"name": "attr-name", "value": "attr-value"}]):
            self.assertEqual(serializer.validate_attributes(attrs), attrs)
        duplicated = [
            {"name": "attr-name", "value": "attr-value"},
            {"name": "attr-name", "value": "another-attr-value"},
        ]
        with self.assertRaisesMessage(ValidationError, "Attribute names cannot be duplicated."):
            serializer.validate_attributes(duplicated)
class UserCredentialSerializerTests(TestCase):
    """Serialization tests for UserCredentialSerializer (program and course credentials)."""
    def test_program_credential(self):
        """A program credential serializes with type 'program' and a render url."""
        request = APIRequestFactory().get("/")
        program_certificate = ProgramCertificateFactory()
        user_credential = UserCredentialFactory(credential=program_certificate)
        user_credential_attribute = UserCredentialAttributeFactory(user_credential=user_credential)
        expected_url = "http://testserver{}".format(
            reverse("credentials:render", kwargs={"uuid": user_credential.uuid.hex})
        )
        expected = {
            "username": user_credential.username,
            "uuid": str(user_credential.uuid),
            "credential": {
                "type": "program",
                "program_uuid": program_certificate.program_uuid,
                "credential_id": program_certificate.id,
            },
            "date_override": None,
            "download_url": user_credential.download_url,
            "status": user_credential.status,
            "attributes": [{"name": user_credential_attribute.name, "value": user_credential_attribute.value}],
            "created": user_credential.created.strftime(api_settings.DATETIME_FORMAT),
            "modified": user_credential.modified.strftime(api_settings.DATETIME_FORMAT),
            "certificate_url": expected_url,
        }
        actual = UserCredentialSerializer(user_credential, context={"request": request}).data
        self.assertEqual(actual, expected)
    def test_course_credential(self):
        """A course credential serializes with type 'course-run' and its mode."""
        request = APIRequestFactory().get("/")
        course_certificate = CourseCertificateFactory()
        user_credential = UserCredentialFactory(credential=course_certificate)
        user_credential_attribute = UserCredentialAttributeFactory(user_credential=user_credential)
        expected_url = "http://testserver{}".format(
            reverse("credentials:render", kwargs={"uuid": user_credential.uuid.hex})
        )
        expected = {
            "username": user_credential.username,
            "uuid": str(user_credential.uuid),
            "credential": {
                "type": "course-run",
                "course_run_key": course_certificate.course_id,
                "mode": course_certificate.certificate_type,
            },
            "date_override": None,
            "download_url": user_credential.download_url,
            "status": user_credential.status,
            "attributes": [{"name": user_credential_attribute.name, "value": user_credential_attribute.value}],
            "created": user_credential.created.strftime(api_settings.DATETIME_FORMAT),
            "modified": user_credential.modified.strftime(api_settings.DATETIME_FORMAT),
            "certificate_url": expected_url,
        }
        actual = UserCredentialSerializer(user_credential, context={"request": request}).data
        self.assertEqual(actual, expected)
class CourseCertificateSerializerTests(SiteMixin, TestCase):
    """Serialization tests for CourseCertificateSerializer."""
    def test_create_course_certificate(self):
        """A certificate with a course run serializes all fields."""
        course_run = CourseRunFactory()
        course_certificate = CourseCertificateFactory(site=self.site, course_run=course_run)
        Request = namedtuple("Request", ["site"])
        actual = CourseCertificateSerializer(course_certificate, context={"request": Request(site=self.site)}).data
        expected = {
            "id": course_certificate.id,
            "site": self.site.id,
            "course_id": course_certificate.course_id,
            "course_run": course_certificate.course_run.key,
            "certificate_type": course_certificate.certificate_type,
            "certificate_available_date": course_certificate.certificate_available_date,
            "is_active": course_certificate.is_active,
        }
        self.assertEqual(actual, expected)
    def test_missing_course_run(self):
        # We should be able to create an entry without a course run
        course_certificate = CourseCertificateFactory(site=self.site, course_run=None)
        Request = namedtuple("Request", ["site"])
        actual = CourseCertificateSerializer(course_certificate, context={"request": Request(site=self.site)}).data
        expected = {
            "id": course_certificate.id,
            "site": self.site.id,
            "course_run": None,
            "course_id": course_certificate.course_id,
            "certificate_type": course_certificate.certificate_type,
            "certificate_available_date": course_certificate.certificate_available_date,
            "is_active": course_certificate.is_active,
        }
        self.assertEqual(actual, expected)
    def test_create_without_course_run_raises_warning(self):
        # even though you can create an entry without a course run,
        # we want to make sure we are logging a warning when it is missing
        with self.assertLogs(level=WARNING):
            Request = namedtuple("Request", ["site"])
            CourseCertificateSerializer(context={"request": Request(site=self.site)}).create(
                validated_data={
                    "course_id": "DemoCourse0",
                    "certificate_type": "verified",
                    "is_active": True,
                    "certificate_available_date": None,
                }
            )
| edx/credentials | credentials/apps/api/v2/tests/test_serializers.py | Python | agpl-3.0 | 15,360 |
# -*- coding: utf-8 -*-
#
#
# Author: Nicolas Bessi, Leonardo Pistone
# Copyright 2013, 2014 Camptocamp SA
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#
import datetime
from openerp import models, fields, api
from openerp.tools.translate import _
class SaleOrderLine(models.Model):
"""Adds two exception functions to be called by the sale_exceptions module.
The first one will ensure that an order line can be delivered on the
delivery date, if the related product is in MTS. Validation is done by
using the shop related to the sales order line location and using the line
delay.
The second one will raise a sales exception if the current SO will break an
order already placed in future
"""
_inherit = "sale.order.line"
    @api.one
    def _compute_line_delivery_date(self):
        """Return the expected delivery date: order date plus the line delay.

        NOTE(review): despite the ``_compute_`` name this returns a value
        instead of assigning a field; with ``@api.one`` results are
        collected into a list — confirm callers expect that.
        """
        date_order = self.order_id.date_order
        date_order = fields.Date.from_string(date_order)
        # delay is a float, that is perfectly supported by timedelta
        return date_order + datetime.timedelta(days=self.delay)
def _find_parent_locations(self):
location = self.order_id.partner_shipping_id.property_stock_customer
res = [location.id]
while location.location_id:
location = location.location_id
res.append(location.id)
return res
    @api.multi
    def _predict_rules(self):
        """Choose a rule without a procurement.

        This imitates what will be done when the order is validated, with the
        difference that here we do not have a procurement yet. Rule lookup
        precedence mirrors the procurement engine: line route first, then
        product/category routes, then warehouse routes, then route-less rules.
        """
        Rule = self.env['procurement.rule']
        Warehouse = self.env['stock.warehouse']
        order = self.order_id
        procurement_data = order._prepare_order_line_procurement(order, self)
        # normally this is the order's warehouse, but modules like
        # sale_sourced_by_line change this behaviour
        warehouse = Warehouse.browse(procurement_data['warehouse_id'])
        # Restrict rules to the customer location or any of its parents.
        domain = [('location_id', 'in', self._find_parent_locations())]
        warehouse_route_ids = []
        if warehouse:
            domain += [
                '|',
                ('warehouse_id', '=', warehouse.id),
                ('warehouse_id', '=', False)
            ]
            warehouse_route_ids = [x.id for x in warehouse.route_ids]
        product_route_ids = [
            x.id
            for x in self.product_id.route_ids +
            self.product_id.categ_id.total_route_ids]
        procurement_route_ids = [x.id for x in self.route_id]
        # 1) rules from the route explicitly set on the line
        res = Rule.search(
            domain + [('route_id', 'in', procurement_route_ids)],
            order='route_sequence, sequence'
        )
        if not res:
            # 2) rules from the product's (or its category's) routes
            res = Rule.search(
                domain + [('route_id', 'in', product_route_ids)],
                order='route_sequence, sequence'
            )
        if not res:
            # 3) rules from the warehouse routes
            res = warehouse_route_ids and Rule.search(
                domain + [('route_id', 'in', warehouse_route_ids)],
                order='route_sequence, sequence'
            ) or []
        if not res:
            # 4) finally, rules not tied to any route
            res = Rule.search(domain + [('route_id', '=', False)],
                              order='sequence')
        return res
@api.multi
def _get_line_location(self):
""" Get the source location from the predicted rule"""
rules = self._predict_rules()
if rules:
return rules[0].location_src_id
return False
@api.multi
def _is_make_to_stock(self):
"""Predict whether a make to stock rule will be chosen"""
return self._predict_procure_method() == 'make_to_stock'
@api.multi
def _predict_procure_method(self):
"""Predict the procurement method that will be chosen"""
rules = self._predict_rules()
return rules[0].procure_method
@api.multi
def _should_skip_stock_checks(self):
self.ensure_one()
if (
self.product_id and
self.product_id.type == 'product' and
self._is_make_to_stock() and
self._get_line_location() and
self._get_line_location().usage == 'internal'
):
return False
else:
return True
@api.multi
def can_command_at_delivery_date(self):
"""Predicate that checks whether a SO line can be delivered at delivery
date.
The delivery date is computed using date of the order + line delay.
The source location is predicted with a logic similar to the one that
will be used for real.
:return: True if line can be delivered on time
"""
self.ensure_one()
if self._should_skip_stock_checks():
return True
delivery_date = self._compute_line_delivery_date()[0]
delivery_date = fields.Datetime.to_string(delivery_date)
location = self._get_line_location()
assert location, _("No rules specifies a location"
" for this sale order line")
ctx = {
'to_date': delivery_date,
'compute_child': True,
'location': location.id,
}
try:
ctx['owner_id'] = self.stock_owner_id.id
except AttributeError:
# module sale_owner_stock_sourcing not installed, fine
pass
# Virtual qty is made on all childs of chosen location
prod_for_virtual_qty = (self.product_id
.with_context(ctx)
.virtual_available)
if prod_for_virtual_qty < self.product_uom_qty:
return False
return True
@api.model
def _get_states(self):
return ('waiting', 'confirmed', 'assigned')
@api.model
def _get_affected_dates(self, location_id, product_id, delivery_date):
"""Determine future dates where virtual stock has to be checked.
It will only look for stock move that pass by location_id.
If your stock location have children or you have configured automated
stock action
they must pass by the location related to SO line, else the will be
ignored
:param location_id: location id to be checked
:param product_id: product id te be checked
:return: list of dates to be checked
"""
cr = self._cr
sql = ("SELECT date FROM stock_move"
" WHERE state IN %s"
" AND date > %s"
" AND product_id = %s"
" AND location_id = %s")
cr.execute(sql, (self._get_states(),
delivery_date,
product_id,
location_id))
return (row[0] for row in cr.fetchall())
@api.multi
def future_orders_are_affected(self):
"""Predicate function that is a naive workaround for the lack of stock
reservation.
This can be a performance killer, you should not use it
if you have constantly a lot of running Orders
:return: True if future order are affected by current command line
"""
self.ensure_one()
if self._should_skip_stock_checks():
return False
delivery_date = self._compute_line_delivery_date()[0]
delivery_date = fields.Datetime.to_string(delivery_date)
location = self._get_line_location()
assert location, _("No rules specifies a location"
" for this sale order line")
ctx = {
'compute_child': True,
'location_id': location.id,
}
try:
ctx['owner_id'] = self.stock_owner_id.id
except AttributeError:
# module sale_owner_stock_sourcing not installed, fine
pass
# Virtual qty is made on all childs of chosen location
dates = self._get_affected_dates(location.id, self.product_id.id,
delivery_date)
for aff_date in dates:
ctx['to_date'] = aff_date
prod_for_virtual_qty = (self.product_id
.with_context(ctx)
.virtual_available)
if prod_for_virtual_qty < self.product_uom_qty:
return True
return False
| damdam-s/sale-workflow | sale_exception_nostock/model/sale.py | Python | agpl-3.0 | 9,058 |
# -*- coding: utf-8 -*-
#
# Copyright (C) 2005-2009 Edgewall Software
# Copyright (C) 2005-2007 Christopher Lenz <cmlenz@gmx.de>
# Copyright (C) 2005 Matthew Good <trac@matt-good.net>
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://trac.edgewall.org/wiki/TracLicense.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://trac.edgewall.org/log/.
#
# Author: Christopher Lenz <cmlenz@gmx.de>
# Matthew Good <trac@matt-good.net>
from __future__ import print_function
import cgi
import dircache
import fnmatch
from functools import partial
import gc
import io
import locale
import os
import pkg_resources
from pprint import pformat, pprint
import re
import sys
from genshi.builder import tag
from genshi.output import DocType
from genshi.template import TemplateLoader
from trac import __version__ as TRAC_VERSION
from trac.config import BoolOption, ChoiceOption, ConfigurationError, \
ExtensionOption, Option, OrderedExtensionsOption
from trac.core import *
from trac.env import open_environment
from trac.loader import get_plugin_info, match_plugins_to_frames
from trac.perm import PermissionCache, PermissionError
from trac.resource import ResourceNotFound
from trac.util import arity, get_frame_info, get_last_traceback, hex_entropy, \
lazy, read_file, safe_repr, translation, \
warn_setuptools_issue
from trac.util.concurrency import threading
from trac.util.datefmt import format_datetime, localtz, timezone, user_time
from trac.util.text import exception_to_unicode, shorten_line, to_unicode, \
to_utf8, unicode_quote
from trac.util.translation import _, get_negotiated_locale, has_babel, \
safefmt, tag_
from trac.web.api import HTTPBadRequest, HTTPException, HTTPForbidden, \
HTTPInternalError, HTTPNotFound, IAuthenticator, \
IRequestFilter, IRequestHandler, Request, \
RequestDone, is_valid_default_handler
from trac.web.chrome import Chrome, add_notice, add_warning
from trac.web.href import Href
from trac.web.session import Session
#: This URL is used for semi-automatic bug reports (see
#: `send_internal_error`). Please modify it to point to your own
#: Trac instance if you distribute a patched version of Trac.
default_tracker = 'http://trac.edgewall.org'
class FakeSession(dict):
    """Session stand-in used when no real session could be created.

    Behaves like a plain dict, carries no session id, and silently
    discards any attempt to persist it.
    """
    sid = None
    def save(self):
        # Nothing to persist for a fake session.
        return None
class FakePerm(object):
    """Permission stand-in that denies every permission check.

    Used for anonymous access when no real permission cache can be
    built (see RequestDispatcher._get_perm).
    """
    username = 'anonymous'
    def __call__(self, realm_or_resource, id=False, version=False):
        # Narrowing the context to a resource still denies everything.
        return self
    def has_permission(self, action, realm_or_resource=None, id=False,
                       version=False):
        return False
    __contains__ = has_permission
    def require(self, action, realm_or_resource=None, id=False, version=False,
                message=None):
        if message is not None:
            raise PermissionError(msg=message)
        raise PermissionError(action)
    assert_permission = require
class RequestWithSession(Request):
    """A request that saves its associated session when sending the reply."""
    def send_response(self, code=200):
        # Persist session changes for every non-error (< 400) reply before
        # the status line goes out; error replies leave the session alone.
        is_error = code >= 400
        if not is_error:
            self.session.save()
        super(RequestWithSession, self).send_response(code)
class RequestDispatcher(Component):
    """Web request dispatcher.
    This component dispatches incoming requests to registered
    handlers. Besides, it also takes care of user authentication and
    request pre- and post-processing.
    """
    required = True
    authenticators = ExtensionPoint(IAuthenticator)
    handlers = ExtensionPoint(IRequestHandler)
    filters = OrderedExtensionsOption('trac', 'request_filters',
                                      IRequestFilter,
        doc="""Ordered list of filters to apply to all requests.""")
    default_handler = ExtensionOption('trac', 'default_handler',
                                      IRequestHandler, 'WikiModule',
        """Name of the component that handles requests to the base
        URL.
        Options include `TimelineModule`, `RoadmapModule`,
        `BrowserModule`, `QueryModule`, `ReportModule`, `TicketModule`
        and `WikiModule`.""")
    default_timezone = Option('trac', 'default_timezone', '',
        """The default timezone to use""")
    default_language = Option('trac', 'default_language', '',
        """The preferred language to use if no user preference has
        been set. (''since 0.12.1'')
        """)
    default_date_format = ChoiceOption('trac', 'default_date_format',
                                       ('', 'iso8601'),
        """The date format. Valid options are 'iso8601' for selecting
        ISO 8601 format, or leave it empty which means the default
        date format will be inferred from the browser's default
        language. (''since 1.0'')
        """)
    use_xsendfile = BoolOption('trac', 'use_xsendfile', 'false',
        """When true, send a `X-Sendfile` header and no content when sending
        files from the filesystem, so that the web server handles the content.
        This requires a web server that knows how to handle such a header,
        like Apache with `mod_xsendfile` or lighttpd. (''since 1.0'')
        """)
    xsendfile_header = Option('trac', 'xsendfile_header', 'X-Sendfile',
        """The header to use if `use_xsendfile` is enabled. If Nginx is used,
        set `X-Accel-Redirect`. (''since 1.0.6'')""")
    # Public API
    def authenticate(self, req):
        """Return the authenticated user name for `req`.

        The first authenticator that returns a non-empty name wins; an
        authenticator error aborts the chain and the user is treated as
        'anonymous'.
        """
        for authenticator in self.authenticators:
            try:
                authname = authenticator.authenticate(req)
            except TracError as e:
                self.log.error("Can't authenticate using %s: %s",
                               authenticator.__class__.__name__,
                               exception_to_unicode(e, traceback=True))
                add_warning(req, _("Authentication error. "
                                   "Please contact your administrator."))
                break # don't fallback to other authenticators
            if authname:
                return authname
        return 'anonymous'
    def dispatch(self, req):
        """Find a registered handler that matches the request and let
        it process it.
        In addition, this method initializes the data dictionary
        passed to the the template and adds the web site chrome.
        """
        self.log.debug('Dispatching %r', req)
        chrome = Chrome(self.env)
        # Setup request callbacks for lazily-evaluated properties
        req.callbacks.update({
            'authname': self.authenticate,
            'chrome': chrome.prepare_request,
            'perm': self._get_perm,
            'session': self._get_session,
            'locale': self._get_locale,
            'lc_time': self._get_lc_time,
            'tz': self._get_timezone,
            'form_token': self._get_form_token,
            'use_xsendfile': self._get_use_xsendfile,
            'xsendfile_header': self._get_xsendfile_header,
        })
        try:
            try:
                # Select the component that should handle the request
                chosen_handler = None
                try:
                    for handler in self._request_handlers.values():
                        if handler.match_request(req):
                            chosen_handler = handler
                            break
                    if not chosen_handler and \
                       (not req.path_info or req.path_info == '/'):
                        chosen_handler = self._get_valid_default_handler(req)
                    # pre-process any incoming request, whether a handler
                    # was found or not
                    self.log.debug("Chosen handler is %s", chosen_handler)
                    chosen_handler = \
                        self._pre_process_request(req, chosen_handler)
                except TracError as e:
                    raise HTTPInternalError(e)
                if not chosen_handler:
                    if req.path_info.endswith('/'):
                        # Strip trailing / and redirect
                        target = unicode_quote(req.path_info.rstrip('/'))
                        if req.query_string:
                            target += '?' + req.query_string
                        req.redirect(req.href + target, permanent=True)
                    raise HTTPNotFound('No handler matched request to %s',
                                       req.path_info)
                req.callbacks['chrome'] = partial(chrome.prepare_request,
                                                  handler=chosen_handler)
                # Protect against CSRF attacks: we validate the form token
                # for all POST requests with a content-type corresponding
                # to form submissions
                if req.method == 'POST':
                    ctype = req.get_header('Content-Type')
                    if ctype:
                        ctype, options = cgi.parse_header(ctype)
                    if ctype in ('application/x-www-form-urlencoded',
                                 'multipart/form-data') and \
                            req.args.get('__FORM_TOKEN') != req.form_token:
                        if self.env.secure_cookies and req.scheme == 'http':
                            msg = _('Secure cookies are enabled, you must '
                                    'use https to submit forms.')
                        else:
                            msg = _('Do you have cookies enabled?')
                        raise HTTPBadRequest(_('Missing or invalid form token.'
                                               ' %(msg)s', msg=msg))
                # Process the request and render the template
                resp = chosen_handler.process_request(req)
                if resp:
                    if len(resp) == 2: # old Clearsilver template and HDF data
                        self.log.error("Clearsilver template are no longer "
                                       "supported (%s)", resp[0])
                        raise TracError(
                            _("Clearsilver templates are no longer supported, "
                              "please contact your Trac administrator."))
                    # Genshi
                    template, data, content_type, method = \
                        self._post_process_request(req, *resp)
                    if 'hdfdump' in req.args:
                        req.perm.require('TRAC_ADMIN')
                        # debugging helper - no need to render first
                        out = io.BytesIO()
                        pprint(data, out)
                        req.send(out.getvalue(), 'text/plain')
                    self.log.debug("Rendering response from handler")
                    output = chrome.render_template(
                        req, template, data, content_type, method=method,
                        iterable=chrome.use_chunked_encoding)
                    req.send(output, content_type or 'text/html')
                else:
                    self.log.debug("Empty or no response from handler. "
                                   "Entering post_process_request.")
                    self._post_process_request(req)
            except RequestDone:
                raise
            except:
                # post-process the request in case of errors
                err = sys.exc_info()
                try:
                    self._post_process_request(req)
                except RequestDone:
                    raise
                except Exception as e:
                    self.log.error("Exception caught while post-processing"
                                   " request: %s",
                                   exception_to_unicode(e, traceback=True))
                # Python 2 three-argument raise: re-raise the original
                # exception with its original traceback
                raise err[0], err[1], err[2]
        except PermissionError as e:
            raise HTTPForbidden(e)
        except ResourceNotFound as e:
            raise HTTPNotFound(e)
        except TracError as e:
            raise HTTPInternalError(e)
    # Internal methods
    @lazy
    def _request_handlers(self):
        # Maps handler class name -> handler instance; computed once (@lazy).
        return dict((handler.__class__.__name__, handler)
                    for handler in self.handlers)
    def _get_valid_default_handler(self, req):
        # Use default_handler from the Session if it is a valid value.
        name = req.session.get('default_handler')
        handler = self._request_handlers.get(name)
        if handler and not is_valid_default_handler(handler):
            handler = None
        if not handler:
            # Use default_handler from project configuration.
            handler = self.default_handler
            if not is_valid_default_handler(handler):
                raise ConfigurationError(
                    tag_("%(handler)s is not a valid default handler. Please "
                         "update %(option)s through the %(page)s page or by "
                         "directly editing trac.ini.",
                         handler=tag.code(handler.__class__.__name__),
                         option=tag.code("[trac] default_handler"),
                         page=tag.a(_("Basic Settings"),
                                    href=req.href.admin('general/basics'))))
        return handler
    def _get_perm(self, req):
        """Return the permission cache, or FakePerm for a fake session."""
        if isinstance(req.session, FakeSession):
            return FakePerm()
        else:
            return PermissionCache(self.env, req.authname)
    def _get_session(self, req):
        """Return the request session, falling back to FakeSession on error."""
        try:
            return Session(self.env, req)
        except TracError as e:
            self.log.error("can't retrieve session: %s",
                           exception_to_unicode(e))
            return FakeSession()
    def _get_locale(self, req):
        """Negotiate the locale from session, config and browser languages."""
        if has_babel:
            preferred = req.session.get('language')
            default = self.env.config.get('trac', 'default_language', '')
            negotiated = get_negotiated_locale([preferred, default] +
                                               req.languages)
            self.log.debug("Negotiated locale: %s -> %s", preferred,
                           negotiated)
            return negotiated
    def _get_lc_time(self, req):
        """Return 'iso8601' or the request locale for date formatting."""
        lc_time = req.session.get('lc_time')
        if not lc_time or lc_time == 'locale' and not has_babel:
            lc_time = self.default_date_format
        if lc_time == 'iso8601':
            return 'iso8601'
        return req.locale
    def _get_timezone(self, req):
        """Return the session timezone, falling back to the local timezone."""
        try:
            return timezone(req.session.get('tz', self.default_timezone
                                            or 'missing'))
        except Exception:
            return localtz
    def _get_form_token(self, req):
        """Used to protect against CSRF.
        The 'form_token' is strong shared secret stored in a user
        cookie. By requiring that every POST form to contain this
        value we're able to protect against CSRF attacks. Since this
        value is only known by the user and not by an attacker.
        If the the user does not have a `trac_form_token` cookie a new
        one is generated.
        """
        if 'trac_form_token' in req.incookie:
            return req.incookie['trac_form_token'].value
        else:
            req.outcookie['trac_form_token'] = hex_entropy(24)
            req.outcookie['trac_form_token']['path'] = req.base_path or '/'
            if self.env.secure_cookies:
                req.outcookie['trac_form_token']['secure'] = True
            req.outcookie['trac_form_token']['httponly'] = True
            return req.outcookie['trac_form_token'].value
    def _get_use_xsendfile(self, req):
        """Expose the `use_xsendfile` option as a request property."""
        return self.use_xsendfile
    # RFC7230 3.2 Header Fields
    _xsendfile_header_re = re.compile(r"[-0-9A-Za-z!#$%&'*+.^_`|~]+\Z")
    _warn_xsendfile_header = False
    def _get_xsendfile_header(self, req):
        """Return the configured X-Sendfile header name, or None if invalid.

        An invalid name is logged once per process (see
        _warn_xsendfile_header).
        """
        header = self.xsendfile_header.strip()
        if self._xsendfile_header_re.match(header):
            return to_utf8(header)
        else:
            if not self._warn_xsendfile_header:
                self._warn_xsendfile_header = True
                self.log.warn("[trac] xsendfile_header is invalid: '%s'",
                              header)
            return None
    def _pre_process_request(self, req, chosen_handler):
        """Run every IRequestFilter's pre_process_request, in order."""
        for filter_ in self.filters:
            chosen_handler = filter_.pre_process_request(req, chosen_handler)
        return chosen_handler
    def _post_process_request(self, req, *args):
        resp = args
        # `method` is optional in IRequestHandler's response. If not
        # specified, the default value is appended to response.
        if len(resp) == 3:
            resp += (None,)
        nbargs = len(resp)
        for f in reversed(self.filters):
            # As the arity of `post_process_request` has changed since
            # Trac 0.10, only filters with same arity gets passed real values.
            # Errors will call all filters with None arguments,
            # and results will not be not saved.
            extra_arg_count = arity(f.post_process_request) - 1
            if extra_arg_count == nbargs:
                resp = f.post_process_request(req, *resp)
            elif extra_arg_count == nbargs - 1:
                # IRequestFilters may modify the `method`, but the `method`
                # is forwarded when not accepted by the IRequestFilter.
                method = resp[-1]
                resp = f.post_process_request(req, *resp[:-1])
                resp += (method,)
            elif nbargs == 0:
                f.post_process_request(req, *(None,)*extra_arg_count)
        return resp
#: One-shot flag: warn_setuptools_issue() is invoked at most once per
#: process (see dispatch_request).
_warn_setuptools = False
#: Collapses runs of consecutive slashes; used to undo mod_wsgi's slash
#: squashing when reconstructing SCRIPT_NAME.
_slashes_re = re.compile(r'/+')
def dispatch_request(environ, start_response):
    """Main entry point for the Trac web interface.
    :param environ: the WSGI environment dict
    :param start_response: the WSGI callback for starting the response
    """
    global _warn_setuptools
    if _warn_setuptools is False:
        _warn_setuptools = True
        warn_setuptools_issue(out=environ.get('wsgi.errors'))
    # SCRIPT_URL is an Apache var containing the URL before URL rewriting
    # has been applied, so we can use it to reconstruct logical SCRIPT_NAME
    script_url = environ.get('SCRIPT_URL')
    if script_url is not None:
        path_info = environ.get('PATH_INFO')
        if not path_info:
            environ['SCRIPT_NAME'] = script_url
        else:
            # mod_wsgi squashes slashes in PATH_INFO (!)
            script_url = _slashes_re.sub('/', script_url)
            path_info = _slashes_re.sub('/', path_info)
            if script_url.endswith(path_info):
                environ['SCRIPT_NAME'] = script_url[:-len(path_info)]
    # If the expected configuration keys aren't found in the WSGI environment,
    # try looking them up in the process environment variables
    environ.setdefault('trac.env_path', os.getenv('TRAC_ENV'))
    environ.setdefault('trac.env_parent_dir',
                       os.getenv('TRAC_ENV_PARENT_DIR'))
    environ.setdefault('trac.env_index_template',
                       os.getenv('TRAC_ENV_INDEX_TEMPLATE'))
    environ.setdefault('trac.template_vars',
                       os.getenv('TRAC_TEMPLATE_VARS'))
    environ.setdefault('trac.locale', '')
    environ.setdefault('trac.base_url',
                       os.getenv('TRAC_BASE_URL'))
    # an empty 'trac.locale' (the default set above) selects the process's
    # default locale settings
    locale.setlocale(locale.LC_ALL, environ['trac.locale'])
    # Determine the environment
    env_path = environ.get('trac.env_path')
    if not env_path:
        env_parent_dir = environ.get('trac.env_parent_dir')
        env_paths = environ.get('trac.env_paths')
        if env_parent_dir or env_paths:
            # The first component of the path is the base name of the
            # environment
            path_info = environ.get('PATH_INFO', '').lstrip('/').split('/')
            env_name = path_info.pop(0)
            if not env_name:
                # No specific environment requested, so render an environment
                # index page
                send_project_index(environ, start_response, env_parent_dir,
                                   env_paths)
                return []
            errmsg = None
            # To make the matching patterns of request handlers work, we append
            # the environment name to the `SCRIPT_NAME` variable, and keep only
            # the remaining path in the `PATH_INFO` variable.
            script_name = environ.get('SCRIPT_NAME', '')
            try:
                # `unicode` builtin: this module is Python 2 only
                script_name = unicode(script_name, 'utf-8')
                # (as Href expects unicode parameters)
                environ['SCRIPT_NAME'] = Href(script_name)(env_name)
                environ['PATH_INFO'] = '/' + '/'.join(path_info)
                if env_parent_dir:
                    env_path = os.path.join(env_parent_dir, env_name)
                else:
                    env_path = get_environments(environ).get(env_name)
                if not env_path or not os.path.isdir(env_path):
                    errmsg = 'Environment not found'
            except UnicodeDecodeError:
                errmsg = 'Invalid URL encoding (was %r)' % script_name
            if errmsg:
                start_response('404 Not Found',
                               [('Content-Type', 'text/plain'),
                                ('Content-Length', str(len(errmsg)))])
                return [errmsg]
    if not env_path:
        raise EnvironmentError('The environment options "TRAC_ENV" or '
                               '"TRAC_ENV_PARENT_DIR" or the mod_python '
                               'options "TracEnv" or "TracEnvParentDir" are '
                               'missing. Trac requires one of these options '
                               'to locate the Trac environment(s).')
    run_once = environ['wsgi.run_once']
    env = env_error = None
    try:
        env = open_environment(env_path, use_cache=not run_once)
        if env.base_url_for_redirect:
            environ['trac.base_url'] = env.base_url
        # Web front-end type and version information
        if not hasattr(env, 'webfrontend'):
            mod_wsgi_version = environ.get('mod_wsgi.version')
            if mod_wsgi_version:
                mod_wsgi_version = (
                    "%s (WSGIProcessGroup %s WSGIApplicationGroup %s)" %
                    ('.'.join([str(x) for x in mod_wsgi_version]),
                     environ.get('mod_wsgi.process_group'),
                     environ.get('mod_wsgi.application_group') or
                     '%{GLOBAL}'))
                environ.update({
                    'trac.web.frontend': 'mod_wsgi',
                    'trac.web.version': mod_wsgi_version})
            env.webfrontend = environ.get('trac.web.frontend')
            if env.webfrontend:
                env.webfrontend_version = environ['trac.web.version']
    except Exception as e:
        # Environment errors are reported by _dispatch_request, not here
        env_error = e
    req = RequestWithSession(environ, start_response)
    translation.make_activable(lambda: req.locale, env.path if env else None)
    try:
        return _dispatch_request(req, env, env_error)
    finally:
        translation.deactivate()
        if env and not run_once:
            env.shutdown(threading._get_ident())
            # Now it's a good time to do some clean-ups
            #
            # Note: enable the '##' lines as soon as there's a suspicion
            # of memory leak due to uncollectable objects (typically
            # objects with a __del__ method caught in a cycle)
            #
            ##gc.set_debug(gc.DEBUG_UNCOLLECTABLE)
            unreachable = gc.collect()
            ##env.log.debug("%d unreachable objects found.", unreachable)
            ##uncollectable = len(gc.garbage)
            ##if uncollectable:
            ##    del gc.garbage[:]
            ##    env.log.warn("%d uncollectable objects found.", uncollectable)
def _dispatch_request(req, env, env_error):
    """Dispatch `req` through a `RequestDispatcher` and funnel errors.

    :param req: the `RequestWithSession` being served
    :param env: the opened environment, or `None` if opening failed
    :param env_error: the exception raised while opening `env`, if any
    :return: the response iterable (empty list when nothing was sent)
    """
    resp = []
    # fixup env.abs_href if `[trac] base_url` was not specified
    if env and not env.abs_href.base:
        env.abs_href = req.abs_href
    try:
        if not env and env_error:
            raise HTTPInternalError(env_error)
        try:
            dispatcher = RequestDispatcher(env)
            dispatcher.dispatch(req)
        except RequestDone as req_done:
            resp = req_done.iterable
        resp = resp or req._response or []
    except HTTPException as e:
        # expected user-level errors get a friendly error page
        _send_user_error(req, env, e)
    except Exception:
        # everything else is an internal error with traceback reporting
        send_internal_error(env, req, sys.exc_info())
    return resp
def _send_user_error(req, env, e):
    """Render an error page for an expected `HTTPException` `e`."""
    # See trac/web/api.py for the definition of HTTPException subclasses.
    if env:
        env.log.warn('[%s] %s', req.remote_addr, exception_to_unicode(e))
    data = {'title': e.title, 'type': 'TracError', 'message': e.message,
            'frames': [], 'traceback': None}
    if e.code == 403 and req.authname == 'anonymous':
        # TRANSLATOR: ... not logged in, you may want to 'do so' now (link)
        do_so = tag.a(_("do so"), href=req.href.login())
        add_notice(req, tag_("You are currently not logged in. You may want "
                             "to %(do_so)s now.", do_so=do_so))
    try:
        req.send_error(sys.exc_info(), status=e.code, env=env, data=data)
    except RequestDone:
        # send_error signals completion by raising RequestDone
        pass
def send_internal_error(env, req, exc_info):
    """Render the internal-error page for an unhandled exception.

    For TRAC_ADMIN users the page additionally includes traceback frames,
    plugin information and a pre-filled bug report pointing at the tracker
    responsible for the faulty plugin (or `default_tracker`).
    """
    if env:
        env.log.error("Internal Server Error: %r, referrer %r%s",
                      req, req.environ.get('HTTP_REFERER'),
                      exception_to_unicode(exc_info[1], traceback=True))
    message = exception_to_unicode(exc_info[1])
    traceback = get_last_traceback()
    frames, plugins, faulty_plugins, interface_custom = [], [], [], []
    th = 'http://trac-hacks.org'
    has_admin = False
    try:
        has_admin = 'TRAC_ADMIN' in req.perm
    except Exception:
        pass
    tracker = default_tracker
    tracker_args = {}
    # On MemoryError, skip the detailed report to avoid allocating more
    if has_admin and not isinstance(exc_info[1], MemoryError):
        # Collect frame and plugin information
        frames = get_frame_info(exc_info[2])
        if env:
            # `itervalues`: Python 2 only
            plugins = [p for p in get_plugin_info(env)
                       if any(c['enabled']
                              for m in p['modules'].itervalues()
                              for c in m['components'].itervalues())]
            match_plugins_to_frames(plugins, frames)
        # Identify the tracker where the bug should be reported
        faulty_plugins = [p for p in plugins if 'frame_idx' in p]
        faulty_plugins.sort(key=lambda p: p['frame_idx'])
        if faulty_plugins:
            info = faulty_plugins[0]['info']
            if 'trac' in info:
                tracker = info['trac']
            elif info.get('home_page', '').startswith(th):
                tracker = th
                plugin_name = info.get('home_page', '').rstrip('/') \
                                                       .split('/')[-1]
                tracker_args = {'component': plugin_name}
        interface_custom = Chrome(env).get_interface_customization_files()
    def get_description(_):
        # Build the wiki-formatted bug report body; `_` is either the real
        # translation function or a passthrough for the English version.
        if env and has_admin:
            sys_info = "".join("|| '''`%s`''' || `%s` ||\n"
                               % (k, v.replace('\n', '` [[br]] `'))
                               for k, v in env.get_systeminfo())
            sys_info += "|| '''`jQuery`''' || `#JQUERY#` ||\n" \
                        "|| '''`jQuery UI`''' || `#JQUERYUI#` ||\n" \
                        "|| '''`jQuery Timepicker`''' || `#JQUERYTP#` ||\n"
            enabled_plugins = "".join("|| '''`%s`''' || `%s` ||\n"
                                      % (p['name'], p['version'] or _('N/A'))
                                      for p in plugins)
            files = Chrome(env).get_interface_customization_files().items()
            interface_files = "".join("|| **%s** || %s ||\n"
                                      % (k, ", ".join("`%s`" % f for f in v))
                                      for k, v in sorted(files))
        else:
            sys_info = _("''System information not available''\n")
            enabled_plugins = _("''Plugin information not available''\n")
            interface_files = _("''Interface customization information not "
                                "available''\n")
        return _("""\
==== How to Reproduce ====
While doing a %(method)s operation on `%(path_info)s`, Trac issued an internal error.
''(please provide additional details here)''
Request parameters:
{{{
%(req_args)s
}}}
User agent: `#USER_AGENT#`
==== System Information ====
%(sys_info)s
==== Enabled Plugins ====
%(enabled_plugins)s
==== Interface Customization ====
%(interface_customization)s
==== Python Traceback ====
{{{
%(traceback)s}}}""",
           method=req.method, path_info=req.path_info,
           req_args=pformat(req.args), sys_info=sys_info,
           enabled_plugins=enabled_plugins,
           interface_customization=interface_files,
           traceback=to_unicode(traceback))
    # Generate the description once in English, once in the current locale
    description_en = get_description(lambda s, **kw: safefmt(s, kw))
    try:
        description = get_description(_)
    except Exception:
        description = description_en
    data = {'title': 'Internal Error',
            'type': 'internal', 'message': message,
            'traceback': traceback, 'frames': frames,
            'shorten_line': shorten_line, 'repr': safe_repr,
            'plugins': plugins, 'faulty_plugins': faulty_plugins,
            'interface': interface_custom,
            'tracker': tracker, 'tracker_args': tracker_args,
            'description': description, 'description_en': description_en}
    Chrome(env).add_jquery_ui(req)
    try:
        req.send_error(exc_info, status=500, env=env, data=data)
    except RequestDone:
        pass
def send_project_index(environ, start_response, parent_dir=None,
                       env_paths=None):
    """Render the environment index page listing all known projects.

    The template is 'index.html' from the trac package, unless
    'trac.env_index_template' points to a custom template file.
    """
    req = Request(environ, start_response)
    loadpaths = [pkg_resources.resource_filename('trac', 'templates')]
    if req.environ.get('trac.env_index_template'):
        env_index_template = req.environ['trac.env_index_template']
        tmpl_path, template = os.path.split(env_index_template)
        loadpaths.insert(0, tmpl_path)
    else:
        template = 'index.html'
    data = {'trac': {'version': TRAC_VERSION,
                     'time': user_time(req, format_datetime)},
            'req': req}
    # extra template variables come as comma-separated key=value pairs
    if req.environ.get('trac.template_vars'):
        for pair in req.environ['trac.template_vars'].split(','):
            key, val = pair.split('=')
            data[key] = val
    try:
        href = Href(req.base_path)
        projects = []
        for env_name, env_path in get_environments(environ).items():
            try:
                env = open_environment(env_path,
                                       use_cache=not environ['wsgi.run_once'])
                proj = {
                    'env': env,
                    'name': env.project_name,
                    'description': env.project_description,
                    'href': href(env_name)
                }
            except Exception as e:
                # broken environments are still listed, with the error as
                # their description
                proj = {'name': env_name, 'description': to_unicode(e)}
            projects.append(proj)
        # `sort(cmp)` / `cmp`: Python 2 only
        projects.sort(lambda x, y: cmp(x['name'].lower(), y['name'].lower()))
        data['projects'] = projects
        loader = TemplateLoader(loadpaths, variable_lookup='lenient',
                                default_encoding='utf-8')
        tmpl = loader.load(template)
        stream = tmpl.generate(**data)
        if template.endswith('.xml'):
            output = stream.render('xml')
            req.send(output, 'text/xml')
        else:
            output = stream.render('xhtml', doctype=DocType.XHTML_STRICT,
                                   encoding='utf-8')
            req.send(output, 'text/html')
    except RequestDone:
        pass
def get_tracignore_patterns(env_parent_dir):
    """Return the list of patterns from env_parent_dir/.tracignore or
    a default pattern of `".*"` if the file doesn't exist.
    """
    tracignore = os.path.join(env_parent_dir, '.tracignore')
    try:
        content = read_file(tracignore)
    except IOError:
        # no .tracignore file: ignore hidden directories by default
        return ['.*']
    patterns = []
    for raw_line in content.splitlines():
        stripped = raw_line.strip()
        # skip blank lines and comments
        if stripped and not stripped.startswith('#'):
            patterns.append(stripped)
    return patterns
def get_environments(environ, warn=False):
    """Retrieve canonical environment name to path mapping.
    The environments may not be all valid environments, but they are
    good candidates.

    :param environ: WSGI environment providing 'trac.env_paths' and/or
                    'trac.env_parent_dir'
    :param warn: when True, print a warning for duplicate environment names
    """
    env_paths = environ.get('trac.env_paths', [])
    env_parent_dir = environ.get('trac.env_parent_dir')
    if env_parent_dir:
        env_parent_dir = os.path.normpath(env_parent_dir)
        # `dircache`: Python 2 only; annotate() suffixes directories with '/'
        paths = dircache.listdir(env_parent_dir)[:]
        dircache.annotate(env_parent_dir, paths)
        # Filter paths that match the .tracignore patterns
        ignore_patterns = get_tracignore_patterns(env_parent_dir)
        # keep only directories (trailing '/') not matched by any pattern
        paths = [path[:-1] for path in paths if path[-1] == '/'
                 and not any(fnmatch.fnmatch(path[:-1], pattern)
                             for pattern in ignore_patterns)]
        env_paths.extend(os.path.join(env_parent_dir, project)
                         for project in paths)
    envs = {}
    for env_path in env_paths:
        env_path = os.path.normpath(env_path)
        if not os.path.isdir(env_path):
            continue
        env_name = os.path.split(env_path)[1]
        if env_name in envs:
            # first occurrence wins; duplicates are only reported
            if warn:
                print('Warning: Ignoring project "%s" since it conflicts with'
                      ' project "%s"' % (env_path, envs[env_name]),
                      file=sys.stderr)
        else:
            envs[env_name] = env_path
    return envs
| pkdevbox/trac | trac/web/main.py | Python | bsd-3-clause | 34,356 |
#!/usr/bin/python
import os, sys
import httplib2
from lxml import etree
from StringIO import StringIO
import re
import simplejson
# Table Three Scraper http://uscode.house.gov/table3/table3years.htm
# The scrapers grab the URLs for each year from 1789 to 2011, go one directory down to grab the directory, then go one directory below and grab the whole page. THIS CODE TAKES A WHILE TO RUN. It may be better to tweak just for the years you want. Also, could use some refactoring, e.g. merge some of the functions.
# This script downloads files into the current directory
# GLOBAL VARIABLES
# Specify the years you want scraped, as a set of ints (1789..2011).
years = { 1950 }
# For testing purposes, the number of sub-sub-release pages fetched per year
# can be capped: set LIMIT_SUBSUBRELEASES to True and LIMIT to the maximum
# number of pages (see subscraper).
LIMIT_SUBSUBRELEASES = False
LIMIT = 5
def mainscraper(content):
    """Parse the Table 3 years index page.

    Walks every year link inside <div class="alltable3years"> and, for
    each "yearNNNN.htm" link whose year is in the global ``years`` set,
    fetches that year's page via add_subrelease().

    :param content: HTML of the table3years.htm index page.
    :return: flat list of (url, content) tuples for every matching year.
    """
    doc = etree.parse(StringIO(content), parser=etree.HTMLParser())
    subreleases = []
    # Could also use "alltable3statutesatlargevolumes"
    for element in doc.xpath('//div[@class="alltable3years"]'):
        for d_element in element.xpath('span'):
            for m_element in d_element.xpath('a'):
                addy = m_element.attrib['href']
                # Links look like "year1950.htm"; strip to the bare number.
                year = addy.replace('year', '').replace('.htm', '')
                # ``years`` is already a set; no need to rebuild it per link.
                if int(year) in years:
                    url = "http://uscode.house.gov/table3/" + addy
                    subreleases += add_subrelease(url)
    return subreleases
def subscraper(content): #function to parse Table 3 website
doc = etree.parse(StringIO(content), parser=etree.HTMLParser())
subsubreleases = []
releases = []
for element in doc.xpath('//div[@class="yearmaster"]'): #Could also use "statutesatlargevolumemasterhead"
for d_element in element.xpath('span'):
text = d_element.xpath('a')[0].text
unitext = unicode(text).encode(sys.stdout.encoding, 'replace')
for m_element in d_element.xpath('a'):
addy = m_element.attrib['href']
url = "http://uscode.house.gov/table3/" + addy
print addy
#print text, url
#releases += [(text, url)]
subsubreleases.append( add_subsubrelease(url) )
if LIMIT_SUBSUBRELEASES and len( subsubreleases ) == LIMIT:
return subsubreleases
return subsubreleases
def add_release(url):
    """Fetch the top-level years index page and parse it.

    Exits the process with status 1 on any non-200 HTTP response.
    """
    client = httplib2.Http('/tmp/httpcache')
    resp, body = client.request(url)
    if resp.status == 200:
        return mainscraper(body)
    sys.stderr.write('Error, returned status: %s\n' % resp.status)
    sys.exit(1)  # bomb out, non-zero return indicates error
def add_subrelease(url):
    """Fetch one year page and hand it to subscraper().

    Exits the process with status 1 on any non-200 HTTP response.
    """
    client = httplib2.Http('/tmp/httpcache')
    resp, body = client.request(url)
    if resp.status == 200:
        return subscraper(body)
    sys.stderr.write('Error, returned status: %s\n' % resp.status)
    sys.exit(1)  # bomb out, non-zero return indicates error
def add_subsubrelease(url):
    """Fetch one detail page and return a (url, page_content) tuple.

    Exits the process with status 1 on any non-200 HTTP response.
    """
    client = httplib2.Http('/tmp/httpcache')
    resp, body = client.request(url)
    if resp.status == 200:
        return url, body
    sys.stderr.write('Error, returned status: %s\n' % resp.status)
    sys.exit(1)  # bomb out, non-zero return indicates error
def main():
    """Download every configured year's pages into the current directory."""
    # Could also use "/alltable3statutesatlargevolumes.html"
    pages = add_release("http://uscode.house.gov/table3/table3years.htm")
    for page_url, html_string in pages:
        # Use the last path component of the URL as the local file name.
        final_pagename = page_url.split('/')[-1]
        with open(final_pagename, 'w') as f:
            f.write(html_string)
        sys.stderr.write("Wrote %s\n" % (final_pagename))

if __name__ == '__main__':
    main()
| unitedstates/uscode | table3/table3_scraper.py | Python | cc0-1.0 | 3,822 |
import os
import command
import serverinfo
import accounts
from google.appengine.ext.webapp import template
class Drain(command.Command):
    """Admin command page for draining/undraining game servers."""

    def have_access(self):
        """Return True if the signed-in account may use the drain page."""
        account = accounts.account()
        if not account or not account.DRAIN_ACCESS_RIGHT:
            return False
        return True

    def _render(self, template_values):
        # Shared XHTML rendering for both GET and POST responses
        # (was duplicated in finish_get/finish_post).
        self.response.headers['Content-Type'] = 'application/xhtml+xml'
        path = os.path.join(os.path.dirname(__file__), 'drain.xhtml')
        self.response.out.write(template.render(path, template_values))

    def finish_get(self, template_values):
        """Render the drain page for a GET request."""
        self._render(template_values)

    def finish_post(self, selected, template_values):
        """Send the chosen drain/undrain command to each selected server.

        ``selected`` is a list of server-info dicts; per-server results
        and validation errors are reported via template_values['errors'].
        """
        drain_command = None
        if self.request.get('drain'):
            drain_command = 'drain'
        if self.request.get('undrain'):
            drain_command = 'undrain'
        errors = []
        if len(selected) == 0:
            errors.append('Must select at least one server.')
        if drain_command is None:  # idiomatic identity test (was == None)
            errors.append('Must select drain or undrain.')
        else:
            for info in selected:
                serverinfo.ServerInfo.send_command(
                    info, '{"command": "%s"}' % drain_command)
                errors.append('Server %s sent %s command.' %
                              (info['name'], drain_command))
        template_values['errors'] = errors
        self._render(template_values)
| spiffcode/hostile-takeover | stats/drain.py | Python | bsd-2-clause | 1,517 |
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Entry point for both build and try bots.
This script is invoked from XXX, usually without arguments
to package an SDK. It automatically determines whether
this SDK is for mac, win, linux.
The script inspects the following environment variables:
BUILDBOT_BUILDERNAME to determine whether the script is run locally
and whether it should upload an SDK to file storage (GSTORE)
"""
# pylint: disable=W0621
# std python includes
import datetime
import glob
import optparse
import os
import re
import sys
if sys.version_info < (2, 6, 0):
sys.stderr.write("python 2.6 or later is required run this script\n")
sys.exit(1)
# local includes
import buildbot_common
import build_projects
import build_updater
import build_version
import generate_notice
import manifest_util
import parse_dsc
import verify_filelist
from build_paths import SCRIPT_DIR, SDK_SRC_DIR, SRC_DIR, NACL_DIR, OUT_DIR
from build_paths import NACLPORTS_DIR, GSTORE, GONACL_APPENGINE_SRC_DIR
# Add SDK make tools scripts to the python path.
sys.path.append(os.path.join(SDK_SRC_DIR, 'tools'))
sys.path.append(os.path.join(NACL_DIR, 'build'))
import getos
import oshelpers
# Derived paths inside the NaCl checkout.
BUILD_DIR = os.path.join(NACL_DIR, 'build')
NACL_TOOLCHAIN_DIR = os.path.join(NACL_DIR, 'toolchain')
NACL_TOOLCHAINTARS_DIR = os.path.join(NACL_TOOLCHAIN_DIR, '.tars')
# Helper scripts used to package and extract toolchain tarballs.
CYGTAR = os.path.join(BUILD_DIR, 'cygtar.py')
PKGVER = os.path.join(BUILD_DIR, 'package_version', 'package_version.py')
# Pinned naclports source location and revision (see BuildStepSyncNaClPorts).
NACLPORTS_URL = 'https://naclports.googlecode.com/svn/trunk/src'
NACLPORTS_REV = 1293
GYPBUILD_DIR = 'gypbuild'
# Parsed command-line options; populated by main() before any build step runs.
options = None
# Map of: ToolchainName: (PackageName, SDKDir).
# SDKDir is a template: '%(platform)s' is filled with the host platform.
TOOLCHAIN_PACKAGE_MAP = {
  'newlib': ('nacl_x86_newlib', '%(platform)s_x86_newlib'),
  'bionic': ('nacl_arm_bionic', '%(platform)s_arm_bionic'),
  'arm': ('nacl_arm_newlib', '%(platform)s_arm_newlib'),
  'glibc': ('nacl_x86_glibc', '%(platform)s_x86_glibc'),
  'pnacl': ('pnacl_newlib', '%(platform)s_pnacl')
  }
def GetToolchainNaClInclude(tcname, tcpath, arch):
  """Return the NaCl include directory for the given toolchain and arch."""
  if arch == 'x86':
    # pnacl keeps headers under sdk/; the x86 tools under x86_64-nacl/.
    subdir = ('sdk', 'include') if tcname == 'pnacl' \
        else ('x86_64-nacl', 'include')
    return os.path.join(tcpath, *subdir)
  if arch == 'arm':
    return os.path.join(tcpath, 'arm-nacl', 'include')
  buildbot_common.ErrorExit('Unknown architecture: %s' % arch)
def GetGypGenDir(xarch):
  """Return the gyp 'gen' output directory for the given target arch."""
  # ARM builds live in a separate '-arm' suffixed out directory.
  suffix = '-arm' if xarch == 'arm' else ''
  return os.path.join(OUT_DIR, GYPBUILD_DIR + suffix, 'Release', 'gen')
def GetGypBuiltLib(tcname, xarch=None):
  """Return the gyp-built library directory for a toolchain/arch."""
  # The pnacl libraries are built under the 'pnacl_newlib' name.
  effective_tc = 'pnacl_newlib' if tcname == 'pnacl' else tcname
  arch_part = xarch or ''
  return os.path.join(GetGypGenDir(arch_part), 'tc_' + effective_tc,
                      'lib' + arch_part)
def GetToolchainNaClLib(tcname, tcpath, xarch):
  """Return the NaCl library directory for the given toolchain and arch.

  Falls through (returning None) for unrecognized combinations, matching
  the original behavior.
  """
  if tcname == 'pnacl':
    return os.path.join(tcpath, 'sdk', 'lib')
  lib_subdirs = {
      '32': ('x86_64-nacl', 'lib32'),
      '64': ('x86_64-nacl', 'lib'),
      'arm': ('arm-nacl', 'lib'),
  }
  if xarch in lib_subdirs:
    return os.path.join(tcpath, *lib_subdirs[xarch])
def GetToolchainDirName(tcname, xarch):
  """Return the SDK toolchain directory name, e.g. 'linux_x86_newlib'."""
  platform = getos.GetPlatform()
  if tcname == 'pnacl':
    return '%s_%s' % (platform, tcname)
  arch_part = 'arm' if xarch == 'arm' else 'x86'
  return '%s_%s_%s' % (platform, arch_part, tcname)
def GetGypToolchainLib(tcname, xarch):
  """Return the library directory inside the gyp-generated SDK layout."""
  # ARM packages are keyed by arch name rather than by toolchain name.
  map_key = xarch if xarch == 'arm' else tcname
  tcpath = os.path.join(GetGypGenDir(xarch), 'sdk',
                        '%s_x86' % getos.GetPlatform(),
                        TOOLCHAIN_PACKAGE_MAP[map_key][0])
  return GetToolchainNaClLib(tcname, tcpath, xarch)
def GetOutputToolchainLib(pepperdir, tcname, xarch):
  """Return the destination library directory inside the output SDK."""
  toolchain_dir = os.path.join(pepperdir, 'toolchain',
                               GetToolchainDirName(tcname, xarch))
  return GetToolchainNaClLib(tcname, toolchain_dir, xarch)
def GetPNaClNativeLib(tcpath, arch):
  """Return the native (non-bitcode) pnacl library dir for ``arch``."""
  valid_arches = ('arm', 'x86-32', 'x86-64')
  if arch not in valid_arches:
    buildbot_common.ErrorExit('Unknown architecture %s.' % arch)
  return os.path.join(tcpath, 'lib-' + arch)
def BuildStepDownloadToolchains(toolchains):
  """Sync the pinned NaCl toolchain tarballs via package_version.py."""
  buildbot_common.BuildStep('Running package_version.py')
  cmd = [sys.executable, PKGVER, '--exclude', 'arm_trusted']
  if 'bionic' in toolchains:
    # bionic is not synced by default; request it explicitly.
    build_platform = '%s_x86' % getos.GetPlatform()
    cmd += ['--append', os.path.join(build_platform, 'nacl_arm_bionic')]
  cmd.append('sync')
  buildbot_common.Run(cmd, cwd=NACL_DIR)
def BuildStepCleanPepperDirs(pepperdir, pepperdir_old):
  """Delete both pepper output dirs and recreate an empty ``pepperdir``."""
  buildbot_common.BuildStep('Clean Pepper Dirs')
  for stale_dir in (pepperdir_old, pepperdir):
    buildbot_common.RemoveDir(stale_dir)
  buildbot_common.MakeDir(pepperdir)
def BuildStepMakePepperDirs(pepperdir, subdirs):
  """Create each of ``subdirs`` under ``pepperdir``."""
  for name in subdirs:
    buildbot_common.MakeDir(os.path.join(pepperdir, name))
# Documentation/license files copied verbatim into the SDK root
# (paths relative to SDK_SRC_DIR; see BuildStepCopyTextFiles).
TEXT_FILES = [
  'AUTHORS',
  'COPYING',
  'LICENSE',
  'README.Makefiles',
  'getting_started/README',
]
def BuildStepCopyTextFiles(pepperdir, pepper_ver, chrome_revision,
                           nacl_revision):
  """Copy the top-level text files into the SDK and expand README macros.

  Replaces the ${VERSION}, ${CHROME_REVISION}, ${CHROME_COMMIT_POSITION},
  ${NACL_REVISION} and ${DATE} placeholders in the README.
  """
  buildbot_common.BuildStep('Add Text Files')
  InstallFiles(SDK_SRC_DIR, pepperdir, TEXT_FILES)
  # Replace a few placeholders in README. Use context managers so the
  # file handles are closed deterministically (the originals leaked).
  with open(os.path.join(SDK_SRC_DIR, 'README')) as readme_in:
    readme_text = readme_in.read()
  readme_text = readme_text.replace('${VERSION}', pepper_ver)
  readme_text = readme_text.replace('${CHROME_REVISION}', chrome_revision)
  readme_text = readme_text.replace('${CHROME_COMMIT_POSITION}',
                                    build_version.ChromeCommitPosition())
  readme_text = readme_text.replace('${NACL_REVISION}', nacl_revision)
  # Year/Month/Day Hour:Minute:Second
  time_format = '%Y/%m/%d %H:%M:%S'
  readme_text = readme_text.replace('${DATE}',
      datetime.datetime.now().strftime(time_format))
  with open(os.path.join(pepperdir, 'README'), 'w') as readme_out:
    readme_out.write(readme_text)
def BuildStepUntarToolchains(pepperdir, toolchains):
  """Extract the requested toolchain packages into pepperdir/toolchain.

  Packages are extracted into OUT_DIR/tc_temp via package_version.py and
  then moved to their final per-platform directory names.
  """
  buildbot_common.BuildStep('Untar Toolchains')
  platform = getos.GetPlatform()
  build_platform = '%s_x86' % platform
  tmpdir = os.path.join(OUT_DIR, 'tc_temp')
  # Always start from a clean scratch directory.
  buildbot_common.RemoveDir(tmpdir)
  buildbot_common.MakeDir(tmpdir)
  # Create a list of extract packages tuples, the first part should be
  # "$PACKAGE_TARGET/$PACKAGE". The second part should be the destination
  # directory relative to pepperdir/toolchain.
  extract_packages = []
  for toolchain in toolchains:
    # Toolchains without a package mapping (e.g. 'host') are skipped.
    toolchain_map = TOOLCHAIN_PACKAGE_MAP.get(toolchain, None)
    if toolchain_map:
      package_name, tcname = toolchain_map
      package_tuple = (os.path.join(build_platform, package_name),
                       tcname % {'platform': platform})
      extract_packages.append(package_tuple)
  if extract_packages:
    # Extract all of the packages into the temp directory.
    package_names = [package_tuple[0] for package_tuple in extract_packages]
    buildbot_common.Run([sys.executable, PKGVER,
                           '--packages', ','.join(package_names),
                           '--tar-dir', NACL_TOOLCHAINTARS_DIR,
                           '--dest-dir', tmpdir,
                           'extract'])
    # Move all the packages we extracted to the correct destination.
    for package_name, dest_dir in extract_packages:
      full_src_dir = os.path.join(tmpdir, package_name)
      full_dst_dir = os.path.join(pepperdir, 'toolchain', dest_dir)
      buildbot_common.Move(full_src_dir, full_dst_dir)
  # Cleanup the temporary directory we are no longer using.
  buildbot_common.RemoveDir(tmpdir)
# List of toolchain headers to install.
# Source is relative to top of Chromium tree, destination is relative
# to the toolchain header directory.
# A destination ending in '/' keeps the source basename (see InstallFiles).
NACL_HEADER_MAP = {
  'newlib': [
      ('native_client/src/include/nacl/nacl_exception.h', 'nacl/'),
      ('native_client/src/include/nacl/nacl_minidump.h', 'nacl/'),
      ('native_client/src/untrusted/irt/irt.h', ''),
      ('native_client/src/untrusted/irt/irt_dev.h', ''),
      ('native_client/src/untrusted/nacl/nacl_dyncode.h', 'nacl/'),
      ('native_client/src/untrusted/nacl/nacl_startup.h', 'nacl/'),
      ('native_client/src/untrusted/pthread/pthread.h', ''),
      ('native_client/src/untrusted/pthread/semaphore.h', ''),
      ('native_client/src/untrusted/valgrind/dynamic_annotations.h', 'nacl/'),
      ('ppapi/nacl_irt/public/irt_ppapi.h', ''),
  ],
  'glibc': [
      ('native_client/src/include/nacl/nacl_exception.h', 'nacl/'),
      ('native_client/src/include/nacl/nacl_minidump.h', 'nacl/'),
      ('native_client/src/untrusted/irt/irt.h', ''),
      ('native_client/src/untrusted/irt/irt_dev.h', ''),
      ('native_client/src/untrusted/nacl/nacl_dyncode.h', 'nacl/'),
      ('native_client/src/untrusted/nacl/nacl_startup.h', 'nacl/'),
      ('native_client/src/untrusted/valgrind/dynamic_annotations.h', 'nacl/'),
      ('ppapi/nacl_irt/public/irt_ppapi.h', ''),
  ],
  # Host builds install no NaCl-specific headers.
  'host': []
}
def InstallFiles(src_root, dest_root, file_list):
  """Copy a set of files from src_root to dest_root according
  to the given mapping. This allows files to be copied from
  to a location in the destination tree that is different to the
  location in the source tree.

  If the destination mapping ends with a '/' then the destination
  basename is inherited from the the source file.

  Wildcards can be used in the source list but it is not recommended
  as this can end up adding things to the SDK unintentionally.
  """
  for file_spec in file_list:
    # The list of files to install can be a simple list of
    # strings or a list of pairs, where each pair corresponds
    # to a mapping from source to destination names.
    if isinstance(file_spec, str):  # idiomatic type test (was: type() == str)
      src_file = dest_file = file_spec
    else:
      src_file, dest_file = file_spec
    src_file = os.path.join(src_root, src_file)
    # Expand sources files using glob.
    sources = glob.glob(src_file)
    if not sources:
      sources = [src_file]
    if len(sources) > 1 and not dest_file.endswith('/'):
      buildbot_common.ErrorExit("Target file must end in '/' when "
                                "using globbing to install multiple files")
    for source in sources:
      if dest_file.endswith('/'):
        dest = os.path.join(dest_file, os.path.basename(source))
      else:
        dest = dest_file
      dest = os.path.join(dest_root, dest)
      if not os.path.isdir(os.path.dirname(dest)):
        buildbot_common.MakeDir(os.path.dirname(dest))
      buildbot_common.CopyFile(source, dest)
def InstallNaClHeaders(tc_dst_inc, tc_name):
  """Copies NaCl headers to expected locations in the toolchain."""
  # The ARM toolchain ships the same headers as x86 newlib.
  mapping_key = 'newlib' if tc_name == 'arm' else tc_name
  InstallFiles(SRC_DIR, tc_dst_inc, NACL_HEADER_MAP[mapping_key])
def MakeNinjaRelPath(path):
  """Return ``path`` joined under OUT_DIR expressed relative to SRC_DIR."""
  rel_out = os.path.relpath(OUT_DIR, SRC_DIR)
  return os.path.join(rel_out, path)
# Libraries installed into each toolchain's NaCl lib directory by
# GypNinjaInstall (copied from the gyp-built lib dirs).
TOOLCHAIN_LIBS = {
  'bionic' : [
    'libminidump_generator.a',
    'libnacl_dyncode.a',
    'libnacl_exception.a',
    'libnacl_list_mappings.a',
    'libppapi.a',
  ],
  'newlib' : [
    'crti.o',
    'crtn.o',
    'libminidump_generator.a',
    'libnacl.a',
    'libnacl_dyncode.a',
    'libnacl_exception.a',
    'libnacl_list_mappings.a',
    'libnosys.a',
    'libppapi.a',
    'libppapi_stub.a',
    'libpthread.a',
  ],
  # glibc additionally ships shared (.so) variants.
  'glibc': [
    'libminidump_generator.a',
    'libminidump_generator.so',
    'libnacl.a',
    'libnacl_dyncode.a',
    'libnacl_dyncode.so',
    'libnacl_exception.a',
    'libnacl_exception.so',
    'libnacl_list_mappings.a',
    'libnacl_list_mappings.so',
    'libppapi.a',
    'libppapi.so',
    'libppapi_stub.a',
  ],
  'pnacl': [
    'libminidump_generator.a',
    'libnacl.a',
    'libnacl_dyncode.a',
    'libnacl_exception.a',
    'libnacl_list_mappings.a',
    'libnosys.a',
    'libppapi.a',
    'libppapi_stub.a',
    'libpthread.a',
  ]
}
def GypNinjaInstall(pepperdir, toolchains):
  """Copy gyp/ninja build outputs into the SDK layout.

  Installs host tools (sel_ldr, ncval, irt_core, breakpad tools, ...)
  into pepperdir/tools and the per-toolchain libraries into each
  toolchain's lib directory.
  """
  build_dir = GYPBUILD_DIR
  ninja_out_dir = os.path.join(OUT_DIR, build_dir, 'Release')
  # Pairs of [built-name, installed-name].
  tools_files = [
    ['sel_ldr', 'sel_ldr_x86_32'],
    ['ncval_new', 'ncval'],
    ['irt_core_newlib_x32.nexe', 'irt_core_x86_32.nexe'],
    ['irt_core_newlib_x64.nexe', 'irt_core_x86_64.nexe'],
  ]
  platform = getos.GetPlatform()
  # TODO(binji): dump_syms doesn't currently build on Windows. See
  # http://crbug.com/245456
  if platform != 'win':
    tools_files += [
      ['dump_syms', 'dump_syms'],
      ['minidump_dump', 'minidump_dump'],
      ['minidump_stackwalk', 'minidump_stackwalk']
    ]
  tools_files.append(['sel_ldr64', 'sel_ldr_x86_64'])
  if platform == 'linux':
    tools_files.append(['nacl_helper_bootstrap',
                        'nacl_helper_bootstrap_x86_32'])
    tools_files.append(['nacl_helper_bootstrap64',
                        'nacl_helper_bootstrap_x86_64'])
  buildbot_common.MakeDir(os.path.join(pepperdir, 'tools'))
  # Add .exe extensions to all windows tools
  for pair in tools_files:
    if platform == 'win' and not pair[0].endswith('.nexe'):
      pair[0] += '.exe'
      pair[1] += '.exe'
  InstallFiles(ninja_out_dir, os.path.join(pepperdir, 'tools'), tools_files)
  # Add ARM binaries
  if platform == 'linux' and not options.no_arm_trusted:
    tools_files = [
      # Fix: this irt_core entry was duplicated; install it only once.
      ['irt_core_newlib_arm.nexe', 'irt_core_arm.nexe'],
      ['sel_ldr', 'sel_ldr_arm'],
      ['nacl_helper_bootstrap', 'nacl_helper_bootstrap_arm']
    ]
    ninja_out_dir = os.path.join(OUT_DIR, build_dir + '-arm', 'Release')
    InstallFiles(ninja_out_dir, os.path.join(pepperdir, 'tools'), tools_files)
  for tc in set(toolchains) & set(['newlib', 'glibc', 'pnacl']):
    if tc == 'pnacl':
      xarches = (None,)
    else:
      xarches = ('arm', '32', '64')
    for xarch in xarches:
      # There is no glibc ARM toolchain.
      if tc == 'glibc' and xarch == 'arm':
        continue
      src_dir = GetGypBuiltLib(tc, xarch)
      dst_dir = GetOutputToolchainLib(pepperdir, tc, xarch)
      InstallFiles(src_dir, dst_dir, TOOLCHAIN_LIBS[tc])
      # Copy ARM newlib components to bionic
      if tc == 'newlib' and xarch == 'arm' and 'bionic' in toolchains:
        bionic_dir = GetOutputToolchainLib(pepperdir, 'bionic', xarch)
        InstallFiles(src_dir, bionic_dir, TOOLCHAIN_LIBS['bionic'])
      if tc != 'pnacl':
        src_dir = GetGypToolchainLib(tc, xarch)
        InstallFiles(src_dir, dst_dir, ['crt1.o'])
def GypNinjaBuild_NaCl(rel_out_dir):
  """Build the core NaCl SDK binaries (sel_ldr, irt_core, ncval, ...).

  Builds ia32 and ARM in their respective out dirs, plus a separate
  64-bit build whose sel_ldr (and, on Linux, bootstrap helper) is copied
  back into the main out dir under a '64' suffix.
  """
  gyp_py = os.path.join(NACL_DIR, 'build', 'gyp_nacl')
  nacl_core_sdk_gyp = os.path.join(NACL_DIR, 'build', 'nacl_core_sdk.gyp')
  all_gyp = os.path.join(NACL_DIR, 'build', 'all.gyp')
  out_dir = MakeNinjaRelPath(rel_out_dir)
  out_dir_arm = MakeNinjaRelPath(rel_out_dir + '-arm')
  GypNinjaBuild('ia32', gyp_py, nacl_core_sdk_gyp, 'nacl_core_sdk', out_dir)
  GypNinjaBuild('arm', gyp_py, nacl_core_sdk_gyp, 'nacl_core_sdk', out_dir_arm)
  GypNinjaBuild('ia32', gyp_py, all_gyp, 'ncval_new', out_dir)
  platform = getos.GetPlatform()
  if platform == 'win':
    # On Windows the 64-bit sel_ldr is a target of the main build.
    NinjaBuild('sel_ldr64', out_dir)
  else:
    out_dir_64 = MakeNinjaRelPath(rel_out_dir + '-64')
    GypNinjaBuild('x64', gyp_py, nacl_core_sdk_gyp, 'sel_ldr', out_dir_64)
    # We only need sel_ldr from the 64-bit out directory.
    # sel_ldr needs to be renamed, so we'll call it sel_ldr64.
    files_to_copy = [('sel_ldr', 'sel_ldr64')]
    if platform == 'linux':
      files_to_copy.append(('nacl_helper_bootstrap', 'nacl_helper_bootstrap64'))
    for src, dst in files_to_copy:
      buildbot_common.CopyFile(
          os.path.join(SRC_DIR, out_dir_64, 'Release', src),
          os.path.join(SRC_DIR, out_dir, 'Release', dst))
def GypNinjaBuild_Breakpad(rel_out_dir):
  """Build the breakpad host tools.

  TODO(binji): dump_syms doesn't currently build on Windows. See
  http://crbug.com/245456
  """
  if getos.GetPlatform() == 'win':
    return
  gyp_py = os.path.join(SRC_DIR, 'build', 'gyp_chromium')
  gyp_file = os.path.join(SRC_DIR, 'breakpad', 'breakpad.gyp')
  targets = ['dump_syms', 'minidump_dump', 'minidump_stackwalk']
  GypNinjaBuild('ia32', gyp_py, gyp_file, targets,
                MakeNinjaRelPath(rel_out_dir))
def GypNinjaBuild_PPAPI(arch, rel_out_dir):
  """Build the ppapi_lib target for ``arch``."""
  gyp_py = os.path.join(SRC_DIR, 'build', 'gyp_chromium')
  gyp_file = os.path.join(SRC_DIR, 'ppapi', 'native_client',
                          'native_client.gyp')
  GypNinjaBuild(arch, gyp_py, gyp_file, 'ppapi_lib',
                MakeNinjaRelPath(rel_out_dir))
def GypNinjaBuild_Pnacl(rel_out_dir, target_arch):
  """Build the pnacl IRT shim ('aot' target) for ``target_arch``.

  TODO(binji): This will build the pnacl_irt_shim twice; once as part of
  the Chromium build, and once here. When we move more of the SDK build
  process to gyp, we can remove this.
  """
  gyp_py = os.path.join(SRC_DIR, 'build', 'gyp_chromium')
  gyp_file = os.path.join(SRC_DIR, 'ppapi', 'native_client', 'src',
                          'untrusted', 'pnacl_irt_shim', 'pnacl_irt_shim.gyp')
  GypNinjaBuild(target_arch, gyp_py, gyp_file, ['aot'],
                MakeNinjaRelPath(rel_out_dir), False)
def GypNinjaBuild(arch, gyp_py_script, gyp_file, targets,
                  out_dir, force_arm_gcc=True):
  """Run gyp to generate ninja files, then build ``targets`` with ninja.

  arch: gyp target_arch ('ia32', 'x64', 'arm'), or falsy to omit.
  gyp_py_script: path of the gyp wrapper script to execute.
  gyp_file: the .gyp file to process.
  targets: target name, or list of names, forwarded to NinjaBuild.
  out_dir: ninja output directory (relative to SRC_DIR).
  force_arm_gcc: for arm builds, also define nacl_enable_arm_gcc=1.
  """
  gyp_env = dict(os.environ)
  gyp_env['GYP_GENERATORS'] = 'ninja'
  gyp_defines = []
  if options.mac_sdk:
    gyp_defines.append('mac_sdk=%s' % options.mac_sdk)
  if arch:
    gyp_defines.append('target_arch=%s' % arch)
    if arch == 'arm':
      # Cross-compile with the hard-float ARM toolchain on Linux hosts.
      if getos.GetPlatform() == 'linux':
        gyp_env['CC'] = 'arm-linux-gnueabihf-gcc'
        gyp_env['CXX'] = 'arm-linux-gnueabihf-g++'
        gyp_env['AR'] = 'arm-linux-gnueabihf-ar'
        gyp_env['AS'] = 'arm-linux-gnueabihf-as'
        gyp_env['CC_host'] = 'cc'
        gyp_env['CXX_host'] = 'c++'
      gyp_defines += ['armv7=1', 'arm_thumb=0', 'arm_neon=1',
                      'arm_float_abi=hard']
      if force_arm_gcc:
        gyp_defines.append('nacl_enable_arm_gcc=1')
      if options.no_arm_trusted:
        gyp_defines.append('disable_cross_trusted=1')
  if getos.GetPlatform() == 'mac':
    gyp_defines.append('clang=1')
  gyp_env['GYP_DEFINES'] = ' '.join(gyp_defines)
  # Echo the effective gyp settings into the build log for debugging.
  for key in ['GYP_GENERATORS', 'GYP_DEFINES', 'CC']:
    value = gyp_env.get(key)
    if value is not None:
      print '%s="%s"' % (key, value)
  gyp_generator_flags = ['-G', 'output_dir=%s' % (out_dir,)]
  gyp_depth = '--depth=.'
  buildbot_common.Run(
      [sys.executable, gyp_py_script, gyp_file, gyp_depth] + \
      gyp_generator_flags,
      cwd=SRC_DIR,
      env=gyp_env)
  NinjaBuild(targets, out_dir)
def NinjaBuild(targets, out_dir):
  """Run ninja for ``targets`` in ``out_dir``'s Release configuration.

  ``targets`` may be a single target name or a list of names.
  """
  # isinstance is the idiomatic type test (was: type(targets) is not list).
  if not isinstance(targets, list):
    targets = [targets]
  out_config_dir = os.path.join(out_dir, 'Release')
  buildbot_common.Run(['ninja', '-C', out_config_dir] + targets, cwd=SRC_DIR)
def BuildStepBuildToolchains(pepperdir, toolchains):
  """Build all SDK binaries/libraries with gyp+ninja and install them.

  Also copies the freshly built pnacl_irt_shim into the pnacl toolchain
  and installs the NaCl headers into every requested toolchain.
  """
  buildbot_common.BuildStep('SDK Items')
  GypNinjaBuild_NaCl(GYPBUILD_DIR)
  GypNinjaBuild_Breakpad(GYPBUILD_DIR)
  platform = getos.GetPlatform()
  # Per-toolchain destination directories inside the SDK.
  newlibdir = os.path.join(pepperdir, 'toolchain', platform + '_x86_newlib')
  glibcdir = os.path.join(pepperdir, 'toolchain', platform + '_x86_glibc')
  armdir = os.path.join(pepperdir, 'toolchain', platform + '_arm_newlib')
  pnacldir = os.path.join(pepperdir, 'toolchain', platform + '_pnacl')
  if set(toolchains) & set(['glibc', 'newlib']):
    GypNinjaBuild_PPAPI('ia32', GYPBUILD_DIR)
  if 'arm' in toolchains:
    GypNinjaBuild_PPAPI('arm', GYPBUILD_DIR + '-arm')
  GypNinjaInstall(pepperdir, toolchains)
  if 'newlib' in toolchains:
    InstallNaClHeaders(GetToolchainNaClInclude('newlib', newlibdir, 'x86'),
                       'newlib')
  if 'glibc' in toolchains:
    InstallNaClHeaders(GetToolchainNaClInclude('glibc', glibcdir, 'x86'),
                       'glibc')
  if 'arm' in toolchains:
    InstallNaClHeaders(GetToolchainNaClInclude('newlib', armdir, 'arm'),
                       'arm')
  if 'pnacl' in toolchains:
    # NOTE: For ia32, gyp builds both x86-32 and x86-64 by default.
    for arch in ('ia32', 'arm'):
      # Fill in the latest native pnacl shim library from the chrome build.
      build_dir = GYPBUILD_DIR + '-pnacl-' + arch
      GypNinjaBuild_Pnacl(build_dir, arch)
      if arch == 'ia32':
        nacl_arches = ['x86-32', 'x86-64']
      elif arch == 'arm':
        nacl_arches = ['arm']
      else:
        buildbot_common.ErrorExit('Unknown architecture: %s' % arch)
      for nacl_arch in nacl_arches:
        release_build_dir = os.path.join(OUT_DIR, build_dir, 'Release',
                                         'gen', 'tc_pnacl_translate',
                                         'lib-' + nacl_arch)
        buildbot_common.CopyFile(
            os.path.join(release_build_dir, 'libpnacl_irt_shim.a'),
            GetPNaClNativeLib(pnacldir, nacl_arch))
    # pnacl uses the newlib header set.
    InstallNaClHeaders(GetToolchainNaClInclude('pnacl', pnacldir, 'x86'),
                       'newlib')
def MakeDirectoryOrClobber(pepperdir, dirname, clobber):
  """Ensure pepperdir/dirname exists; wipe it first when ``clobber``."""
  target = os.path.join(pepperdir, dirname)
  if clobber:
    buildbot_common.RemoveDir(target)
  buildbot_common.MakeDir(target)
  return target
def BuildStepUpdateHelpers(pepperdir, clobber):
  """Refresh the project helper scripts shipped inside the SDK."""
  buildbot_common.BuildStep('Update project helpers')
  build_projects.UpdateHelpers(pepperdir, clobber=clobber)
def BuildStepUpdateUserProjects(pepperdir, toolchains,
                                build_experimental, clobber):
  """Copy examples and libraries into the SDK, filtered by toolchain."""
  buildbot_common.BuildStep('Update examples and libraries')
  filters = {}
  if not build_experimental:
    filters['EXPERIMENTAL'] = False
  if toolchains:
    # Work on a copy: 'arm' is not a valid toolchain for build_projects,
    # and 'host' is spelled as the platform name there.
    toolchains = list(toolchains)
    if 'arm' in toolchains:
      toolchains.remove('arm')
    if 'host' in toolchains:
      toolchains.remove('host')
      toolchains.append(getos.GetPlatform())
    filters['TOOLS'] = toolchains
  # Update examples and libraries
  filters['DEST'] = ['getting_started', 'examples/api', 'examples/demo',
                     'examples/tutorial', 'src']
  tree = parse_dsc.LoadProjectTree(SDK_SRC_DIR, include=filters)
  build_projects.UpdateProjects(pepperdir, tree, clobber=clobber,
                                toolchains=toolchains)
def BuildStepMakeAll(pepperdir, directory, step_name,
                     deps=True, clean=False, config='Debug', args=None):
  """Run one named build step over every project under ``directory``."""
  buildbot_common.BuildStep(step_name)
  build_projects.BuildProjectsBranch(pepperdir, directory, clean,
                                     deps, config, args)
def BuildStepBuildLibraries(pepperdir, directory):
  """Build the SDK libraries in both Debug and Release configurations."""
  for config in ('Debug', 'Release'):
    BuildStepMakeAll(pepperdir, directory, 'Build Libraries ' + config,
                     clean=True, config=config)
  # Cleanup .pyc file generated while building libraries. Without
  # this we would end up shipping the pyc in the SDK tarball.
  buildbot_common.RemoveFile(os.path.join(pepperdir, 'tools', '*.pyc'))
def GenerateNotice(fileroot, output_filename='NOTICE', extra_files=None):
  """Collect all license files under ``fileroot`` into one NOTICE file.

  fileroot: directory tree scanned for LICENSE/COPYING/COPYRIGHT files.
  output_filename: output path; relative paths are placed under fileroot.
  extra_files: optional extra paths (relative to fileroot) to include for
      packages without a standalone license file.
  """
  # Look for LICENSE files
  license_filenames_re = re.compile('LICENSE|COPYING|COPYRIGHT')
  license_files = []
  for root, _, files in os.walk(fileroot):
    for filename in files:
      if license_filenames_re.match(filename):
        path = os.path.join(root, filename)
        license_files.append(path)
  if extra_files:
    license_files += [os.path.join(fileroot, f) for f in extra_files]
  # Echo the collected files into the build log (Python 2 print statement).
  print '\n'.join(license_files)
  if not os.path.isabs(output_filename):
    output_filename = os.path.join(fileroot, output_filename)
  generate_notice.Generate(output_filename, fileroot, license_files)
def BuildStepVerifyFilelist(pepperdir):
  """Check the built SDK against the canonical sdk_files.list manifest.

  Aborts the build (via ErrorExit) on parse or verification failure.
  Note: uses Python 2 'except X, e' syntax, matching the rest of the file.
  """
  buildbot_common.BuildStep('Verify SDK Files')
  file_list_path = os.path.join(SCRIPT_DIR, 'sdk_files.list')
  try:
    verify_filelist.Verify(file_list_path, pepperdir)
    print 'OK'
  except verify_filelist.ParseException, e:
    buildbot_common.ErrorExit('Parsing sdk_files.list failed:\n\n%s' % e)
  except verify_filelist.VerifyException, e:
    file_list_rel = os.path.relpath(file_list_path)
    verify_filelist_py = os.path.splitext(verify_filelist.__file__)[0] + '.py'
    verify_filelist_py = os.path.relpath(verify_filelist_py)
    pepperdir_rel = os.path.relpath(pepperdir)
    msg = """\
SDK verification failed:
%s
Add/remove files from %s to fix.
Run:
  ./%s %s %s
to test.""" % (e, file_list_rel, verify_filelist_py, file_list_rel,
               pepperdir_rel)
    buildbot_common.ErrorExit(msg)
def BuildStepTarBundle(pepper_ver, tarfile):
  """Create the main SDK tarball (pepper_<ver>) at ``tarfile``."""
  buildbot_common.BuildStep('Tar Pepper Bundle')
  buildbot_common.MakeDir(os.path.dirname(tarfile))
  cmd = [sys.executable, CYGTAR, '-C', OUT_DIR, '-cjf', tarfile,
         'pepper_' + pepper_ver]
  buildbot_common.Run(cmd, cwd=NACL_DIR)
def GetManifestBundle(pepper_ver, chrome_revision, nacl_revision, tarfile,
                      archive_url):
  """Build a manifest_util.Bundle describing the given SDK tarball."""
  with open(tarfile, 'rb') as stream:
    sha1, size = manifest_util.DownloadAndComputeHash(stream)
  sdk_archive = manifest_util.Archive(manifest_util.GetHostOS())
  sdk_archive.url = archive_url
  sdk_archive.size = size
  sdk_archive.checksum = sha1
  bundle_name = 'pepper_' + pepper_ver
  bundle = manifest_util.Bundle(bundle_name)
  bundle.revision = int(chrome_revision)
  bundle.repath = bundle_name
  bundle.version = int(pepper_ver)
  bundle.description = (
      'Chrome %s bundle. Chrome revision: %s. NaCl revision: %s' % (
          pepper_ver, chrome_revision, nacl_revision))
  bundle.stability = 'dev'
  bundle.recommended = 'no'
  bundle.archives = [sdk_archive]
  return bundle
def BuildStepArchiveBundle(name, pepper_ver, chrome_revision, nacl_revision,
                           tarfile):
  """Upload ``tarfile`` plus its manifest snippet to cloud storage."""
  buildbot_common.BuildStep('Archive %s' % name)
  bucket_path = 'nativeclient-mirror/nacl/nacl_sdk/%s' % (
      build_version.ChromeVersion(),)
  tarname = os.path.basename(tarfile)
  buildbot_common.Archive(tarname, bucket_path, os.path.dirname(tarfile))
  # generate "manifest snippet" for this archive.
  archive_url = GSTORE + 'nacl_sdk/%s/%s' % (
      build_version.ChromeVersion(), tarname)
  bundle = GetManifestBundle(pepper_ver, chrome_revision, nacl_revision,
                             tarfile, archive_url)
  snippet_path = os.path.join(OUT_DIR, tarname + '.json')
  with open(snippet_path, 'wb') as snippet_stream:
    snippet_stream.write(bundle.GetDataAsString())
  buildbot_common.Archive(tarname + '.json', bucket_path, OUT_DIR,
                          step_link=False)
def BuildStepArchiveSDKTools():
  """Build and upload sdk_tools.tgz / nacl_sdk.zip (linux multi bot only)."""
  # Only push up sdk_tools.tgz and nacl_sdk.zip on the linux buildbot.
  if os.getenv('BUILDBOT_BUILDERNAME', '') != 'linux-sdk-multi':
    return
  buildbot_common.BuildStep('Build SDK Tools')
  build_updater.BuildUpdater(OUT_DIR)
  buildbot_common.BuildStep('Archive SDK Tools')
  bucket_path = 'nativeclient-mirror/nacl/nacl_sdk/%s' % (
      build_version.ChromeVersion(),)
  for artifact in ('sdk_tools.tgz', 'nacl_sdk.zip'):
    buildbot_common.Archive(artifact, bucket_path, OUT_DIR, step_link=False)
def BuildStepSyncNaClPorts():
  """Pull the pinned revision of naclports from SVN."""
  buildbot_common.BuildStep('Sync naclports')
  rev = str(NACLPORTS_REV)
  if os.path.exists(NACLPORTS_DIR):
    # sync existing copy to pinned revision.
    buildbot_common.Run(['svn', 'update', '-r', rev], cwd=NACLPORTS_DIR)
  else:
    # checkout new copy of naclports
    cmd = ['svn', 'checkout', '-q', '-r', rev, NACLPORTS_URL, 'naclports']
    buildbot_common.Run(cmd, cwd=os.path.dirname(NACLPORTS_DIR))
def BuildStepBuildNaClPorts(pepper_ver, pepperdir):
  """Build selected naclports in all configurations.

  Runs the naclports SDK-bundle build script with the SDK just built,
  then assembles the ports LICENSE/README files into the bundle.
  """
  # TODO(sbc): currently naclports doesn't know anything about
  # Debug builds so the Debug subfolders are all empty.
  env = dict(os.environ)
  env['NACL_SDK_ROOT'] = pepperdir
  env['PEPPER_DIR'] = os.path.basename(pepperdir) # pepper_NN
  env['NACLPORTS_NO_ANNOTATE'] = "1"
  env['NACLPORTS_NO_UPLOAD'] = "1"
  env['BUILDBOT_GOT_REVISION'] = str(NACLPORTS_REV)
  build_script = 'build_tools/buildbot_sdk_bundle.sh'
  buildbot_common.BuildStep('Build naclports')
  bundle_dir = os.path.join(NACLPORTS_DIR, 'out', 'sdk_bundle')
  out_dir = os.path.join(bundle_dir, 'pepper_%s' % pepper_ver)
  # Remove the sdk_bundle directory to remove stale files from previous builds.
  buildbot_common.RemoveDir(bundle_dir)
  buildbot_common.Run([build_script], env=env, cwd=NACLPORTS_DIR)
  # Some naclports do not include a standalone LICENSE/COPYING file
  # so we explicitly list those here for inclusion.
  extra_licenses = ('tinyxml/readme.txt',
                    'jpeg-8d/README',
                    'zlib-1.2.3/README')
  src_root = os.path.join(NACLPORTS_DIR, 'out', 'build')
  output_license = os.path.join(out_dir, 'ports', 'LICENSE')
  GenerateNotice(src_root , output_license, extra_licenses)
  readme = os.path.join(out_dir, 'ports', 'README')
  oshelpers.Copy(['-v', os.path.join(SDK_SRC_DIR, 'README.naclports'), readme])
def BuildStepTarNaClPorts(pepper_ver, tarfile):
  """Create tar archive containing headers and libs from naclports build."""
  buildbot_common.BuildStep('Tar naclports Bundle')
  buildbot_common.MakeDir(os.path.dirname(tarfile))
  ports_out = os.path.join(NACLPORTS_DIR, 'out', 'sdk_bundle')
  archive_dirs = [os.path.join('pepper_%s' % pepper_ver, 'ports')]
  buildbot_common.Run(
      [sys.executable, CYGTAR, '-C', ports_out, '-cjf', tarfile] +
      archive_dirs,
      cwd=NACL_DIR)
def BuildStepBuildAppEngine(pepperdir, chrome_revision):
  """Build the projects found in src/gonacl_appengine/src"""
  buildbot_common.BuildStep('Build GoNaCl AppEngine Projects')
  env = dict(os.environ)
  env['NACL_SDK_ROOT'] = pepperdir
  env['NACLPORTS_NO_ANNOTATE'] = "1"
  buildbot_common.Run(['make', 'upload', 'REVISION=%s' % chrome_revision],
                      env=env, cwd=GONACL_APPENGINE_SRC_DIR)
def main(args):
  """Entry point: parse command line options and run the SDK build steps.

  Returns 0 on success, 1 if the user aborts the bionic confirmation prompt.
  """
  parser = optparse.OptionParser(description=__doc__)
  parser.add_option('--nacl-tree-path',
      help='Path to native client tree for bionic build.',
      dest='nacl_tree_path')
  parser.add_option('--qemu', help='Add qemu for ARM.',
      action='store_true')
  parser.add_option('--bionic', help='Add bionic build.',
      action='store_true')
  parser.add_option('--tar', help='Force the tar step.',
      action='store_true')
  parser.add_option('--archive', help='Force the archive step.',
      action='store_true')
  parser.add_option('--release', help='PPAPI release version.',
      dest='release', default=None)
  parser.add_option('--build-ports',
      help='Build naclport bundle.', action='store_true')
  parser.add_option('--build-app-engine',
      help='Build AppEngine demos.', action='store_true')
  parser.add_option('--experimental',
      help='build experimental examples and libraries', action='store_true',
      dest='build_experimental')
  parser.add_option('--skip-toolchain', help='Skip toolchain untar',
      action='store_true')
  parser.add_option('--mac-sdk',
      help='Set the mac-sdk (e.g. 10.6) to use when building with ninja.')
  parser.add_option('--no-arm-trusted', action='store_true',
      help='Disable building of ARM trusted components (sel_ldr, etc).')

  # To setup bash completion for this command first install optcomplete
  # and then add this line to your .bashrc:
  #   complete -F _optcomplete build_sdk.py
  try:
    import optcomplete
    optcomplete.autocomplete(parser)
  except ImportError:
    pass

  # Parsed options are stashed in a module-level global so the BuildStep*
  # helpers can consult them without explicit parameter plumbing.
  global options
  options, args = parser.parse_args(args[1:])
  if args:
    parser.error("Unexpected arguments: %s" % str(args))

  if options.nacl_tree_path:
    # Building bionic straight out of a local NaCl checkout implies --bionic.
    options.bionic = True
    toolchain_build = os.path.join(options.nacl_tree_path, 'toolchain_build')
    print 'WARNING: Building bionic toolchain from NaCl checkout.'
    print 'This option builds bionic from the sources currently in the'
    print 'provided NativeClient checkout, and the results instead of '
    print 'downloading a toolchain from the builder. This may result in a'
    print 'NaCl SDK that can not run on ToT chrome.'
    print 'NOTE: To clobber you will need to run toolchain_build_bionic.py'
    print 'directly from the NativeClient checkout.'
    print ''
    # Interactive confirmation before using a local (possibly stale) tree.
    response = raw_input("Type 'y' and hit enter to continue.\n")
    if response != 'y' and response != 'Y':
      print 'Aborting.'
      return 1

    # Get head version of NativeClient tree
    buildbot_common.BuildStep('Build bionic toolchain.')
    buildbot_common.Run([sys.executable, 'toolchain_build_bionic.py', '-f'],
                        cwd=toolchain_build)
  else:
    toolchain_build = None

  # SDK builders always archive, tar and build everything.
  if buildbot_common.IsSDKBuilder():
    options.archive = True
    options.build_ports = True
    options.build_app_engine = True
    options.tar = True

  toolchains = ['newlib', 'glibc', 'arm', 'pnacl', 'host']

  # Changes for experimental bionic builder
  if options.bionic:
    toolchains.append('bionic')
    options.build_ports = False
    options.build_app_engine = False

  print 'Building: ' + ' '.join(toolchains)

  if options.archive and not options.tar:
    parser.error('Incompatible arguments with archive.')

  chrome_version = int(build_version.ChromeMajorVersion())
  chrome_revision = build_version.ChromeRevision()
  nacl_revision = build_version.NaClRevision()
  pepper_ver = str(chrome_version)
  pepper_old = str(chrome_version - 1)
  pepperdir = os.path.join(OUT_DIR, 'pepper_' + pepper_ver)
  pepperdir_old = os.path.join(OUT_DIR, 'pepper_' + pepper_old)
  if options.bionic:
    tarname = 'naclsdk_bionic.tar.bz2'
  else:
    tarname = 'naclsdk_' + getos.GetPlatform() + '.tar.bz2'
  tarfile = os.path.join(OUT_DIR, tarname)

  if options.release:
    pepper_ver = options.release
  print 'Building PEPPER %s at %s' % (pepper_ver, chrome_revision)

  if 'NACL_SDK_ROOT' in os.environ:
    # We don't want the currently configured NACL_SDK_ROOT to have any effect
    # of the build.
    del os.environ['NACL_SDK_ROOT']

  if not options.skip_toolchain:
    BuildStepCleanPepperDirs(pepperdir, pepperdir_old)
    BuildStepMakePepperDirs(pepperdir, ['include', 'toolchain', 'tools'])
    BuildStepDownloadToolchains(toolchains)
    if options.nacl_tree_path:
      # Instead of untarring, copy the raw bionic toolchain
      not_bionic = [i for i in toolchains if i != 'bionic']
      BuildStepUntarToolchains(pepperdir, not_bionic)
      tcname = GetToolchainDirName('bionic', 'arm')
      srcdir = os.path.join(toolchain_build, 'out', tcname)
      bionicdir = os.path.join(pepperdir, 'toolchain', tcname)
      oshelpers.Copy(['-r', srcdir, bionicdir])
    else:
      BuildStepUntarToolchains(pepperdir, toolchains)

  BuildStepBuildToolchains(pepperdir, toolchains)

  BuildStepUpdateHelpers(pepperdir, True)
  BuildStepUpdateUserProjects(pepperdir, toolchains,
                              options.build_experimental, True)

  BuildStepCopyTextFiles(pepperdir, pepper_ver, chrome_revision, nacl_revision)

  # Ship with libraries prebuilt, so run that first.
  BuildStepBuildLibraries(pepperdir, 'src')
  GenerateNotice(pepperdir)

  # Verify the SDK contains what we expect.
  if not options.bionic:
    BuildStepVerifyFilelist(pepperdir)

  if options.tar:
    BuildStepTarBundle(pepper_ver, tarfile)

  # naclports only builds on Linux hosts.
  if options.build_ports and getos.GetPlatform() == 'linux':
    ports_tarfile = os.path.join(OUT_DIR, 'naclports.tar.bz2')
    BuildStepSyncNaClPorts()
    BuildStepBuildNaClPorts(pepper_ver, pepperdir)
    if options.tar:
      BuildStepTarNaClPorts(pepper_ver, ports_tarfile)

  if options.build_app_engine and getos.GetPlatform() == 'linux':
    BuildStepBuildAppEngine(pepperdir, chrome_revision)

  if options.qemu:
    qemudir = os.path.join(NACL_DIR, 'toolchain', 'linux_arm-trusted')
    oshelpers.Copy(['-r', qemudir, pepperdir])

  # Archive on non-trybots.
  if options.archive:
    BuildStepArchiveBundle('build', pepper_ver, chrome_revision, nacl_revision,
                           tarfile)
    if options.build_ports and getos.GetPlatform() == 'linux':
      BuildStepArchiveBundle('naclports', pepper_ver, chrome_revision,
                             nacl_revision, ports_tarfile)
    BuildStepArchiveSDKTools()

  return 0
if __name__ == '__main__':
  try:
    sys.exit(main(sys.argv))
  except KeyboardInterrupt:
    # Report Ctrl-C as a named build error instead of a raw traceback.
    buildbot_common.ErrorExit('build_sdk: interrupted')
| bright-sparks/chromium-spacewalk | native_client_sdk/src/build_tools/build_sdk.py | Python | bsd-3-clause | 36,606 |
# Analyzer test fixture: each name below is deliberately assigned several
# values so the analysis tests can verify that every assigned value is
# tracked (comments only here -- a docstring would itself be a statement).
from Values import x, y, z

# Distinct container literals to give D several different inferred types.
l = []
t = ()
s = {1}

# XY collects both x and y.
XY = x
XY = y

# XYZ collects z plus everything already flowing through XY.
XYZ = z
XYZ = XY

D = l
D = t
D = s
D = s # ensure multiple assignments do not clear it

# Re-importing D merges the imported value with the local assignments.
from Values import D
| Microsoft/PTVS | Python/Tests/TestData/AstAnalysis/MultiValues.py | Python | apache-2.0 | 175 |
#!/usr/bin/env python
'''
1. Using SNMPv3 create a script that detects router configuration changes.
If the running configuration has changed, then send an email notification to yourself identifying the router that changed and the time that it changed.
Note, the running configuration of pynet-rtr2 is changing every 15 minutes (roughly at 0, 15, 30, and 45 minutes after the hour).
This will allow you to test your script in the lab environment.
In this exercise, you will possibly need to save data to an external file. One way you can accomplish this is by using a pickle file, see:
http://youtu.be/ZJOJjyhhEvM
A pickle file lets you save native Python data structures (dictionaries, lists, objects) directly to a file.
Here is some additional reference material that you will probably need to solve this problem:
Cisco routers have the following three OIDs:
# Uptime when running config last changed
ccmHistoryRunningLastChanged = '1.3.6.1.4.1.9.9.43.1.1.1.0'
# Uptime when running config last saved (note any 'write' constitutes a save)
ccmHistoryRunningLastSaved = '1.3.6.1.4.1.9.9.43.1.1.2.0'
# Uptime when startup config last saved
ccmHistoryStartupLastChanged = '1.3.6.1.4.1.9.9.43.1.1.3.0'
From the above descriptions, the router will save the sysUptime timestamp (OID sysUptime = 1.3.6.1.2.1.1.3.0) when a running configuration change occurs.
The router will also record the sysUptime timestamp when the running configuration is saved to the startup config.
Here is some data on the behavior of these OIDs. Note, sysUptime times are in hundredths of seconds so 317579 equals 3175.79 seconds (i.e. a bit less than one hour)
# After reboot
pynet-rtr2.twb-tech.com
317579 (sysUptime)
2440 (ccmHistoryRunningLastChanged--running-config is changed during boot)
0 (ccmHistoryRunningLastSaved -- i.e. reset to 0 on reload)
0 (ccmHistoryStartupLastChanged -- i.e. reset to 0 on reload)
# After config change on router (but no save to startup config)
pynet-rtr2.twb-tech.com
322522 (sysUptime)
322219 (ccmHistoryRunningLastChanged)
0 (ccmHistoryRunningLastSaved)
0 (ccmHistoryStartupLastChanged)
# After 'write mem' on router
pynet-rtr2.twb-tech.com
324543 (sysUptime)
322219 (ccmHistoryRunningLastChanged)
323912 (ccmHistoryRunningLastSaved)
323912 (ccmHistoryStartupLastChanged)
# After another configuration change (but no save to startup config)
pynet-rtr2.twb-tech.com
327177 (sysUptime)
326813 (ccmHistoryRunningLastChanged)
323912 (ccmHistoryRunningLastSaved)
323912 (ccmHistoryStartupLastChanged)
# After 'show run' command (note, this causes 'ccmHistoryRunningLastSaved' to
# increase i.e. 'write terminal' causes this OID to be updated)
pynet-rtr2.twb-tech.com
343223 (sysUptime)
326813 (ccmHistoryRunningLastChanged)
342898 (ccmHistoryRunningLastSaved)
323912 (ccmHistoryStartupLastChanged)
Bonus challenge: instead of saving your data in a pickle file, save the data using either a YAML or a JSON file.
My alternate solution supports pickle, YAML, or JSON depending on the name of the file (.pkl, .yml, or .json).
'''
import email_helper
import snmp_helper
import time
import pickle

# SNMPv3 credentials and the lab routers' (host, port) endpoints.
IP = '50.76.53.27'
a_user = 'pysnmp'
auth_key = 'galileo1'
encrypt_key = 'galileo1'
snmp_user = (a_user, auth_key, encrypt_key)
pynet_rtr1 = (IP, 7961)
pynet_rtr2 = (IP, 8061)

# grab our SNMP info in an ugly format
# (sysUptime plus the three ccmHistory* "uptime when config changed" OIDs)
uptime = snmp_helper.snmp_get_oid_v3(pynet_rtr2, snmp_user, oid='1.3.6.1.2.1.1.3.0')
runninglastchanged = snmp_helper.snmp_get_oid_v3(pynet_rtr2, snmp_user, oid='1.3.6.1.4.1.9.9.43.1.1.1.0')
runninglastsaved = snmp_helper.snmp_get_oid_v3(pynet_rtr2, snmp_user, oid='1.3.6.1.4.1.9.9.43.1.1.2.0')
startlastchanged = snmp_helper.snmp_get_oid_v3(pynet_rtr2, snmp_user, oid='1.3.6.1.4.1.9.9.43.1.1.3.0')

# the below extracts it into something a little more legible
uptime = snmp_helper.snmp_extract(uptime)
runninglastchanged = snmp_helper.snmp_extract(runninglastchanged)
runninglastsaved = snmp_helper.snmp_extract(runninglastsaved)
startlastchanged = snmp_helper.snmp_extract(startlastchanged)

# Load the timestamps recorded by the previous run of this script.
# NOTE(review): assumes storage.pkl already exists -- first run will raise
# IOError; confirm the file is seeded out of band.
file_read = open("storage.pkl", "rb")
check_runningchanged = pickle.load(file_read)
check_runningsaved = pickle.load(file_read)
check_startchanged = pickle.load(file_read)
file_read.close()

# Immediately persist the freshly polled values for the next run.
file_opened = open("storage.pkl", "wb")
pickle.dump(runninglastchanged, file_opened)
pickle.dump(runninglastsaved, file_opened)
pickle.dump(startlastchanged, file_opened)
file_opened.close()
def convert_to_time(change_me):
    """Convert a sysUptime tick value (hundredths of a second) to minutes.

    Accepts the string values returned by the SNMP helpers (or anything
    float() can parse).  Returns the elapsed time in minutes as a float,
    or None when the value cannot be parsed as a number.
    """
    try:
        hundredths = float(change_me)
    except (TypeError, ValueError):
        # Narrowed from a bare `except:`, which also swallowed SystemExit
        # and KeyboardInterrupt.  print() works on both Python 2 and 3.
        print('something went horribly wrong')
        return None
    # hundredths of a second -> seconds -> minutes
    return (hundredths * .01) / 60
def _notify_change(last_event_uptime):
    """Email a notification that rtr2's configuration changed.

    `last_event_uptime` is the sysUptime tick value (hundredths of a
    second) recorded when the change happened; the elapsed time since the
    event is reported in minutes.
    """
    timestamp = convert_to_time(uptime) - convert_to_time(last_event_uptime)
    recipient = 'joey@networkbit.com'
    subject = 'Pynet status change'  # could use some logic to enter device name here
    message = '''
Something changed on the configuration of rtr2 %s minutes ago!
''' % timestamp
    sender = 'joey@theboyers.org'
    email_helper.send_mail(recipient, subject, message, sender)

# A change happened iff the freshly polled timestamp differs from the one
# stored by the previous run.  (The original tested `stored >= current`,
# which fired when nothing had changed -- equal values -- and stayed
# silent after a genuine change made the current value larger.)
if runninglastchanged != check_runningchanged:
    _notify_change(runninglastchanged)

if runninglastsaved != check_runningsaved:
    _notify_change(runninglastsaved)

if startlastchanged != check_startchanged:
    _notify_change(startlastchanged)
| joeyb182/pynet_ansible | byers-paid/exercise_week_3_1.py | Python | apache-2.0 | 6,430 |
"""
Tests for the FileRequest module.
"""
from teiler import filerequest
from twisted.internet.defer import Deferred
from twisted.trial import unittest
class ParseFileRequestTests(unittest.SynchronousTestCase):
    """Tests for filerequest.parseFileRequest argument handling."""

    def test_parse_file_req_ret(self):
        # Values mimic twisted request.args: each value is a one-element
        # list whose single string is comma separated.
        request = {
            'url': ['192.168.1.1'],
            'filenames': ['foo/plop,foo/bar/baz.txt'],
            'directories': ['foo,foo/bar']
        }
        downdir = "."
        result = filerequest.parseFileRequest(request, downdir)
        # The comma separated strings must be split into individual entries.
        self.assertEqual(2, len(result.files))
        self.assertEqual('foo/plop', result.files[0])
        self.assertEqual('foo/bar/baz.txt', result.files[1])
        self.assertEqual(2, len(result.directories))
        self.assertEqual('foo', result.directories[0])
        self.assertEqual('foo/bar', result.directories[1])

    def test_malformed_request_raises_missing_url_exception(self):
        # No 'url' key at all -> MissingUrlError.
        request = {
            'filenames': ['plop', 'foo/bar/baz.txt'],
            'directories': ['foo/bar'],
        }
        downdir = "."
        self.assertRaises(
            filerequest.MissingUrlError,
            filerequest.parseFileRequest,
            request,
            downdir
        )

    def test_malformed_request_raises_missing_files_exception(self):
        # No 'filenames' key -> MissingFilesError.
        request = {
            'url': 'foo://foo',
            'directories': [],
        }
        downdir = "."
        self.assertRaises(
            filerequest.MissingFilesError,
            filerequest.parseFileRequest,
            request,
            downdir
        )

    def test_malformed_request_raises_missing_dir_exception(self):
        # No 'directories' key -> MissingDirectoriesError.
        request = {
            'url': 'foo//foo',
            'filenames': ['imafile']
        }
        downdir = "."
        self.assertRaises(
            filerequest.MissingDirectoriesError,
            filerequest.parseFileRequest,
            request,
            downdir
        )
class FakeDownloader(object):
    """
    Test double for the download agent: records every requested URL and
    counts calls instead of performing network transfers.
    """
    def __init__(self):
        self.called = 0
        self.requests = []

    def getFile(self, url, filepath):
        # Record the request, then hand back an (unfired) Deferred whose
        # eventual result is always True.
        self.requests.append(url)
        self.called = self.called + 1
        deferred = Deferred()

        def finish(ignored):
            return True

        deferred.addBoth(finish)
        return deferred
def fakeCreateFileDirs(downloadTo, newPath):
    """
    No-op replacement for createFileDirs: accepts the same arguments,
    touches nothing on disk, and always reports success.
    """
    return True
class FileRequestTests(unittest.SynchronousTestCase):
    """Tests for FileRequest.getFiles bookkeeping of pending downloads."""

    def setUp(self):
        # One file, one directory; downloader and dir-creation are faked.
        self.url = 'here'
        self.files = ['file1']
        self.directories = ['home']
        self.downloadTo = '.'
        self.frequest = filerequest.FileRequest(self.url,
                                                self.files,
                                                self.directories,
                                                self.downloadTo)

    def test_get_files_adds_files_to_downloading(self):
        self.frequest.getFiles(FakeDownloader(), fakeCreateFileDirs)
        self.assertTrue(self.frequest.downloading == ['file1'])
        self.assertTrue(len(self.frequest.downloading) == 1)

    def test_get_files_removes_files_from_files(self):
        # Files move out of the pending list once a download starts.
        self.frequest.getFiles(FakeDownloader(), fakeCreateFileDirs)
        self.assertTrue(len(self.frequest.files) == 0)

    def test_get_files_with_url_and_filename(self):
        # History records the full "<url>/<filename>" that was fetched.
        self.frequest.getFiles(FakeDownloader(), fakeCreateFileDirs)
        self.assertTrue(len(self.frequest.history) == 1)
        self.assertTrue(self.frequest.history == ['here/file1'])
| derwolfe/teiler | teiler/test/test_filerequest.py | Python | mit | 3,551 |
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo import api, fields, models
class HrEmployee(models.Model):
    """Employee extended with website publishing support."""
    _name = 'hr.employee'
    _inherit = ['hr.employee', 'website.published.mixin']

    # Extra free-form text shown on the public website.
    public_info = fields.Char(string='Public Info')

    @api.multi
    def _website_url(self, field_name, arg):
        """Point every employee's website URL at the About Us team section."""
        res = super(HrEmployee, self)._website_url(field_name, arg)
        # Idiom fix: the original built a *set* of (id, url) tuples, which
        # dict.update() happens to accept as an iterable of pairs but which
        # obscures (and does not guarantee the shape of) the id -> url
        # mapping being installed.  A dict comprehension states it directly.
        res.update({employee_id: '/page/website.aboutus#team'
                    for employee_id in self.ids})
        return res
| ChawalitK/odoo | addons/website_hr/models/hr_employee.py | Python | gpl-3.0 | 549 |
# -*- coding:utf-8 -*-
# Exploratory/scratch script for the mswim WIM (weigh-in-motion) system.
import sys

import matplotlib.pyplot as plt
import numpy as np
from matplotlib.ticker import EngFormatter

# Make the local mswim package importable from the repository layout.
local_path = '/'.join(sys.path[0].split('/')[:-2]) + '/'
sys.path.insert(0, local_path + 'mswim/')

from mswim import WimSys

# Scratch truthiness experiment.
# NOTE(review): the exit() below terminates the script here, leaving
# everything that follows as unreachable dead code.
a = ''
x = True if a else False
print(x)
exit()

root_file = '/home/ivan/dev/pydev/datos/mswim/calibracao/'

# Connect to the lab database and plot one acquisition (dead code, see above).
labwim = WimSys('host=150.162.176.222 dbname=labdb user=lab password=lab')

#labwim.save_file(root_file + '20130626_150243_piezoQuartzo_DadosBrutos.txt')

wimdata = labwim.search_acquisition(1)
wimdata.extract_data_array()

print(wimdata.axles())
labwim.plot(wimdata)

# The two triple-quoted blocks below are earlier experiments kept as inert
# string literals (effectively commented out).
"""
from datetime import datetime, timedelta
acq_date, acq_time = ('2013/06/26', '11:45:14,9430323499424465829')
acq_time = acq_time.split(',')
acq_time = ','.join((acq_time[0], acq_time[1][:6]))
date_object = datetime.strptime('%s %s' % (acq_date, acq_time), '%Y/%m/%d %H:%M:%S,%f')
#0,000200
delta_time = float('0,000200'.replace(',', '.'))
delta_time = timedelta(seconds=delta_time)
new_time = date_object + delta_time
print(new_time)
"""

"""
import datetime
s = "2010-01-01 18:48:14,631829"
print(datetime.datetime.strptime(s, "%Y-%m-%d %H:%M:%S,%f"))
"""

import os
root_file = '/media/ivan/bf7f8bb4-842c-4abb-b280-8195370749c0/ivan/dev/labtrans/datos/mswim/calibracao/'
_files = [f for f in os.listdir(root_file) if f.endswith('_piezoQuartzo_DadosBrutos.txt')]
print _files | xmnlab/minilab | labtrans/plotter/lab.py | Python | gpl-3.0 | 1,420 |
# Copyright 2021 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from pants.backend.shell import dependency_inference, shell_command, shunit2_test_runner, tailor
from pants.backend.shell.target_types import (
ShellCommand,
ShellCommandRun,
ShellSourcesGeneratorTarget,
ShellSourceTarget,
Shunit2TestsGeneratorTarget,
Shunit2TestTarget,
)
from pants.backend.shell.target_types import rules as target_types_rules
def target_types():
    """Return the target types contributed by the Shell backend."""
    registered = (
        ShellCommand,
        ShellCommandRun,
        ShellSourcesGeneratorTarget,
        Shunit2TestsGeneratorTarget,
        ShellSourceTarget,
        Shunit2TestTarget,
    )
    return list(registered)
def rules():
    """Return every rule needed to register the Shell backend."""
    combined = []
    # Keep registration order identical to the original unpacking.
    for rule_source in (
        dependency_inference.rules,
        shell_command.rules,
        shunit2_test_runner.rules,
        tailor.rules,
        target_types_rules,
    ):
        combined.extend(rule_source())
    return combined
| patricklaw/pants | src/python/pants/backend/shell/register.py | Python | apache-2.0 | 917 |
# Thin entry point: delegate the whole reset to the methods module.
import methods

methods.reset_dates_ids()
| jaimejimbo/rod-analysis | reset_dates_ids.py | Python | gpl-3.0 | 42 |
"""
mbed SDK
Copyright (c) 2011-2013 ARM Limited
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from os.path import join, abspath, dirname
import logging

# Repository root, relative to this file's directory.
ROOT = abspath(join(dirname(__file__), ".."))

# These default settings have two purposes:
#    1) Give a template for writing local "private_settings.py"
#    2) Give default initialization fields for the "toolchains.py" constructors

##############################################################################
# Build System Settings
##############################################################################
BUILD_DIR = abspath(join(ROOT, "build"))

# ARM
armcc = "standalone" # "keil", or "standalone", or "ds-5"

# Each supported ARM compiler install lays out its bin/include/lib
# directories differently.
if armcc == "keil":
    ARM_PATH = "C:/Keil_4_54/ARM"
    ARM_BIN = join(ARM_PATH, "BIN40")
    ARM_INC = join(ARM_PATH, "RV31", "INC")
    ARM_LIB = join(ARM_PATH, "RV31", "LIB")

elif armcc == "standalone":
    ARM_PATH = "C:/Program Files/ARM/armcc_4.1_791"
    ARM_BIN = join(ARM_PATH, "bin")
    ARM_INC = join(ARM_PATH, "include")
    ARM_LIB = join(ARM_PATH, "lib")

elif armcc == "ds-5":
    ARM_PATH = "C:/Program Files (x86)/DS-5"
    ARM_BIN = join(ARM_PATH, "bin")
    ARM_INC = join(ARM_PATH, "include")
    ARM_LIB = join(ARM_PATH, "lib")

ARM_CPPLIB = join(ARM_LIB, "cpplib")
MY_ARM_CLIB = join(ARM_PATH, "lib", "microlib")

# GCC ARM
GCC_ARM_PATH = ""

# GCC CodeSourcery
GCC_CS_PATH = "C:/Program Files (x86)/CodeSourcery/Sourcery_CodeBench_Lite_for_ARM_EABI/bin"

# GCC CodeRed
GCC_CR_PATH = "C:/code_red/RedSuite_4.2.0_349/redsuite/Tools/bin"

# IAR
IAR_PATH = "C:/Program Files (x86)/IAR Systems/Embedded Workbench 6.0/arm"

# GCC Code Warrior
CW_GCC_PATH = "C:/Freescale/CW MCU v10.3/Cross_Tools/arm-none-eabi-gcc-4_6_2/bin"
CW_EWL_PATH = "C:/Freescale/CW MCU v10.3/MCU/ARM_GCC_Support/ewl/lib"

# Goanna static analyzer
GOANNA_PATH = "c:/Program Files (x86)/RedLizards/Goanna Central 3.1.4/bin"

# cppcheck path (command) and output message format
CPPCHECK_CMD = ["cppcheck", "--enable=all"]
CPPCHECK_MSG_FORMAT = ["--template=[{severity}] {file}@{line}: {id}:{message}"]

BUILD_OPTIONS = []

# mbed.org username
MBED_ORG_USER = ""

##############################################################################
# Test System Settings
##############################################################################
SERVER_PORT = 59432
SERVER_ADDRESS = "10.2.200.94"
LOCALHOST = "10.2.200.94"

# "mbeds under test": serial port, mass-storage mount point and the
# peripherals wired to each attached board.
MUTs = {
    "1" : {"mcu": "LPC1768",
        "port":"COM41", "disk":'E:\\',
        "peripherals": ["TMP102", "digital_loop", "port_loop", "analog_loop", "SD"]
    },

    "2": {"mcu": "LPC11U24",
        "port":"COM42", "disk":'F:\\',
        "peripherals": ["TMP102", "digital_loop", "port_loop", "SD"]
    },

    "3" : {"mcu": "KL25Z",
        "port":"COM43", "disk":'G:\\',
        "peripherals": ["TMP102", "digital_loop", "port_loop", "analog_loop", "SD"]
    },
}

##############################################################################
# Private Settings
##############################################################################
try:
    # Allow to overwrite the default settings without the need to edit the
    # settings file stored in the repository
    from workspace_tools.private_settings import *
except ImportError:
    print '[WARNING] Using default settings. Define you settings in the file "workspace_tools/private_settings.py" or in "./mbed_settings.py"'
| NordicSemiconductor/mbed | workspace_tools/settings.py | Python | apache-2.0 | 3,867 |
# -*- coding: utf-8 -*-
# Generated by Django 1.9.4 on 2016-06-29 01:49
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration: adds a `title` text column to
    # MaintenanceNotice with a default heading so existing rows stay valid.

    dependencies = [
        ('api', '0008_maintenancenotice'),
    ]

    operations = [
        migrations.AddField(
            model_name='maintenancenotice',
            name='title',
            field=models.TextField(default=b'Maintenance Information'),
        ),
    ]
| CCI-MOC/GUI-Frontend | api/migrations/0009_maintenancenotice_title.py | Python | apache-2.0 | 474 |
def findNextPalindrome(number):
    """Return the smallest palindrome strictly greater than `number`.

    Fixes the original implementation, which compared a digit *string*
    against the int 9 (always False), discarded the result of a carry
    expression, could not handle carries at all, and crashed with an
    unbound/empty result for numbers of fewer than three digits.
    """
    digits = str(number)
    length = len(digits)
    half = digits[: (length + 1) // 2]

    def _mirror(prefix):
        # Reflect the left half onto the right (dropping the middle digit
        # for odd lengths) to form a same-length palindrome.
        tail = prefix[:-1] if length % 2 else prefix
        return int(prefix + tail[::-1])

    candidate = _mirror(half)
    if candidate > number:
        return candidate

    # Mirroring gave something <= number: bump the left half and re-mirror.
    bumped = str(int(half) + 1)
    if len(bumped) > len(half):
        # The half overflowed (all 9s, e.g. 999): next palindrome is 10...01.
        return int('1' + '0' * (length - 1) + '1')
    return _mirror(bumped)
def main():
    """Read a count, then that many numbers; print each next palindrome."""
    total = int(input())
    answers = [findNextPalindrome(int(input())) for _ in range(total)]
    for answer in answers:
        print(answer)


main()
| Diegow3b/Sphere-Online-Judge-SPOJ- | challenges/classical/PALINTheNextPalindrome.py | Python | mit | 889 |
"""Loader for the Labeled Faces in the Wild (LFW) dataset
This dataset is a collection of JPEG pictures of famous people collected
over the internet, all details are available on the official website:
http://vis-www.cs.umass.edu/lfw/
Each picture is centered on a single face. The typical task is called
Face Verification: given a pair of two pictures, a binary classifier
must predict whether the two images are from the same person.
An alternative task, Face Recognition or Face Identification is:
given the picture of the face of an unknown person, identify the name
of the person by referring to a gallery of previously seen pictures of
identified persons.
Both Face Verification and Face Recognition are tasks that are typically
performed on the output of a model trained to perform Face Detection. The
most popular model for Face Detection is called Viola-Johns and is
implemented in the OpenCV library. The LFW faces were extracted by this face
detector from various online websites.
"""
# Copyright (c) 2011 Olivier Grisel <olivier.grisel@ensta.org>
# License: BSD 3 clause
from os import listdir, makedirs, remove
from os.path import join, exists, isdir
import logging
import numpy as np
import urllib
from .base import get_data_home, Bunch
from ..externals.joblib import Memory
from ..externals.six import b
logger = logging.getLogger(__name__)

# Location of the official LFW archives on the UMass website.
BASE_URL = "http://vis-www.cs.umass.edu/lfw/"
ARCHIVE_NAME = "lfw.tgz"
FUNNELED_ARCHIVE_NAME = "lfw-funneled.tgz"

# Pair-list metadata files describing the official verification splits.
TARGET_FILENAMES = [
    'pairsDevTrain.txt',
    'pairsDevTest.txt',
    'pairs.txt',
]
def scale_face(face):
    """Rescale a face image so its values span the 0-1 range (for plotting
    images that were normalized elsewhere)."""
    shifted = face - face.min()
    return shifted / shifted.max()
#
# Common private utilities for data fetching from the original LFW website
# local disk caching, and image decoding.
#
def check_fetch_lfw(data_home=None, funneled=True, download_if_missing=True):
    """Helper function to download any missing LFW data.

    Returns ``(lfw_home, data_folder_path)``.  Raises IOError when data is
    missing locally and ``download_if_missing`` is False.
    """
    data_home = get_data_home(data_home=data_home)
    lfw_home = join(data_home, "lfw_home")

    # The funneled (aligned) variant ships as a separate archive and
    # extracts into a differently named folder.
    if funneled:
        archive_path = join(lfw_home, FUNNELED_ARCHIVE_NAME)
        data_folder_path = join(lfw_home, "lfw_funneled")
        archive_url = BASE_URL + FUNNELED_ARCHIVE_NAME
    else:
        archive_path = join(lfw_home, ARCHIVE_NAME)
        data_folder_path = join(lfw_home, "lfw")
        archive_url = BASE_URL + ARCHIVE_NAME

    if not exists(lfw_home):
        makedirs(lfw_home)

    # Fetch each pair-list metadata file individually.
    for target_filename in TARGET_FILENAMES:
        target_filepath = join(lfw_home, target_filename)
        if not exists(target_filepath):
            if download_if_missing:
                url = BASE_URL + target_filename
                logger.warn("Downloading LFW metadata: %s", url)
                urllib.urlretrieve(url, target_filepath)
            else:
                raise IOError("%s is missing" % target_filepath)

    if not exists(data_folder_path):
        if not exists(archive_path):
            if download_if_missing:
                logger.warn("Downloading LFW data (~200MB): %s", archive_url)
                urllib.urlretrieve(archive_url, archive_path)
            else:
                # Bug fix: previously reported the stale `target_filepath`
                # left over from the metadata loop above instead of the
                # missing archive itself.
                raise IOError("%s is missing" % archive_path)

        import tarfile
        logger.info("Decompressing the data archive to %s", data_folder_path)
        tarfile.open(archive_path, "r:gz").extractall(path=lfw_home)
        # Drop the tarball once extracted to reclaim the disk space.
        remove(archive_path)

    return lfw_home, data_folder_path
def _load_imgs(file_paths, slice_, color, resize):
    """Internally used to load images.

    Returns a float32 array of shape (n_faces, h, w) or (n_faces, h, w, 3)
    with pixel values scaled to [0, 1].
    """
    # Try to import imread and imresize from PIL. We do this here to prevent
    # the whole sklearn.datasets module from depending on PIL.
    try:
        try:
            from scipy.misc import imread
        except ImportError:
            from scipy.misc.pilutil import imread
        from scipy.misc import imresize
    except ImportError:
        raise ImportError("The Python Imaging Library (PIL)"
                          " is required to load data from jpeg files")

    # compute the portion of the images to load to respect the slice_ parameter
    # given by the caller
    default_slice = (slice(0, 250), slice(0, 250))
    if slice_ is None:
        slice_ = default_slice
    else:
        # Fill any None entry of the (height, width) slice pair with the
        # corresponding default.
        slice_ = tuple(s or ds for s, ds in zip(slice_, default_slice))

    h_slice, w_slice = slice_
    # NOTE(review): these rely on Python 2 integer `/` semantics; under
    # Python 3 h and w would be floats unless resize triggers the int()
    # below -- confirm the target interpreter.
    h = (h_slice.stop - h_slice.start) / (h_slice.step or 1)
    w = (w_slice.stop - w_slice.start) / (w_slice.step or 1)

    if resize is not None:
        resize = float(resize)
        h = int(resize * h)
        w = int(resize * w)

    # allocate some contiguous memory to host the decoded image slices
    n_faces = len(file_paths)
    if not color:
        faces = np.zeros((n_faces, h, w), dtype=np.float32)
    else:
        faces = np.zeros((n_faces, h, w, 3), dtype=np.float32)

    # iterate over the collected file path to load the jpeg files as numpy
    # arrays
    for i, file_path in enumerate(file_paths):
        if i % 1000 == 0:
            logger.info("Loading face #%05d / %05d", i + 1, n_faces)
        face = np.asarray(imread(file_path)[slice_], dtype=np.float32)
        face /= 255.0  # scale uint8 coded colors to the [0.0, 1.0] floats
        if resize is not None:
            face = imresize(face, resize)
        if not color:
            # average the color channels to compute a gray levels
            # representation
            face = face.mean(axis=2)
        faces[i, ...] = face
    return faces
#
# Task #1: Face Identification on picture with names
#
def _fetch_lfw_people(data_folder_path, slice_=None, color=False, resize=None,
                      min_faces_per_person=0):
    """Perform the actual data loading for the lfw people dataset

    This operation is meant to be cached by a joblib wrapper.
    Returns ``(faces, target, target_names)``.
    """
    # scan the data folder content to retain people with more than
    # `min_faces_per_person` face pictures
    person_names, file_paths = [], []
    for person_name in sorted(listdir(data_folder_path)):
        folder_path = join(data_folder_path, person_name)
        if not isdir(folder_path):
            continue
        paths = [join(folder_path, f) for f in listdir(folder_path)]
        n_pictures = len(paths)
        if n_pictures >= min_faces_per_person:
            # Folder names use underscores where the display name has spaces.
            person_name = person_name.replace('_', ' ')
            person_names.extend([person_name] * n_pictures)
            file_paths.extend(paths)

    n_faces = len(file_paths)
    if n_faces == 0:
        raise ValueError("min_faces_per_person=%d is too restrictive" %
                         min_faces_per_person)

    # Integer class labels: index of each person's name in the sorted
    # unique-name array.
    target_names = np.unique(person_names)
    target = np.searchsorted(target_names, person_names)

    faces = _load_imgs(file_paths, slice_, color, resize)

    # shuffle the faces with a deterministic RNG scheme to avoid having
    # all faces of the same person in a row, as it would break some
    # cross validation and learning algorithms such as SGD and online
    # k-means that make an IID assumption
    indices = np.arange(n_faces)
    np.random.RandomState(42).shuffle(indices)
    faces, target = faces[indices], target[indices]
    return faces, target, target_names
def fetch_lfw_people(data_home=None, funneled=True, resize=0.5,
                     min_faces_per_person=0, color=False,
                     slice_=(slice(70, 195), slice(78, 172)),
                     download_if_missing=True):
    """Loader for the Labeled Faces in the Wild (LFW) people dataset

    This dataset is a collection of JPEG pictures of famous people
    collected on the internet, all details are available on the
    official website:

        http://vis-www.cs.umass.edu/lfw/

    Each picture is centered on a single face. Each pixel of each channel
    (color in RGB) is encoded by a float in range 0.0 - 1.0.

    The task is called Face Recognition (or Identification): given the
    picture of a face, find the name of the person given a training set
    (gallery).

    Parameters
    ----------
    data_home: optional, default: None
        Specify another download and cache folder for the datasets. By default
        all scikit learn data is stored in '~/scikit_learn_data' subfolders.

    funneled: boolean, optional, default: True
        Download and use the funneled variant of the dataset.

    resize: float, optional, default 0.5
        Ratio used to resize the each face picture.

    min_faces_per_person: int, optional, default None
        The extracted dataset will only retain pictures of people that have at
        least `min_faces_per_person` different pictures.

    color: boolean, optional, default False
        Keep the 3 RGB channels instead of averaging them to a single
        gray level channel. If color is True the shape of the data has
        one more dimension than than the shape with color = False.

    slice_: optional
        Provide a custom 2D slice (height, width) to extract the
        'interesting' part of the jpeg files and avoid use statistical
        correlation from the background

    download_if_missing: optional, True by default
        If False, raise a IOError if the data is not locally available
        instead of trying to download the data from the source site.

    Returns
    -------
    Bunch with ``data`` (flattened images), ``images``, ``target`` and
    ``target_names`` attributes.
    """
    lfw_home, data_folder_path = check_fetch_lfw(
        data_home=data_home, funneled=funneled,
        download_if_missing=download_if_missing)
    logger.info('Loading LFW people faces from %s', lfw_home)

    # wrap the loader in a memoizing function that will return memmaped data
    # arrays for optimal memory usage
    m = Memory(cachedir=lfw_home, compress=6, verbose=0)
    load_func = m.cache(_fetch_lfw_people)

    # load and memoize the pairs as np arrays
    faces, target, target_names = load_func(
        data_folder_path, resize=resize,
        min_faces_per_person=min_faces_per_person, color=color, slice_=slice_)

    # pack the results as a Bunch instance
    return Bunch(data=faces.reshape(len(faces), -1), images=faces,
                 target=target, target_names=target_names,
                 DESCR="LFW faces dataset")
#
# Task #2: Face Verification on pairs of face pictures
#
def _fetch_lfw_pairs(index_file_path, data_folder_path, slice_=None,
                     color=False, resize=None):
    """Perform the actual data loading for the LFW pairs dataset

    This operation is meant to be cached by a joblib wrapper.
    Returns ``(pairs, target, class_names)``.
    """
    # parse the index file to find the number of pairs to be able to allocate
    # the right amount of memory before starting to decode the jpeg files
    with open(index_file_path, 'rb') as index_file:
        split_lines = [ln.strip().split(b('\t')) for ln in index_file]
    # Lines with fewer than three fields are headers/blanks, not pairs.
    pair_specs = [sl for sl in split_lines if len(sl) > 2]
    n_pairs = len(pair_specs)

    # iterating over the metadata lines for each pair to find the filename to
    # decode and load in memory
    target = np.zeros(n_pairs, dtype=np.int)
    file_paths = list()
    for i, components in enumerate(pair_specs):
        if len(components) == 3:
            # 3 fields: one person, two picture indices -> positive pair.
            target[i] = 1
            pair = (
                (components[0], int(components[1]) - 1),
                (components[0], int(components[2]) - 1),
            )
        elif len(components) == 4:
            # 4 fields: two different persons -> negative pair.
            target[i] = 0
            pair = (
                (components[0], int(components[1]) - 1),
                (components[2], int(components[3]) - 1),
            )
        else:
            raise ValueError("invalid line %d: %r" % (i + 1, components))
        for j, (name, idx) in enumerate(pair):
            try:
                person_folder = join(data_folder_path, name)
            except TypeError:
                # `name` came in as bytes; decode before joining the path.
                person_folder = join(data_folder_path, str(name, 'UTF-8'))
            filenames = list(sorted(listdir(person_folder)))
            file_path = join(person_folder, filenames[idx])
            file_paths.append(file_path)

    pairs = _load_imgs(file_paths, slice_, color, resize)
    # Reshape the flat (2 * n_pairs, ...) stack into (n_pairs, 2, ...).
    shape = list(pairs.shape)
    n_faces = shape.pop(0)
    shape.insert(0, 2)
    shape.insert(0, n_faces // 2)
    pairs.shape = shape

    return pairs, target, np.array(['Different persons', 'Same person'])
def load_lfw_people(download_if_missing=False, **kwargs):
    """Alias for fetch_lfw_people with download_if_missing defaulting to False.
    See fetch_lfw_people.__doc__ for the full parameter documentation.
    """
    kwargs['download_if_missing'] = download_if_missing
    return fetch_lfw_people(**kwargs)
def fetch_lfw_pairs(subset='train', data_home=None, funneled=True, resize=0.5,
                    color=False, slice_=(slice(70, 195), slice(78, 172)),
                    download_if_missing=True):
    """Loader for the Labeled Faces in the Wild (LFW) pairs dataset

    The dataset is a collection of JPEG portraits of famous people gathered
    from the internet; full details live on the official website:

        http://vis-www.cs.umass.edu/lfw/

    Every picture is centered on a single face, each RGB channel value being
    a float in the range 0.0 - 1.0. The associated task is Face
    Verification: given two pictures, predict whether they show the same
    person. This corresponds to the "Restricted" task of the official
    `README.txt`_; the "Unrestricted" variant is not supported.

    .. _`README.txt`: http://vis-www.cs.umass.edu/lfw/README.txt

    Parameters
    ----------
    subset: optional, default: 'train'
        Which split to load: 'train' (development training set), 'test'
        (development test set) or '10_folds' (official evaluation set for
        10-fold cross validation).
    data_home: optional, default: None
        Alternative download/cache folder; defaults to subfolders of
        '~/scikit_learn_data'.
    funneled: boolean, optional, default: True
        Use the funneled variant of the dataset.
    resize: float, optional, default 0.5
        Resize ratio applied to each face picture.
    color: boolean, optional, default False
        Keep the 3 RGB channels instead of collapsing them to gray level;
        when True the data gains one extra dimension.
    slice_: optional
        Custom 2D (height, width) slice extracting the 'interesting' part of
        each jpeg, avoiding spurious correlation with the background.
    download_if_missing: optional, True by default
        When False, raise IOError instead of downloading missing data.
    """
    lfw_home, data_folder_path = check_fetch_lfw(
        data_home=data_home, funneled=funneled,
        download_if_missing=download_if_missing)
    logger.info('Loading %s LFW pairs from %s', subset, lfw_home)
    # wrap the loader in a memoizing function that will return memmaped data
    # arrays for optimal memory usage
    memory = Memory(cachedir=lfw_home, compress=6, verbose=0)
    load_func = memory.cache(_fetch_lfw_pairs)
    # each supported subset maps to one metadata (index) file
    label_filenames = {
        'train': 'pairsDevTrain.txt',
        'test': 'pairsDevTest.txt',
        '10_folds': 'pairs.txt',
    }
    if subset not in label_filenames:
        raise ValueError("subset='%s' is invalid: should be one of %r" % (
            subset, list(sorted(label_filenames.keys()))))
    index_file_path = join(lfw_home, label_filenames[subset])
    # load and memoize the pairs as np arrays
    pairs, target, target_names = load_func(
        index_file_path, data_folder_path, resize=resize, color=color,
        slice_=slice_)
    # pack the results as a Bunch instance
    n_pairs = len(pairs)
    return Bunch(data=pairs.reshape(n_pairs, -1), pairs=pairs,
                 target=target, target_names=target_names,
                 DESCR="'%s' segment of the LFW pairs dataset" % subset)
def load_lfw_pairs(download_if_missing=False, **kwargs):
    """Alias for fetch_lfw_pairs with download_if_missing defaulting to False.
    See fetch_lfw_pairs.__doc__ for the full parameter documentation.
    """
    kwargs['download_if_missing'] = download_if_missing
    return fetch_lfw_pairs(**kwargs)
| treycausey/scikit-learn | sklearn/datasets/lfw.py | Python | bsd-3-clause | 16,524 |
#!/usr/bin/python
"""Load the Octave-generated elephant-seal text dumps into an SQLite
database (elephant-seals.db): raw sensor samples (ddt), initial state
estimates (raw_estimate) and GPS fixes (gps)."""
import sqlite3


def _read_rows(path):
    """Read a whitespace-separated numeric dump as a list of float rows.

    Uses a context manager so the file handle is closed even on errors
    (the original open()/readlines()/close() sequence leaked on failure).
    """
    with open(path, "r") as dump:
        return [[float(field) for field in line.strip().split()] for line in dump]


conn = sqlite3.connect('elephant-seals.db')
c = conn.cursor()
# Create table (no primary key, rowid is sufficient)
c.execute('''CREATE TABLE IF NOT EXISTS ddt
 (individual INTEGER NOT NULL, date REAL,
 ax REAL, ay REAL, az REAL,
 mx REAL, my REAL, mz REAL,
 depth REAL, vel REAL, has_vel INTEGER)''')
c.execute('''CREATE TABLE IF NOT EXISTS raw_estimate
 (id INTEGER NOT NULL, dive_status INTEGER,
 x REAL, y REAL, z REAL,
 roll REAL, pitch REAL, yaw REAL,
 velocity REAL,
 FOREIGN KEY(id) REFERENCES ddt(rowid))''')
c.execute('''CREATE TABLE IF NOT EXISTS dives
 (start INTEGER NOT NULL, end INTEGER NOT NULL,
 FOREIGN KEY(start) REFERENCES ddt(rowid),
 FOREIGN KEY(end) REFERENCES ddt(rowid))''')
c.execute('''CREATE TABLE IF NOT EXISTS dive_properties
 (id INTEGER NOT NULL,
 bscale_x REAL, bscale_y REAL, bscale_z REAL,
 k_depth REAL,
 FOREIGN KEY(id) REFERENCES dives(rowid))''')
c.execute('''CREATE TABLE IF NOT EXISTS orientations
 (id INTEGER NOT NULL, use_quaternions INTEGER,
 x REAL, y REAL, z REAL, w REAL,
 FOREIGN KEY(id) REFERENCES ddt(rowid))''')
c.execute('''CREATE TABLE IF NOT EXISTS gps
 (individual INTEGER NOT NULL, date REAL, latitude REAL, longitude REAL,
 dive_start INTEGER,
 easting REAL, northing REAL, zone INTEGER,
 bx REAL, by REAL, bz REAL,
 FOREIGN KEY(dive_start) REFERENCES dives(rowid))''')

# file generated from octave
l = _read_rows("data/preload_mat_all.txt")
# ddt rows: individual (0), timestamp (sum of the two time fields),
# sensor columns 2..9, has_vel flag; raw_estimate rows reference the
# matching ddt row by enumeration index.
l_ddt = [tuple([0, x[0] + x[1]] + x[2:10] + [int(x[10])]) for x in l]
l_raw = [tuple([i, int(x[18])] + x[11:18]) for i, x in enumerate(l)]
c.executemany('INSERT INTO ddt VALUES (' + '?,' * (len(l_ddt[0]) - 1) + '?)', l_ddt)
c.executemany('INSERT INTO raw_estimate VALUES (' + '?,' * (len(l_raw[0]) - 1) + '?)', l_raw)
# Save (commit) the changes
conn.commit()

l = _read_rows("data/gps_mat_all.txt")
# [ ts tsf lat lon ind err east north zone bx by bz]
l_gps = [tuple([0, x[0] + x[1]] + x[2:4] + [None] + x[6:]) for x in l]
c.executemany('INSERT INTO gps VALUES (' + '?,' * (len(l_gps[0]) - 1) + '?)', l_gps)
# Save (commit) the changes
conn.commit()
# query = """
# select gps.rowid,ddt.rowid,min(abs(ddt.date-gps.date))
# from ddt, gps
# where abs(ddt.date-gps.date)<30./(24*60)
# group by gps.rowid;
# """
# for g_rowid,d_rowid,_ in c.execute(query):
# c.execute("UPDATE gps SET
# We can also close the connection if we are done with it.
# Just be sure any changes have been committed or they will be lost.
conn.close()
| cedricpradalier/elephant-seals | python/data_to_db.py | Python | gpl-3.0 | 2,836 |
from Screen import Screen
from Components.ActionMap import ActionMap
from Components.ActionMap import NumberActionMap
from Components.Label import Label
from Components.Sources.StaticText import StaticText
from Components.config import config, ConfigSubsection, ConfigSelection, ConfigSubList, getConfigListEntry, KEY_LEFT, KEY_RIGHT, KEY_0, ConfigNothing, ConfigPIN
from Components.ConfigList import ConfigList
from Components.SystemInfo import SystemInfo
from enigma import eTimer, eDVBCI_UI, eDVBCIInterfaces
from boxbranding import getBoxType
# Maximum number of Common Interface slots that are probed and configured.
MAX_NUM_CI = 4
def setCIBitrate(configElement):
 # Push the configured CI clock rate for this element's slot to the driver:
 # "no" selects the normal rate, anything else the high rate.
 rate = eDVBCI_UI.rateNormal if configElement.value == "no" else eDVBCI_UI.rateHigh
 eDVBCI_UI.getInstance().setClockRate(configElement.slotid, rate)
def InitCiConfig():
 # Build the per-slot CI configuration tree under config.ci.
 config.ci = ConfigSubList()
 for slotid in range(MAX_NUM_CI):
  slot_cfg = ConfigSubsection()
  config.ci.append(slot_cfg)
  slot_cfg.canDescrambleMultipleServices = ConfigSelection(choices = [("auto", _("Auto")), ("no", _("No")), ("yes", _("Yes"))], default = "auto")
  if SystemInfo["CommonInterfaceSupportsHighBitrates"]:
   # addNotifier immediately applies the current value via setCIBitrate,
   # so the slotid attribute has to be set first.
   slot_cfg.canHandleHighBitrates = ConfigSelection(choices = [("no", _("No")), ("yes", _("Yes"))], default = "yes")
   slot_cfg.canHandleHighBitrates.slotid = slotid
   slot_cfg.canHandleHighBitrates.addNotifier(setCIBitrate)
class MMIDialog(Screen):
 """Modal screen driving the Man-Machine Interface (MMI) session of a CI
 (Common Interface) module in the given slot.
 The CI module pushes screens (menus, lists, enquiries/PIN prompts) which
 are mirrored into this Screen; user answers are forwarded back through
 ``handler`` (the eDVBCI_UI singleton). ``self.tag`` tracks the kind of
 screen currently shown: None, "WAIT", "MENU", "LIST" or "ENQ".
 ``action`` semantics: 0 = reset module, 1 = init module, 2 = start a new
 MMI session, 3 = attach to an already running session (from infobar).
 """
 # NOTE(review): both keyword defaults are evaluated once at import time;
 # fine for the eDVBCI_UI singleton, but _("wait for ci...") is translated
 # only once -- presumably acceptable, confirm for multi-language setups.
 def __init__(self, session, slotid, action, handler = eDVBCI_UI.getInstance(), wait_text = _("wait for ci...") ):
  Screen.__init__(self, session)
  print "[CI] with action" + str(action)
  self.mmiclosed = False
  self.tag = None
  self.slotid = slotid
  # Timer used to auto-close the dialog when the module sends a CLOSE
  # screen carrying a timeout (see showScreen()).
  self.timer = eTimer()
  self.timer.callback.append(self.keyCancel)
  # pre-create the widgets, else the skins fails
  self["title"] = Label("")
  self["subtitle"] = Label("")
  self["bottom"] = Label("")
  self["entries"] = ConfigList([ ])
  self["actions"] = NumberActionMap(["SetupActions"],
  {
   "ok": self.okbuttonClick,
   "cancel": self.keyCancel,
   #for PIN
   "left": self.keyLeft,
   "right": self.keyRight,
   "1": self.keyNumberGlobal,
   "2": self.keyNumberGlobal,
   "3": self.keyNumberGlobal,
   "4": self.keyNumberGlobal,
   "5": self.keyNumberGlobal,
   "6": self.keyNumberGlobal,
   "7": self.keyNumberGlobal,
   "8": self.keyNumberGlobal,
   "9": self.keyNumberGlobal,
   "0": self.keyNumberGlobal
  }, -1)
  self.action = action
  self.handler = handler
  self.wait_text = wait_text
  if action == 2: #start MMI
   handler.startMMI(self.slotid)
   self.showWait()
  elif action == 3: #mmi already there (called from infobar)
   self.showScreen()
 # Translate one raw MMI entry into a ConfigList row. PIN entries also set
 # the subtitle/bottom labels and register an end-notifier on the pin input.
 def addEntry(self, list, entry):
  if entry[0] == "TEXT": #handle every item (text / pin only?)
   list.append( (entry[1], ConfigNothing(), entry[2]) )
  if entry[0] == "PIN":
   pinlength = entry[1]
   if entry[3] == 1:
    # masked pins:
    x = ConfigPIN(0, len = pinlength, censor = "*")
   else:
    # unmasked pins:
    x = ConfigPIN(0, len = pinlength)
   x.addEndNotifier(self.pinEntered)
   self["subtitle"].setText(entry[2])
   list.append( getConfigListEntry("", x) )
   self["bottom"].setText(_("please press OK when ready"))
 # Called by ConfigPIN once all digits are entered: submit as if OK pressed.
 def pinEntered(self, value):
  self.okbuttonClick()
 # Answer the currently shown MMI screen according to self.tag.
 def okbuttonClick(self):
  self.timer.stop()
  if not self.tag:
   return
  if self.tag == "WAIT":
   print "[CI] do nothing - wait"
  elif self.tag == "MENU":
   print "[CI] answer MENU"
   cur = self["entries"].getCurrent()
   if cur:
    self.handler.answerMenu(self.slotid, cur[2])
   else:
    self.handler.answerMenu(self.slotid, 0)
   self.showWait()
  elif self.tag == "LIST":
   print "[CI] answer LIST"
   self.handler.answerMenu(self.slotid, 0)
   self.showWait()
  elif self.tag == "ENQ":
   cur = self["entries"].getCurrent()
   answer = str(cur[1].value)
   length = len(answer)
   # left-pad the PIN with zeroes up to the expected length
   while length < cur[1].getLength():
    answer = '0'+answer
    length+=1
   self.handler.answerEnq(self.slotid, answer)
   self.showWait()
 # Tear down this dialog and report the slot id to the close callback.
 def closeMmi(self):
  self.timer.stop()
  self.close(self.slotid)
 # EXIT handling: either stop the session, back out of the current screen,
 # or forward the cancel to the CI, depending on the current tag.
 def keyCancel(self):
  self.timer.stop()
  if not self.tag or self.mmiclosed:
   self.closeMmi()
  elif self.tag == "WAIT":
   self.handler.stopMMI(self.slotid)
   self.closeMmi()
  elif self.tag in ( "MENU", "LIST" ):
   print "[CI] cancel list"
   self.handler.answerMenu(self.slotid, 0)
   self.showWait()
  elif self.tag == "ENQ":
   print "[CI] cancel enq"
   self.handler.cancelEnq(self.slotid)
   self.showWait()
  else:
   print "[CI] give cancel action to ci"
 # Forward a key event to the ConfigList entry (PIN editing); swallow
 # errors from entries that cannot handle keys.
 def keyConfigEntry(self, key):
  self.timer.stop()
  try:
   self["entries"].handleKey(key)
  except:
   pass
 def keyNumberGlobal(self, number):
  self.timer.stop()
  self.keyConfigEntry(KEY_0 + number)
 def keyLeft(self):
  self.timer.stop()
  self.keyConfigEntry(KEY_LEFT)
 def keyRight(self):
  self.timer.stop()
  self.keyConfigEntry(KEY_RIGHT)
 # Replace the list contents and move the selection back to the top.
 def updateList(self, list):
  List = self["entries"]
  try:
   List.instance.moveSelectionTo(0)
  except:
   pass
  List.l.setList(list)
 # Show the placeholder "wait for ci..." screen and mark the state as WAIT.
 def showWait(self):
  self.tag = "WAIT"
  self["title"].setText("")
  self["subtitle"].setText("")
  self["bottom"].setText("")
  list = [(self.wait_text, ConfigNothing())]
  self.updateList(list)
 # Fetch the current MMI screen from the CI and render it. A leading CLOSE
 # entry either closes immediately or arms the auto-close timer.
 def showScreen(self):
  screen = self.handler.getMMIScreen(self.slotid)
  list = [ ]
  self.timer.stop()
  if len(screen) > 0 and screen[0][0] == "CLOSE":
   timeout = screen[0][1]
   self.mmiclosed = True
   if timeout > 0:
    self.timer.start(timeout*1000, True)
   else:
    self.keyCancel()
  else:
   self.mmiclosed = False
   self.tag = screen[0][0]
  for entry in screen:
   if entry[0] == "PIN":
    self.addEntry(list, entry)
   else:
    if entry[0] == "TITLE":
     self["title"].setText(entry[1])
    elif entry[0] == "SUBTITLE":
     self["subtitle"].setText(entry[1])
    elif entry[0] == "BOTTOM":
     self["bottom"].setText(entry[1])
    elif entry[0] == "TEXT":
     self.addEntry(list, entry)
  self.updateList(list)
 # Called by CiMessageHandler on slot state changes: close the dialog when
 # the module or session went away, otherwise refresh the screen.
 def ciStateChanged(self):
  do_close = False
  if self.action == 0: #reset
   do_close = True
  if self.action == 1: #init
   do_close = True
  #module still there ?
  if self.handler.getState(self.slotid) != 2:
   do_close = True
  #mmi session still active ?
  if self.handler.getMMIState(self.slotid) != 1:
   do_close = True
  if do_close:
   self.closeMmi()
  elif self.action > 1 and self.handler.availableMMI(self.slotid) == 1:
   self.showScreen()
 #FIXME: check for mmi-session closed
class CiMessageHandler:
 """Dispatches CI slot state changes to registered per-slot handlers, or
 opens an MMIDialog for unsolicited MMI screens.
 Constructing the (module-level singleton) instance also probes the
 hardware and fills in the SystemInfo CI capability flags.
 """
 def __init__(self):
  self.session = None
  self.ci = { } # slot -> explicitly registered callback
  self.dlgs = { } # slot -> currently open MMIDialog
  eDVBCI_UI.getInstance().ciStateChanged.get().append(self.ciStateChanged)
  # BUGFIX: ('vuzero') without a trailing comma is a plain string, so the
  # original `in` test was a substring check that also matched box types
  # like 'vu' or 'zero'. A one-element tuple restores real membership.
  if getBoxType() in ('vuzero',):
   SystemInfo["CommonInterface"] = False
  else:
   SystemInfo["CommonInterface"] = eDVBCIInterfaces.getInstance().getNumOfSlots() > 0
  try:
   # Probe the clock-rate control node; its presence means the hardware
   # can drive the CI at high bitrates. Only an open failure is the
   # expected "not supported" signal, so catch IOError instead of a
   # bare except that would hide unrelated errors.
   file = open("/proc/stb/tsmux/ci0_tsclk", "r")
   file.close()
   SystemInfo["CommonInterfaceSupportsHighBitrates"] = True
  except IOError:
   SystemInfo["CommonInterfaceSupportsHighBitrates"] = False
 def setSession(self, session):
  # Remember the GUI session so unsolicited MMI dialogs can be opened.
  self.session = session
 def ciStateChanged(self, slot):
  # Prefer an explicitly registered handler; otherwise forward to an open
  # dialog, or open a new one for a fresh MMI screen (unless suppressed).
  if slot in self.ci:
   self.ci[slot](slot)
  else:
   if slot in self.dlgs:
    self.dlgs[slot].ciStateChanged()
   elif eDVBCI_UI.getInstance().availableMMI(slot) == 1:
    if self.session and not config.usage.hide_ci_messages.value:
     self.dlgs[slot] = self.session.openWithCallback(self.dlgClosed, MMIDialog, slot, 3)
 def dlgClosed(self, slot):
  if slot in self.dlgs:
   del self.dlgs[slot]
 def registerCIMessageHandler(self, slot, func):
  self.unregisterCIMessageHandler(slot)
  self.ci[slot] = func
 def unregisterCIMessageHandler(self, slot):
  if slot in self.ci:
   del self.ci[slot]
# Module-level singleton; importing this module wires it to eDVBCI_UI and
# probes the CI hardware capabilities as a side effect.
CiHandler = CiMessageHandler()
class CiSelection(Screen):
 """Setup screen listing every CI slot with Reset/Init actions, the module
 status/name row and per-slot options (multi-descramble, high bitrate).
 Rows in ``self.list`` are tuples (label, ConfigElement, action, slot)
 where action 0 = reset, 1 = init and 2 = open the module's MMI dialog;
 the option rows come from getConfigListEntry and carry no action.
 """
 def __init__(self, session, menu_path=""):
  Screen.__init__(self, session)
  screentitle = _("Common Interface")
  # Title handling depends on the user's menu-path display preference.
  if config.usage.show_menupath.value == 'large':
   menu_path += screentitle
   title = menu_path
   self["menu_path_compressed"] = StaticText("")
  elif config.usage.show_menupath.value == 'small':
   title = screentitle
   self["menu_path_compressed"] = StaticText(menu_path + " >" if not menu_path.endswith(' / ') else menu_path[:-3] + " >" or "")
  else:
   title = screentitle
   self["menu_path_compressed"] = StaticText("")
  Screen.setTitle(self, title)
  self["actions"] = ActionMap(["OkCancelActions", "CiSelectionActions"],
  {
   "left": self.keyLeft,
   # NOTE(review): "right" is also bound to keyLeft; keyRight exists below
   # but is unused here -- looks like a bug, confirm intended behaviour.
   "right": self.keyLeft,
   "ok": self.okbuttonClick,
   "cancel": self.cancel
  },-1)
  self.dlg = None
  self.state = { } # slot -> last known CI state
  self.list = [ ]
  for slot in range(MAX_NUM_CI):
   state = eDVBCI_UI.getInstance().getState(slot)
   if state != -1:
    self.appendEntries(slot, state)
    CiHandler.registerCIMessageHandler(slot, self.ciStateChanged)
  menuList = ConfigList(self.list)
  menuList.list = self.list
  menuList.l.setList(self.list)
  self["entries"] = menuList
  self["entries"].onSelectionChanged.append(self.selectionChanged)
  self["text"] = Label(_("Slot %d")% 1)
 def selectionChanged(self):
  cur_idx = self["entries"].getCurrentIndex()
  # assumes each slot contributes 5 rows; Python 2 integer division maps
  # the cursor row to its slot -- TODO confirm when option rows are absent
  self["text"].setText(_("Slot %d")%((cur_idx / 5)+1))
 # Forward a left/right key to the focused config entry and persist it.
 def keyConfigEntry(self, key):
  try:
   self["entries"].handleKey(key)
   self["entries"].getCurrent()[1].save()
  except:
   pass
 def keyLeft(self):
  self.keyConfigEntry(KEY_LEFT)
 def keyRight(self):
  self.keyConfigEntry(KEY_RIGHT)
 # Append the fixed block of rows for one slot to self.list.
 def appendEntries(self, slot, state):
  self.state[slot] = state
  self.list.append( (_("Reset"), ConfigNothing(), 0, slot) )
  self.list.append( (_("Init"), ConfigNothing(), 1, slot) )
  if self.state[slot] == 0: #no module
   self.list.append( (_("no module found"), ConfigNothing(), 2, slot) )
  elif self.state[slot] == 1: #module in init
   self.list.append( (_("init module"), ConfigNothing(), 2, slot) )
  elif self.state[slot] == 2: #module ready
   #get appname
   appname = eDVBCI_UI.getInstance().getAppName(slot)
   self.list.append( (appname, ConfigNothing(), 2, slot) )
  self.list.append(getConfigListEntry(_("Multiple service support"), config.ci[slot].canDescrambleMultipleServices))
  if SystemInfo["CommonInterfaceSupportsHighBitrates"]:
   self.list.append(getConfigListEntry(_("High bitrate support"), config.ci[slot].canHandleHighBitrates))
 # Rewrite the status row of the given slot after a state change.
 def updateState(self, slot):
  state = eDVBCI_UI.getInstance().getState(slot)
  self.state[slot] = state
  # find the first row belonging to this slot (option rows have no slot
  # field, hence the length check)
  slotidx=0
  while len(self.list[slotidx]) < 3 or self.list[slotidx][3] != slot:
   slotidx += 1
  slotidx += 1 # do not change Reset
  slotidx += 1 # do not change Init
  if state == 0: #no module
   self.list[slotidx] = (_("no module found"), ConfigNothing(), 2, slot)
  elif state == 1: #module in init
   self.list[slotidx] = (_("init module"), ConfigNothing(), 2, slot)
  elif state == 2: #module ready
   #get appname
   appname = eDVBCI_UI.getInstance().getAppName(slot)
   self.list[slotidx] = (appname, ConfigNothing(), 2, slot)
  lst = self["entries"]
  lst.list = self.list
  lst.l.setList(self.list)
 # Callback registered with CiHandler: refresh either the open MMI dialog
 # or this screen's status row.
 def ciStateChanged(self, slot):
  if self.dlg:
   self.dlg.ciStateChanged()
  else:
   state = eDVBCI_UI.getInstance().getState(slot)
   if self.state[slot] != state:
    #print "something happens"
    self.state[slot] = state
    self.updateState(slot)
 def dlgClosed(self, slot):
  self.dlg = None
 # OK either resets/inits the slot or opens its MMI dialog.
 def okbuttonClick(self):
  cur = self["entries"].getCurrent()
  if cur and len(cur) > 2:
   action = cur[2]
   slot = cur[3]
   if action == 0: #reset
    eDVBCI_UI.getInstance().setReset(slot)
   elif action == 1: #init
    eDVBCI_UI.getInstance().setInit(slot)
   elif self.state[slot] == 2:
    self.dlg = self.session.openWithCallback(self.dlgClosed, MMIDialog, slot, action)
 # Unregister all slot handlers before leaving the screen.
 def cancel(self):
  for slot in range(MAX_NUM_CI):
   state = eDVBCI_UI.getInstance().getState(slot)
   if state != -1:
    CiHandler.unregisterCIMessageHandler(slot)
  self.close()
| mrnamingo/vix4-34-enigma2-bcm | lib/python/Screens/Ci.py | Python | gpl-2.0 | 11,872 |
from django.shortcuts import get_object_or_404
from facebook import GraphAPI, GraphAPIError
from raven.contrib.django.raven_compat.models import client
from canvas.exceptions import InvalidFacebookAccessToken
from canvas.templatetags.jinja_base import render_jinja_to_string
from canvas.view_guards import require_staff, require_user
from drawquest.api_decorators import api_decorator
from drawquest.apps.twitter.models import Twitter, TwitterError, TwitterDuplicateStatusError
# The api decorator registers each endpoint's URL into this list as a side
# effect of decorating the view function below.
urlpatterns = []
api = api_decorator(urlpatterns)
@api('share_web_profile')
@require_user
def share_web_profile(request, message,
                      twitter_access_token=None, twitter_access_token_secret=None,
                      facebook_access_token=None):
    """Cross-post ``message`` to Twitter and/or Facebook for the current user.

    A network is skipped when its credentials are missing. Duplicate tweets
    are silently ignored and other Twitter failures are reported to Sentry;
    a Facebook Graph error is surfaced as InvalidFacebookAccessToken while
    transient IO failures are only reported.
    """
    have_twitter_creds = (twitter_access_token is not None
                          and twitter_access_token_secret is not None)
    if have_twitter_creds:
        try:
            poster = Twitter(twitter_access_token, twitter_access_token_secret)
            poster.tweet(message)
        except TwitterDuplicateStatusError:
            pass
        except TwitterError:
            client.captureException()
    if not facebook_access_token:
        return
    graph = GraphAPI(facebook_access_token)
    try:
        graph.put_object('me', 'feed', message=message)
    except GraphAPIError:
        raise InvalidFacebookAccessToken("Invalid Facebook access token, please re-auth with Facebook.")
    except IOError:
        client.captureException()
| drawquest/drawquest-web | website/drawquest/apps/profiles/api.py | Python | bsd-3-clause | 1,424 |
import os
import appdirs
def get_local_datadir(name):
    """Return the path of the data directory *name* inside golem's user
    data dir.

    Helper for the datadir transition -- avoid it at all costs; it is
    always better to ask the upper layer (like a Client instance) for
    its directory instead.
    """
    base_dir = appdirs.user_data_dir('golem')
    return os.path.join(base_dir, name)
class SimpleEnv(object):
    """Helper class (not actually a metaclass) that resolves the location of
    golem configuration files."""
    @staticmethod
    def env_file_name(filename):
        """Return the full path of *filename* inside the configuration data
        directory, creating the directory if necessary.

        :param str filename: name of a file
        :return str: name of the file joined with the config dir path
        """
        # FIXME: Deprecated!
        datadir = get_local_datadir('SimpleEnv')
        # Create unconditionally and tolerate concurrent creation instead of
        # the racy exists()-then-makedirs() check (TOCTOU): a second process
        # creating the dir between the two calls used to raise OSError.
        try:
            os.makedirs(datadir)
        except OSError:
            if not os.path.isdir(datadir):
                raise
        return os.path.join(datadir, filename)
| Radagast-red/golem | golem/core/simpleenv.py | Python | gpl-3.0 | 960 |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Various TensorFlow Ops (deprecated).
This module and all its submodules are deprecated. See
[contrib/learn/README.md](https://www.tensorflow.org/code/tensorflow/contrib/learn/README.md)
for migration instructions.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=wildcard-import
from tensorflow.contrib.learn.python.learn.ops.embeddings_ops import *
from tensorflow.contrib.learn.python.learn.ops.losses_ops import *
from tensorflow.contrib.learn.python.learn.ops.seq2seq_ops import *
# pylint: enable=wildcard-import
| ghchinoy/tensorflow | tensorflow/contrib/learn/python/learn/ops/__init__.py | Python | apache-2.0 | 1,296 |
"""
Parser and serializer for file formats supported by compare-locales library:
https://hg.mozilla.org/l10n/compare-locales/
"""
import logging
from collections import OrderedDict
from compare_locales import (
parser,
serializer,
)
from pontoon.sync.exceptions import ParseError, SyncError
from pontoon.sync.formats.base import ParsedResource
from pontoon.sync.utils import create_parent_directory
from pontoon.sync.vcs.models import VCSTranslation
log = logging.getLogger(__name__)
class CompareLocalesEntity(VCSTranslation):
    """
    Represents an entity in a file handled by compare-locales.
    """
    def __init__(self, key, string, comment, order):
        self.key = key
        self.source_string = string
        self.source_string_plural = ""
        if self.source_string is None:
            # Placeholder entity with no translation yet.
            self.strings = {}
        else:
            self.strings = {None: self.source_string}
        if comment:
            self.comments = comment.val.split("\n")
        else:
            self.comments = []
        self.order = order
        self.fuzzy = False
        self.source = []
class CompareLocalesResource(ParsedResource):
    """A translation resource backed by the compare-locales parser/serializer.
    When ``source_resource`` is given, this instance represents a localized
    (target) file and is pre-populated with one empty entity per source
    entity, so callers always see the complete entity set even when the
    l10n file is partial or missing.
    """
    def __init__(self, path, source_resource=None):
        self.path = path
        self.entities = OrderedDict()  # Preserve entity order.
        self.source_resource = source_resource
        try:
            self.parser = parser.getParser(self.path)
        except UserWarning as err:
            # compare-locales signals an unsupported file extension with a
            # UserWarning; surface it as a sync ParseError.
            raise ParseError(err)
        self.parsed_objects = []
        # A monolingual l10n file might not contain all entities, but the code
        # expects ParsedResource to contain representations of all of them. So
        # when parsing the l10n resource, we first create empty entity for each
        # source resource entity.
        if source_resource:
            for key, entity in source_resource.entities.items():
                self.entities[key] = CompareLocalesEntity(entity.key, None, None, 0,)
        try:
            self.parser.readFile(self.path)
        except IOError as err:
            # If the file doesn't exist, but we have a source resource,
            # we can keep going, we'll just not have any translations.
            if source_resource:
                return
            else:
                raise ParseError(err)
        self.parsed_objects = list(self.parser.walk())
        # Real entities overwrite the empty placeholders created above;
        # `order` records each entity's position in the file.
        order = 0
        for entity in self.parsed_objects:
            if isinstance(entity, parser.Entity):
                self.entities[entity.key] = CompareLocalesEntity(
                    entity.key, entity.unwrap(), entity.pre_comment, order,
                )
                order += 1
    @property
    def translations(self):
        # Entities sorted by their position in the parsed file; placeholders
        # (order 0) sort first.
        return sorted(self.entities.values(), key=lambda e: e.order)
    def save(self, locale):
        """Serialize the resource back to ``self.path``; requires an attached
        source resource to serialize against (raises SyncError otherwise)."""
        if not self.source_resource:
            raise SyncError(
                "Cannot save resource {0}: No source resource given.".format(self.path)
            )
        # A dictionary of new translations
        new_l10n = {
            key: entity.strings[None] if entity.strings else None
            for key, entity in self.entities.items()
        }
        # Create parent folders if necessary
        create_parent_directory(self.path)
        with open(self.path, "wb") as output_file:
            log.debug("Saving file: %s", self.path)
            output_file.write(
                serializer.serialize(
                    self.path,
                    self.source_resource.parsed_objects,
                    self.parsed_objects,
                    new_l10n,
                )
            )
def parse(path, source_path=None, locale=None):
    """Parse ``path`` as a compare-locales resource, optionally against the
    source resource at ``source_path`` (``locale`` is accepted for interface
    parity but unused)."""
    source = CompareLocalesResource(source_path) if source_path is not None else None
    return CompareLocalesResource(path, source)
| jotes/pontoon | pontoon/sync/formats/compare_locales.py | Python | bsd-3-clause | 3,791 |
##~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~##
## ##
## This file forms part of the Badlands surface processes modelling companion. ##
## ##
## For full license and copyright information, please refer to the LICENSE.md file ##
## located at the project root, or contact the authors. ##
## ##
##~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~##
"""
Here we set usefull functions used to create dynamic topography files for Badlands inputs.
"""
import os
import math
import h5py
import errno
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from scipy.spatial import cKDTree
import xml.etree.ElementTree as ETO
import plotly
from plotly import tools
from plotly.graph_objs import *
plotly.offline.init_notebook_mode()
import warnings
warnings.simplefilter(action = "ignore", category = FutureWarning)
class toolDynTopo:
 """
 Class for creating Badlands dynamic topography displacement maps.
 """
 def __init__(self, extentX=None, extentY=None, dx=None, filename='data/disp'):
  """
  Initialization function which takes the extent of the X,Y coordinates and the discretization value.
  Parameters
  ----------
  variable : extentX
      Lower and upper values of the X axis in metres.
  variable: extentY
      Lower and upper values of the Y axis in metres.
  variable: dx
      Discretisation value of the X-Y axes in metres.
  variable: filename
      Base name (prefix) of the per-step displacement CSV files.
  """
  if extentX is None:
   raise RuntimeError('Extent X-axis values are required.')
  self.extentX = extentX
  if extentY is None:
   raise RuntimeError('Extent Y-axis values are required.')
  self.extentY = extentY
  if dx is None:
   raise RuntimeError('Discretization space value along X axis is required.')
  self.dx = dx
  self.dy = self.dx
  # np.float was a deprecated alias of the builtin float and was removed in
  # NumPy 1.24; the builtin produces the identical float64 dtype.
  self.x = np.arange(self.extentX[0],self.extentX[1]+self.dx,self.dx,dtype=float)
  self.y = np.arange(self.extentY[0],self.extentY[1]+self.dx,self.dy,dtype=float)
  self.nx = None
  self.ny = None
  self.stepNb = None
  self.filename = filename
  return
 def waveDT(self, A=None, L=None, V=None, endTime=None, dispTimeStep=None, axis='X'):
  """
  Build a simple sine wave displacement map and write one CSV file per
  displacement time step (filename<k>.csv, single 'disp' column in
  column-major order).
  Parameters
  ----------
  variable : A, L, V
      The amplitude, wavelength, velocity of the Sine wave.
  variable: endTime
      The end time of the simulation.
  variable: dispTimeStep
      The time step of the each output of the dynamic topography data.
  variable: axis
      Wave propagation along the X or Y axis.
  """
  self.nx = len(self.x)
  self.ny = len(self.y)
  self.stepNb = int(endTime/dispTimeStep)
  # z holds the cumulative wave elevation of the previous step so each CSV
  # stores the incremental displacement (zn - z).
  z = np.zeros((self.nx,self.ny))
  if axis == 'X':
   for k in range(0,self.stepNb):
    f = self.filename+str(k)+'.csv'
    disp = np.zeros((self.nx,self.ny))
    zn = np.zeros((self.nx,self.ny))
    t = (k+1)*dispTimeStep # the wave starts at the first timestep
    posit = int(t*V/self.dx) # the position of wave at time t
    if (posit<=int(L/self.dx)): # when the wave does not fully reach the surface
     tmp = A*np.sin(np.pi*(V*t-self.x[:posit])/L)
     zn[:posit,:] = np.array([tmp,]*self.ny).transpose()
     disp[:posit,:] = zn[:posit,:] - z[:posit,:]
    elif (posit<=int(self.nx)): # when the wave reaches the surface but does not leave
     posit_pass = posit - int(L/self.dx)
     tmp = A*np.sin(np.pi*(V*t-self.x[posit_pass:posit])/L)
     zn[posit_pass:posit,:] = np.array([tmp,]*self.ny).transpose()
     disp[posit_pass:posit,:] = zn[posit_pass:posit,:] - z[posit_pass:posit,:]
    else: # when the wave starts to leave the surface
     posit_pass = posit - int(L/self.dx)
     tmp = A*np.sin(np.pi*(V*t-self.x[posit_pass:])/L)
     zn[posit_pass:,:] = np.array([tmp,]*self.ny).transpose()
     disp[posit_pass:,:] = zn[posit_pass:,:] - z[posit_pass:,:]
    df = pd.DataFrame({'disp':disp.flatten('F')})
    df.to_csv(str(f),columns=['disp'], sep=' ', index=False ,header=0)
    z = np.copy(zn)
  if axis == 'Y':
   for k in range(0,self.stepNb):
    f = self.filename+str(k)+'.csv'
    disp = np.zeros((self.nx,self.ny))
    zn = np.zeros((self.nx,self.ny))
    t = (k+1)*dispTimeStep # the wave starts at the first timestep
    posit = int(t*V/self.dx) # the position of wave at time t
    if (posit<=int(L/self.dx)): # when the wave does not fully reach the surface
     tmp = A*np.sin(np.pi*(V*t-self.y[:posit])/L)
     zn[:,:posit] = np.array([tmp,]*self.nx)
     disp[:,:posit] = zn[:,:posit] - z[:,:posit]
    elif (posit<=int(self.ny)): # when the wave reaches the surface but does not leave
     posit_pass = posit - int(L/self.dx)
     tmp = A*np.sin(np.pi*(V*t-self.y[posit_pass:posit])/L)
     zn[:,posit_pass:posit] = np.array([tmp,]*self.nx)
     disp[:,posit_pass:posit] = zn[:,posit_pass:posit] - z[:,posit_pass:posit]
    else: # when the wave starts to leave the surface
     posit_pass = posit - int(L/self.dx)
     tmp = A*np.sin(np.pi*(V*t-self.y[posit_pass:])/L)
     zn[:,posit_pass:] = np.array([tmp,]*self.nx)
     disp[:,posit_pass:] = zn[:,posit_pass:] - z[:,posit_pass:]
    df = pd.DataFrame({'disp':disp.flatten('F')})
    df.to_csv(str(f),columns=['disp'], sep=' ', index=False ,header=0)
    z = np.copy(zn)
  return
#!/usr/bin/env python
# $Id: keyPress.py,v 1.3 2005/06/23 20:27:16 shawns Exp $
# tab:2
# "Copyright (c) 2000-2003 The Regents of the University of California.
# All rights reserved.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose, without fee, and without written agreement is
# hereby granted, provided that the above copyright notice, the following
# two paragraphs and the author appear in all copies of this software.
#
# IN NO EVENT SHALL THE UNIVERSITY OF CALIFORNIA BE LIABLE TO ANY PARTY FOR
# DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES ARISING OUT
# OF THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN IF THE UNIVERSITY OF
# CALIFORNIA HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# THE UNIVERSITY OF CALIFORNIA SPECIFICALLY DISCLAIMS ANY WARRANTIES,
# INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY
# AND FITNESS FOR A PARTICULAR PURPOSE. THE SOFTWARE PROVIDED HEREUNDER IS
# ON AN "AS IS" BASIS, AND THE UNIVERSITY OF CALIFORNIA HAS NO OBLIGATION TO
# PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS."
#
# Copyright (c) 2002-2003 Intel Corporation
# All rights reserved.
#
# This file is distributed under the terms in the attached INTEL-LICENSE
# file. If you do not find these files, copies can be found by writing to
# Intel Research Berkeley, 2150 Shattuck Avenue, Suite 1300, Berkeley, CA,
# 94704. Attention: Intel License Inquiry.
#
#
# @author Shawn Schaffert
#
import os
if os.name == "posix" :
 import sys, select, termios
 # POSIX implementation: switch the controlling terminal to non-canonical,
 # no-echo mode and poll stdin with select().
 class keyPress:
  """Raw single-key reader for POSIX terminals."""
  def __init__(self):
   self.f = sys.stdin
   # save the current terminal attributes so destroy() can restore them
   self.oldAttr = termios.tcgetattr( self.f )
   # we need to adjust some parameters of the term
   # (1) do not echo
   # (2) turn off canonical mode input (ie, switch to non-canonical mode input)
   # so that input is not processed in line chunks
   # (3) set the VMIN (the min num of bytes needed before returning during a read) to 1
   # (4) set the VTIME (the inter-byte timer) to 0, so we do not delay
   newAttr = self.oldAttr[:]
   newAttr[3] = newAttr[3] & ~termios.ECHO & ~termios.ICANON
   newAttr[6][termios.VMIN] = 1
   newAttr[6][termios.VTIME] = 0
   termios.tcsetattr( self.f , termios.TCSANOW , newAttr )
  def destroy(self):
   # restore the terminal attributes saved in __init__
   termios.tcsetattr( self.f , termios.TCSANOW , self.oldAttr )
  def getChar( self , blocking = False ):
   # Return one character; "" when non-blocking and no key is pending.
   if blocking :
    ready = select.select( [self.f] , [] , [] )
   else :
    ready = select.select( [self.f] , [] , [] , 0 )
   if ready[0] :
    ch = self.f.read(1)
   else:
    ch = ""
   return ch
elif os.name == "nt" :
 import msvcrt
 # Windows implementation: msvcrt already reads the console unbuffered, so
 # there is no terminal state to save or restore.
 class keyPress:
  """Raw single-key reader for the Windows console."""
  def __init__( self ):
   pass
  def destroy( self ):
   pass
  def getChar( self , blocking = False ):
   # Return one character; "" when non-blocking and no key is pending.
   if blocking :
    return msvcrt.getch()
   else :
    if msvcrt.kbhit() :
     return msvcrt.getch()
    else:
     return ""
else :
 # no raw keyboard support on other platforms
 raise OSError
if __name__=="__main__":
 # Smoke test: count up four times a second until 'q' is pressed.
 import time
 kp = keyPress()
 i = 0
 print "press \'q\' to quit"
 while kp.getChar() != "q" :
  i += 1
  print i
  time.sleep(.25)
 kp.destroy()
| fresskarma/tinyos-1.x | contrib/ucb/apps/Monstro/lib/keyPress.py | Python | bsd-3-clause | 3,636 |
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# For license information, please see license.txt
import frappe
from frappe import _
from frappe.model.document import Document
from frappe.model.meta import get_field_precision
from frappe.utils import flt
import erpnext
from erpnext.controllers.taxes_and_totals import init_landed_taxes_and_totals
from erpnext.stock.doctype.serial_no.serial_no import get_serial_nos
class LandedCostVoucher(Document):
    """Distributes additional buying costs (freight, duty, insurance, ...)
    over the items of submitted Purchase Receipts / Purchase Invoices and
    reposts their valuation, stock ledger and GL entries."""

    @frappe.whitelist()
    def get_items_from_purchase_receipts(self):
        # Rebuild the child "items" table from the selected receipt documents.
        # Only stock items and fixed assets are pulled in, since only those
        # carry a valuation that landed costs can be applied to.
        self.set("items", [])
        for pr in self.get("purchase_receipts"):
            if pr.receipt_document_type and pr.receipt_document:
                pr_items = frappe.db.sql("""select pr_item.item_code, pr_item.description,
                    pr_item.qty, pr_item.base_rate, pr_item.base_amount, pr_item.name,
                    pr_item.cost_center, pr_item.is_fixed_asset
                    from `tab{doctype} Item` pr_item where parent = %s
                    and exists(select name from tabItem
                    where name = pr_item.item_code and (is_stock_item = 1 or is_fixed_asset=1))
                    """.format(doctype=pr.receipt_document_type), pr.receipt_document, as_dict=True)

                for d in pr_items:
                    item = self.append("items")
                    item.item_code = d.item_code
                    item.description = d.description
                    item.qty = d.qty
                    item.rate = d.base_rate
                    # Fall back to the company default when the source row
                    # carries no cost center.
                    item.cost_center = d.cost_center or \
                        erpnext.get_default_cost_center(self.company)
                    item.amount = d.base_amount
                    item.receipt_document_type = pr.receipt_document_type
                    item.receipt_document = pr.receipt_document
                    item.purchase_receipt_item = d.name
                    item.is_fixed_asset = d.is_fixed_asset

    def validate(self):
        # Standard frappe validation hook: check inputs, compute totals and
        # distribute the charges before save.
        self.check_mandatory()
        self.validate_receipt_documents()
        init_landed_taxes_and_totals(self)
        self.set_total_taxes_and_charges()
        if not self.get("items"):
            self.get_items_from_purchase_receipts()
        self.set_applicable_charges_on_item()
        self.validate_applicable_charges_for_item()

    def check_mandatory(self):
        # At least one receipt document row is required.
        if not self.get("purchase_receipts"):
            frappe.throw(_("Please enter Receipt Document"))

    def validate_receipt_documents(self):
        """Ensure every linked receipt document is submitted (and, for
        Purchase Invoices, stock-affecting) and that every item row points at
        one of those documents."""
        receipt_documents = []

        for d in self.get("purchase_receipts"):
            docstatus = frappe.db.get_value(d.receipt_document_type, d.receipt_document, "docstatus")
            if docstatus != 1:
                msg = f"Row {d.idx}: {d.receipt_document_type} {frappe.bold(d.receipt_document)} must be submitted"
                frappe.throw(_(msg), title=_("Invalid Document"))

            if d.receipt_document_type == "Purchase Invoice":
                # A Purchase Invoice only affects stock when "Update Stock" is
                # enabled; without stock impact there is nothing to add
                # landed cost to.
                update_stock = frappe.db.get_value(d.receipt_document_type, d.receipt_document, "update_stock")
                if not update_stock:
                    msg = _("Row {0}: Purchase Invoice {1} has no stock impact.").format(d.idx, frappe.bold(d.receipt_document))
                    msg += "<br>" + _("Please create Landed Cost Vouchers against Invoices that have 'Update Stock' enabled.")
                    frappe.throw(msg, title=_("Incorrect Invoice"))

            receipt_documents.append(d.receipt_document)

        for item in self.get("items"):
            if not item.receipt_document:
                frappe.throw(_("Item must be added using 'Get Items from Purchase Receipts' button"))
            elif item.receipt_document not in receipt_documents:
                frappe.throw(_("Item Row {0}: {1} {2} does not exist in above '{1}' table")
                    .format(item.idx, item.receipt_document_type, item.receipt_document))

            if not item.cost_center:
                frappe.throw(_("Row {0}: Cost center is required for an item {1}")
                    .format(item.idx, item.item_code))

    def set_total_taxes_and_charges(self):
        # Sum the (company currency) amounts of the taxes child table.
        self.total_taxes_and_charges = sum(flt(d.base_amount) for d in self.get("taxes"))

    def set_applicable_charges_on_item(self):
        """Distribute the total charges over the items proportionally to the
        field selected in 'Distribute Charges Based On' (skipped when the
        user distributes manually)."""
        if self.get('taxes') and self.distribute_charges_based_on != 'Distribute Manually':
            total_item_cost = 0.0
            total_charges = 0.0
            item_count = 0
            based_on_field = frappe.scrub(self.distribute_charges_based_on)

            for item in self.get('items'):
                total_item_cost += item.get(based_on_field)

            for item in self.get('items'):
                item.applicable_charges = flt(flt(item.get(based_on_field)) * (flt(self.total_taxes_and_charges) / flt(total_item_cost)),
                    item.precision('applicable_charges'))
                total_charges += item.applicable_charges
                item_count += 1

            # Push any rounding difference onto the last item so the
            # distributed sum matches the charge total exactly.
            if total_charges != self.total_taxes_and_charges:
                diff = self.total_taxes_and_charges - total_charges
                self.get('items')[item_count - 1].applicable_charges += diff

    def validate_applicable_charges_for_item(self):
        based_on = self.distribute_charges_based_on.lower()

        if based_on != 'distribute manually':
            total = sum(flt(d.get(based_on)) for d in self.get("items"))
        else:
            # consider for proportion while distributing manually
            total = sum(flt(d.get('applicable_charges')) for d in self.get("items"))

        if not total:
            frappe.throw(_("Total {0} for all items is zero, may be you should change 'Distribute Charges Based On'").format(based_on))

        total_applicable_charges = sum(flt(d.applicable_charges) for d in self.get("items"))

        precision = get_field_precision(frappe.get_meta("Landed Cost Item").get_field("applicable_charges"),
            currency=frappe.get_cached_value('Company', self.company, "default_currency"))

        diff = flt(self.total_taxes_and_charges) - flt(total_applicable_charges)
        diff = flt(diff, precision)

        # Tolerate a tiny rounding drift (under 2 units of the last decimal
        # place) by absorbing it into the last item; anything larger is a
        # genuine mismatch.
        if abs(diff) < (2.0 / (10**precision)):
            self.items[-1].applicable_charges += diff
        else:
            frappe.throw(_("Total Applicable Charges in Purchase Receipt Items table must be same as Total Taxes and Charges"))

    def on_submit(self):
        self.update_landed_cost()

    def on_cancel(self):
        # Cancel re-runs the same routine; the receipt documents recompute
        # their landed cost amounts without this (now cancelled) voucher.
        self.update_landed_cost()

    def update_landed_cost(self):
        """Re-apply landed cost amounts on the linked receipt documents and
        repost their stock ledger / GL entries."""
        for d in self.get("purchase_receipts"):
            doc = frappe.get_doc(d.receipt_document_type, d.receipt_document)

            # check if there are {qty} assets created and linked to this receipt document
            self.validate_asset_qty_and_status(d.receipt_document_type, doc)

            # set landed cost voucher amount in pr item
            doc.set_landed_cost_voucher_amount()

            # set valuation amount in pr item
            doc.update_valuation_rate(reset_outgoing_rate=False)

            # db_update will update and save landed_cost_voucher_amount and voucher_amount in PR
            for item in doc.get("items"):
                item.db_update()

            # asset rate will be updated while creating asset gl entries from PI or PY

            # update latest valuation rate in serial no
            self.update_rate_in_serial_no_for_non_asset_items(doc)

        for d in self.get("purchase_receipts"):
            doc = frappe.get_doc(d.receipt_document_type, d.receipt_document)

            # update stock & gl entries for cancelled state of PR
            doc.docstatus = 2
            doc.update_stock_ledger(allow_negative_stock=True, via_landed_cost_voucher=True)
            doc.make_gl_entries_on_cancel()

            # update stock & gl entries for submit state of PR
            doc.docstatus = 1
            doc.update_stock_ledger(allow_negative_stock=True, via_landed_cost_voucher=True)
            doc.make_gl_entries()
            doc.repost_future_sle_and_gle()

    def validate_asset_qty_and_status(self, receipt_document_type, receipt_document):
        """For fixed-asset rows, require exactly ``qty`` draft Assets linked
        to the receipt document; submitted Assets block the voucher."""
        for item in self.get('items'):
            if item.is_fixed_asset:
                # NOTE(review): this rebinds the parameter to the Asset link
                # fieldname ('purchase_invoice' / 'purchase_receipt').
                receipt_document_type = 'purchase_invoice' if item.receipt_document_type == 'Purchase Invoice' \
                    else 'purchase_receipt'
                docs = frappe.db.get_all('Asset', filters={ receipt_document_type: item.receipt_document,
                    'item_code': item.item_code }, fields=['name', 'docstatus'])
                if not docs or len(docs) != item.qty:
                    frappe.throw(_('There are not enough asset created or linked to {0}. Please create or link {1} Assets with respective document.').format(
                        item.receipt_document, item.qty))
                if docs:
                    for d in docs:
                        if d.docstatus == 1:
                            frappe.throw(_('{2} <b>{0}</b> has submitted Assets. Remove Item <b>{1}</b> from table to continue.').format(
                                item.receipt_document, item.item_code, item.receipt_document_type))

    def update_rate_in_serial_no_for_non_asset_items(self, receipt_document):
        # Propagate the (possibly changed) valuation rate to the serial
        # numbers of non-asset items on the receipt document.
        for item in receipt_document.get("items"):
            if not item.is_fixed_asset and item.serial_no:
                serial_nos = get_serial_nos(item.serial_no)
                if serial_nos:
                    frappe.db.sql("update `tabSerial No` set purchase_rate=%s where name in ({0})"
                        .format(", ".join(["%s"]*len(serial_nos))), tuple([item.valuation_rate] + serial_nos))
| frappe/erpnext | erpnext/stock/doctype/landed_cost_voucher/landed_cost_voucher.py | Python | gpl-3.0 | 8,235 |
# coding: utf-8
import warnings
from .util import configobj_walker as new_configobj_walker
if False: # MYPY
from typing import Any # NOQA
def configobj_walker(cfg):
    # type: (Any) -> Any
    """Deprecated shim kept for backward compatibility; forwards to
    ruamel.yaml.util.configobj_walker (imported as new_configobj_walker)."""
    warnings.warn('configobj_walker has moved to ruamel.yaml.util, please update your code')
    return new_configobj_walker(cfg)
| rochacbruno/dynaconf | dynaconf/vendor_src/ruamel/yaml/configobjwalker.py | Python | mit | 331 |
from nose.tools import eq_
import inflect
def test_loop():
    # For n <= threshold the number is spelled out (same as the default
    # conversion); above the threshold it must come back as plain digits.
    p = inflect.engine()
    for thresh in range(21):
        for n in range(21):
            threshed = p.number_to_words(n, threshold=thresh)
            numwords = p.number_to_words(n)
            if (n <= thresh):
                eq_(numwords, threshed, msg="Wordified %s (<= %s)" % (n, thresh))
            else:
                # $threshed =~ s/\D//gxms;
                eq_(threshed, str(n), msg="p.number_to_words(%s, thresold=%s) == %s != %s" % (
                    n, thresh, threshed, str(n)))
def test_lines():
    # Above the threshold numbers come back as digit strings; comma=0
    # suppresses the thousands separators.
    p = inflect.engine()
    eq_(p.number_to_words(999, threshold=500), '999', msg=' 999 -> 999')
    eq_(p.number_to_words(1000, threshold=500), '1,000', msg='1000 -> 1,000')
    eq_(p.number_to_words(10000, threshold=500), '10,000', msg='10000 -> 10,000')
    eq_(p.number_to_words(100000, threshold=500), '100,000', msg='100000 -> 100,000')
    eq_(p.number_to_words(1000000, threshold=500), '1,000,000', msg='1000000 -> 1,000,000')

    eq_(p.number_to_words(999.3, threshold=500), '999.3', msg=' 999.3 -> 999.3')
    eq_(p.number_to_words(1000.3, threshold=500), '1,000.3', msg='1000.3 -> 1,000.3')
    eq_(p.number_to_words(10000.3, threshold=500), '10,000.3', msg='10000.3 -> 10,000.3')
    eq_(p.number_to_words(100000.3, threshold=500), '100,000.3', msg='100000.3 -> 100,000.3')
    eq_(p.number_to_words(1000000.3, threshold=500), '1,000,000.3', msg='1000000.3 -> 1,000,000.3')

    eq_(p.number_to_words(999, threshold=500, comma=0), '999', msg=' 999 -> 999')
    eq_(p.number_to_words(1000, threshold=500, comma=0), '1000', msg='1000 -> 1000')
    eq_(p.number_to_words(10000, threshold=500, comma=0), '10000', msg='10000 -> 10000')
    eq_(p.number_to_words(100000, threshold=500, comma=0), '100000', msg='100000 -> 100000')
    eq_(p.number_to_words(1000000, threshold=500, comma=0), '1000000', msg='1000000 -> 1000000')

    eq_(p.number_to_words(999.3, threshold=500, comma=0), '999.3', msg=' 999.3 -> 999.3')
    eq_(p.number_to_words(1000.3, threshold=500, comma=0), '1000.3', msg='1000.3 -> 1000.3')
    eq_(p.number_to_words(10000.3, threshold=500, comma=0), '10000.3', msg='10000.3 -> 10000.3')
    eq_(p.number_to_words(100000.3, threshold=500, comma=0), '100000.3', msg='100000.3 -> 100000.3')
    eq_(p.number_to_words(1000000.3, threshold=500, comma=0), '1000000.3', msg='1000000.3 -> 1000000.3')
def test_array():
    """Table-driven checks for number_to_words, one row per input.

    Row layout: [input, words, group=1, group=2, group=3, optional
    words-of-ordinal, optional ordinal-of-words]; ``go`` applies whichever
    entries are present. This is a nose-style generator test.
    """
    cases = [
        ["0", "zero", "zero", "zero", "zero", "zeroth"],
        ["1", "one", "one", "one", "one", "first"],
        ["2", "two", "two", "two", "two", "second"],
        ["3", "three", "three", "three", "three", "third"],
        ["4", "four", "four", "four", "four", "fourth"],
        ["5", "five", "five", "five", "five", "fifth"],
        ["6", "six", "six", "six", "six", "sixth"],
        ["7", "seven", "seven", "seven", "seven", "seventh"],
        ["8", "eight", "eight", "eight", "eight", "eighth"],
        ["9", "nine", "nine", "nine", "nine", "ninth"],
        ["10", "ten", "one, zero", "ten", "ten", "tenth"],
        ["11", "eleven", "one, one", "eleven", "eleven", "eleventh"],
        ["12", "twelve", "one, two", "twelve", "twelve", "twelfth"],
        ["13", "thirteen", "one, three", "thirteen", "thirteen", "thirteenth"],
        ["14", "fourteen", "one, four", "fourteen", "fourteen", "fourteenth"],
        ["15", "fifteen", "one, five", "fifteen", "fifteen", "fifteenth"],
        ["16", "sixteen", "one, six", "sixteen", "sixteen", "sixteenth"],
        ["17", "seventeen", "one, seven", "seventeen", "seventeen", "seventeenth"],
        ["18", "eighteen", "one, eight", "eighteen", "eighteen", "eighteenth"],
        ["19", "nineteen", "one, nine", "nineteen", "nineteen", "nineteenth"],
        ["20", "twenty", "two, zero", "twenty", "twenty", "twentieth"],
        ["21", "twenty-one", "two, one", "twenty-one", "twenty-one", "twenty-first"],
        ["29", "twenty-nine", "two, nine", "twenty-nine", "twenty-nine", "twenty-ninth"],
        ["99", "ninety-nine", "nine, nine", "ninety-nine", "ninety-nine", "ninety-ninth"],
        ["100", "one hundred", "one, zero, zero", "ten, zero", "one zero zero", "one hundredth"],
        ["101", "one hundred and one", "one, zero, one", "ten, one", "one zero one", "one hundred and first"],
        ["110", "one hundred and ten", "one, one, zero", "eleven, zero", "one ten", "one hundred and tenth"],
        ["111", "one hundred and eleven", "one, one, one", "eleven, one", "one eleven", "one hundred and eleventh"],
        ["900", "nine hundred", "nine, zero, zero", "ninety, zero", "nine zero zero", "nine hundredth"],
        ["999", "nine hundred and ninety-nine", "nine, nine, nine", "ninety-nine, nine", "nine ninety-nine", "nine hundred and ninety-ninth"],
        ["1000", "one thousand", "one, zero, zero, zero", "ten, zero zero", "one zero zero, zero", "one thousandth"],
        ["1001", "one thousand and one", "one, zero, zero, one", "ten, zero one", "one zero zero, one", "one thousand and first"],
        ["1010", "one thousand and ten", "one, zero, one, zero", "ten, ten", "one zero one, zero", "one thousand and tenth"],
        ["1100", "one thousand, one hundred", "one, one, zero, zero", "eleven, zero zero", "one ten, zero", "one thousand, one hundredth"],
        ["2000", "two thousand", "two, zero, zero, zero", "twenty, zero zero", "two zero zero, zero", "two thousandth"],
        ["10000", "ten thousand", "one, zero, zero, zero, zero", "ten, zero zero, zero", "one zero zero, zero zero", "ten thousandth"],
        ["100000", "one hundred thousand", "one, zero, zero, zero, zero, zero", "ten, zero zero, zero zero", "one zero zero, zero zero zero", "one hundred thousandth"],
        ["100001", "one hundred thousand and one", "one, zero, zero, zero, zero, one", "ten, zero zero, zero one", "one zero zero, zero zero one", "one hundred thousand and first"],
        ["123456", "one hundred and twenty-three thousand, four hundred and fifty-six", "one, two, three, four, five, six", "twelve, thirty-four, fifty-six", "one twenty-three, four fifty-six", "one hundred and twenty-three thousand, four hundred and fifty-sixth"],
        ["0123456", "one hundred and twenty-three thousand, four hundred and fifty-six", "zero, one, two, three, four, five, six", "zero one, twenty-three, forty-five, six", "zero twelve, three forty-five, six", "one hundred and twenty-three thousand, four hundred and fifty-sixth"],
        ["1234567", "one million, two hundred and thirty-four thousand, five hundred and sixty-seven", "one, two, three, four, five, six, seven", "twelve, thirty-four, fifty-six, seven", "one twenty-three, four fifty-six, seven", "one million, two hundred and thirty-four thousand, five hundred and sixty-seventh"],
        ["12345678", "twelve million, three hundred and forty-five thousand, six hundred and seventy-eight", "one, two, three, four, five, six, seven, eight", "twelve, thirty-four, fifty-six, seventy-eight", "one twenty-three, four fifty-six, seventy-eight", "twelve million, three hundred and forty-five thousand, six hundred and seventy-eighth"],
        ["12_345_678", "twelve million, three hundred and forty-five thousand, six hundred and seventy-eight", "one, two, three, four, five, six, seven, eight", "twelve, thirty-four, fifty-six, seventy-eight", "one twenty-three, four fifty-six, seventy-eight"],
        ["1234,5678", "twelve million, three hundred and forty-five thousand, six hundred and seventy-eight", "one, two, three, four, five, six, seven, eight", "twelve, thirty-four, fifty-six, seventy-eight", "one twenty-three, four fifty-six, seventy-eight"],
        ["1234567890", "one billion, two hundred and thirty-four million, five hundred and sixty-seven thousand, eight hundred and ninety", "one, two, three, four, five, six, seven, eight, nine, zero", "twelve, thirty-four, fifty-six, seventy-eight, ninety", "one twenty-three, four fifty-six, seven eighty-nine, zero", "one billion, two hundred and thirty-four million, five hundred and sixty-seven thousand, eight hundred and ninetieth"],
        ["123456789012345", "one hundred and twenty-three trillion, four hundred and fifty-six billion, seven hundred and eighty-nine million, twelve thousand, three hundred and forty-five", "one, two, three, four, five, six, seven, eight, nine, zero, one, two, three, four, five", "twelve, thirty-four, fifty-six, seventy-eight, ninety, twelve, thirty-four, five", "one twenty-three, four fifty-six, seven eighty-nine, zero twelve, three forty-five", "one hundred and twenty-three trillion, four hundred and fifty-six billion, seven hundred and eighty-nine million, twelve thousand, three hundred and forty-fifth"],
        ["12345678901234567890", "twelve quintillion, three hundred and forty-five quadrillion, six hundred and seventy-eight trillion, nine hundred and one billion, two hundred and thirty-four million, five hundred and sixty-seven thousand, eight hundred and ninety", "one, two, three, four, five, six, seven, eight, nine, zero, one, two, three, four, five, six, seven, eight, nine, zero", "twelve, thirty-four, fifty-six, seventy-eight, ninety, twelve, thirty-four, fifty-six, seventy-eight, ninety", "one twenty-three, four fifty-six, seven eighty-nine, zero twelve, three forty-five, six seventy-eight, ninety", "twelve quintillion, three hundred and forty-five quadrillion, six hundred and seventy-eight trillion, nine hundred and one billion, two hundred and thirty-four million, five hundred and sixty-seven thousand, eight hundred and ninetieth"],
        ["0.987654", "zero point nine eight seven six five four", "zero, point, nine, eight, seven, six, five, four", "zero, point, ninety-eight, seventy-six, fifty-four", "zero, point, nine eighty-seven, six fifty-four", "zeroth point nine eight seven six five four", "zero point nine eight seven six five fourth"],
        [".987654", "point nine eight seven six five four", "point, nine, eight, seven, six, five, four", "point, ninety-eight, seventy-six, fifty-four", "point, nine eighty-seven, six fifty-four", "point nine eight seven six five four", "point nine eight seven six five fourth"],
        ["9.87654", "nine point eight seven six five four", "nine, point, eight, seven, six, five, four", "nine, point, eighty-seven, sixty-five, four", "nine, point, eight seventy-six, fifty-four", "ninth point eight seven six five four", "nine point eight seven six five fourth"],
        ["98.7654", "ninety-eight point seven six five four", "nine, eight, point, seven, six, five, four", "ninety-eight, point, seventy-six, fifty-four", "ninety-eight, point, seven sixty-five, four", "ninety-eighth point seven six five four", "ninety-eight point seven six five fourth"],
        ["987.654", "nine hundred and eighty-seven point six five four", "nine, eight, seven, point, six, five, four", "ninety-eight, seven, point, sixty-five, four", "nine eighty-seven, point, six fifty-four", "nine hundred and eighty-seventh point six five four", "nine hundred and eighty-seven point six five fourth"],
        ["9876.54", "nine thousand, eight hundred and seventy-six point five four", "nine, eight, seven, six, point, five, four", "ninety-eight, seventy-six, point, fifty-four", "nine eighty-seven, six, point, fifty-four", "nine thousand, eight hundred and seventy-sixth point five four", "nine thousand, eight hundred and seventy-six point five fourth"],
        ["98765.4", "ninety-eight thousand, seven hundred and sixty-five point four", "nine, eight, seven, six, five, point, four", "ninety-eight, seventy-six, five, point, four", "nine eighty-seven, sixty-five, point, four", "ninety-eight thousand, seven hundred and sixty-fifth point four", "ninety-eight thousand, seven hundred and sixty-five point fourth"],
        ["101.202.303", "one hundred and one point two zero two three zero three", "one, zero, one, point, two, zero, two, point, three, zero, three", "ten, one, point, twenty, two, point, thirty, three", "one zero one, point, two zero two, point, three zero three"],
        ["98765.", "ninety-eight thousand, seven hundred and sixty-five point", "nine, eight, seven, six, five, point", "ninety-eight, seventy-six, five, point", "nine eighty-seven, sixty-five, point"],
    ]

    engine = inflect.engine()
    for case in cases:
        yield go, engine, case
def go(p, i):
    # i = [input, words, group=1, group=2, group=3(, ordinal-words(, words-ordinal))]
    eq_(p.number_to_words(i[0]), i[1], msg="number_to_words(%s) == %s != %s" % (
        i[0],
        p.number_to_words(i[0]),
        i[1]))
    eq_(p.number_to_words(i[0], group=1), i[2])
    eq_(p.number_to_words(i[0], group=2), i[3])
    eq_(p.number_to_words(i[0], group=3), i[4])
    if len(i) > 5:
        # Spelling out the ordinal form of the input.
        eq_(p.number_to_words(p.ordinal(i[0])), i[5], msg="number_to_words(ordinal(%s)) == %s != %s" % (
            i[0], p.number_to_words(p.ordinal(i[0])),
            i[5]))
    if len(i) > 6:
        # Ordinal of the spelled-out words has its own expected value.
        eq_(p.ordinal(p.number_to_words(i[0])), i[6])
    else:
        if len(i) > 5:
            # Otherwise it must round-trip to the same ordinal words.
            eq_(p.ordinal(p.number_to_words(i[0])), i[5])
# eq_ !eval { p.number_to_words(42, and=>); 1; };
# eq_ $@ =~ 'odd number of';
| GeneralizedLearningUtilities/SuperGLU | python_module/SuperGLU/Services/TextProcessing/Tests/Inflect/test_numwords.py | Python | mit | 17,379 |
# -*- coding: utf-8; -*-
#
# This file is part of Superdesk.
#
# Copyright 2013, 2014 Sourcefabric z.u. and contributors.
#
# For the full copyright and license information, please see the
# AUTHORS and LICENSE files distributed with this source code, or
# at https://www.sourcefabric.org/superdesk/license
"""Superdesk"""
import blinker
import logging as logging_lib
from flask import abort, json, Blueprint, current_app as app # noqa
from flask_script import Command as BaseCommand, Option # noqa
from werkzeug.exceptions import HTTPException
from eve.utils import config # noqa
from eve.methods.common import document_link # noqa
from .eve_backend import EveBackend
from .datalayer import SuperdeskDataLayer # noqa
from .services import BaseService as Service # noqa
from .resource import Resource # noqa
from .privilege import privilege, intrinsic_privilege, get_intrinsic_privileges # noqa
from .workflow import * # noqa
from .signals import * # noqa
__version__ = '1.28'

# Public API identity exposed by the service.
API_NAME = 'Superdesk API'
SCHEMA_VERSION = 0

# Module-level registries, populated via the register_* helpers below.
DOMAIN = {}             # Eve resource domain configs, keyed by resource name
COMMANDS = {}           # manage.py commands, keyed by command name
JINJA_FILTERS = dict()  # jinja filter callables, keyed by filter name

app_components = dict()
app_models = dict()
resources = dict()      # registered Resource instances, keyed by resource name

# Shared backend singleton handed to services by default.
eve_backend = EveBackend()

default_user_preferences = dict()
default_session_preferences = dict()

logger = logging_lib.getLogger(__name__)
class Command(BaseCommand):
    """Superdesk Command.

    The Eve framework changes introduced with https://github.com/nicolaiarocci/eve/issues/213 make the commands fail.
    Reason being the flask-script's run the commands using test_request_context() which is invalid.
    That's the reason we are inheriting the Flask-Script's Command to overcome this issue.
    """

    def __call__(self, _app=None, *args, **kwargs):
        # Run the command inside a real app context (instead of the invalid
        # test_request_context flask-script would use) and translate the
        # outcome into a shell exit code: 0 on success, 1 on any exception.
        try:
            with app.app_context():
                res = self.run(*args, **kwargs)
                logger.info('Command finished with: {}'.format(res))
                return 0
        except Exception as ex:
            logger.info('Uhoh, an exception occured while running the command...')
            logger.exception(ex)
            return 1
def get_headers(self, environ=None):
    """Fix CORS for abort responses.

    todo(petr): put in in custom flask error handler instead
    """
    return [
        ('Content-Type', 'text/html'),
        ('Access-Control-Allow-Origin', '*'),
        ('Access-Control-Allow-Headers', '*'),
    ]


# Monkey-patch werkzeug's HTTPException so responses produced via abort()
# carry the permissive CORS headers above.
setattr(HTTPException, 'get_headers', get_headers)
def domain(resource, res_config):
    """Register the Eve domain configuration for *resource*."""
    DOMAIN.update({resource: res_config})
def command(name, command):
    """Register a manage.py command under *name*."""
    COMMANDS.update({name: command})
def blueprint(blueprint, app, **kwargs):
    """Register flask blueprint on the app, under the app's API prefix.

    :param blueprint: blueprint instance
    :param app: flask app instance
    """
    # Keep the registration kwargs on the blueprint for later inspection.
    blueprint.kwargs = kwargs
    app.register_blueprint(blueprint, url_prefix=app.api_prefix or None, **kwargs)
def get_backend():
    """Returns the available backend, this will be changed in a factory if needed."""
    # Single module-level EveBackend instance shared by all services.
    return eve_backend
def get_resource_service(resource_name):
    """Return the service of the registered resource; raises KeyError if unknown."""
    return resources[resource_name].service
def get_resource_privileges(resource_name):
    """Return the privileges declared on the registered resource (empty dict if none)."""
    return getattr(resources[resource_name], 'privileges', {})
def register_default_user_preference(preference_name, preference):
    """Register a default user preference under the given name."""
    default_user_preferences[preference_name] = preference
def register_default_session_preference(preference_name, preference):
    """Register a default session preference under the given name."""
    default_session_preferences[preference_name] = preference
def register_resource(name, resource, service=None, backend=None, privilege=None, _app=None):
    """Shortcut for registering resource and service together.

    :param name: resource name
    :param resource: resource class
    :param service: service class (defaults to the base Service)
    :param backend: backend instance (defaults to the shared eve backend)
    :param privilege: privilege to register with resource
    :param _app: flask app (defaults to the current app)
    """
    if not backend:
        backend = get_backend()

    if not service:
        service = Service

    if privilege:
        intrinsic_privilege(name, privilege)

    if not _app:
        _app = app

    # Instantiating the resource class wires it (and its service) into the app.
    service_instance = service(name, backend=backend)
    resource(name, app=_app, service=service_instance)
def register_jinja_filter(name, jinja_filter):
    """Register jinja filter

    :param str name: name of the filter
    :param jinja_filter: jinja filter function
    """
    # Collected here; presumably installed into the jinja env at app setup.
    JINJA_FILTERS[name] = jinja_filter
def register_item_schema_field(name, schema, app, copy_on_rewrite=True):
    """Register new item schema field.

    .. versionadded:: 1.28

    :param str name: field name
    :param dict schema: field schema
    :param Flask app: flask app
    :param bool copy_on_rewrite: copy field value when rewriting item
    """
    # Add the field to every content resource, plus the nested item schema
    # used when applying content templates.
    for resource in ['ingest', 'archive', 'published', 'archive_autosave']:
        app.config['DOMAIN'][resource]['schema'].update({name: schema})
    app.config['DOMAIN']['content_templates_apply']['schema']['item']['schema'].update(
        {name: schema}
    )
    if copy_on_rewrite:
        app.config.setdefault('COPY_ON_REWRITE_FIELDS', [])
        app.config['COPY_ON_REWRITE_FIELDS'].append(name)
from superdesk.search_provider import SearchProvider # noqa
from apps.search_providers import register_search_provider # noqa
| mdhaman/superdesk-core | superdesk/__init__.py | Python | agpl-3.0 | 5,286 |
#!c:\users\montes\documents\github\fluid-designer\win64-vc\2.78\python\bin\python.exe
#
# The Python Imaging Library
# $Id$
#
from __future__ import print_function
try:
from tkinter import Tk, Label
except ImportError:
from Tkinter import Tk, Label
from PIL import Image, ImageTk
#
# an image viewer
class UI(Label):
    """Tk label that displays a PIL image.

    The Tk image object is kept on ``self.image`` so it is not garbage
    collected while the label is alive.
    """

    def __init__(self, master, im):
        if im.mode == "1":
            # bitmap image
            self.image = ImageTk.BitmapImage(im, foreground="white")
            Label.__init__(self, master, image=self.image, bg="black", bd=0)
        else:
            # photo image
            self.image = ImageTk.PhotoImage(im)
            Label.__init__(self, master, image=self.image, bd=0)
#
# script interface
if __name__ == "__main__":
    # Open the image named on the command line in a bare Tk window.
    import sys

    if not sys.argv[1:]:
        print("Syntax: python viewer.py imagefile")
        sys.exit(1)

    filename = sys.argv[1]

    root = Tk()
    root.title(filename)

    im = Image.open(filename)

    UI(root, im).pack()

    root.mainloop()
| Microvellum/Fluid-Designer | win64-vc/2.78/Python/Scripts/viewer.py | Python | gpl-3.0 | 1,034 |
#!/usr/bin/env python
import argparse
import json
import logging
import multiprocessing as mp
import os
import signal
import time
from collections import defaultdict
from random import sample
from insights.core import archives
from insights.core import load_package
from insights.core.evaluators import MultiEvaluator, SingleEvaluator
from insights.core.specs import SpecMapper
try:
    from insights_nexus.config.factory import get_config
    config = get_config()
except:
    # insights_nexus is optional; fall back to no configuration.
    # NOTE(review): bare except also swallows KeyboardInterrupt/SystemExit.
    config = None

log = logging.getLogger(__name__)

# Flag flipped by the signal handler so workers can drain and exit cleanly.
stop = False


def stop_handler(signum, frame):
    global stop
    stop = True


# Ctrl-C / SIGTERM request a graceful shutdown instead of killing workers.
signal.signal(signal.SIGINT, stop_handler)
signal.signal(signal.SIGTERM, stop_handler)
def get_args():
    """Define and parse the command line interface for the perf tool."""
    cli = argparse.ArgumentParser("python -m insights.tools.perf")
    cli.add_argument("-p", "--package", required=True, dest="package", help="Package containing the rules to process.")
    cli.add_argument("-n", "--num_archives", default=10, dest="num_archives", type=int, help="Number of archives to process.")
    cli.add_argument("-w", "--workers", default=mp.cpu_count() / 2, dest="num_workers", type=int, help="Number of processes to use.")
    cli.add_argument("-e", "--extract_dir", default="/tmp", dest="extract_dir", help="Working directory into which archives are extracted.")
    cli.add_argument("-d", "--debug", default=False, action="store_true", help="Output DEBUG level messages and final stats.")
    cli.add_argument("-s", "--silent", default=False, action="store_true", help="Output only FATAL messages and final stats.")
    cli.add_argument("-r", "--random", default=False, action="store_true", help="Randomly select archives from all available.")
    cli.add_argument("archive_path", nargs="*", help="Archive file or directory containing archives. Multiple files or directories may be specified.")
    return cli.parse_args()
def print_stats(times, start, end, num_workers):
l = len(times)
median = sorted(times)[l / 2] if l else 0.0
avg = (sum(times) / float(l)) if l else 0.0
msg = """
Workers: %s
Max: %s
Min: %s
Avg: %s
Med: %s
Tot: %s
Throughput: %s
""" % (num_workers, max(times), min(times), avg, median, l, (float(l) / (end - start)))
print msg
def print_response(r):
    # Each skip's "details" embeds a stringified Python list; slice it out,
    # swap quotes so it parses as JSON, and de-duplicate the entries before
    # dumping the whole response.
    # NOTE(review): the [5:...] slice assumes a fixed 5-char prefix before
    # the '[' in the details string — confirm against the evaluator output.
    skips = set()
    for sk in r["skips"]:
        ski = sk["details"]
        something = ski[5:ski.index("]") + 1].replace("'", '"')
        for ha in json.loads(something):
            skips.add(ha)
    r["skips"] = list(skips)
    print json.dumps(r)
def get_paths(roots):
    """Collect archive paths from the given arguments.

    Directories are expanded to their members whose names contain ``.tar``;
    plain file arguments are kept when they themselves look like tarballs.
    """
    found = []
    for entry in roots:
        if os.path.isdir(entry):
            found.extend(os.path.join(entry, name)
                         for name in os.listdir(entry) if '.tar' in name)
        elif '.tar' in entry:
            found.append(entry)
    return found
def process_report(path, tmp_dir):
    """Extract one archive under ``tmp_dir`` and run the loaded rules on it.

    Multi-system archives — detected by a 'systems' key in metadata.json —
    go through MultiEvaluator; everything else through SingleEvaluator.
    """
    with archives.TarExtractor() as extractor:
        if config is None:
            spec_mapper = SpecMapper(extractor.from_path(path, tmp_dir))
        else:
            spec_mapper = SpecMapper(extractor.from_path(path, tmp_dir), config)
        md = json.loads(spec_mapper.get_content("metadata.json", split=False, default="{}"))
        evaluator = MultiEvaluator(spec_mapper) if md and 'systems' in md else SingleEvaluator(spec_mapper)
        return evaluator.process()
def worker(paths, extract_dir, results_queue):
    # Child-process entry point: process each assigned archive and push a
    # (duration, result-or-exception) tuple onto the shared queue.  A None
    # sentinel tells the parent this worker noticed the stop flag and quit.
    for path in paths:
        if stop:
            results_queue.put(None)
            return
        result = None
        start = time.time()
        try:
            result = process_report(path, extract_dir)
        except Exception as ex:
            # Ship the failure to the parent instead of dying silently.
            result = ex
        duration = time.time() - start
        results_queue.put((duration, result))
def process_reports(paths, extract_dir, num_workers):
    """Fan the archives out over ``num_workers`` processes, collect the
    per-archive timings and print throughput statistics."""
    start = time.time()
    times = []
    results = []
    results_queue = mp.Queue()

    # Deal the paths round-robin into one bucket per worker.
    buckets = defaultdict(list)
    for idx, path in enumerate(paths):
        buckets[idx % num_workers].append(path)

    pool = []
    for i, p in buckets.iteritems():
        args = (p, extract_dir, results_queue)
        proc = mp.Process(target=worker, name="worker-%s" % i, args=args)
        pool.append(proc)

    for proc in pool:
        proc.start()

    # SIGUSR1 prints a progress snapshot without interrupting the run.
    def signal_handler(signum, frame):
        print_stats(times, start, time.time(), num_workers)

    signal.signal(signal.SIGUSR1, signal_handler)

    stops = 0
    for i in range(len(paths)):
        t = results_queue.get()
        if t is None:
            # A worker acknowledged the stop request; once all of them have,
            # give up waiting for the remaining archives.
            stops += 1
            if stops == num_workers:
                break
            else:
                continue
        d, r = t
        times.append(d)
        results.append(r)
    print_stats(times, start, time.time(), num_workers)

    for proc in pool:
        proc.join()
def main():
    args = get_args()

    # --silent wins over --debug.
    if args.silent:
        logging.basicConfig(level=logging.FATAL)
    else:
        logging.basicConfig(level=logging.DEBUG if args.debug else logging.INFO)

    # Import the rules package so its rules are registered before evaluation.
    load_package(args.package)
    extract_dir = args.extract_dir
    num_archives = args.num_archives
    paths = get_paths(args.archive_path)
    # Trim to the requested count, randomly sampled if asked for.
    if num_archives < len(paths):
        if args.random:
            paths = sample(paths, num_archives)
        else:
            paths = paths[:num_archives]
    # Multiple archives: parallel run with stats only; a single archive gets
    # its full response printed.
    if len(paths) > 1:
        process_reports(paths, extract_dir, args.num_workers)
    else:
        print_response(process_report(paths[0], extract_dir))


if __name__ == "__main__":
    main()
| PaulWay/insights-core | insights/tools/perf.py | Python | apache-2.0 | 5,493 |
"""SoloVideoJuegos URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url
from django.contrib import admin
# Route table: only the Django admin site is exposed by this project.
urlpatterns = [
    url(r'^admin/', admin.site.urls),
]
| lKaza/VideoJuegosFisicosChile | solovideojuegos/SoloVideoJuegos/urls.py | Python | mit | 793 |
from unittest import mock
import pytest
from conftest import create_project_config
from datakit_data import Push
@pytest.fixture(autouse=True)
def initialize_data_configs(dkit_home, fake_project):
    """Write project-level S3/AWS configs before every test in this module."""
    configs = {
        's3_bucket': 'foo.org',
        's3_path': '2017/fake-project',
        'aws_user_profile': 'ap'
    }
    create_project_config(fake_project, configs)
def test_s3_instantiation(mocker):
    """
    S3 wrapper instantiated properly
    """
    s3_mock = mocker.patch('datakit_data.commands.push.S3', autospec=True)
    command = Push(None, None, 'data push')
    fake_args = mock.Mock()
    fake_args.args = []
    command.run(fake_args)
    # The wrapper must be built from the project-level AWS profile and bucket.
    s3_mock.assert_called_once_with('ap', 'foo.org')
def test_push_invocation(mocker):
    """
    S3.push invoked with default data dir and s3 path
    """
    push_mock = mocker.patch(
        'datakit_data.commands.push.S3.push',
        autospec=True,
    )
    fake_args = mock.Mock()
    fake_args.args = []
    Push(None, None, 'data push').run(fake_args)
    # Default invocation pushes the local data/ dir to the configured S3 path.
    push_mock.assert_any_call(mock.ANY, 'data/', '2017/fake-project',
                              extra_flags=[])
def test_boolean_cli_flags(mocker):
    """Boolean CLI args such as 'dry-run' are forwarded to S3.push as --flags."""
    push_mock = mocker.patch(
        'datakit_data.commands.push.S3.push',
        autospec=True,
    )
    fake_args = mock.Mock()
    fake_args.args = ['dry-run']
    Push(None, None, 'data push').run(fake_args)
    push_mock.assert_any_call(mock.ANY, 'data/', '2017/fake-project',
                              extra_flags=['--dry-run'])
| associatedpress/datakit-data | tests/commands/test_push.py | Python | isc | 1,702 |
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import unittest
import numpy as np
from op_test import OpTest, skip_check_grad_ci, check_out_dtype
import paddle
import paddle.fluid.core as core
class ApiMaxTest(unittest.TestCase):
    """Functional tests for ``paddle.max`` against NumPy's ``np.max``."""
    def setUp(self):
        # Run on GPU when this build of Paddle has CUDA support.
        if core.is_compiled_with_cuda():
            self.place = core.CUDAPlace(0)
        else:
            self.place = core.CPUPlace()
    def test_api(self):
        # Static-graph mode: check float32 / int64 inputs reduced over a
        # single axis and over a tuple of axes.
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program(),
                                         paddle.static.Program()):
            data = paddle.static.data("data", shape=[10, 10], dtype="float32")
            result_max = paddle.max(x=data, axis=1)
            exe = paddle.static.Executor(self.place)
            input_data = np.random.rand(10, 10).astype(np.float32)
            res, = exe.run(feed={"data": input_data}, fetch_list=[result_max])
            self.assertEqual((res == np.max(input_data, axis=1)).all(), True)
        with paddle.static.program_guard(paddle.static.Program(),
                                         paddle.static.Program()):
            data = paddle.static.data("data", shape=[10, 10], dtype="int64")
            result_max = paddle.max(x=data, axis=0)
            exe = paddle.static.Executor(self.place)
            input_data = np.random.randint(10, size=(10, 10)).astype(np.int64)
            res, = exe.run(feed={"data": input_data}, fetch_list=[result_max])
            self.assertEqual((res == np.max(input_data, axis=0)).all(), True)
        with paddle.static.program_guard(paddle.static.Program(),
                                         paddle.static.Program()):
            data = paddle.static.data("data", shape=[10, 10], dtype="int64")
            result_max = paddle.max(x=data, axis=(0, 1))
            exe = paddle.static.Executor(self.place)
            input_data = np.random.randint(10, size=(10, 10)).astype(np.int64)
            res, = exe.run(feed={"data": input_data}, fetch_list=[result_max])
            self.assertEqual((res == np.max(input_data, axis=(0, 1))).all(), True)
    def test_errors(self):
        # Invalid inputs must raise TypeError: raw ndarray instead of a
        # static Variable, and a Variable passed as the axis argument.
        paddle.enable_static()
        def test_input_type():
            with paddle.static.program_guard(paddle.static.Program(),
                                             paddle.static.Program()):
                data = np.random.rand(10, 10)
                result_max = paddle.max(x=data, axis=0)
        self.assertRaises(TypeError, test_input_type)
        def test_axis_type():
            with paddle.static.program_guard(paddle.static.Program(),
                                             paddle.static.Program()):
                data = paddle.static.data("data", shape=[10, 10], dtype="int64")
                axis = paddle.static.data("axis", shape=[10, 10], dtype="int64")
                result_min = paddle.min(data, axis)
        self.assertRaises(TypeError, test_axis_type)
    def test_imperative_api(self):
        # Dygraph (eager) mode: result must match NumPy.
        paddle.disable_static()
        np_x = np.array([10, 10]).astype('float64')
        x = paddle.to_tensor(np_x)
        z = paddle.max(x, axis=0)
        np_z = z.numpy()
        z_expected = np.array(np.max(np_x, axis=0))
        self.assertEqual((np_z == z_expected).all(), True)
    def test_big_dimension(self):
        # 7-D input: axis=-1 and axis=6 address the same (last) dimension.
        paddle.disable_static()
        x = paddle.rand(shape=[2, 2, 2, 2, 2, 2, 2])
        np_x = x.numpy()
        z1 = paddle.max(x, axis=-1)
        z2 = paddle.max(x, axis=6)
        np_z1 = z1.numpy()
        np_z2 = z2.numpy()
        z_expected = np.array(np.max(np_x, axis=6))
        self.assertEqual((np_z1 == z_expected).all(), True)
        self.assertEqual((np_z2 == z_expected).all(), True)
    def test_all_negative_axis(self):
        # All-negative axis tuple (-2, -1) must behave like (0, 1) on 2-D input.
        paddle.disable_static()
        x = paddle.rand(shape=[2, 2])
        np_x = x.numpy()
        z1 = paddle.max(x, axis=(-2, -1))
        np_z1 = z1.numpy()
        z_expected = np.array(np.max(np_x, axis=(0, 1)))
        self.assertEqual((np_z1 == z_expected).all(), True)
class TestOutDtype(unittest.TestCase):
    """Check that ``paddle.max`` preserves the dtype of its input tensor."""
    def test_max(self):
        # All common numeric dtypes must pass through unchanged.
        check_out_dtype(
            paddle.max,
            in_specs=[([10, 16], )],
            expect_dtypes=['float32', 'float64', 'int32', 'int64'])
# Allow running this test file directly with the unittest runner.
if __name__ == '__main__':
    unittest.main()
| luotao1/Paddle | python/paddle/fluid/tests/unittests/test_max_op.py | Python | apache-2.0 | 4,924 |
"""
Imitate the parser representation.
"""
import inspect
import re
import sys
import os
from functools import partial
from jedi._compatibility import builtins as _builtins, unicode
from jedi import debug
from jedi.cache import underscore_memoization, memoize_method
from jedi.parser.tree import Param, Base, Operator, zero_position_modifier
from jedi.evaluate.helpers import FakeName
from . import fake
# Build a pattern that strips either a trailing ".<ext>" or a trailing
# "<sep>__init__.py" from a filesystem path.  Both path separators are
# honoured on platforms (e.g. Windows) that define os.path.altsep.
_sep = os.path.sep
if os.path.altsep is not None:
    _sep += os.path.altsep
_path_re = re.compile('(?:\.[^{0}]+|[{0}]__init__\.py)$'.format(re.escape(_sep)))
del _sep
class CheckAttribute(object):
    """Descriptor that raises AttributeError if the wrapped attribute is missing.

    Methods decorated with this are named ``py<attr>``; reading the
    descriptor first probes ``instance.obj`` for ``<attr>``, so a missing
    attribute surfaces as a plain AttributeError before the method runs.
    """
    def __init__(self, func):
        self.func = func
        # Strip the leading "py", e.g. py__call__ -> __call__.
        self.check_name = func.__name__[len('py'):]

    def __get__(self, instance, owner):
        # Probing for the attribute may raise AttributeError - that is
        # exactly the behaviour callers rely on.
        getattr(instance.obj, self.check_name)
        return partial(self.func, instance)
class CompiledObject(Base):
    """Wraps a real runtime (CPython) object so the evaluator can treat it
    like a parsed tree node.  Attribute access on ``obj`` is mediated by
    the CheckAttribute descriptor for the ``py__*`` protocol methods."""
    # comply with the parser
    start_pos = 0, 0
    path = None # modules have this attribute - set it to None.
    used_names = {} # To be consistent with modules.
    def __init__(self, evaluator, obj, parent=None):
        self._evaluator = evaluator
        self.obj = obj
        self.parent = parent
    @CheckAttribute
    def py__call__(self, params):
        # Calling a class yields an Instance; calling anything else is
        # approximated by parsing the docstring's return annotation.
        if inspect.isclass(self.obj):
            from jedi.evaluate.representation import Instance
            return set([Instance(self._evaluator, self, params)])
        else:
            return set(self._execute_function(params))
    @CheckAttribute
    def py__class__(self):
        return create(self._evaluator, self.obj.__class__)
    @CheckAttribute
    def py__mro__(self):
        return tuple(create(self._evaluator, cls) for cls in self.obj.__mro__)
    @CheckAttribute
    def py__bases__(self):
        return tuple(create(self._evaluator, cls) for cls in self.obj.__bases__)
    def py__bool__(self):
        return bool(self.obj)
    def py__file__(self):
        return self.obj.__file__
    def is_class(self):
        return inspect.isclass(self.obj)
    @property
    def doc(self):
        # Always a string, never None (simplifies _parse_function_doc).
        return inspect.getdoc(self.obj) or ''
    @property
    def params(self):
        # Parameters are reconstructed from the C-level docstring, since
        # builtins carry no signature; method descriptors get an implicit
        # leading ``self``.
        params_str, ret = self._parse_function_doc()
        tokens = params_str.split(',')
        if inspect.ismethoddescriptor(self.obj):
            tokens.insert(0, 'self')
        params = []
        for p in tokens:
            parts = [FakeName(part) for part in p.strip().split('=')]
            if len(parts) > 1:
                parts.insert(1, Operator(zero_position_modifier, '=', (0, 0)))
            params.append(Param(parts, self))
        return params
    def __repr__(self):
        return '<%s: %s>' % (type(self).__name__, repr(self.obj))
    @underscore_memoization
    def _parse_function_doc(self):
        if self.doc is None:
            return '', ''
        return _parse_function_doc(self.doc)
    def api_type(self):
        obj = self.obj
        if inspect.isclass(obj):
            return 'class'
        elif inspect.ismodule(obj):
            return 'module'
        elif inspect.isbuiltin(obj) or inspect.ismethod(obj) \
                or inspect.ismethoddescriptor(obj) or inspect.isfunction(obj):
            return 'function'
        # Everything else...
        return 'instance'
    @property
    def type(self):
        """Imitate the tree.Node.type values."""
        cls = self._get_class()
        if inspect.isclass(cls):
            return 'classdef'
        elif inspect.ismodule(cls):
            return 'file_input'
        elif inspect.isbuiltin(cls) or inspect.ismethod(cls) or \
                inspect.ismethoddescriptor(cls):
            return 'funcdef'
    @underscore_memoization
    def _cls(self):
        """
        We used to limit the lookups for instantiated objects like list(), but
        this is not the case anymore. Python itself
        """
        # Ensures that a CompiledObject is returned that is not an instance (like list)
        return self
    def _get_class(self):
        if not fake.is_class_instance(self.obj) or \
                inspect.ismethoddescriptor(self.obj): # slots
            return self.obj
        try:
            return self.obj.__class__
        except AttributeError:
            # happens with numpy.core.umath._UFUNC_API (you get it
            # automatically by doing `import numpy`.
            return type
    @property
    def names_dict(self):
        # For compatibility with `representation.Class`.
        return self.names_dicts(False)[0]
    def names_dicts(self, search_global, is_instance=False):
        return self._names_dict_ensure_one_dict(is_instance)
    @memoize_method
    def _names_dict_ensure_one_dict(self, is_instance):
        """
        search_global shouldn't change the fact that there's one dict, this way
        there's only one `object`.
        """
        return [LazyNamesDict(self._evaluator, self, is_instance)]
    def get_subscope_by_name(self, name):
        if name in dir(self.obj):
            return CompiledName(self._evaluator, self, name).parent
        else:
            raise KeyError("CompiledObject doesn't have an attribute '%s'." % name)
    @CheckAttribute
    def py__getitem__(self, index):
        # Only index plain builtin containers; arbitrary objects might run
        # user code in a custom __getitem__.
        if type(self.obj) not in (str, list, tuple, unicode, bytes, bytearray, dict):
            # Get rid of side effects, we won't call custom `__getitem__`s.
            return set()
        return set([create(self._evaluator, self.obj[index])])
    @CheckAttribute
    def py__iter__(self):
        if type(self.obj) not in (str, list, tuple, unicode, bytes, bytearray, dict):
            # Get rid of side effects, we won't call custom `__getitem__`s.
            return
        for part in self.obj:
            yield set([create(self._evaluator, part)])
    @property
    def name(self):
        try:
            name = self._get_class().__name__
        except AttributeError:
            name = repr(self.obj)
        return FakeName(name, self)
    def _execute_function(self, params):
        # Approximate the return value of a builtin by resolving the type
        # names found in its docstring against the builtins module.
        if self.type != 'funcdef':
            return
        for name in self._parse_function_doc()[1].split():
            try:
                bltn_obj = getattr(_builtins, name)
            except AttributeError:
                continue
            else:
                if bltn_obj is None:
                    # We want to evaluate everything except None.
                    # TODO do we?
                    continue
                bltn_obj = create(self._evaluator, bltn_obj)
                for result in self._evaluator.execute(bltn_obj, params):
                    yield result
    @property
    @underscore_memoization
    def subscopes(self):
        """
        Returns only the faked scopes - the other ones are not important for
        internal analysis.
        """
        module = self.get_parent_until()
        faked_subscopes = []
        for name in dir(self.obj):
            try:
                faked_subscopes.append(
                    fake.get_faked(module.obj, self.obj, parent=self, name=name)
                )
            except fake.FakeDoesNotExist:
                pass
        return faked_subscopes
    def is_scope(self):
        return True
    def get_self_attributes(self):
        return [] # Instance compatibility
    def get_imports(self):
        return [] # Builtins don't have imports
class CompiledName(FakeName):
    """Name stub for an attribute of a CompiledObject.

    The referenced value is materialised lazily: the ``parent`` property
    creates (and memoizes) the compiled object behind the name.
    """
    def __init__(self, evaluator, compiled_obj, name):
        super(CompiledName, self).__init__(name)
        self._evaluator = evaluator
        self._compiled_obj = compiled_obj
        self.name = name
    def __repr__(self):
        try:
            name = self._compiled_obj.name # __name__ is not defined all the time
        except AttributeError:
            name = None
        return '<%s: (%s).%s>' % (type(self).__name__, name, self.name)
    def is_definition(self):
        return True
    @property
    @underscore_memoization
    def parent(self):
        # Lazily resolve the attribute relative to its enclosing module.
        module = self._compiled_obj.get_parent_until()
        return _create_from_name(self._evaluator, module, self._compiled_obj, self.name)
    @parent.setter
    def parent(self, value):
        pass # Just ignore this, FakeName tries to overwrite the parent attribute.
class LazyNamesDict(object):
    """
    A names_dict instance for compiled objects, resembles the parser.tree.
    """
    name_class = CompiledName
    def __init__(self, evaluator, compiled_obj, is_instance=False):
        self._evaluator = evaluator
        self._compiled_obj = compiled_obj
        self._is_instance = is_instance
    def __iter__(self):
        return (v[0].value for v in self.values())
    @memoize_method
    def __getitem__(self, name):
        # Probe the attribute first: a missing attribute maps to KeyError
        # (dict protocol); any other exception (e.g. a raising property)
        # still yields a placeholder name so completion keeps working.
        try:
            getattr(self._compiled_obj.obj, name)
        except AttributeError:
            raise KeyError('%s in %s not found.' % (name, self._compiled_obj))
        except Exception:
            # This is a bit ugly. We're basically returning this to make
            # lookups possible without having the actual attribute. However
            # this makes proper completion possible.
            return [FakeName(name, create(self._evaluator, None), is_definition=True)]
        return [self.name_class(self._evaluator, self._compiled_obj, name)]
    def values(self):
        obj = self._compiled_obj.obj
        values = []
        for name in dir(obj):
            try:
                values.append(self[name])
            except KeyError:
                # The dir function can be wrong.
                pass
        is_instance = self._is_instance or fake.is_class_instance(obj)
        # ``dir`` doesn't include the type names.
        if not inspect.ismodule(obj) and obj != type and not is_instance:
            values += create(self._evaluator, type).names_dict.values()
        return values
def dotted_from_fs_path(fs_path, sys_path):
    """
    Changes `/usr/lib/python3.4/email/utils.py` to `email.utils`. I.e.
    compares the path with sys.path and then returns the dotted_path. If the
    path is not in the sys.path, just returns None.
    """
    if os.path.basename(fs_path).startswith('__init__.'):
        # We are calculating the path. __init__ files are not interesting.
        fs_path = os.path.dirname(fs_path)
    # prefer
    # - UNIX
    #   /path/to/pythonX.Y/lib-dynload
    #   /path/to/pythonX.Y/site-packages
    # - Windows
    #   C:\path\to\DLLs
    #   C:\path\to\Lib\site-packages
    # over
    # - UNIX
    #   /path/to/pythonX.Y
    # - Windows
    #   C:\path\to\Lib
    # i.e. keep the LONGEST sys.path entry that prefixes fs_path.
    best_prefix = ''
    for candidate in sys_path:
        if fs_path.startswith(candidate) and len(candidate) > len(best_prefix):
            best_prefix = candidate
    # - Window
    # X:\path\to\lib-dynload/datetime.pyd => datetime
    module_path = fs_path[len(best_prefix):].lstrip(os.path.sep).lstrip('/')
    # - Window
    # Replace like X:\path\to\something/foo/bar.py
    return _path_re.sub('', module_path).replace(os.path.sep, '.').replace('/', '.')
def load_module(evaluator, path=None, name=None):
    """Really import the module at ``path`` (or named ``name``) and wrap it.

    ``sys.path`` is temporarily swapped for the evaluator's search path
    while importing.  Returns a CompiledObject for the imported module, or
    None for known-benign failures (PyQt/PySide double-wrap RuntimeError,
    plain ImportError).
    """
    sys_path = evaluator.sys_path
    if path is not None:
        dotted_path = dotted_from_fs_path(path, sys_path=sys_path)
    else:
        dotted_path = name
    if dotted_path is None:
        # Fall back: treat the first path component as a search root.
        p, _, dotted_path = path.partition(os.path.sep)
        sys_path.insert(0, p)
    temp, sys.path = sys.path, sys_path
    try:
        __import__(dotted_path)
    except RuntimeError:
        if 'PySide' in dotted_path or 'PyQt' in dotted_path:
            # RuntimeError: the PyQt4.QtCore and PyQt5.QtCore modules both wrap
            # the QObject class.
            # See https://github.com/davidhalter/jedi/pull/483
            return None
        raise
    except ImportError:
        # If a module is "corrupt" or not really a Python module or whatever.
        debug.warning('Module %s not importable.', path)
        return None
    finally:
        sys.path = temp
    # Just access the cache after import, because of #59 as well as the very
    # complicated import structure of Python.
    module = sys.modules[dotted_path]
    return create(evaluator, module)
# Maps spelled-out type names found in C-level docstrings to real Python
# type names; used by _parse_function_doc() for return-value descriptions.
docstr_defaults = {
    'floating point number': 'float',
    'character': 'str',
    'integer': 'int',
    'dictionary': 'dict',
    'string': 'str',
}
def _parse_function_doc(doc):
    """
    Takes a function and returns the params and return value as a tuple.
    This is nothing more than a docstring parser.
    TODO docstrings like utime(path, (atime, mtime)) and a(b [, b]) -> None
    TODO docstrings like 'tuple of integers'
    """
    # parse round parentheses: def func(a, (b,c))
    try:
        # Scan for the matching closing paren by depth counting.
        count = 0
        start = doc.index('(')
        for i, s in enumerate(doc[start:]):
            if s == '(':
                count += 1
            elif s == ')':
                count -= 1
                if count == 0:
                    end = start + i
                    break
        param_str = doc[start + 1:end]
    except (ValueError, UnboundLocalError):
        # ValueError for doc.index
        # UnboundLocalError for undefined end in last line
        debug.dbg('no brackets found - no param')
        end = 0
        param_str = ''
    else:
        # remove square brackets, that show an optional param ( = None)
        def change_options(m):
            args = m.group(1).split(',')
            for i, a in enumerate(args):
                if a and '=' not in a:
                    args[i] += '=None'
            return ','.join(args)
        # Repeat until all (possibly nested) [...] groups are rewritten.
        while True:
            param_str, changes = re.subn(r' ?\[([^\[\]]+)\]',
                                         change_options, param_str)
            if changes == 0:
                break
    param_str = param_str.replace('-', '_') # see: isinstance.__doc__
    # parse return value
    r = re.search('-[>-]* ', doc[end:end + 7])
    if r is None:
        ret = ''
    else:
        index = end + r.end()
        # get result type, which can contain newlines
        pattern = re.compile(r'(,\n|[^\n-])+')
        ret_str = pattern.match(doc, index).group(0).strip()
        # New object -> object()
        ret_str = re.sub(r'[nN]ew (.*)', r'\1()', ret_str)
        ret = docstr_defaults.get(ret_str, ret_str)
    return param_str, ret
def _create_from_name(evaluator, module, parent, name):
    """Wrap the attribute ``name`` of ``parent`` as a compiled object.

    Faked implementations take precedence over the real attribute; raising
    attributes fall back to None.
    """
    try:
        return fake.get_faked(module.obj, parent.obj, parent=parent, name=name)
    except fake.FakeDoesNotExist:
        pass
    try:
        obj = getattr(parent.obj, name)
    except AttributeError:
        # Happens e.g. in properties of
        # PyQt4.QtGui.QStyleOptionComboBox.currentText
        # -> just set it to None
        obj = None
    return create(evaluator, obj, parent)
def builtin_from_name(evaluator, string):
    """Look up ``string`` in the builtins module and wrap it as compiled."""
    return create(evaluator, getattr(_builtins, string))
def _a_generator(foo):
"""Used to have an object to return for generators."""
yield 42
yield foo
# Canonical runtime objects the evaluator needs direct access to; resolved
# by identifier through get_special_object().
_SPECIAL_OBJECTS = {
    'FUNCTION_CLASS': type(load_module),
    'METHOD_CLASS': type(CompiledObject.is_class),
    'MODULE_CLASS': type(os),
    'GENERATOR_OBJECT': _a_generator(1.0),
    'BUILTINS': _builtins,
}
def get_special_object(evaluator, identifier):
    """Return the compiled wrapper registered under ``identifier``.

    All special objects are parented to the builtins module.
    """
    special = _SPECIAL_OBJECTS[identifier]
    builtins_parent = create(evaluator, _builtins)
    return create(evaluator, special, parent=builtins_parent)
def compiled_objects_cache(attribute_name):
    """Decorator factory: memoize results in a dict stored on the evaluator.

    Entries are keyed by ``(id(obj), id(parent))`` so even unhashable
    objects can be cached; the cached tuple keeps references to obj,
    parent and module alive so the ids cannot be recycled.
    """
    def decorator(func):
        def wrapper(evaluator, obj, parent=None, module=None):
            storage = getattr(evaluator, attribute_name)
            # Do a very cheap form of caching here.
            key = id(obj), id(parent)
            if key in storage:
                return storage[key][0]
            # TODO this whole decorator looks way too ugly and this if
            # doesn't make it better. Find a more generic solution.
            if parent or module:
                value = func(evaluator, obj, parent, module)
            else:
                value = func(evaluator, obj)
            # Need to cache all of them, otherwise the id could be overwritten.
            storage[key] = value, obj, parent, module
            return value
        return wrapper
    return decorator
@compiled_objects_cache('compiled_cache')
def create(evaluator, obj, parent=None, module=None):
    """
    A very weird interface class to this module. The more options provided
    the more accurate loading compiled objects is.

    Modules never keep a parent; everything else defaults to the builtins
    module as parent.  Faked implementations win over the real object.
    """
    if inspect.ismodule(obj):
        if parent is not None:
            # Modules don't have parents, be careful with caching: recurse.
            return create(evaluator, obj)
    else:
        if parent is None and obj != _builtins:
            return create(evaluator, obj, create(evaluator, _builtins))
        try:
            return fake.get_faked(module and module.obj, obj, parent=parent)
        except fake.FakeDoesNotExist:
            pass
    return CompiledObject(evaluator, obj, parent)
| snakeleon/YouCompleteMe-x86 | third_party/ycmd/third_party/JediHTTP/vendor/jedi/jedi/evaluate/compiled/__init__.py | Python | gpl-3.0 | 17,309 |
#
# =================================================================
# =================================================================
from oslo.config import cfg
# Global oslo.config handle shared by this module's subclasses.
CONF = cfg.CONF
class IBMPowerVMBaseNetworkPlacement(object):
    """
    Serves as a base class for host-agnostic placement/deploy data.

    Subclasses implement the three ``_get_host_placement_*`` hooks; this
    class assembles their results into the REST response structure.
    """
    def get_placement(self, context, host_name, network_id, list_only):
        """
        Get the placement dictionary, optionally constrained by a host or
        network.
        :param context: HTTP request context.
        :param host_name: Nova name of a host to optionally constrain
                          placement.
        :param network_id: Neutron network ID to optionally constrain
                           placement.
        :param list_only: If 'true' (string), only a list of IDs will be
                          returned.
        :raises ValueError: if both host_name and network_id are given.
        :returns: Dict of placement data.
        """
        # BUGFIX: previously, passing both constraints fell through every
        # branch and crashed with UnboundLocalError; fail fast instead.
        if host_name and network_id:
            raise ValueError('Specify at most one of host_name and '
                             'network_id.')
        # Handle the case where a host_name was provided - return network ids
        if host_name:
            net_ids = self._get_host_placement_for_host(context, host_name)
            if list_only == 'true':
                return {'host-network-placement': net_ids}
            host_dict = {'host_name': host_name,
                         'networks': [{'network_id': net_id}
                                      for net_id in net_ids]}
            return {'host-network-placement': [host_dict]}
        # Handle the case where a network was provided - return host names
        if network_id:
            host_names = self._get_host_placement_for_network(context,
                                                              network_id)
            if list_only == 'true':
                return {'host-network-placement': host_names}
            placement = [{'host_name': host,
                          'networks': [{'network_id': network_id}]}
                         for host in host_names]
            return {'host-network-placement': placement}
        # Handle the case where nothing was provided - return all data
        return self._get_host_placement_all(context)
    def _get_host_placement_all(self, context):
        """
        Implemented by child classes.
        :param context: HTTP request context.
        :return: dict of host-network-placement data.
        """
        raise NotImplementedError('_get_host_placement_all not implemented')
    def _get_host_placement_for_host(self, context, host_name):
        """
        Implemented by child classes.
        :param context: HTTP request context.
        :param host_name: Nova name of a host to constrain placement
        :return: list of network ids.
        """
        raise NotImplementedError('_get_host_placement_for_host not '
                                  'implemented')
    def _get_host_placement_for_network(self, context, network_id):
        """
        Implemented by child classes.
        :param context: HTTP request context.
        :param network_id: Neutron network ID to constrain placement.
        :return: list of host names.
        """
        raise NotImplementedError('_get_host_placement_for_network not '
                                  'implemented')
| windskyer/k_nova | paxes_nova/network/placement.py | Python | apache-2.0 | 3,574 |
import json
from django import forms
from django_webtest import WebTest
from . import build_test_urls
class TextareaForm(forms.Form):
    """Single-field form used to exercise Textarea widget rendering."""
    # min/max length drive the validation-error tests; the data-test attr
    # verifies that custom widget attributes survive rendering.
    test_field = forms.CharField(
        min_length=5,
        max_length=20,
        widget=forms.Textarea(attrs={'data-test': 'Test Attr'}))
class Test(WebTest):
    """End-to-end rendering tests for the Textarea widget.

    Each ``test_*`` method is served at its own URL (built by
    build_test_urls below); methods that carry a ``template`` attribute
    render that template instead of the default form page.
    """
    default_form = TextareaForm
    urls = 'tests.integration.tests.test_textarea'

    def test_default_usecase(self):
        """Widget renders with container, id, maxlength and custom attrs."""
        page = self.app.get(self.test_default_usecase.url)
        self.assertIn('id="id_test_field_container"', page.body.decode('utf-8'))
        self.assertIn('id="id_test_field"', page.body.decode('utf-8'))
        self.assertIn('maxlength="20"', page.body.decode('utf-8'))
        self.assertIn('data-test="Test Attr"', page.body.decode('utf-8'))
        form = page.form
        self.assertIn('test_field', form.fields)
        form['test_field'] = 'TEST CONTENT'
        response = json.loads(form.submit().body.decode('utf-8'))
        self.assertIn('cleaned_data', response)
        self.assertIn('test_field', response['cleaned_data'])
        # BUGFIX: assertEquals is a deprecated alias (removed in Py3.12).
        self.assertEqual('TEST CONTENT', response['cleaned_data']['test_field'])

    def test_missing_value_error(self):
        """Submitting an empty required field shows the error state."""
        form = self.app.get(self.test_missing_value_error.url).form
        response = form.submit()
        self.assertIn('has-error', response.body.decode('utf-8'))
        self.assertIn('This field is required.', response.body.decode('utf-8'))

    def test_render_with_value(self):
        """Over-long input is redisplayed along with the length error."""
        form = self.app.get(self.test_render_with_value.url).form
        form['test_field'] = 'a'*21
        response = form.submit()
        self.assertIn('>{}<'.format('a'*21), response.body.decode('utf-8'))
        self.assertIn('Ensure this value has at most 20 characters', response.body.decode('utf-8'))

    def test_part_group_class(self):
        """{% part ... group_class %} replaces the container class."""
        page = self.app.get(self.test_part_group_class.url)
        self.assertIn('class="input-field col s12 yellow"', page.body.decode('utf-8'))

    test_part_group_class.template = '''
        {% form %}
            {% part form.test_field group_class %}input-field col s12 yellow{% endpart %}
        {% endform %}
    '''

    def test_part_add_group_class(self):
        """{% part ... add_group_class %} appends to the container class."""
        page = self.app.get(self.test_part_add_group_class.url)
        self.assertIn('class="input-field col s12 required deep-purple lighten-5"', page.body.decode('utf-8'))

    test_part_add_group_class.template = '''
        {% form %}
            {% part form.test_field add_group_class %}deep-purple lighten-5{% endpart %}
        {% endform %}
    '''

    def test_part_prefix(self):
        """{% part ... prefix %} injects markup before the control."""
        response = self.app.get(self.test_part_prefix.url)
        self.assertIn('<i class="mdi-communication-email prefix"></i>', response.body.decode('utf-8'))

    test_part_prefix.template = '''
        {% form %}
            {% part form.test_field prefix %}<i class="mdi-communication-email prefix"></i>{% endpart %}
        {% endform %}
    '''

    def test_part_add_control_class(self):
        """{% part ... add_control_class %} appends to the widget class."""
        response = self.app.get(self.test_part_add_control_class.url)
        self.assertIn('class="materialize-textarea orange"', response.body.decode('utf-8'))

    test_part_add_control_class.template = '''
        {% form %}
            {% part form.test_field add_control_class %}orange{% endpart %}
        {% endform %}
    '''

    def test_part_label(self):
        """{% part ... label %} replaces the rendered label element."""
        response = self.app.get(self.test_part_label.url)
        self.assertIn('<label for="id_test_field">My label</label>', response.body.decode('utf-8'))

    test_part_label.template = '''
        {% form %}
            {% part form.test_field label %}<label for="id_test_field">My label</label>{% endpart %}
        {% endform %}
    '''

    def test_part_add_label_class(self):
        """{% part ... add_label_class %} appends to the label class."""
        response = self.app.get(self.test_part_add_label_class.url)
        self.assertIn('<label for="id_test_field" class="green-text">Test field</label>', response.body.decode('utf-8'))

    test_part_add_label_class.template = '''
        {% form %}
            {% part form.test_field add_label_class %}green-text{% endpart %}
        {% endform %}
    '''

    def test_part_help_text(self):
        """{% part ... help_text %} replaces the help block."""
        response = self.app.get(self.test_part_help_text.url)
        self.assertIn('<small class="help-block">My help</small>', response.body.decode('utf-8'))

    test_part_help_text.template = '''
        {% form %}
            {% part form.test_field help_text %}<small class="help-block">My help</small>{% endpart %}
        {% endform %}
    '''

    def test_part_errors(self):
        """{% part ... errors %} replaces the error block."""
        response = self.app.get(self.test_part_errors.url)
        self.assertIn('<div class="errors"><small class="error">My Error</small></div>', response.body.decode('utf-8'))

    test_part_errors.template = '''
        {% form %}
            {% part form.test_field errors%}<div class="errors"><small class="error">My Error</small></div>{% endpart %}
        {% endform %}
    '''
# URLConf generated from the per-test templates defined on Test above.
urlpatterns = build_test_urls(Test)
| thiagoramos-luizalabs/django-material | tests/integration/tests/test_textarea.py | Python | bsd-3-clause | 4,963 |
#!/usr/bin/env python
copyright = '''
/*
* Copyright 2009 VMware, Inc.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* on the rights to use, copy, modify, merge, publish, distribute, sub
* license, and/or sell copies of the Software, and to permit persons to whom
* the Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* VMWARE AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
'''
# Symbolic names for index element types and provoking-vertex conventions.
# GENERATE means "no input index buffer": indices are synthesized.
GENERATE, UBYTE, USHORT, UINT = 'generate', 'ubyte', 'ushort', 'uint'
FIRST, LAST = 'first', 'last'
INTYPES = (GENERATE, UBYTE, USHORT, UINT)
OUTTYPES = (USHORT, UINT)
PVS=(FIRST, LAST)
# Short primitive names used to build generated C function identifiers,
# kept in the same order as the PIPE_PRIM_* enum below.
PRIMS=('points',
       'lines',
       'linestrip',
       'lineloop',
       'tris',
       'trifan',
       'tristrip',
       'quads',
       'quadstrip',
       'polygon')
LONGPRIMS=('PIPE_PRIM_POINTS',
           'PIPE_PRIM_LINES',
           'PIPE_PRIM_LINE_STRIP',
           'PIPE_PRIM_LINE_LOOP',
           'PIPE_PRIM_TRIANGLES',
           'PIPE_PRIM_TRIANGLE_FAN',
           'PIPE_PRIM_TRIANGLE_STRIP',
           'PIPE_PRIM_QUADS',
           'PIPE_PRIM_QUAD_STRIP',
           'PIPE_PRIM_POLYGON')
# Lookup tables mapping the short names to C enum identifiers.
longprim = dict(zip(PRIMS, LONGPRIMS))
intype_idx = dict(ubyte='IN_UBYTE', ushort='IN_USHORT', uint='IN_UINT')
outtype_idx = dict(ushort='OUT_USHORT', uint='OUT_UINT')
pv_idx = dict(first='PV_FIRST', last='PV_LAST')
def prolog():
    """Emit the generated C file's header: license, includes, the
    index-size helper functions and the static dispatch tables."""
    print '''/* File automatically generated by indices.py */'''
    print copyright
    print r'''
/**
 * @file
 * Functions to translate and generate index lists
 */
#include "indices/u_indices.h"
#include "indices/u_indices_priv.h"
#include "pipe/p_compiler.h"
#include "util/u_debug.h"
#include "pipe/p_defines.h"
#include "util/u_memory.h"
static unsigned out_size_idx( unsigned index_size )
{
   switch (index_size) {
   case 4: return OUT_UINT;
   case 2: return OUT_USHORT;
   default: assert(0); return OUT_USHORT;
   }
}
static unsigned in_size_idx( unsigned index_size )
{
   switch (index_size) {
   case 4: return IN_UINT;
   case 2: return IN_USHORT;
   case 1: return IN_UBYTE;
   default: assert(0); return IN_UBYTE;
   }
}
static u_translate_func translate[IN_COUNT][OUT_COUNT][PV_COUNT][PV_COUNT][PRIM_COUNT];
static u_generate_func generate[OUT_COUNT][PV_COUNT][PV_COUNT][PRIM_COUNT];
'''
def vert( intype, outtype, v0 ):
    """Return the C expression for one output index, cast to outtype.

    GENERATE variants use the index expression directly; translate
    variants read it from the input index array ``in``.
    """
    if intype == GENERATE:
        expr = '(' + v0 + ')'
    else:
        expr = 'in[' + v0 + ']'
    return '(' + outtype + ')' + expr
def point( intype, outtype, ptr, v0 ):
    """Emit a C store of one index through ``ptr``."""
    print ' (' + ptr + ')[0] = ' + vert( intype, outtype, v0 ) + ';'
def line( intype, outtype, ptr, v0, v1 ):
    """Emit C stores of a line's two indices through ``ptr``."""
    print ' (' + ptr + ')[0] = ' + vert( intype, outtype, v0 ) + ';'
    print ' (' + ptr + ')[1] = ' + vert( intype, outtype, v1 ) + ';'
def tri( intype, outtype, ptr, v0, v1, v2 ):
    """Emit C stores of a triangle's three indices through ``ptr``."""
    print ' (' + ptr + ')[0] = ' + vert( intype, outtype, v0 ) + ';'
    print ' (' + ptr + ')[1] = ' + vert( intype, outtype, v1 ) + ';'
    print ' (' + ptr + ')[2] = ' + vert( intype, outtype, v2 ) + ';'
def do_point( intype, outtype, ptr, v0 ):
    # Provoking-vertex order is irrelevant for points; emit directly.
    point( intype, outtype, ptr, v0 )
def do_line( intype, outtype, ptr, v0, v1, inpv, outpv ):
    """Emit one line, swapping endpoints when the provoking vertex moves."""
    if inpv == outpv:
        first, second = v0, v1
    else:
        first, second = v1, v0
    line( intype, outtype, ptr, first, second )
def do_tri( intype, outtype, ptr, v0, v1, v2, inpv, outpv ):
    """Emit one triangle, rotating vertices so the provoking vertex moves
    from the ``inpv`` convention to the ``outpv`` convention."""
    if inpv == outpv:
        order = (v0, v1, v2)
    elif inpv == FIRST:
        order = (v1, v2, v0)
    else:
        order = (v2, v0, v1)
    tri( intype, outtype, ptr, order[0], order[1], order[2] )
def do_quad( intype, outtype, ptr, v0, v1, v2, v3, inpv, outpv ):
    # Split the quad into triangles (v0,v1,v3) and (v1,v2,v3).
    do_tri( intype, outtype, ptr+'+0', v0, v1, v3, inpv, outpv );
    do_tri( intype, outtype, ptr+'+3', v1, v2, v3, inpv, outpv );
def name(intype, outtype, inpv, outpv, prim):
    """Compose the C function name for one translate/generate variant."""
    parts = [prim]
    if intype == GENERATE:
        prefix = 'generate'
        parts.append(outtype)
    else:
        prefix = 'translate'
        parts.append(intype + '2' + outtype)
    parts.append(inpv + '2' + outpv)
    return prefix + '_' + '_'.join(parts)
def preamble(intype, outtype, inpv, outpv, prim):
    """Print the signature and local declarations of one generated C
    function; GENERATE variants take no input index pointer."""
    print 'static void ' + name( intype, outtype, inpv, outpv, prim ) + '('
    if intype != GENERATE:
        print ' const void * _in,'
    print ' unsigned start,'
    print ' unsigned nr,'
    print ' void *_out )'
    print '{'
    if intype != GENERATE:
        print ' const ' + intype + '*in = (const ' + intype + '*)_in;'
    print ' ' + outtype + ' *out = (' + outtype + '*)_out;'
    print ' unsigned i, j;'
    print ' (void)j;'
def postamble():
    # Close the C function body opened by preamble().
    print '}'
def points(intype, outtype, inpv, outpv):
    """Emit the C body translating/generating point indices 1:1."""
    preamble(intype, outtype, inpv, outpv, prim='points')
    print ' for (i = start; i < (nr+start); i++) { '
    do_point( intype, outtype, 'out+i', 'i' );
    print ' }'
    postamble()
def lines(intype, outtype, inpv, outpv):
preamble(intype, outtype, inpv, outpv, prim='lines')
print ' for (i = start; i < (nr+start); i+=2) { '
do_line( intype, outtype, 'out+i', 'i', 'i+1', inpv, outpv );
print ' }'
postamble()
def linestrip(intype, outtype, inpv, outpv):
preamble(intype, outtype, inpv, outpv, prim='linestrip')
print ' for (i = start, j = 0; j < nr; j+=2, i++) { '
do_line( intype, outtype, 'out+j', 'i', 'i+1', inpv, outpv );
print ' }'
postamble()
def lineloop(intype, outtype, inpv, outpv):
preamble(intype, outtype, inpv, outpv, prim='lineloop')
print ' for (i = start, j = 0; j < nr - 2; j+=2, i++) { '
do_line( intype, outtype, 'out+j', 'i', 'i+1', inpv, outpv );
print ' }'
do_line( intype, outtype, 'out+j', 'i', '0', inpv, outpv );
postamble()
def tris(intype, outtype, inpv, outpv):
preamble(intype, outtype, inpv, outpv, prim='tris')
print ' for (i = start; i < (nr+start); i+=3) { '
do_tri( intype, outtype, 'out+i', 'i', 'i+1', 'i+2', inpv, outpv );
print ' }'
postamble()
def tristrip(intype, outtype, inpv, outpv):
preamble(intype, outtype, inpv, outpv, prim='tristrip')
print ' for (i = start, j = 0; j < nr; j+=3, i++) { '
if inpv == FIRST:
do_tri( intype, outtype, 'out+j', 'i', 'i+1+(i&1)', 'i+2-(i&1)', inpv, outpv );
else:
do_tri( intype, outtype, 'out+j', 'i+(i&1)', 'i+1-(i&1)', 'i+2', inpv, outpv );
print ' }'
postamble()
def trifan(intype, outtype, inpv, outpv):
preamble(intype, outtype, inpv, outpv, prim='trifan')
print ' for (i = start, j = 0; j < nr; j+=3, i++) { '
do_tri( intype, outtype, 'out+j', '0', 'i+1', 'i+2', inpv, outpv );
print ' }'
postamble()
def polygon(intype, outtype, inpv, outpv):
preamble(intype, outtype, inpv, outpv, prim='polygon')
print ' for (i = start, j = 0; j < nr; j+=3, i++) { '
if inpv == FIRST:
do_tri( intype, outtype, 'out+j', '0', 'i+1', 'i+2', inpv, outpv );
else:
do_tri( intype, outtype, 'out+j', 'i+1', 'i+2', '0', inpv, outpv );
print ' }'
postamble()
def quads(intype, outtype, inpv, outpv):
preamble(intype, outtype, inpv, outpv, prim='quads')
print ' for (i = start, j = 0; j < nr; j+=6, i+=4) { '
do_quad( intype, outtype, 'out+j', 'i+0', 'i+1', 'i+2', 'i+3', inpv, outpv );
print ' }'
postamble()
def quadstrip(intype, outtype, inpv, outpv):
preamble(intype, outtype, inpv, outpv, prim='quadstrip')
print ' for (i = start, j = 0; j < nr; j+=6, i+=2) { '
do_quad( intype, outtype, 'out+j', 'i+2', 'i+0', 'i+1', 'i+3', inpv, outpv );
print ' }'
postamble()
def emit_funcs():
    # Emit every translate/generate function variant: one function per
    # (input type, output type, pv-in, pv-out, primitive) combination.
    for intype in INTYPES:
        for outtype in OUTTYPES:
            for inpv in (FIRST, LAST):
                for outpv in (FIRST, LAST):
                    points(intype, outtype, inpv, outpv)
                    lines(intype, outtype, inpv, outpv)
                    linestrip(intype, outtype, inpv, outpv)
                    lineloop(intype, outtype, inpv, outpv)
                    tris(intype, outtype, inpv, outpv)
                    tristrip(intype, outtype, inpv, outpv)
                    trifan(intype, outtype, inpv, outpv)
                    quads(intype, outtype, inpv, outpv)
                    quadstrip(intype, outtype, inpv, outpv)
                    polygon(intype, outtype, inpv, outpv)
def init(intype, outtype, inpv, outpv, prim):
    # Emit one assignment wiring the generated function into the C
    # generate[] or translate[] dispatch table.
    if intype == GENERATE:
        print ('generate[' +
               outtype_idx[outtype] +
               '][' + pv_idx[inpv] +
               '][' + pv_idx[outpv] +
               '][' + longprim[prim] +
               '] = ' + name( intype, outtype, inpv, outpv, prim ) + ';')
    else:
        print ('translate[' +
               intype_idx[intype] +
               '][' + outtype_idx[outtype] +
               '][' + pv_idx[inpv] +
               '][' + pv_idx[outpv] +
               '][' + longprim[prim] +
               '] = ' + name( intype, outtype, inpv, outpv, prim ) + ';')

def emit_all_inits():
    # One init() line for every table slot.
    for intype in INTYPES:
        for outtype in OUTTYPES:
            for inpv in PVS:
                for outpv in PVS:
                    for prim in PRIMS:
                        init(intype, outtype, inpv, outpv, prim)
def emit_init():
    # Emit u_index_init(), which fills the dispatch tables exactly once
    # (guarded by a static firsttime flag in the generated C).
    print 'void u_index_init( void )'
    print '{'
    print ' static int firsttime = 1;'
    print ' if (!firsttime) return;'
    print ' firsttime = 0;'
    emit_all_inits()
    print '}'

def epilog():
    # The hand-written part of the module is appended after the
    # generated code.
    print '#include "indices/u_indices.c"'

def main():
    # Emit the complete C source on stdout.
    prolog()
    emit_funcs()
    emit_init()
    epilog()

if __name__ == '__main__':
    main()
| execunix/vinos | xsrc/external/mit/MesaLib/dist/src/gallium/auxiliary/indices/u_indices_gen.py | Python | apache-2.0 | 10,366 |
import logging
import os
import sys
import time
import bluetooth
from Utilities.SettingsManager import SettingsManager
from Utilities.SleepableThread import SleepableThread
from Motors.MotorController import MotorController
from Sensors.CompassController import CompassController
from Sensors.GPSController import GPSController
from Sensors.SonarController import SonarController
from termcolor import colored
class BluetoothController(SleepableThread):
# region Variables
SM = SettingsManager(settings_name='bluetooth', file_path='../config.xml')
log_name = '../Logs/{}-run.log'.format(time.strftime("%Y-%m-%d %H-%M"))
logging.basicConfig(filename=log_name, level=logging.DEBUG,
format='%(asctime)s %(levelname)s %(message)s')
# region Bluetooth Variables
server_sock_in = None
server_sock_out = None
client_sock_in = None
client_sock_out = None
client_address_in = ''
client_address_out = ''
server_address = ''
server_backlog = 0
server_in_port = 0
server_out_port = 0
server_in_connection_timeout = 0
server_out_connection_timeout = 0
server_in_byte_size = 0
connected = False
socket_in_created = False
socket_in_bound = False
socket_out_created = False
socket_out_bound = False
# endregion
# region Controller Variables
# try:
# mc = MotorController
# logging.info('(BLUETOOTH) Set mc to MotorController instance.')
# except:
# logging.error('(BLUETOOTH) Couldn\'t set mc to MotorController instance.')
# try:
# sc = SonarController
# logging.info('(BLUETOOTH) Set sc to SonarController instance.')
# except:
# logging.error('(BLUETOOTH) Couldn\'t set sc to SonarController instance.')
# try:
# gc = GPSController
# logging.info('(BLUETOOTH) Set gc to GPSController instance.')
# except:
# logging.error('(BLUETOOTH) Couldn\'t set gc to GPSController instance.')
# try:
# cc = CompassController
# logging.info('(BLUETOOTH) Set cc to CompassController instance.')
# except:
# logging.error('(BLUETOOTH) Couldn\'t set cc to CompassController instance.')
# endregion
# region ETC Variables
valid_terminal_commands = []
received_commands = []
hide_menu = False
return_to_main_menu = False
clear = 'cls' if os.name == 'nt' else 'clear'
# endregion
# endregion
# region Server Functions
def is_connected(self):
if self.client_address_in == '' and self.client_address_out == '':
return colored('DISCONNECTED','red')
return colored('CONNECTED', 'green')
def sockets_created_and_bound(self):
return True if self.socket_in_created and self.socket_in_bound and self.socket_out_created and self.socket_out_bound else False
def create_socket(self):
try:
self.server_sock_in = bluetooth.BluetoothSocket(bluetooth.RFCOMM)
logging.info('(BLUETOOTH) Server_sock_in created.')
self.socket_in_created = True
except:
logging.error('(BLUETOOTH) Couldn\'t create server_sock_in.')
self.socket_in_created = False
try:
self.server_sock_out = bluetooth.BluetoothSocket(bluetooth.RFCOMM)
logging.info('(BLUETOOTH) Server_sock_out created.')
self.socket_out_created = True
except:
logging.error('(BLUETOOTH) Couldn\'t create server_sock_out.')
self.socket_out_created = False
def set_timeouts(self):
try:
self.server_sock_in.settimeout(self.server_in_connection_timeout)
logging.error('Set server_sock_in timeout to {}.'.format(self.server_in_connection_timeout))
except:
logging.error('Couldn\'t set server_sock_in timeout to {}.'.format(self.server_in_connection_timeout))
try:
self.server_sock_out.settimeout(self.server_out_connection_timeout)
logging.error('Set server_sock_out timeout to {}.'.format(self.server_out_connection_timeout))
except:
logging.error('Couldn\'t set server_sock_out timeout to {}.'.format(self.server_out_connection_timeout))
def bind_server_in_port(self):
try:
self.server_sock_in.bind((self.server_address, self.server_in_port))
logging.info('(BLUETOOTH) Server_sock_in bound on address {} on port {}.'.format(self.server_address,
self.server_in_port))
self.socket_in_bound = True
except:
logging.error('(BLUETOOTH) Couldn\'t bind server_sock_in {} on port {}.'.format(self.server_address,
self.server_in_port))
self.socket_in_bound = False
def bind_server_out_port(self):
try:
self.server_sock_out.bind((self.server_address, self.server_out_port))
logging.info('(BLUETOOTH) Server_sock_out bound on address {} on port {}.'.format(self.server_address,
self.server_out_port))
self.socket_out_bound = True
except:
logging.error('(BLUETOOTH) Couldn\'t bind server_sock_out {} on port {}.'.format(self.server_address,
self.server_out_port))
self.socket_out_bound = False
def listen(self):
try:
self.server_sock_in.listen(self.server_backlog)
logging.info('(BLUETOOTH) Server_sock_in started listening.')
except:
logging.error('(BLUETOOTH) Server_sock_in couldn\'t start listening.')
try:
self.server_sock_out.listen(self.server_backlog)
logging.info('(BLUETOOTH) Server_sock_out started listening.')
except:
logging.error('(BLUETOOTH) Server_sock_in couldn\;t start listening.')
def accept_connections(self):
try:
self.client_sock_in, self.client_address_in = self.server_sock_in.accept()
except:
logging.error('Couldn\'t accept inbound connection.')
try:
self.client_sock_out, self.client_address_out = self.server_sock_out.accept()
except:
logging.error('Couldn\'t accept outbound connection.')
def close_sockets(self):
try:
self.client_sock_in.close()
logging.info('(BLUETOOTH) Closed client_sock_in.')
except:
logging.error('(BLUETOOTH) Couldn\'t close client_sock_in.')
try:
self.client_sock_out.close()
logging.info('(BLUETOOTH) Closed client_sock_out.')
except:
logging.error('(BLUETOOTH) Couldn\'t close client_sock_out.')
try:
self.server_sock_in.close()
logging.info('(BLUETOOTH) Closed server_sock_in.')
except:
logging.error('(BLUETOOTH) Couldn\'t close server_sock_in.')
try:
logging.info('(BLUETOOTH) Closed server_sock_out.')
self.server_sock_out.close()
except:
logging.error('(BLUETOOTH) Couldn\'t close server_sock_out.')
self.client_address_in=''
self.client_address_out=''
self.connected = False
def setup(self):
self.create_socket()
# self.set_timeouts()
self.bind_server_in_port()
self.bind_server_out_port()
self.listen()
self.accept_connections()
self.create_thread()
self.connected = True
def send_data(self, data):
# TEST
try:
self.client_sock_out.send(data)
except:
print ' No outbound connection available.'
# endregion
# region Thread Functions
def run(self):
while self.thread_state != 4:
if self.thread_state == 3:
while self.thread_state == 3:
time.sleep(1)
self.setup()
self.parse_terminal_command('c')
while self.connected:
try:
data = self.client_sock_in.recv(self.server_in_byte_size)
if data:
self.parse_terminal_command(data)
except:
self.close_sockets()
self.parse_terminal_command('c')
# endregion
def __init__(self, motor=MotorController(), sonar=SonarController(), gps=GPSController(), compass=CompassController()):
self.apply_settings()
self.mc = motor
self.sc = sonar
self.gc = gps
self.cc = compass
super(BluetoothController, self).__init__()
def apply_settings(self):
self.server_in_port = int(self.SM.get_setting_value('server_in_port'))
self.server_out_port = int(self.SM.get_setting_value('server_out_port'))
self.server_backlog = int(self.SM.get_setting_value('server_backlog'))
self.server_in_byte_size = int(self.SM.get_setting_value('server_in_byte_size'))
self.server_in_connection_timeout = float(self.SM.get_setting_value('server_in_connection_timeout'))
self.server_out_connection_timeout = float(self.SM.get_setting_value('server_out_connection_timeout'))
self.server_address = str(self.SM.get_setting_value('server_address'))
def parse_terminal_command(self, command):
prefixes = ['mc', 'motorcontroller', 'motor_controller',
'sc', 'sonarcontroller', 'sonar_controller',
'gc', 'gpscontroller', 'gps_controller',
'cc', 'compasscontroller', 'compass_controller',
'bc', 'bluetoothcontroller', 'cluetooth_controller']
data = ''
command = command.lower()
split = command.split()
prefix = command.split()[0]
try:
type = command.split()[1]
except:
type='none'
suffix = command.replace(prefix + ' ', '')
parameters = suffix.replace(type + ' ', '')
# BASIC COMMAND PARSING (if command == 'h' or c and so on)
# If command in valid_commands
# Else below
# If a non valid prefix is sent
if prefix in prefixes:
if prefix == 'mc' or prefix == 'motorcontroller' or prefix == 'motor_controller':
self.mc.run_motor_command(suffix)
elif prefix == 'sc' or prefix == 'sonarcontroller' or prefix == 'sonar_controller':
data = self.sc.parse_terminal_command(suffix)
elif prefix == 'gc' or prefix == 'gpscontroller' or prefix == 'gps_controller':
data = self.gc.parse_terminal_command(suffix)
elif prefix == 'cc' or prefix == 'compasscontroller' or prefix == 'compass_controller':
data = self.cc.parse_terminal_command(suffix)
elif prefix == 'bc' or prefix == 'bluetoothcontroller' or prefix == 'bluetooth_controller':
for cmd in parameters:
if cmd == 'in_port':
data += str(self.server_in_port) + ','
elif cmd == 'out_port':
data += str(self.server_out_port) + ','
data = data[:-1] + ';'
if type == 'get':
self.client_sock_out.send(data)
elif type == 'print':
print ' ', data
elif split[0] == 'thread':
self.parse_thread_command(split[1])
else:
if command == 'c':
os.system(self.clear)
self.print_menu()
elif command == 'h':
if self.hide_menu:
self.hide_menu = False
else:
self.hide_menu = True
self.parse_terminal_command('c')
elif command == 'r':
self.return_to_main_menu = True
elif command == 'q':
exit(0)
elif type == 'thread':
self.parse_thread_command(split[1])
def print_menu(self):
if self.hide_menu: return
bar = colored('|', 'magenta')
print colored(' {:_^54}'.format(''), 'magenta')
print ' {:1}{:^61}{:1}'.format(bar, colored('SERVER TERMINAL', 'white'), bar)
print colored(' {}{:_^52}{}'.format('|', '', '|'), 'magenta')
print ' {}{:^61}{}'.format(bar, colored('CONNECTION INFORMATION', 'white'), bar)
print ' {} {:68} {}'.format(colored('|', 'magenta'), colored('BLUETOOTH SERVER CONNECTED: {}'.format(
self.is_connected()), 'white'), bar)
print ' {} {:68} {}'.format(bar,
colored('BLUETOOTH SERVER LISTENING: {}'.format(self.thread_status()), 'white'),
bar)
print ' {} {:33} {:34} {}'.format(bar, colored('SERVER ADDRESS: {}'.format(self.server_address), 'white'),
colored('BACKLOG: {}'.format(self.server_backlog), 'white'), bar)
print ' {} {:33} {:34} {}'.format(bar, colored('PORT (IN): {}'.format(self.server_in_port), 'white'),
colored('PORT (OUT): {}'.format(self.server_out_port), 'white'), bar)
print colored(' {}{:_^52}{}'.format('|', '', '|'), 'magenta')
def terminal(self):
os.system(self.clear)
sys.stdout.write("\x1b]2;Bluetooth Controller Terminal\x07")
self.print_menu()
while not self.return_to_main_menu:
cmd = raw_input(colored(' Enter a command: ', 'cyan'))
self.parse_terminal_command(cmd)
self.return_to_main_menu = False
return
if __name__ == "__main__":
    # Run the interactive Bluetooth terminal when executed directly.
    bc = BluetoothController()
    bc.terminal()
| MJWherry/Greggg-Python | Bluetooth/BluetoothController.py | Python | mit | 13,969 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3 of the License,
or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
See the GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, see <http://www.gnu.org/licenses/>.
@author: RaNaN
"""
from os import remove, stat, fsync
from os.path import exists
from time import sleep
from re import search
import logging
import codecs
import pycurl
from misc.utilsmisc import remove_chars
from misc.utilsfs import fs_encode
from httprequest import HTTPRequest
class WrongFormat(Exception):
    """Raised when a .chunks resume file does not match the expected format."""
    pass
class ChunkInfo():
    """Bookkeeping for a multi-chunk download.

    Holds the target file name, total size and per-chunk (name, range)
    pairs, and can persist/restore this state to a '<name>.chunks' text
    file so interrupted downloads can be resumed.
    """

    def __init__(self, name):
        self.name = unicode(name)
        self.size = 0
        self.resume = False
        # List of (chunk file name, (start byte, end byte)) tuples.
        self.chunks = []

    def __repr__(self):
        ret = "ChunkInfo: %s, %s\n" % (self.name, self.size)
        for i, c in enumerate(self.chunks):
            ret += "%s# %s\n" % (i, c[1])
        return ret

    def setSize(self, size):
        """Set the total download size in bytes."""
        self.size = int(size)

    def addChunk(self, name, range):
        """Append one (file name, (start, end)) chunk entry."""
        self.chunks.append((name, range))

    def clear(self):
        self.chunks = []

    def createChunks(self, chunks):
        """Split the total size into *chunks* roughly equal byte ranges."""
        self.clear()
        chunk_size = self.size / chunks
        current = 0
        for i in range(chunks):
            # The last chunk absorbs any remainder up to size - 1.
            end = self.size - 1 if (i == chunks - 1) else current + chunk_size
            self.addChunk("%s.chunk%s" % (self.name, i), (current, end))
            current += chunk_size + 1

    def save(self):
        """Write the chunk layout to '<name>.chunks' (UTF-8 text)."""
        fs_name = fs_encode("%s.chunks" % self.name)
        fh = codecs.open(fs_name, "w", "utf_8")
        fh.write("name:%s\n" % self.name)
        fh.write("size:%s\n" % self.size)
        for i, c in enumerate(self.chunks):
            fh.write("#%d:\n" % i)
            fh.write("\tname:%s\n" % c[0])
            fh.write("\trange:%i-%i\n" % c[1])
        fh.close()

    @staticmethod
    def load(name):
        """Restore a ChunkInfo from '<name>.chunks'.

        Raises IOError when the file is missing and WrongFormat when it
        does not follow the layout written by save().
        """
        fs_name = fs_encode("%s.chunks" % name)
        if not exists(fs_name):
            raise IOError()
        fh = codecs.open(fs_name, "r", "utf_8")
        # Header: 'name:<name>' and 'size:<size>' lines.
        name = fh.readline()[:-1]
        size = fh.readline()[:-1]
        if name.startswith("name:") and size.startswith("size:"):
            name = name[5:]
            size = size[5:]
        else:
            fh.close()
            raise WrongFormat()
        ci = ChunkInfo(name)
        ci.loaded = True
        ci.setSize(size)
        # Body: repeated '#<i>:' / '\tname:...' / '\trange:<a>-<b>' records.
        while True:
            if not fh.readline(): #skip line
                break
            name = fh.readline()[1:-1]
            range = fh.readline()[1:-1]
            if name.startswith("name:") and range.startswith("range:"):
                name = name[5:]
                range = range[6:].split("-")
            else:
                raise WrongFormat()
            ci.addChunk(name, (long(range[0]), long(range[1])))
        fh.close()
        return ci

    def remove(self):
        """Delete the persisted .chunks file if present."""
        fs_name = fs_encode("%s.chunks" % self.name)
        if exists(fs_name): remove(fs_name)

    def getCount(self):
        return len(self.chunks)

    def getChunkName(self, index):
        return self.chunks[index][0]

    def getChunkRange(self, index):
        return self.chunks[index][1]
class HTTPChunk(HTTPRequest):
    """One byte-range ("chunk") of an HTTPDownload, driven by pycurl.

    Each chunk owns its own Curl handle and target file; the parent
    HTTPDownload multiplexes several chunks.
    """

    def __init__(self, id, parent, range=None, resume=False):
        self.id = id
        self.p = parent # HTTPDownload instance
        self.range = range # tuple (start, end)
        self.resume = resume
        self.log = parent.log
        # -1 means "size unknown" (unranged single download).
        self.size = range[1] - range[0] if range else -1
        self.arrived = 0  # bytes written so far for this chunk
        self.lastURL = self.p.referer
        self.c = pycurl.Curl()
        self.httpheader = ""
        self.headerParsed = False #indicates if the header has been processed
        self.fp = None #file handle
        self.initHandle()
        self.setInterface(self.p.options)
        self.BOMChecked = False # check and remove byte order mark
        self.rep = None
        # Adaptive throttle used when no bandwidth bucket is configured.
        self.sleep = 0.000
        self.lastSize = 0

    def __repr__(self):
        return "<HTTPChunk id=%d, size=%d, arrived=%d>" % (self.id, self.size, self.arrived)

    @property
    def cj(self):
        # Cookie jar is shared with the parent download.
        return self.p.cj

    def getHandle(self):
        """ returns a Curl handle ready to use for perform/multiperform """
        self.setRequestContext(self.p.url, self.p.get, self.p.post, self.p.referer, self.p.cj)
        self.c.setopt(pycurl.WRITEFUNCTION, self.writeBody)
        self.c.setopt(pycurl.HEADERFUNCTION, self.writeHeader)
        # request all bytes, since some servers in russia seems to have a defect arihmetic unit
        fs_name = fs_encode(self.p.info.getChunkName(self.id))
        if self.resume:
            # Append to the partial chunk file and continue where it ends.
            self.fp = open(fs_name, "ab")
            self.arrived = self.fp.tell()
            if not self.arrived:
                self.arrived = stat(fs_name).st_size
            if self.range:
                #do nothing if chunk already finished
                if self.arrived + self.range[0] >= self.range[1]: return None
                if self.id == len(self.p.info.chunks) - 1: #as last chunk dont set end range, so we get everything
                    range = "%i-" % (self.arrived + self.range[0])
                else:
                    range = "%i-%i" % (self.arrived + self.range[0], min(self.range[1] + 1, self.p.size - 1))
                logging.debug("Chunked resume with range %s" % range)
                self.c.setopt(pycurl.RANGE, range)
            else:
                logging.debug("Resume File from %i" % self.arrived)
                self.c.setopt(pycurl.RESUME_FROM, self.arrived)
        else:
            if self.range:
                if self.id == len(self.p.info.chunks) - 1: # see above
                    range = "%i-" % self.range[0]
                else:
                    range = "%i-%i" % (self.range[0], min(self.range[1] + 1, self.p.size - 1))
                logging.debug("Chunked with range %s" % range)
                self.c.setopt(pycurl.RANGE, range)
            self.fp = open(fs_name, "wb")
        return self.c

    def writeHeader(self, buf):
        """pycurl HEADERFUNCTION callback: accumulate and parse headers."""
        self.httpheader += buf
        #@TODO forward headers?, this is possibly unneeded, when we just parse valid 200 headers
        # as first chunk, we will parse the headers
        if not self.range and self.httpheader.endswith("\r\n\r\n"):
            self.parseHeader()
        elif not self.range and buf.startswith("150") and "data connection" in buf: #ftp file size parsing
            size = search(r"(\d+) bytes", buf)
            if size:
                self.p.size = int(size.group(1))
                self.p.chunkSupport = True
            self.headerParsed = True

    def writeBody(self, buf):
        """pycurl WRITEFUNCTION callback: write data and throttle."""
        #ignore BOM, it confuses unrar
        if not self.BOMChecked:
            if [ord(b) for b in buf[:3]] == [239, 187, 191]:
                buf = buf[3:]
            self.BOMChecked = True
        size = len(buf)
        self.arrived += size
        self.fp.write(buf)
        if self.p.bucket:
            # Bandwidth-limited: sleep for the time the bucket dictates.
            sleep(self.p.bucket.consumed(size))
        else:
            # Avoid small buffers, increasing sleep time slowly if buffer size gets smaller
            # otherwise reduce sleep time percentile (values are based on tests)
            # So in general cpu time is saved without reducing bandwidth too much
            if size < self.lastSize:
                self.sleep += 0.002
            else:
                self.sleep *= 0.7
            self.lastSize = size
            sleep(self.sleep)
        if self.range and self.arrived > self.size:
            # Returning 0 from the write callback makes curl abort.
            return 0 #close if we have enough data

    def parseHeader(self):
        """parse data from received header"""
        for orgline in self.decodeResponse(self.httpheader).splitlines():
            line = orgline.strip().lower()
            if line.startswith("accept-ranges") and "bytes" in line:
                self.p.chunkSupport = True
            if "content-disposition" in line:
                # Extract the server-suggested file name, if any.
                m = search("filename(?P<type>=|\*=(?P<enc>.+)'')(?P<name>.*)", line)
                if m:
                    name = remove_chars(m.groupdict()['name'], "\"';/").strip()
                    self.p._name = name
                    logging.debug("Content-Disposition: %s" % name)
            if not self.resume and line.startswith("content-length"):
                self.p.size = int(line.split(":")[1])
        self.headerParsed = True

    def stop(self):
        """The download will not proceed after next call of writeBody"""
        self.range = [0,0]
        self.size = 0

    def resetRange(self):
        """ Reset the range, so the download will load all data available """
        self.range = None

    def setRange(self, range):
        self.range = range
        self.size = range[1] - range[0]

    def flushFile(self):
        """ flush and close file """
        self.fp.flush()
        fsync(self.fp.fileno()) #make sure everything was written to disk
        self.fp.close() #needs to be closed, or merging chunks will fail

    def close(self):
        """ closes everything, unusable after this """
        if self.fp: self.fp.close()
        self.c.close()
        if hasattr(self, "p"): del self.p
| rfancn/myprojects | spload/network/httpchunk.py | Python | mit | 9,633 |
"constants used for testing. Tightly coupled to settings.py"
# {error_count:set_of_numerals} for Figures representing Numeral 0 to 9
# Row i lists, per number of differing cells, which numerals a figure of
# numeral i could be mistaken for.
valid_figure_superpositions = (
    {0: {'0'}, 1: {'8'}, 2: {'6', '9'}, 3: {'2', '3', '5', '7'}, 4: {'1', '4'}},
    {0: {'1'}, 1: {'7'}, 2: {'4'}, 3: {'3'}, 4: {'0', '9'}, 5: {'2', '5', '8'}, 6: {'6'}},
    {0: {'2'}, 2: {'3', '8'}, 3: {'0', '6', '9'}, 4: {'5', '7'}, 5: {'1', '4'}},
    {0: {'3'}, 1: {'9'}, 2: {'2', '5', '7', '8'}, 3: {'0', '1', '4', '6'}},
    {0: {'4'}, 2: {'1', '9'}, 3: {'3', '5', '7', '8'}, 4: {'0', '6'}, 5: {'2'}},
    {0: {'5'}, 1: {'6', '9'}, 2: {'3', '8'}, 3: {'0', '4'}, 4: {'2', '7'}, 5: {'1'}},
    {0: {'6'}, 1: {'5', '8'}, 2: {'0', '9'}, 3: {'2', '3'}, 4: {'4'}, 5: {'7'}, 6: {'1'}},
    {0: {'7'}, 1: {'1'}, 2: {'3'}, 3: {'0', '4', '9'}, 4: {'2', '5', '8'}, 5: {'6'}},
    {0: {'8'}, 1: {'0', '6', '9'}, 2: {'2', '3', '5'}, 3: {'4'}, 4: {'7'}, 5: {'1'}},
    {0: {'9'}, 1: {'3', '5', '8'}, 2: {'0', '4', '6'}, 3: {'2', '7'}, 4: {'1'}},
)
# account, account_from_superpositions, result
example_accounts = (
    ('123456789', '123456789', '123456789'),
    ('111111111', '711111111', '711111111'),
    ('777777777', '777777177', '777777177'),
    ('200000000', '200800000', '200800000'),
    ('333333333', '333393333', '333393333'),
    ('555555555', '555555555', '555555555 AMB'),
    ('666666666', '666666666', '666666666 AMB'),
    ('888888888', '888888888', '888888888 AMB'),
    ('999999999', '999999999', '999999999 AMB'),
    ('490067715', '490067715', '490067715 AMB'),
)
# flawed_figure, superposition
flawed_figures = (
    (' ' +
     ' _|' +
     ' |' +
     ' ', {1: {'1', '4'}, 2: {'3', '7'}, 3: {'9'}, 4: {'2', '5', '8'}, 5: {'0', '6'}}),
    (' ' +
     '| |' +
     '|_|' +
     ' ', {1: {'0'}, 2: {'8'}, 3: {'1', '4', '6', '9'}, 4: {'2', '3', '5', '7'}}),
    (' _ ' +
     ' _ ' +
     ' _|' +
     ' ', {1: {'3', '5'}, 2: {'6', '9'}, 3: {'2', '7', '8'}, 4: {'0', '1', '4'}}),
)
# account_prefix, flawed_figure_index, account_suffix, account, result
flawed_accounts = (
    ('', 0, '23456789', '123456789', '123456789'),
    ('0', 1, '0000051', '000000051', '000000051'),
    ('49086771', 2, '', '490867715', '490867715'),
    ('1', 0, '3456789', '1?3456789', '1?3456789 AMB'),
)
class BasicInputFile:
    "details regarding the Basic input file"
    file_name = 'basic.txt'
    # Account numbers present in the file, in order.
    accounts = (
        '000000000',
        '111111111',
        '222222222',
        '333333333',
        '444444444',
        '555555555',
        '666666666',
        '777777777',
        '888888888',
        '999999999',
        '123456789',
    )
    # Expected parse results, one per account (AMB marks ambiguous reads).
    results = (
        '000000000',
        '711111111',
        '222222222 AMB',
        '333393333',
        '444444444 AMB',
        '555555555 AMB',
        '666666666 AMB',
        '777777177',
        '888888888 AMB',
        '999999999 AMB',
        '123456789',
    )
class AdvancedInputFile:
    "details regarding the Advanced input file"
    file_name = 'advanced.txt'
    # Expected parse results, in file order (AMB marks ambiguous reads,
    # '?' marks an unreadable figure).
    results = (
        '000000051',
        '49006771? AMB',
        '123456789',
        '200800000',
        '490067715 AMB',
        '123456789',
        '000000051',
        '490867715',
    )
| gJigsaw/KataBankOCR | parse/test/fixture_constants.py | Python | gpl-2.0 | 3,242 |
import numpy as np
import matplotlib.pyplot as plt
import csv
import os
# csv format is image_number, bits_per_pixel, mem_per_pixel, PSNR, MS-SSIM, MSE
data_folder = os.path.expanduser("~")+"/CAE_Project/CAEs/data/"
balle_file = data_folder+"r_d_Balle.csv"
jpeg_file = data_folder+"r_d_JPEG.csv"
jpeg2k_file = data_folder+"r_d_JPEG2k.csv"
proposed_file0 = data_folder+"r_d_proposed_3072_max_compress_pcm.csv"
proposed_file1 = data_folder+"r_d_proposed_7680_med_compress_pcm.csv"
proposed_file2 = data_folder+"r_d_proposed_32768_min_compress_pcm.csv"
proposed_file3 = data_folder+"r_d_proposed_3072_max_compress_gauss.csv"
proposed_file4 = data_folder+"r_d_proposed_7680_med_compress_gauss.csv"
proposed_file5 = data_folder+"r_d_proposed_32768_min_compress_gauss.csv"
proposed_file6 = data_folder+"r_d_proposed_3072_max_compress_pcm_relu.csv"
proposed_file7 = data_folder+"r_d_proposed_7680_med_compress_pcm_relu.csv"
fig_path = data_folder+"proposed_pcm_balle_r_d_curve.pdf"
jpeg2k_r_d_list = []
with open(jpeg2k_file, 'rt') as csvfile:
reader = csv.reader(csvfile, delimiter=',')
for row_idx, row in enumerate(reader):
if row_idx > 0: # first row is header
jpeg2k_r_d_list.append([float(val) for val in row])
jpeg2k_array = np.array(jpeg2k_r_d_list)
balle_r_d_list = []
with open(balle_file, 'rt') as csvfile:
reader = csv.reader(csvfile, delimiter=',')
for row_idx, row in enumerate(reader):
if row_idx > 0: # first row is header
balle_r_d_list.append([float(val) for val in row])
balle_array = np.array(balle_r_d_list)
proposed_r_d_list = []
with open(proposed_file0, 'rt') as csvfile:
reader = csv.reader(csvfile, delimiter=',')
for row_idx, row in enumerate(reader):
if row_idx > 0: # first row is header
proposed_r_d_list.append([float(val) if val != "NA" else 0 for val in row])
proposed_array0 = np.array(proposed_r_d_list)
proposed_r_d_list = []
with open(proposed_file1, 'rt') as csvfile:
reader = csv.reader(csvfile, delimiter=',')
for row_idx, row in enumerate(reader):
if row_idx > 0: # first row is header
proposed_r_d_list.append([float(val) if val != "NA" else 0 for val in row])
proposed_array1 = np.array(proposed_r_d_list)
proposed_r_d_list = []
with open(proposed_file2, 'rt') as csvfile:
reader = csv.reader(csvfile, delimiter=',')
for row_idx, row in enumerate(reader):
if row_idx > 0: # first row is header
proposed_r_d_list.append([float(val) if val != "NA" else 0 for val in row])
proposed_array2 = np.array(proposed_r_d_list)
fig = plt.figure()
plt.scatter(jpeg2k_array[:,2], jpeg2k_array[:,5], s=18, c="g", edgecolors="none", alpha=0.25)
plt.scatter(balle_array[:,2], balle_array[:,5], s=32, c="b", edgecolors="none", alpha=0.15)
plt.scatter(proposed_array0[:,2], proposed_array0[:,5], s=19, c="r", edgecolors="none", alpha=0.25)
plt.scatter(proposed_array1[:,2], proposed_array1[:,5], s=19, c="k", edgecolors="none", alpha=0.25)
plt.scatter(proposed_array2[:,2], proposed_array2[:,5], s=19, c="c", edgecolors="none", alpha=0.25)
plt.ylabel("MSE")
plt.xlabel("Memristors Per Pixel")
plt.ylim([0, 450])
plt.xlim([0, 1.0])
plt.legend(["JPEG2k", "Balle", "Proposed_dn_pcm_max", "Proposed_dn_pcm_med", "Proposed_dn_pcm_min"])
#plt.legend(["JPEG2k", "Balle", "Proposed"])
#plt.legend(["JPEG2k", "Balle", "Proposed"])
fig.savefig(fig_path)
plt.close(fig)
| rzarcone/CAEs | utils/r_d_curves.py | Python | bsd-2-clause | 3,358 |
from viperid import app
import unittest
class ViperidTestCase(unittest.TestCase):
    """Each compiler endpoint must reject a contract with an invalid
    top-level statement ('import os') with the same 400 JSON error."""

    # Contract whose first line is an invalid top-level statement.
    contract_1 = {
        'code': 'import os\ndef foo(x: num) -> num:\n return x * 2'
    }
    # Error payload every endpoint is expected to return.
    error = {'error': {'line_no': 1, 'message': 'Invalid top-level statement', 'source_code': ['import os', 'def foo(x: num) -> num:', ' return x * 2'], 'text': ''}}

    def setUp(self):
        app.testing = True

    def _assert_compile_error(self, endpoint):
        # Shared check: POST the bad contract and expect the 400 error.
        # (Previously duplicated verbatim in all three test methods.)
        with app.test_client() as c:
            rv = c.post(endpoint, json=self.contract_1)
            assert rv.status_code == 400
            assert rv.is_json
            assert rv.get_json() == self.error

    def test_compile_to_abi(self):
        self._assert_compile_error('/abi/')

    def test_compile_to_ir(self):
        self._assert_compile_error('/ir/')

    def test_compile_to_bytecode(self):
        self._assert_compile_error('/bytecode/')
if __name__ == '__main__':
    unittest.main()
from django.db import models, migrations
import django.utils.timezone
import django.core.validators
from django.conf import settings
class Migration(migrations.Migration):
    """Initial migration: creates the custom ``HierarchicUser`` auth model.

    NOTE(review): auto-generated Django migration -- the schema below should
    not be edited by hand once it has been applied to a database.
    """
    dependencies = [
        ('auth', '0001_initial'),
        ('contenttypes', '0001_initial'),
    ]
    operations = [
        migrations.CreateModel(
            name='HierarchicUser',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('password', models.CharField(max_length=128, verbose_name='password')),
                ('last_login', models.DateTimeField(default=django.utils.timezone.now, verbose_name='last login')),
                ('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
                ('username', models.CharField(help_text='Required. 30 characters or fewer. Letters, digits and @/./+/-/_ only.', unique=True, max_length=30, verbose_name='username', validators=[django.core.validators.RegexValidator('^[\\w.@+-]+$', 'Enter a valid username.', 'invalid')])),
                ('first_name', models.CharField(max_length=30, verbose_name='first name', blank=True)),
                ('last_name', models.CharField(max_length=75, verbose_name='last name', blank=True)),
                ('email', models.EmailField(max_length=75, verbose_name='email address', blank=True)),
                ('is_staff', models.BooleanField(default=False, help_text='Designates whether the user can log into this admin site.', verbose_name='staff status')),
                ('is_active', models.BooleanField(default=True, help_text='Designates whether this user should be treated as active. Unselect this instead of deleting accounts.', verbose_name='active')),
                ('date_joined', models.DateTimeField(default=django.utils.timezone.now, verbose_name='date joined')),
                ('show_email', models.BooleanField(default=False, verbose_name='afficher l\u2019email')),
                ('website', models.URLField(verbose_name='site internet', blank=True)),
                ('website_verbose', models.CharField(max_length=50, verbose_name='nom affich\xe9 du site internet', blank=True)),
                ('legal_person', models.BooleanField(default=False, help_text='Cochez si vous \xeates une institution ou un ensemble.', verbose_name='personne morale')),
                ('object_id', models.PositiveIntegerField(null=True, verbose_name='identifiant de l\u2019autorit\xe9 associ\xe9e', blank=True)),
                ('willing_to_be_mentor', models.BooleanField(default=False, verbose_name='Veut \xeatre mentor')),
                ('avatar', models.ImageField(upload_to='avatars/', null=True, verbose_name='avatar', blank=True)),
                ('presentation', models.TextField(blank=True, verbose_name='pr\xe9sentation', validators=[django.core.validators.MaxLengthValidator(5000)])),
                ('fonctions', models.TextField(blank=True, verbose_name='fonctions au sein de l\u2019\xe9quipe', validators=[django.core.validators.MaxLengthValidator(200)])),
                ('literature', models.TextField(verbose_name='publications', blank=True)),
                # lft/rght/tree_id/level are django-mptt tree-bookkeeping fields.
                ('lft', models.PositiveIntegerField(editable=False, db_index=True)),
                ('rght', models.PositiveIntegerField(editable=False, db_index=True)),
                ('tree_id', models.PositiveIntegerField(editable=False, db_index=True)),
                ('level', models.PositiveIntegerField(editable=False, db_index=True)),
                ('content_type', models.ForeignKey(verbose_name='type d\u2019autorit\xe9 associ\xe9e', blank=True, to='contenttypes.ContentType', null=True, on_delete=django.db.models.CASCADE)),
                ('groups', models.ManyToManyField(related_query_name='user', related_name='user_set', to='auth.Group', blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of his/her group.', verbose_name='groups')),
                ('mentor', models.ForeignKey(related_name='disciples', verbose_name='mentor', blank=True, to=settings.AUTH_USER_MODEL, null=True, on_delete=django.db.models.CASCADE)),
                ('user_permissions', models.ManyToManyField(related_query_name='user', related_name='user_set', to='auth.Permission', blank=True, help_text='Specific permissions for this user.', verbose_name='user permissions')),
            ],
            options={
                'ordering': ('last_name', 'first_name'),
                'verbose_name': 'utilisateur',
                'verbose_name_plural': 'utilisateurs',
            },
            bases=(models.Model,),
        ),
    ]
| dezede/dezede | accounts/migrations/0001_initial.py | Python | bsd-3-clause | 4,770 |
# Copyright 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# GUIDs with well-known meanings, keyed by their canonical string form.
KNOWN_GUIDS = {
    '{00000000-0000-0000-C000-000000000046}': 'IUnknown',
    '{00020400-0000-0000-C000-000000000046}': 'IDispatch',
}
# GUID "variant" values (upper 3 bits of Data4 byte 2) -> description.
# NOTE(review): only these four values are mapped; any other upper-3-bit
# value is reported as an unknown variant by struct_GUID.
KNOWN_TYPE_VARIANTS = {
  0: 'Network Computing System',
  2: 'Standard',
  6: 'MS COM',
  7: 'Reserved',
};
# https://secure.wikimedia.org/wikipedia/en/wiki/Globally_unique_identifier
def struct_GUID(stream, offset, max_size, parent, name):
  """Decode a 16-byte GUID structure and annotate it with its canonical
  string form, any well-known name, its type variant and its version."""
  import C
  result = C.STRUCT(stream, offset, max_size, parent, name, 'GUID',
      ('Data1', C.DWORD),
      ('Data2', C.WORD),
      ('Data3', C.WORD),
      ('Data4', {C.ARRAY: (8, C.BYTE)}),
  )
  result.dump_simplified = True
  data4 = result._Data4._values
  # Canonical registry-style rendering of the GUID.
  result.string_value = '{%08X-%04X-%04X-%02X%02X-%02X%02X%02X%02X%02X%02X}' % \
      (result._Data1.value, result._Data2.value, result._Data3.value,
       data4[0], data4[1], data4[2], data4[3],
       data4[4], data4[5], data4[6], data4[7])
  result.notes.append(result.string_value)
  if result.string_value in KNOWN_GUIDS:
    result.notes.append('(%s)' % KNOWN_GUIDS[result.string_value])
  # The type variant lives in the top three bits of Data4 byte 2.
  type_variant = data4[1] >> 5
  if type_variant in KNOWN_TYPE_VARIANTS:
    result.notes.append('type=%s' % KNOWN_TYPE_VARIANTS[type_variant])
  else:
    result.warnings.append(
        'Unknown type variant in Data4 byte 2 upper 3 bits: %d' % type_variant)
  # The version lives in the top four bits of Data3.
  version = result._Data3.value >> 12
  if version == 0:
    pass  # Version not specified; nothing to report.
  elif version in (1, 4):
    result.notes.append('version=%d' % version)
  else:
    result.warnings.append(
        'Unknown version in Data3 upper 4 bits: %d (expected 0, 1 or 4)' %
        version)
  return result
#!/usr/bin/env python
#
# Generate pnSeed[] from Pieter's DNS seeder
#
NSEEDS = 600

import re
import sys
from subprocess import check_output


def main():
    """Read DNS-seeder output from stdin and print pnSeed[] initializer rows.

    Only lines of the form ``a.b.c.d:10809`` are used.  Each address is
    packed little-endian (first octet = low byte) into a 32-bit integer,
    ``0.0.0.0`` is skipped, and at most NSEEDS addresses are emitted as
    rows of eight hex literals.
    """
    lines = sys.stdin.readlines()
    pattern = re.compile(r"^(\d{1,3})\.(\d{1,3})\.(\d{1,3})\.(\d{1,3}):10809")
    ips = []
    for line in lines:
        m = pattern.match(line)
        if m is None:
            continue
        ip = 0
        for i in range(0, 4):
            ip = ip + (int(m.group(i + 1)) << (8 * i))
        if ip == 0:
            continue  # skip the all-zero placeholder address
        ips.append(ip)
    for row in range(0, min(NSEEDS, len(ips)), 8):
        # print() as a function keeps this script Python 2/3 compatible
        # (it was previously a Python-2-only print statement).
        print(" " + ", ".join(["0x%08x" % i for i in ips[row:row + 8]]) + ",")


if __name__ == '__main__':
    main()
| onnz423/Scorecoin | contrib/seeds/makeseeds.py | Python | mit | 709 |
#! /usr/bin/env python
# encoding: utf-8
# WARNING! Do not edit! http://waf.googlecode.com/git/docs/wafbook/single.html#_obtaining_the_waf_file
import re
from waflib import Utils,Logs
def filter_comments(filename):
	"""Return the chunks of a D source file that lie outside comments.

	Scans character by character so that ``import`` statements inside
	string literals or comments are not later picked up by the dependency
	regexes.  String literals are kept verbatim; ``//``, ``/* */`` and
	nesting ``/+ +/`` comments are skipped (block comments are replaced
	by a single space).
	"""
	txt=Utils.readf(filename)
	i=0
	buf=[]
	max=len(txt)
	begin=0
	while i<max:
		c=txt[i]
		if c=='"'or c=="'":
			# String literal: flush what came before, then skip to the
			# matching closing quote, honouring backslash escapes.
			buf.append(txt[begin:i])
			delim=c
			i+=1
			while i<max:
				c=txt[i]
				if c==delim:break
				elif c=='\\':
					i+=1
				i+=1
			i+=1
			begin=i
		elif c=='/':
			buf.append(txt[begin:i])
			i+=1
			if i==max:break
			c=txt[i]
			if c=='+':
				# /+ ... +/ comments nest in D; track the depth.
				i+=1
				nesting=1
				c=None
				while i<max:
					prev=c
					c=txt[i]
					if prev=='/'and c=='+':
						nesting+=1
						c=None
					elif prev=='+'and c=='/':
						nesting-=1
						if nesting==0:break
						c=None
					i+=1
			elif c=='*':
				# Classic /* ... */ block comment.
				i+=1
				c=None
				while i<max:
					prev=c
					c=txt[i]
					if prev=='*'and c=='/':break
					i+=1
			elif c=='/':
				# Line comment: skip to end of line.
				i+=1
				while i<max and txt[i]!='\n':
					i+=1
			else:
				# Plain '/' operator, not a comment: keep scanning from it.
				begin=i-1
				continue
			i+=1
			begin=i
			buf.append(' ')
		else:
			i+=1
	buf.append(txt[begin:])
	return buf
class d_parser(object):
	"""Resolve the import dependencies of D source files."""
	def __init__(self,env,incpaths):
		self.allnames=[]
		# ``module``/``import`` statements plus the two decorations an
		# imported name may carry: selective bindings ("a.b : x, y") and
		# aliases ("name = a.b").
		self.re_module=re.compile("module\s+([^;]+)")
		self.re_import=re.compile("import\s+([^;]+)")
		self.re_import_bindings=re.compile("([^:]+):(.*)")
		self.re_import_alias=re.compile("[^=]+=(.+)")
		self.env=env
		self.nodes=[]
		self.names=[]
		self.incpaths=incpaths
	def tryfind(self,filename):
		# Map a module name like a.b.c to a/b/c.d on the include paths.
		# Unresolved names are recorded in self.names.
		found=0
		for n in self.incpaths:
			found=n.find_resource(filename.replace('.','/')+'.d')
			if found:
				self.nodes.append(found)
				self.waiting.append(found)
				break
		if not found:
			if not filename in self.names:
				self.names.append(filename)
	def get_strings(self,code):
		# Extract the imported module names from comment-free source text.
		self.module=''
		lst=[]
		mod_name=self.re_module.search(code)
		if mod_name:
			self.module=re.sub('\s+','',mod_name.group(1))
		import_iterator=self.re_import.finditer(code)
		if import_iterator:
			for import_match in import_iterator:
				import_match_str=re.sub('\s+','',import_match.group(1))
				# Strip selective-import bindings: "a.b:x,y" -> "a.b".
				bindings_match=self.re_import_bindings.match(import_match_str)
				if bindings_match:
					import_match_str=bindings_match.group(1)
				matches=import_match_str.split(',')
				for match in matches:
					# Strip aliases: "name=real.module" -> "real.module".
					alias_match=self.re_import_alias.match(match)
					if alias_match:
						match=alias_match.group(1)
					lst.append(match)
		return lst
	def start(self,node):
		# Breadth-first traversal of the dependency graph rooted at node.
		self.waiting=[node]
		while self.waiting:
			nd=self.waiting.pop(0)
			self.iter(nd)
	def iter(self,node):
		# Parse one file and queue any newly discovered dependencies.
		path=node.abspath()
		code="".join(filter_comments(path))
		names=self.get_strings(code)
		for x in names:
			if x in self.allnames:continue
			self.allnames.append(x)
			self.tryfind(x)
def scan(self):
	"""Waf task scanner: return (dependency nodes, unresolved names).

	NOTE(review): written at module level but taking ``self`` -- waf binds
	it as a method on task objects elsewhere; confirm before moving it.
	"""
	env=self.env
	gruik=d_parser(env,self.generator.includes_nodes)
	node=self.inputs[0]
	gruik.start(node)
	nodes=gruik.nodes
	names=gruik.names
	if Logs.verbose:
		Logs.debug('deps: deps for %s: %r; unresolved %r'%(str(node),nodes,names))
	return(nodes,names)
| bit-trade-one/SoundModuleAP | lib-src/lv2/lv2/waflib/Tools/d_scan.py | Python | gpl-2.0 | 3,162 |
from setuptools import setup, find_packages
import os
import poser
# Trove classifiers describing the package on PyPI.
CLASSIFIERS = [
    'Development Status :: 5 - Production/Stable',
    'Environment :: Web Environment',
    'Framework :: Django',
    'Intended Audience :: Developers',
    # Fixed: this classifier previously claimed a BSD license, contradicting
    # the AGPL v3 ``license`` argument passed to setup() below.
    'License :: OSI Approved :: GNU Affero General Public License v3',
    'Operating System :: OS Independent',
    'Programming Language :: Python',
    'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
    'Topic :: Software Development',
    'Topic :: Software Development :: Libraries :: Application Frameworks',
]

# Read the long description up front with a context manager so the file
# handle is closed promptly instead of being leaked by an inline read().
with open(os.path.join(os.path.dirname(__file__), 'README.md')) as readme:
    LONG_DESCRIPTION = readme.read()

setup(
    author="Elias Showk @ CommOnEcoute SAS",
    author_email="elias.showk@commonecoute.fr",
    name='django-poser',
    version=poser.__version__,
    description='A publication manager of REST resources',
    long_description=LONG_DESCRIPTION,
    url='https://github.com/elishowk/django-poser',
    license='GNU Affero GPL v3',
    platforms=['OS Independent'],
    classifiers=CLASSIFIERS,
    install_requires=[
        'Django>=1.3.5,<1.5',
        'south>=0.7.2',
        'django-guardian',
    ],
    tests_require=[
        'Pillow==1.7.7',
        'Sphinx==1.1.3',
        'Jinja2==2.6',
        'Pygments==1.5',
    ],
    packages=find_packages(exclude=["project", "project.*"]),
    include_package_data=True,
    zip_safe=False,
    test_suite='runtests.main',
)
| elishowk/django-poser | setup.py | Python | agpl-3.0 | 1,378 |
# The Hazard Library
# Copyright (C) 2012-2014, GEM Foundation
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
Module :mod:`openquake.hazardlib.mfd.evenly_discretized` defines an evenly
discretized MFD.
"""
from openquake.hazardlib.mfd.base import BaseMFD
from openquake.baselib.slots import with_slots
@with_slots
class EvenlyDiscretizedMFD(BaseMFD):
    """
    An MFD supplied directly as a precomputed histogram of occurrence rates.

    :param min_mag:
        Positive float value representing the middle point of the first
        bin in the histogram.
    :param bin_width:
        A positive float value -- the width of a single histogram bin.
    :param occurrence_rates:
        The list of non-negative float values representing the actual
        annual occurrence rates. The resulting histogram has as many bins
        as this list length.
    """
    MODIFICATIONS = set(('set_mfd',))

    _slots_ = 'min_mag bin_width occurrence_rates'.split()

    def __init__(self, min_mag, bin_width, occurrence_rates):
        self.min_mag = min_mag
        self.bin_width = bin_width
        self.occurrence_rates = occurrence_rates
        self.check_constraints()

    def check_constraints(self):
        """
        Validate the histogram parameters.

        Raises ``ValueError`` unless the bin width is positive, the rates
        list is non-empty with no negative entry and at least one positive
        entry, and the minimum magnitude is non-negative.
        """
        if not self.bin_width > 0:
            raise ValueError('bin width must be positive')
        if not self.occurrence_rates:
            raise ValueError('at least one bin must be specified')
        if not all(rate >= 0 for rate in self.occurrence_rates):
            raise ValueError('all occurrence rates must not be negative')
        if not any(rate > 0 for rate in self.occurrence_rates):
            raise ValueError('at least one occurrence rate must be positive')
        if not self.min_mag >= 0:
            raise ValueError('minimum magnitude must be non-negative')

    def get_annual_occurrence_rates(self):
        """
        Return ``(magnitude, rate)`` pairs, one per histogram bin.
        """
        magnitudes = (self.min_mag + i * self.bin_width
                      for i in range(len(self.occurrence_rates)))
        return list(zip(magnitudes, self.occurrence_rates))

    def get_min_max_mag(self):
        """
        Return the magnitudes of the first and the last histogram bins.
        """
        max_mag = self.min_mag + self.bin_width * (len(self.occurrence_rates) - 1)
        return self.min_mag, max_mag

    def modify_set_mfd(self, min_mag, bin_width, occurrence_rates):
        """
        Replace the whole histogram in place (absolute ``set_mfd``
        modification), then re-validate.

        :param min_mag:
            Positive float value representing the middle point of the first
            bin in the histogram.
        :param bin_width:
            A positive float value -- the width of a single histogram bin.
        :param occurrence_rates:
            The list of non-negative float values representing the actual
            annual occurrence rates. The resulting histogram has as many
            bins as this list length.
        """
        self.min_mag = min_mag
        self.bin_width = bin_width
        self.occurrence_rates = occurrence_rates
        self.check_constraints()
| silviacanessa/oq-hazardlib | openquake/hazardlib/mfd/evenly_discretized.py | Python | agpl-3.0 | 3,967 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
""""""
from colony.entities.actingentity import ActingEntity
from colony.entities.attributes import Limbs
# from .references import get_interval
from colony.references import interval
__title__ = "MovingEntity"
__author__ = "DeflatedPickle"
__version__ = "1.0.3"
class MovingEntity(ActingEntity, Limbs):
    """Creates an entity capable of movement."""
    def __init__(self, parent, x: int = 0, y: int = 0, entity_type: str = "moving entity"):
        ActingEntity.__init__(self, parent, x, y, entity_type)
        Limbs.__init__(self)
        self.parent = parent
        # TODO: Actually use the colonist speed.
        self.move_speed = 2
        # TODO: Add body parts that will have effects if lost.
        # Register move_until as the recurring "after" action to tick.
        self.moving = self.move_until
        self.after_actions.append(self.moving)
    def move_entity(self, x, y):
        """Moves the entity."""
        # Move the sprite, name tag and health bar canvas items together,
        # then refresh the cached coordinates from the canvas.
        self.parent.game_area.move(self.entity, x, y)
        self.parent.game_area.move(self.entity_name, x, y)
        self.parent.game_area.move(self.entity_health, x, y)
        self.set_coordinates(self.find_coordinates_own()[0], self.find_coordinates_own()[1])
    def move_to(self, x, y, because):
        """Start walking towards canvas position (x, y); *because* becomes
        the entity's current action while it travels."""
        self.stop_actions()
        entity_location = self.parent.game_area.coords(self.entity)
        # Remaining distance and travel direction on each axis.
        move_x = (x - entity_location[0])
        direction_x = True  # Forwards
        if move_x < 0:
            move_x = abs(move_x)
            direction_x = False  # Backwards
        move_y = (y - entity_location[1])
        direction_y = True  # Down
        if move_y < 0:
            move_y = abs(move_y)
            direction_y = False  # Up
        self.action = because
        self.move_until(x, y, move_x, move_y, direction_x, direction_y)
    def move_until(self, prev_x, prev_y, x, y, direction_x, direction_y):
        """Step one pixel per tick until the entity sits at (prev_x, prev_y).

        NOTE(review): ``prev_x``/``prev_y`` are absolute target coordinates
        while ``x``/``y`` are remaining step counts, yet ``x < prev_x``
        compares the two directly -- looks suspicious; confirm intended.
        """
        self.reached_destination = False
        try:
            if self.find_coordinates_own()[0] != prev_x:
                # print("X: {}\nPrev X: {}".format(x, prev_x))
                if x < prev_x and direction_x:
                    # print("Moved right.")
                    self.move_entity(1, 0)
                    x -= 1
                elif x < prev_x and not direction_x:
                    # print("Moved left.")
                    self.move_entity(-1, 0)
                    x -= 1
        except IndexError:
            pass
        try:
            if self.find_coordinates_own()[1] != prev_y:
                # print("Y: {}\nPrev Y: {}".format(y, prev_y))
                if y < prev_y and direction_y:
                    # print("Moved down.")
                    self.move_entity(0, 1)
                    y -= 1
                elif y < prev_y and not direction_y:
                    # print("Moved up.")
                    self.move_entity(0, -1)
                    y -= 1
        except IndexError:
            pass
        # Walking costs a little energy every tick.
        self.decrease_energy(0.02)
        if self.find_coordinates_own() == [prev_x, prev_y]:
            # print("Stopped!")
            # Arrived: cancel the pending tk callback and settle the action.
            self.parent.parent.after_cancel(self.moving)
            if self.action == "going to work":
                self.action = "working"
            else:
                self.action = "standing around"
            self.reached_destination = True
        else:
            # print("Not the same!")
            # Not there yet: reschedule this method for the next tick.
            self.after_actions.remove(self.moving)
            self.moving = self.parent.parent.after(interval.get_interval(), lambda: self.move_until(prev_x, prev_y, x, y, direction_x, direction_y))
            self.after_actions.append(self.moving)
| DeflatedPickle/Colony | colony/entities/movingentity.py | Python | mit | 3,590 |
"""
Convert a QuinCe CSV output file to netCDF
"""
import argparse
import os.path
import yaml
import pandas as pd
from datetime import datetime
from netCDF4 import Dataset
from cftime import date2num
def make_netcdf(config, in_file):
    """
    Convert a QuinCe CSV export into a netCDF file next to it.

    :param config: parsed configuration with ``global_attributes`` and
        ``columns`` sections
    :param in_file: path to the QuinCe CSV; the output path swaps the
        extension for ``.nc``
    """
    out_file = f"{os.path.splitext(in_file)[0]}.nc"
    quince = pd.read_csv(in_file, parse_dates=["Date/Time"], low_memory=False)
    with Dataset(out_file, "w", format="NETCDF4") as nc:
        # Set global attributes from config
        for attr in config["global_attributes"]:
            nc.setncattr(attr, config["global_attributes"][attr])
        nc.setncattr("creation_date", datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%SZ'))
        # Unlimited time dimension; timestamps stored as days since epoch.
        nc.createDimension("time")
        timevar = nc.createVariable("time", "f8", ("time",))
        timevar.units = "days since 1970-01-01 00:00:00.0"
        timevar[:] = date2num(quince["Date/Time"], timevar.units)
        # One netCDF variable per configured CSV column.
        for column in config["columns"]:
            column_info = config["columns"][column]
            var = nc.createVariable(column_info["variable"], column_info["type"], ("time",))
            var.units = column_info["units"]
            var[:] = quince[column]
def parse_config(config_file):
    """
    Load and parse the YAML configuration file.

    :param config_file: The path to the config file
    :return: the parsed configuration data
    """
    with open(config_file, "r") as config_stream:
        parsed = yaml.safe_load(config_stream)
    return parsed
def check_input_file(path):
    """
    Make sure the input file exists and can be opened for reading.

    :param path: path to the candidate input file
    :return: True when the file is readable, False otherwise
    """
    if not os.path.exists(path):
        print("Specified input file does not exist")
        return False
    try:
        handle = open(path, "r")
    except OSError as ex:
        print(f"Failed to open input file: {ex}")
        return False
    handle.close()
    return True
# Command line processor
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Convert a QuinCe output file to netCDF")
    parser.add_argument("config_file", help="Configuration file")
    parser.add_argument("csv_file", help="QuinCe CSV File")
    args = parser.parse_args()
    # Bail out early if the config cannot be parsed...
    try:
        parsed_config = parse_config(args.config_file)
    except Exception as e:
        print(f"Failed to parse configuration file: {e}")
        exit()
    # ...or the CSV file is missing/unreadable.
    if not check_input_file(args.csv_file):
        exit()
    make_netcdf(parsed_config, args.csv_file)
| BjerknesClimateDataCentre/QuinCe | DataPreparation/HaraldSodemann/netCDFConverter/make_netcdf.py | Python | gpl-3.0 | 2,368 |
# Copyright (c) 2011 by Zocolab <pablo@zocolab.es>
#
# This software is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
| pablorecio/django-qrcode | qrcode/utils.py | Python | lgpl-3.0 | 712 |
__license__ = """
Copyright 2015 Parse.ly, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# -*- coding: utf-8 -*-
import re
def get_version():
    """Read the pykafka version string out of the package's __init__.py
    (relative to this conf.py) without importing the package."""
    with open('../pykafka/__init__.py') as version_file:
        contents = version_file.read()
    match = re.search(r"""__version__\s+=\s+(['"])(?P<version>.+?)\1""", contents)
    return match.group('version')
# Sphinx project metadata; version/release come straight from the package.
project = u'pykafka'
copyright = u'2015, Parse.ly'
version = release = get_version()
extensions = ['sphinx.ext.autodoc']
templates_path = ['_templates']
exclude_patterns = ['_build']
html_static_path = ['_static']
source_suffix = '.rst'
master_doc = 'index'
html_theme = 'sphinx_rtd_theme'
pygments_style = 'sphinx'
htmlhelp_basename = 'pykafkadoc'
# Document dunder/private members and show base classes by default.
autodoc_default_flags = ['special-members', 'private-members', 'show-inheritance']
| jofusa/pykafka | doc/conf.py | Python | apache-2.0 | 1,267 |
# -*- coding: utf-8 -*-
'''
Wheel system wrapper for key system
'''
from __future__ import absolute_import
# Import python libs
import os
import hashlib
# Import salt libs
import salt.key
import salt.crypt
# Expose ``list_`` as ``list`` on the wheel interface; the trailing
# underscore avoids shadowing the builtin inside this module.
__func_alias__ = {
    'list_': 'list'
}
def list_(match):
    '''
    Return all the keys that sit under the status named by ``match``
    '''
    return salt.key.Key(__opts__).list_status(match)
def list_all():
    '''
    Return every key known to the master, in all states
    '''
    return salt.key.Key(__opts__).all_keys()
def accept(match, include_rejected=False, include_denied=False):
    '''
    Accept the keys matched by the given glob
    '''
    wheel_key = salt.key.Key(__opts__)
    return wheel_key.accept(
        match,
        include_rejected=include_rejected,
        include_denied=include_denied)
def accept_dict(match):
    '''
    Accept keys based on a dict of keys

    For example, to move a list of keys from the `minions_pre` (pending)
    directory to the `minions` (accepted) directory:

    .. code-block:: python

        {
            'minions_pre': [
                'jerry',
                'stuart',
                'bob',
            ],
        }
    '''
    return salt.key.Key(__opts__).accept(match_dict=match)
def delete(match):
    '''
    Delete the keys matched by the given glob
    '''
    return salt.key.Key(__opts__).delete_key(match)
def delete_dict(match):
    '''
    Delete the keys named in a dict of keys
    '''
    return salt.key.Key(__opts__).delete_key(match_dict=match)
def reject(match, include_accepted=False, include_denied=False):
    '''
    Reject the keys matched by the given glob
    '''
    wheel_key = salt.key.Key(__opts__)
    return wheel_key.reject(
        match,
        include_accepted=include_accepted,
        include_denied=include_denied)
def reject_dict(match):
    '''
    Reject the keys named in a dict of keys
    '''
    return salt.key.Key(__opts__).reject(match_dict=match)
def key_str(match):
    '''
    Return the string contents of the matched keys
    '''
    return salt.key.Key(__opts__).key_str(match)
def finger(match):
    '''
    Return the fingerprints of the matched keys
    '''
    return salt.key.Key(__opts__).finger(match)
def gen(id_=None, keysize=2048):
    '''
    Generate a key pair. No keys are stored on the master, a keypair is
    returned as a dict containing pub and priv keys.

    If ``id_`` is not given, a random hex identifier is generated.  The
    temporary key files written by ``salt.crypt.gen_keys`` are removed
    before returning.
    '''
    # This module only imports salt.key/salt.crypt at the top; salt.utils
    # was previously reachable only via transitive imports -- make the
    # dependency explicit so fopen() cannot raise AttributeError.
    import salt.utils
    if id_ is None:
        id_ = hashlib.sha512(os.urandom(32)).hexdigest()
    ret = {'priv': '',
           'pub': ''}
    priv = salt.crypt.gen_keys(__opts__['pki_dir'], id_, keysize)
    pub = '{0}.pub'.format(priv[:priv.rindex('.')])
    with salt.utils.fopen(priv) as fp_:
        ret['priv'] = fp_.read()
    with salt.utils.fopen(pub) as fp_:
        ret['pub'] = fp_.read()
    os.remove(priv)
    os.remove(pub)
    return ret
def gen_accept(id_, keysize=2048, force=False):
    '''
    Generate a key pair then accept the public key. This function returns the
    key pair in a dict, only the public key is preserved on the master.

    If the minion ``id_`` already has an accepted key and ``force`` is not
    set, nothing is written and an empty dict is returned.
    '''
    # Explicit import: salt.utils is not imported at module level and was
    # previously only available via transitive imports.
    import salt.utils
    ret = gen(id_, keysize)
    acc_path = os.path.join(__opts__['pki_dir'], 'minions', id_)
    if os.path.isfile(acc_path) and not force:
        return {}
    with salt.utils.fopen(acc_path, 'w+') as fp_:
        fp_.write(ret['pub'])
    return ret
| stephane-martin/salt-debian-packaging | salt-2016.3.3/salt/wheel/key.py | Python | apache-2.0 | 3,278 |
#!/usr/bin/env python3
import random
import unittest
from panda import pack_can_buffer, unpack_can_buffer
class PandaTestPackUnpack(unittest.TestCase):
    """Round-trip fuzz test for pack_can_buffer/unpack_can_buffer."""

    def test_panda_lib_pack_unpack(self):
        """Pack 10000 random CAN messages and check they unpack unchanged."""
        to_pack = []
        for _ in range(10000):
            address = random.randint(1, 0x1FFFFFFF)
            # 1-8 random payload bytes, as allowed on classic CAN.
            data = bytes([random.getrandbits(8) for _ in range(random.randrange(1, 9))])
            to_pack.append((address, 0, data, 0))
        packed = pack_can_buffer(to_pack)
        unpacked = []
        for dat in packed:
            unpacked.extend(unpack_can_buffer(dat))
        # assertEqual reports a useful diff on failure, unlike a bare assert.
        self.assertEqual(unpacked, to_pack)

if __name__ == "__main__":
    unittest.main()
| commaai/panda | tests/usbprotocol/test_pandalib.py | Python | mit | 615 |
"""HTML form handling for web clients.
ClientForm is a Python module for handling HTML forms on the client
side, useful for parsing HTML forms, filling them in and returning the
completed forms to the server. It has developed from a port of Gisle
Aas' Perl module HTML::Form, from the libwww-perl library, but the
interface is not the same.
The most useful docstring is the one for HTMLForm.
RFC 1866: HTML 2.0
RFC 1867: Form-based File Upload in HTML
RFC 2388: Returning Values from Forms: multipart/form-data
HTML 3.2 Specification, W3C Recommendation 14 January 1997 (for ISINDEX)
HTML 4.01 Specification, W3C Recommendation 24 December 1999
Copyright 2002-2006 John J. Lee <jjl@pobox.com>
Copyright 2005 Gary Poster
Copyright 2005 Zope Corporation
Copyright 1998-2000 Gisle Aas.
This code is free software; you can redistribute it and/or modify it
under the terms of the BSD License (see the file COPYING included with
the distribution).
"""
# XXX
# Remove unescape_attr method
# Remove parser testing hack
# safeUrl()-ize action
# Really should to merge CC, CF, pp and mechanize as soon as mechanize
# goes to beta...
# Add url attribute to ParseError
# Switch to unicode throughout (would be 0.3.x)
# See Wichert Akkerman's 2004-01-22 message to c.l.py.
# Add charset parameter to Content-type headers? How to find value??
# Add some more functional tests
# Especially single and multiple file upload on the internet.
# Does file upload work when name is missing? Sourceforge tracker form
# doesn't like it. Check standards, and test with Apache. Test
# binary upload with Apache.
# Controls can have name=None (e.g. forms constructed partly with
# JavaScript), but find_control can't be told to find a control
# with that name, because None there means 'unspecified'. Can still
# get at by nr, but would be nice to be able to specify something
# equivalent to name=None, too.
# mailto submission & enctype text/plain
# I'm not going to fix this unless somebody tells me what real servers
# that want this encoding actually expect: If enctype is
# application/x-www-form-urlencoded and there's a FILE control present.
# Strictly, it should be 'name=data' (see HTML 4.01 spec., section
# 17.13.2), but I send "name=" ATM. What about multiple file upload??
# Would be nice, but I'm not going to do it myself:
# -------------------------------------------------
# Maybe a 0.4.x?
# Replace by_label etc. with moniker / selector concept. Allows, eg.,
# a choice between selection by value / id / label / element
# contents. Or choice between matching labels exactly or by
# substring. Etc.
# Remove deprecated methods.
# ...what else?
# Work on DOMForm.
# XForms? Don't know if there's a need here.
# Python 1.5.2 compatibility: define True/False and bool() when the
# interpreter does not supply them.
try: True
except NameError:
    True = 1
    False = 0

try: bool
except NameError:
    def bool(expr):
        if expr: return True
        else: return False
# Optional debug logging: fall back to a no-op debug() when the logging
# module is unavailable (very old Pythons).
try:
    import logging
except ImportError:
    def debug(msg, *args, **kwds):
        pass
else:
    _logger = logging.getLogger("ClientForm")
    OPTIMIZATION_HACK = True

    def debug(msg, *args, **kwds):
        # Short-circuit entirely unless _show_debug_messages() was called.
        if OPTIMIZATION_HACK:
            return
        try:
            raise Exception()
        except:
            # Walk the traceback to recover the calling function's name so
            # it can be prepended to the log message.
            caller_name = (
                sys.exc_info()[2].tb_frame.f_back.f_back.f_code.co_name)
        extended_msg = '%%s %s' % msg
        extended_args = (caller_name,)+args
        # NOTE(review): assigns the (None) result of _logger.debug to a
        # local also named ``debug``; looks vestigial -- confirm.
        debug = _logger.debug(extended_msg, *extended_args, **kwds)

    def _show_debug_messages():
        # Enable debug() above and route its output to stdout.
        global OPTIMIZATION_HACK
        OPTIMIZATION_HACK = False
        _logger.setLevel(logging.DEBUG)
        handler = logging.StreamHandler(sys.stdout)
        handler.setLevel(logging.DEBUG)
        _logger.addHandler(handler)
import sys, urllib, urllib2, types, mimetools, copy, urlparse, \
htmlentitydefs, re, random
from urlparse import urljoin
from cStringIO import StringIO
try:
import warnings
except ImportError:
def deprecation(message):
pass
else:
def deprecation(message):
warnings.warn(message, DeprecationWarning, stacklevel=2)
VERSION = "0.2.2"  # ClientForm release version
CHUNK = 1024  # size of chunks fed to parser, in bytes
DEFAULT_ENCODING = "latin-1"  # encoding assumed when none is specified
_compress_re = re.compile(r"\s+")
def compress_text(text):
    """Trim *text* and squash every internal whitespace run to one space."""
    return _compress_re.sub(" ", text.strip())
# This version of urlencode is from my Python 1.5.2 back-port of the
# Python 2.1 CVS maintenance branch of urllib. It will accept a sequence
# of pairs instead of a mapping -- the 2.0 version only accepts a mapping.
def urlencode(query,doseq=False,):
    """Encode a sequence of two-element tuples or dictionary into a URL query \
string.

    If any values in the query arg are sequences and doseq is true, each
    sequence element is converted to a separate parameter.

    If the query arg is a sequence of two-element tuples, the order of the
    parameters in the output will match the order of parameters in the
    input.
    """
    # NOTE(review): Python 2 only (urllib.quote_plus, types.StringType).
    if hasattr(query,"items"):
        # mapping objects
        query = query.items()
    else:
        # it's a bother at times that strings and string-like objects are
        # sequences...
        try:
            # non-sequence items should not work with len()
            x = len(query)
            # non-empty strings will fail this
            if len(query) and type(query[0]) != types.TupleType:
                raise TypeError()
            # zero-length sequences of all types will get here and succeed,
            # but that's a minor nit - since the original implementation
            # allowed empty dicts that type of behavior probably should be
            # preserved for consistency
        except TypeError:
            ty,va,tb = sys.exc_info()
            raise TypeError("not a valid non-string sequence or mapping "
                            "object", tb)

    l = []
    if not doseq:
        # preserve old behavior
        for k, v in query:
            k = urllib.quote_plus(str(k))
            v = urllib.quote_plus(str(v))
            l.append(k + '=' + v)
    else:
        for k, v in query:
            k = urllib.quote_plus(str(k))
            if type(v) == types.StringType:
                v = urllib.quote_plus(v)
                l.append(k + '=' + v)
            elif type(v) == types.UnicodeType:
                # is there a reasonable way to convert to ASCII?
                # encode generates a string, but "replace" or "ignore"
                # lose information and "strict" can raise UnicodeError
                v = urllib.quote_plus(v.encode("ASCII","replace"))
                l.append(k + '=' + v)
            else:
                try:
                    # is this a sufficient test for sequence-ness?
                    x = len(v)
                except TypeError:
                    # not a sequence
                    v = urllib.quote_plus(str(v))
                    l.append(k + '=' + v)
                else:
                    # loop over the sequence
                    for elt in v:
                        l.append(k + '=' + urllib.quote_plus(str(elt)))
    return '&'.join(l)
def unescape(data, entities, encoding=DEFAULT_ENCODING):
    """Replace HTML entity and numeric character references in *data*.

    Named entities are looked up in *entities*; numeric references are
    decoded by unescape_charref().  Unknown or unencodable references are
    left as-is.  *data* is returned unchanged when it is None or contains
    no ampersand.
    """
    if data is None or "&" not in data:
        return data

    def replace_entities(match, entities=entities, encoding=encoding):
        ent = match.group()
        if ent[1] == "#":
            # numeric character reference, e.g. &#65; or &#x41;
            return unescape_charref(ent[2:-1], encoding)

        repl = entities.get(ent)
        if repl is not None:
            if type(repl) != type(""):
                # entity mapped to unicode: re-encode if possible,
                # otherwise fall back to the original reference text
                try:
                    repl = repl.encode(encoding)
                except UnicodeError:
                    repl = ent
        else:
            repl = ent
        return repl

    return re.sub(r"&#?[A-Za-z0-9]+?;", replace_entities, data)
def unescape_charref(data, encoding):
    """Decode the body of a numeric character reference (e.g. "65" or "x41").

    With *encoding* None the unicode character itself is returned;
    otherwise it is encoded, falling back to the literal "&#...;" text
    when the character cannot be represented in *encoding*.
    """
    # NOTE(review): Python 2 only (unichr).
    name, base = data, 10
    if name.startswith("x"):
        # hexadecimal form: &#x...;
        name, base= name[1:], 16
    uc = unichr(int(name, base))
    if encoding is None:
        return uc
    else:
        try:
            repl = uc.encode(encoding)
        except UnicodeError:
            repl = "&#%s;" % data
        return repl
def get_entitydefs():
    """Return a mapping "&name;" -> unicode character for HTML entities.

    Uses htmlentitydefs.name2codepoint when available; otherwise falls
    back to decoding the latin-1 byte values (or nested "&#...;" forms)
    found in htmlentitydefs.entitydefs.
    """
    import htmlentitydefs
    from codecs import latin_1_decode
    entitydefs = {}
    try:
        htmlentitydefs.name2codepoint
    except AttributeError:
        # Old Python: build the table from the raw entitydefs strings.
        entitydefs = {}
        for name, char in htmlentitydefs.entitydefs.items():
            uc = latin_1_decode(char)[0]
            if uc.startswith("&#") and uc.endswith(";"):
                uc = unescape_charref(uc[2:-1], None)
            entitydefs["&%s;" % name] = uc
    else:
        for name, codepoint in htmlentitydefs.name2codepoint.items():
            entitydefs["&%s;" % name] = unichr(codepoint)
    return entitydefs
def issequence(x):
    """Duck-type test: does x support integer indexing like a sequence?

    Empty sequences (IndexError on x[0]) still count as sequences;
    mappings (KeyError) and unindexable objects (TypeError) do not.
    """
    try:
        x[0]
    except IndexError:
        # Empty sequence: indexing is supported, there's just no item 0.
        return True
    except (TypeError, KeyError):
        return False
    return True
def isstringlike(x):
    """Duck-type test: can x be concatenated with a string?

    True for strings and string-like objects; False for everything else
    (anything whose addition with "" raises).
    """
    try:
        x + ""
    except Exception:
        # Was a bare "except:"; narrowed so KeyboardInterrupt/SystemExit
        # are no longer silently swallowed.
        return False
    return True
def choose_boundary():
    """Return a string usable as a multipart boundary.

    Follows the style used by IE and Firefox: 27 dashes followed by a
    random numeric nonce.
    """
    # sys.maxsize (available since Python 2.6) replaces the Python-2-only
    # sys.maxint, and parenthesising the tuple keeps the comprehension
    # valid syntax on Python 3 as well.
    nonce = "".join([str(random.randint(0, sys.maxsize - 1)) for i in (0, 1, 2)])
    return "-" * 27 + nonce
# This cut-n-pasted MimeWriter from standard library is here so can add
# to HTTP headers rather than message body when appropriate. It also uses
# \r\n in place of \n. This is a bit nasty.
class MimeWriter:
    """Generic MIME writer.
    Methods:
    __init__()
    addheader()
    flushheaders()
    startbody()
    startmultipartbody()
    nextpart()
    lastpart()
    A MIME writer is much more primitive than a MIME parser. It
    doesn't seek around on the output file, and it doesn't use large
    amounts of buffer space, so you have to write the parts in the
    order they should occur on the output file. It does buffer the
    headers you add, allowing you to rearrange their order.
    General usage is:
    f = <open the output file>
    w = MimeWriter(f)
    ...call w.addheader(key, value) 0 or more times...
    followed by either:
    f = w.startbody(content_type)
    ...call f.write(data) for body data...
    or:
    w.startmultipartbody(subtype)
    for each part:
    subwriter = w.nextpart()
    ...use the subwriter's methods to create the subpart...
    w.lastpart()
    The subwriter is another MimeWriter instance, and should be
    treated in the same way as the toplevel MimeWriter. This way,
    writing recursive body parts is easy.
    Warning: don't forget to call lastpart()!
    XXX There should be more state so calls made in the wrong order
    are detected.
    Some special cases:
    - startbody() just returns the file passed to the constructor;
    but don't use this knowledge, as it may be changed.
    - startmultipartbody() actually returns a file as well;
    this can be used to write the initial 'if you can read this your
    mailer is not MIME-aware' message.
    - If you call flushheaders(), the headers accumulated so far are
    written out (and forgotten); this is useful if you don't need a
    body part at all, e.g. for a subpart of type message/rfc822
    that's (mis)used to store some header-like information.
    - Passing a keyword argument 'prefix=<flag>' to addheader(),
    start*body() affects where the header is inserted; 0 means
    append at the end, 1 means insert at the start; default is
    append for addheader(), but insert for start*body(), which use
    it to determine where the Content-type header goes.
    """
    def __init__(self, fp, http_hdrs=None):
        # http_hdrs: optional list that receives (key, value) pairs
        # destined for the HTTP headers rather than the MIME body.
        self._http_hdrs = http_hdrs
        self._fp = fp
        # buffered header lines, written out by flushheaders()
        self._headers = []
        # stack of multipart boundaries (nested multiparts push/pop here)
        self._boundary = []
        self._first_part = True
    def addheader(self, key, value, prefix=0,
                  add_to_http_hdrs=0):
        """
        prefix is ignored if add_to_http_hdrs is true.
        """
        lines = value.split("\r\n")
        # drop leading/trailing empty continuation lines
        while lines and not lines[-1]: del lines[-1]
        while lines and not lines[0]: del lines[0]
        if add_to_http_hdrs:
            value = "".join(lines)
            self._http_hdrs.append((key, value))
        else:
            # re-indent continuation lines (RFC 822 header folding)
            for i in range(1, len(lines)):
                lines[i] = " " + lines[i].strip()
            value = "\r\n".join(lines) + "\r\n"
            line = key + ": " + value
            if prefix:
                self._headers.insert(0, line)
            else:
                self._headers.append(line)
    def flushheaders(self):
        # Write out the buffered headers and forget them.
        self._fp.writelines(self._headers)
        self._headers = []
    def startbody(self, ctype=None, plist=[], prefix=1,
                  add_to_http_hdrs=0, content_type=1):
        """
        prefix is ignored if add_to_http_hdrs is true.
        """
        if content_type and ctype:
            # append parameters (e.g. boundary=...) to the content type
            for name, value in plist:
                ctype = ctype + ';\r\n %s=%s' % (name, value)
            self.addheader("Content-type", ctype, prefix=prefix,
                           add_to_http_hdrs=add_to_http_hdrs)
        self.flushheaders()
        # blank line separates headers from body (not needed for HTTP hdrs)
        if not add_to_http_hdrs: self._fp.write("\r\n")
        self._first_part = True
        return self._fp
    def startmultipartbody(self, subtype, boundary=None, plist=[], prefix=1,
                           add_to_http_hdrs=0, content_type=1):
        boundary = boundary or choose_boundary()
        self._boundary.append(boundary)
        return self.startbody("multipart/" + subtype,
                              [("boundary", boundary)] + plist,
                              prefix=prefix,
                              add_to_http_hdrs=add_to_http_hdrs,
                              content_type=content_type)
    def nextpart(self):
        # Emit the boundary line for the next part and return a fresh
        # writer for that part's headers/body.
        boundary = self._boundary[-1]
        if self._first_part:
            self._first_part = False
        else:
            self._fp.write("\r\n")
        self._fp.write("--" + boundary + "\r\n")
        return self.__class__(self._fp)
    def lastpart(self):
        # A multipart body must contain at least one part.
        if self._first_part:
            self.nextpart()
        boundary = self._boundary.pop()
        self._fp.write("\r\n--" + boundary + "--\r\n")
# Exception hierarchy used when locating controls/items within a form.
class LocateError(ValueError): pass
class AmbiguityError(LocateError): pass
class ControlNotFoundError(LocateError): pass
class ItemNotFoundError(LocateError): pass
# Not a LocateError: relates to the number of items, not to finding them.
class ItemCountError(ValueError): pass
# Raised on HTML parse errors.
class ParseError(Exception): pass
class _AbstractFormParser:
    """Accumulates parsed form data in the forms attribute.

    On completion, each entry of .forms is a tuple
    ((name, action, method, enctype), attrs_dict, controls_list);
    ParseFile later converts these into HTMLForm instances.  .labels
    collects dicts describing LABEL elements with a "for" attribute.
    """
    # thanks to Moshe Zadka for an example of sgmllib/htmllib usage
    def __init__(self, entitydefs=None, encoding=DEFAULT_ENCODING):
        if entitydefs is None:
            entitydefs = get_entitydefs()
        self._entitydefs = entitydefs
        self._encoding = encoding
        # URI from a BASE element, if any
        self.base = None
        self.forms = []
        self.labels = []
        # parser state: the element currently being built, or None
        self._current_label = None
        self._current_form = None
        self._select = None
        self._optgroup = None
        self._option = None
        self._textarea = None
    # Record <base href="..."> so relative actions can be resolved later.
    def do_base(self, attrs):
        debug("%s", attrs)
        for key, value in attrs:
            if key == "href":
                self.base = value
    def end_body(self):
        debug("")
        # close any dangling LABEL / FORM at end of document body
        if self._current_label is not None:
            self.end_label()
        if self._current_form is not None:
            self.end_form()
    def start_form(self, attrs):
        debug("%s", attrs)
        if self._current_form is not None:
            raise ParseError("nested FORMs")
        name = None
        action = None
        enctype = "application/x-www-form-urlencoded"
        method = "GET"
        d = {}
        for key, value in attrs:
            if key == "name":
                name = value
            elif key == "action":
                action = value
            elif key == "method":
                method = value.upper()
            elif key == "enctype":
                enctype = value.lower()
            d[key] = value
        controls = []
        self._current_form = (name, action, method, enctype), d, controls
    def end_form(self):
        debug("")
        if self._current_label is not None:
            self.end_label()
        if self._current_form is None:
            raise ParseError("end of FORM before start")
        self.forms.append(self._current_form)
        self._current_form = None
    def start_select(self, attrs):
        debug("%s", attrs)
        if self._current_form is None:
            raise ParseError("start of SELECT before start of FORM")
        if self._select is not None:
            raise ParseError("nested SELECTs")
        if self._textarea is not None:
            raise ParseError("SELECT inside TEXTAREA")
        d = {}
        for key, val in attrs:
            d[key] = val
        self._select = d
        self._add_label(d)
        # a control entry carrying just the SELECT's own attributes
        self._append_select_control({"__select": d})
    def end_select(self):
        debug("")
        if self._current_form is None:
            raise ParseError("end of SELECT before start of FORM")
        if self._select is None:
            raise ParseError("end of SELECT before start")
        # OPTION elements have no required end tag; close an open one now
        if self._option is not None:
            self._end_option()
        self._select = None
    def start_optgroup(self, attrs):
        debug("%s", attrs)
        if self._select is None:
            raise ParseError("OPTGROUP outside of SELECT")
        d = {}
        for key, val in attrs:
            d[key] = val
        self._optgroup = d
    def end_optgroup(self):
        debug("")
        if self._optgroup is None:
            raise ParseError("end of OPTGROUP before start")
        self._optgroup = None
    def _start_option(self, attrs):
        debug("%s", attrs)
        if self._select is None:
            raise ParseError("OPTION outside of SELECT")
        if self._option is not None:
            self._end_option()
        d = {}
        for key, val in attrs:
            d[key] = val
        self._option = {}
        self._option.update(d)
        # a disabled OPTGROUP disables its member OPTIONs
        if (self._optgroup and self._optgroup.has_key("disabled") and
            not self._option.has_key("disabled")):
            self._option["disabled"] = None
    def _end_option(self):
        debug("")
        if self._option is None:
            raise ParseError("end of OPTION before start")
        contents = self._option.get("contents", "").strip()
        self._option["contents"] = contents
        # value and label both default to the element's text contents
        if not self._option.has_key("value"):
            self._option["value"] = contents
        if not self._option.has_key("label"):
            self._option["label"] = contents
        # stuff dict of SELECT HTML attrs into a special private key
        # (gets deleted again later)
        self._option["__select"] = self._select
        self._append_select_control(self._option)
        self._option = None
    def _append_select_control(self, attrs):
        debug("%s", attrs)
        controls = self._current_form[2]
        name = self._select.get("name")
        controls.append(("select", name, attrs))
    def start_textarea(self, attrs):
        debug("%s", attrs)
        if self._current_form is None:
            raise ParseError("start of TEXTAREA before start of FORM")
        if self._textarea is not None:
            raise ParseError("nested TEXTAREAs")
        if self._select is not None:
            raise ParseError("TEXTAREA inside SELECT")
        d = {}
        for key, val in attrs:
            d[key] = val
        self._add_label(d)
        self._textarea = d
    def end_textarea(self):
        debug("")
        if self._current_form is None:
            raise ParseError("end of TEXTAREA before start of FORM")
        if self._textarea is None:
            raise ParseError("end of TEXTAREA before start")
        controls = self._current_form[2]
        name = self._textarea.get("name")
        controls.append(("textarea", name, self._textarea))
        self._textarea = None
    def start_label(self, attrs):
        debug("%s", attrs)
        if self._current_label:
            self.end_label()
        d = {}
        for key, val in attrs:
            d[key] = val
        taken = bool(d.get("for")) # empty id is invalid
        # __text accumulates character data via handle_data();
        # __taken marks whether the label is already associated
        d["__text"] = ""
        d["__taken"] = taken
        if taken:
            self.labels.append(d)
        self._current_label = d
    def end_label(self):
        debug("")
        label = self._current_label
        if label is None:
            # something is ugly in the HTML, but we're ignoring it
            return
        self._current_label = None
        # NOTE: this assignment is a no-op (kept byte-identical)
        label["__text"] = label["__text"]
        # if it is staying around, it is True in all cases
        del label["__taken"]
    def _add_label(self, d):
        #debug("%s", d)
        # Associate the currently-open LABEL with control attrs dict d.
        if self._current_label is not None:
            if self._current_label["__taken"]:
                self.end_label() # be fuzzy
            else:
                self._current_label["__taken"] = True
                d["__label"] = self._current_label
    def handle_data(self, data):
        # according to http://www.w3.org/TR/html4/appendix/notes.html#h-B.3.1
        # line break immediately after start tags or immediately before end
        # tags must be ignored, but real browsers only ignore a line break
        # after a start tag, so we'll do that.
        if data[0:1] == '\n':
            data = data[1:]
        debug("%s", data)
        if self._option is not None:
            # self._option is a dictionary of the OPTION element's HTML
            # attributes, but it has two special keys, one of which is the
            # special "contents" key contains text between OPTION tags (the
            # other is the "__select" key: see the end_option method)
            map = self._option
            key = "contents"
        elif self._textarea is not None:
            map = self._textarea
            key = "value"
        # not if within option or textarea
        elif self._current_label is not None:
            map = self._current_label
            key = "__text"
        else:
            return
        if not map.has_key(key):
            map[key] = data
        else:
            map[key] = map[key] + data
    def do_button(self, attrs):
        debug("%s", attrs)
        if self._current_form is None:
            raise ParseError("start of BUTTON before start of FORM")
        d = {}
        d["type"] = "submit" # default
        for key, val in attrs:
            d[key] = val
        controls = self._current_form[2]
        type = d["type"]
        name = d.get("name")
        # we don't want to lose information, so use a type string that
        # doesn't clash with INPUT TYPE={SUBMIT,RESET,BUTTON}
        # e.g. type for BUTTON/RESET is "resetbutton"
        # (type for INPUT/RESET is "reset")
        type = type+"button"
        self._add_label(d)
        controls.append((type, name, d))
    def do_input(self, attrs):
        debug("%s", attrs)
        if self._current_form is None:
            raise ParseError("start of INPUT before start of FORM")
        d = {}
        d["type"] = "text" # default
        for key, val in attrs:
            d[key] = val
        controls = self._current_form[2]
        type = d["type"]
        name = d.get("name")
        self._add_label(d)
        controls.append((type, name, d))
    def do_isindex(self, attrs):
        debug("%s", attrs)
        if self._current_form is None:
            raise ParseError("start of ISINDEX before start of FORM")
        d = {}
        for key, val in attrs:
            d[key] = val
        controls = self._current_form[2]
        self._add_label(d)
        # isindex doesn't have type or name HTML attributes
        controls.append(("isindex", None, d))
    def handle_entityref(self, name):
        #debug("%s", name)
        self.handle_data(unescape(
            '&%s;' % name, self._entitydefs, self._encoding))
    def handle_charref(self, name):
        #debug("%s", name)
        self.handle_data(unescape_charref(name, self._encoding))
    def unescape_attr(self, name):
        #debug("%s", name)
        return unescape(name, self._entitydefs, self._encoding)
    def unescape_attrs(self, attrs):
        #debug("%s", attrs)
        escaped_attrs = {}
        for key, val in attrs.items():
            try:
                val.items
            except AttributeError:
                escaped_attrs[key] = self.unescape_attr(val)
            else:
                # e.g. "__select" -- yuck!
                escaped_attrs[key] = self.unescape_attrs(val)
        return escaped_attrs
    def unknown_entityref(self, ref): self.handle_data("&%s;" % ref)
    def unknown_charref(self, ref): self.handle_data("&#%s;" % ref)
# HTMLParser.HTMLParser is recent, so live without it if it's not available
# (also, htmllib.HTMLParser is much more tolerant of bad HTML)
try:
    import HTMLParser
except ImportError:
    # HTMLParser unavailable (very old Python): provide a stub that fails
    # loudly on instantiation rather than at import time.
    class XHTMLCompatibleFormParser:
        def __init__(self, entitydefs=None, encoding=DEFAULT_ENCODING):
            raise ValueError("HTMLParser could not be imported")
else:
    class XHTMLCompatibleFormParser(_AbstractFormParser, HTMLParser.HTMLParser):
        """Good for XHTML, bad for tolerance of incorrect HTML."""
        # thanks to Michael Howitz for this!
        def __init__(self, entitydefs=None, encoding=DEFAULT_ENCODING):
            HTMLParser.HTMLParser.__init__(self)
            _AbstractFormParser.__init__(self, entitydefs, encoding)
        def start_option(self, attrs):
            _AbstractFormParser._start_option(self, attrs)
        def end_option(self):
            _AbstractFormParser._end_option(self)
        # Dispatch HTMLParser events to _AbstractFormParser's
        # start_*/do_*/end_* handlers by name.
        def handle_starttag(self, tag, attrs):
            try:
                method = getattr(self, "start_" + tag)
            except AttributeError:
                try:
                    method = getattr(self, "do_" + tag)
                except AttributeError:
                    pass # unknown tag
                else:
                    method(attrs)
            else:
                method(attrs)
        def handle_endtag(self, tag):
            try:
                method = getattr(self, "end_" + tag)
            except AttributeError:
                pass # unknown tag
            else:
                method()
        def unescape(self, name):
            # Use the entitydefs passed into constructor, not
            # HTMLParser.HTMLParser's entitydefs.
            return self.unescape_attr(name)
        def unescape_attr_if_required(self, name):
            return name # HTMLParser.HTMLParser already did it
        def unescape_attrs_if_required(self, attrs):
            return attrs # ditto
import sgmllib
# monkeypatch to fix http://www.python.org/sf/803422 :-(
# (the replacement pattern accepts an optional leading "x", so
# hexadecimal character references are matched as well)
sgmllib.charref = re.compile("&#(x?[0-9a-fA-F]+)[^0-9a-fA-F]")
class _AbstractSgmllibParser(_AbstractFormParser):
    """Mix-in adapting _AbstractFormParser to sgmllib-style parsers."""
    def do_option(self, attrs):
        _AbstractFormParser._start_option(self, attrs)
    # sgmllib does not unescape attribute values itself, so do it here.
    def unescape_attr_if_required(self, name):
        return self.unescape_attr(name)
    def unescape_attrs_if_required(self, attrs):
        return self.unescape_attrs(attrs)
class FormParser(_AbstractSgmllibParser, sgmllib.SGMLParser):
    """Good for tolerance of incorrect HTML, bad for XHTML."""
    def __init__(self, entitydefs=None, encoding=DEFAULT_ENCODING):
        # initialise both bases explicitly (no cooperative super here)
        sgmllib.SGMLParser.__init__(self)
        _AbstractFormParser.__init__(self, entitydefs, encoding)
try:
    if sys.version_info[:2] < (2, 2):
        raise ImportError # BeautifulSoup uses generators
    import BeautifulSoup
except ImportError:
    # BeautifulSoup is optional; without it the Robust* parsers simply
    # are not defined.
    pass
else:
    class _AbstractBSFormParser(_AbstractSgmllibParser):
        # subclasses set this to the BeautifulSoup class they extend
        bs_base_class = None
        def __init__(self, entitydefs=None, encoding=DEFAULT_ENCODING):
            _AbstractFormParser.__init__(self, entitydefs, encoding)
            self.bs_base_class.__init__(self)
        def handle_data(self, data):
            # feed character data to both the form parser and the soup
            _AbstractFormParser.handle_data(self, data)
            self.bs_base_class.handle_data(self, data)
    class RobustFormParser(_AbstractBSFormParser, BeautifulSoup.BeautifulSoup):
        """Tries to be highly tolerant of incorrect HTML."""
        bs_base_class = BeautifulSoup.BeautifulSoup
    class NestingRobustFormParser(_AbstractBSFormParser,
                                  BeautifulSoup.ICantBelieveItsBeautifulSoup):
        """Tries to be highly tolerant of incorrect HTML.
        Different from RobustFormParser in that it more often guesses nesting
        above missing end tags (see BeautifulSoup docs).
        """
        bs_base_class = BeautifulSoup.ICantBelieveItsBeautifulSoup
#FormParser = XHTMLCompatibleFormParser # testing hack
#FormParser = RobustFormParser # testing hack
def ParseResponse(response, select_default=False,
                  ignore_errors=False, # ignored!
                  form_parser_class=FormParser,
                  request_class=urllib2.Request,
                  entitydefs=None,
                  backwards_compat=True,
                  encoding=DEFAULT_ENCODING,
                  ):
    """Parse an HTTP response and return a list of HTMLForm instances.

    The return value of urllib2.urlopen can be passed directly as the
    response argument.  ClientForm.ParseError is raised on parse errors.

    response: file-like object supporting read(), with a geturl() method
      returning the URI of the HTTP response
    select_default: for multiple-selection SELECT controls and RADIO
      controls with nothing selected in the HTML, select the first item
      (the RFC 1866 / HTML 2.0 behaviour); most real browsers (IE,
      Netscape) instead leave all items unselected
    form_parser_class: parser class to instantiate and use
    request_class: class returned from the .click() method (default is
      urllib2.Request)
    entitydefs: mapping like {"&amp;": "&", ...} of HTML entity
      definitions (a sensible default is used if None)
    backwards_compat: keep ClientForm 0.1 compatibility quirks:
      label searches without nr return the first match instead of
      raising AmbiguityError, item label matching is by exact string
      comparison rather than substring, and disabled list items may
      still be de-selected.  This argument will be deprecated.
    encoding: character encoding used when matching link text via
      numeric character references (the document itself is not examined
      for a META HTTP-EQUIV encoding)
    There is a choice of parsers: XHTMLCompatibleFormParser (uses
    HTMLParser.HTMLParser) works best for XHTML; FormParser (uses
    sgmllib.SGMLParser, the default) is more tolerant of grubby HTML.
    """
    return ParseFile(response, response.geturl(),
                     select_default=select_default,
                     ignore_errors=False,
                     form_parser_class=form_parser_class,
                     request_class=request_class,
                     entitydefs=entitydefs,
                     backwards_compat=backwards_compat,
                     encoding=encoding,
                     )
def ParseFile(file, base_uri, select_default=False,
              ignore_errors=False, # ignored!
              form_parser_class=FormParser,
              request_class=urllib2.Request,
              entitydefs=None,
              backwards_compat=True,
              encoding=DEFAULT_ENCODING,
              ):
    """Parse HTML and return a list of HTMLForm instances.
    ClientForm.ParseError is raised on parse errors.
    file: file-like object (supporting read() method) containing HTML with zero
    or more forms to be parsed
    base_uri: the URI of the document (note that the base URI used to submit
    the form will be that given in the BASE element if present, not that of
    the document)
    For the other arguments and further details, see ParseResponse.__doc__.
    """
    if backwards_compat:
        deprecation("operating in backwards-compatibility mode")
    fp = form_parser_class(entitydefs, encoding)
    # feed the parser in CHUNK-sized pieces; a short read means EOF
    while 1:
        data = file.read(CHUNK)
        try:
            fp.feed(data)
        except ParseError, e:
            # attach the document URI so the error can be traced
            e.base_uri = base_uri
            raise
        if len(data) != CHUNK: break
    if fp.base is not None:
        # HTML BASE element takes precedence over document URI
        base_uri = fp.base
    labels = [] # Label(label) for label in fp.labels]
    id_to_labels = {}
    # build Label objects and index them by their "for" id
    for l in fp.labels:
        label = Label(l)
        labels.append(label)
        for_id = l["for"]
        coll = id_to_labels.get(for_id)
        if coll is None:
            id_to_labels[for_id] = [label]
        else:
            coll.append(label)
    forms = []
    # convert the parser's raw form tuples into HTMLForm instances
    for (name, action, method, enctype), attrs, controls in fp.forms:
        if action is None:
            action = base_uri
        else:
            action = urljoin(base_uri, action)
        action = fp.unescape_attr_if_required(action)
        name = fp.unescape_attr_if_required(name)
        attrs = fp.unescape_attrs_if_required(attrs)
        # would be nice to make HTMLForm class (form builder) pluggable
        form = HTMLForm(
            action, method, enctype, name, attrs, request_class,
            forms, labels, id_to_labels, backwards_compat)
        for ii in range(len(controls)):
            type, name, attrs = controls[ii]
            attrs = fp.unescape_attrs_if_required(attrs)
            name = fp.unescape_attr_if_required(name)
            # index=ii*10 allows ImageControl to return multiple ordered pairs
            form.new_control(type, name, attrs, select_default=select_default,
                             index=ii*10)
        forms.append(form)
    # fixup() finalises controls whose state spans several items (lists)
    for form in forms:
        form.fixup()
    return forms
class Label:
    """A label parsed from a LABEL element.

    id is the value of the LABEL's "for" attribute.  The read-only text
    attribute is either the raw stripped text or the version processed by
    compress_text(), depending on HTMLForm's backwards-compat mode.
    """
    def __init__(self, attrs):
        self.id = attrs.get("for")
        self._text = attrs.get("__text").strip()
        self._ctext = compress_text(self._text)
        self.attrs = attrs
        self._backwards_compat = False # maintained by HTMLForm
    def __getattr__(self, name):
        # "text" is computed on access rather than stored
        if name == "text":
            if self._backwards_compat:
                return self._text
            else:
                return self._ctext
        return getattr(Label, name)
    def __setattr__(self, name, value):
        if name == "text":
            # don't see any need for this, so make it read-only
            raise AttributeError("text attribute is read-only")
        self.__dict__[name] = value
    def __str__(self):
        return "<Label(id=%r, text=%r)>" % (self.id, self.text)
def _get_label(attrs):
    """Return a Label built from attrs["__label"], or None if absent."""
    label_attrs = attrs.get("__label")
    if label_attrs is None:
        return None
    return Label(label_attrs)
class Control:
    """An HTML form control.
    An HTMLForm contains a sequence of Controls. The Controls in an HTMLForm
    are accessed using the HTMLForm.find_control method or the
    HTMLForm.controls attribute.
    Control instances are usually constructed using the ParseFile /
    ParseResponse functions. If you use those functions, you can ignore the
    rest of this paragraph. A Control is only properly initialised after the
    fixup method has been called. In fact, this is only strictly necessary for
    ListControl instances. This is necessary because ListControls are built up
    from ListControls each containing only a single item, and their initial
    value(s) can only be known after the sequence is complete.
    The types and values that are acceptable for assignment to the value
    attribute are defined by subclasses.
    If the disabled attribute is true, this represents the state typically
    represented by browsers by 'greying out' a control. If the disabled
    attribute is true, the Control will raise AttributeError if an attempt is
    made to change its value. In addition, the control will not be considered
    'successful' as defined by the W3C HTML 4 standard -- ie. it will
    contribute no data to the return value of the HTMLForm.click* methods. To
    enable a control, set the disabled attribute to a false value.
    If the readonly attribute is true, the Control will raise AttributeError if
    an attempt is made to change its value. To make a control writable, set
    the readonly attribute to a false value.
    All controls have the disabled and readonly attributes, not only those that
    may have the HTML attributes of the same names.
    On assignment to the value attribute, the following exceptions are raised:
    TypeError, AttributeError (if the value attribute should not be assigned
    to, because the control is disabled, for example) and ValueError.
    If the name or value attributes are None, or the value is an empty list, or
    if the control is disabled, the control is not successful.
    Public attributes:
    type: string describing type of control (see the keys of the
    HTMLForm.type2class dictionary for the allowable values) (readonly)
    name: name of control (readonly)
    value: current value of control (subclasses may allow a single value, a
    sequence of values, or either)
    disabled: disabled state
    readonly: readonly state
    id: value of id HTML attribute
    """
    def __init__(self, type, name, attrs, index=None):
        """
        type: string describing type of control (see the keys of the
        HTMLForm.type2class dictionary for the allowable values)
        name: control name
        attrs: HTML attributes of control's HTML element
        """
        raise NotImplementedError()
    def add_to_form(self, form):
        # back-reference so the control can consult form-level state
        self._form = form
        form.controls.append(self)
    def fixup(self):
        # post-parse finalisation hook; no-op for most controls
        pass
    def is_of_kind(self, kind):
        raise NotImplementedError()
    def clear(self):
        raise NotImplementedError()
    # subclasses implement attribute magic (value access, readonly checks)
    def __getattr__(self, name): raise NotImplementedError()
    def __setattr__(self, name, value): raise NotImplementedError()
    def pairs(self):
        """Return list of (key, value) pairs suitable for passing to urlencode.
        """
        return [(k, v) for (i, k, v) in self._totally_ordered_pairs()]
    def _totally_ordered_pairs(self):
        """Return list of (key, value, index) tuples.
        Like pairs, but allows preserving correct ordering even where several
        controls are involved.
        """
        raise NotImplementedError()
    def _write_mime_data(self, mw, name, value):
        """Write data for a subitem of this control to a MimeWriter."""
        # called by HTMLForm
        mw2 = mw.nextpart()
        mw2.addheader("Content-disposition",
                      'form-data; name="%s"' % name, 1)
        f = mw2.startbody(prefix=0)
        f.write(value)
    def __str__(self):
        raise NotImplementedError()
    def get_labels(self):
        """Return all labels (Label instances) for this control.
        If the control was surrounded by a <label> tag, that will be the first
        label; all other labels, connected by 'for' and 'id', are in the order
        that appear in the HTML.
        """
        res = []
        if self._label:
            res.append(self._label)
        if self.id:
            res.extend(self._form._id_to_labels.get(self.id, ()))
        return res
#---------------------------------------------------
class ScalarControl(Control):
    """Control whose value is not restricted to one of a prescribed set.
    Some ScalarControls don't accept any value attribute. Otherwise, takes a
    single value, which must be string-like.
    Additional read-only public attribute:
    attrs: dictionary mapping the names of original HTML attributes of the
    control to their values
    """
    def __init__(self, type, name, attrs, index=None):
        self._index = index
        self._label = _get_label(attrs)
        # type and name are read-only: set via __dict__ to bypass __setattr__
        self.__dict__["type"] = type.lower()
        self.__dict__["name"] = name
        self._value = attrs.get("value")
        self.disabled = attrs.has_key("disabled")
        self.readonly = attrs.has_key("readonly")
        self.id = attrs.get("id")
        self.attrs = attrs.copy()
        self._clicked = False
    def __getattr__(self, name):
        # expose _value as the public "value" attribute
        if name == "value":
            return self.__dict__["_value"]
        else:
            raise AttributeError("%s instance has no attribute '%s'" %
                                 (self.__class__.__name__, name))
    def __setattr__(self, name, value):
        if name == "value":
            # enforce the string-like / readonly / disabled contract
            if not isstringlike(value):
                raise TypeError("must assign a string")
            elif self.readonly:
                raise AttributeError("control '%s' is readonly" % self.name)
            elif self.disabled:
                raise AttributeError("control '%s' is disabled" % self.name)
            self.__dict__["_value"] = value
        elif name in ("name", "type"):
            raise AttributeError("%s attribute is readonly" % name)
        else:
            self.__dict__[name] = value
    def _totally_ordered_pairs(self):
        # unsuccessful controls contribute no data
        name = self.name
        value = self.value
        if name is None or value is None or self.disabled:
            return []
        return [(self._index, name, value)]
    def clear(self):
        if self.readonly:
            raise AttributeError("control '%s' is readonly" % self.name)
        self.__dict__["_value"] = None
    def __str__(self):
        name = self.name
        value = self.value
        if name is None: name = "<None>"
        if value is None: value = "<None>"
        infos = []
        if self.disabled: infos.append("disabled")
        if self.readonly: infos.append("readonly")
        info = ", ".join(infos)
        if info: info = " (%s)" % info
        return "<%s(%s=%s)%s>" % (self.__class__.__name__, name, value, info)
#---------------------------------------------------
class TextControl(ScalarControl):
    """Control holding free text.

    Handles INPUT TYPE=TEXT, INPUT TYPE=PASSWORD, INPUT TYPE=HIDDEN and
    TEXTAREA elements.
    """
    def __init__(self, type, name, attrs, index=None):
        ScalarControl.__init__(self, type, name, attrs, index)
        # Hidden fields cannot be edited in a browser, so mark readonly.
        if self.type == "hidden":
            self.readonly = True
        if self._value is None:
            self._value = ""
    def is_of_kind(self, kind):
        return kind == "text"
#---------------------------------------------------
class FileControl(ScalarControl):
    """File upload with INPUT TYPE=FILE.
    The value attribute of a FileControl is always None. Use add_file instead.
    Additional public method: add_file
    """
    def __init__(self, type, name, attrs, index=None):
        ScalarControl.__init__(self, type, name, attrs, index)
        self._value = None
        # list of (file_object, content_type, filename) tuples
        self._upload_data = []
    def is_of_kind(self, kind): return kind == "file"
    def clear(self):
        if self.readonly:
            raise AttributeError("control '%s' is readonly" % self.name)
        self._upload_data = []
    def __setattr__(self, name, value):
        # value cannot be assigned directly; use add_file instead
        if name in ("value", "name", "type"):
            raise AttributeError("%s attribute is readonly" % name)
        else:
            self.__dict__[name] = value
    def add_file(self, file_object, content_type=None, filename=None):
        if not hasattr(file_object, "read"):
            raise TypeError("file-like object must have read method")
        if content_type is not None and not isstringlike(content_type):
            raise TypeError("content type must be None or string-like")
        if filename is not None and not isstringlike(filename):
            raise TypeError("filename must be None or string-like")
        if content_type is None:
            content_type = "application/octet-stream"
        self._upload_data.append((file_object, content_type, filename))
    def _totally_ordered_pairs(self):
        # XXX should it be successful even if unnamed?
        if self.name is None or self.disabled:
            return []
        return [(self._index, self.name, "")]
    def _write_mime_data(self, mw, _name, _value):
        # called by HTMLForm
        # assert _name == self.name and _value == ''
        if len(self._upload_data) == 1:
            # single file
            file_object, content_type, filename = self._upload_data[0]
            mw2 = mw.nextpart()
            fn_part = filename and ('; filename="%s"' % filename) or ""
            disp = 'form-data; name="%s"%s' % (self.name, fn_part)
            mw2.addheader("Content-disposition", disp, prefix=1)
            fh = mw2.startbody(content_type, prefix=0)
            fh.write(file_object.read())
        elif len(self._upload_data) != 0:
            # multiple files: wrap them in a multipart/mixed sub-body
            mw2 = mw.nextpart()
            disp = 'form-data; name="%s"' % self.name
            mw2.addheader("Content-disposition", disp, prefix=1)
            fh = mw2.startmultipartbody("mixed", prefix=0)
            for file_object, content_type, filename in self._upload_data:
                mw3 = mw2.nextpart()
                fn_part = filename and ('; filename="%s"' % filename) or ""
                disp = "file%s" % fn_part
                mw3.addheader("Content-disposition", disp, prefix=1)
                fh2 = mw3.startbody(content_type, prefix=0)
                fh2.write(file_object.read())
            mw2.lastpart()
    def __str__(self):
        name = self.name
        if name is None: name = "<None>"
        if not self._upload_data:
            value = "<No files added>"
        else:
            value = []
            for file, ctype, filename in self._upload_data:
                if filename is None:
                    value.append("<Unnamed file>")
                else:
                    value.append(filename)
            value = ", ".join(value)
        info = []
        if self.disabled: info.append("disabled")
        if self.readonly: info.append("readonly")
        info = ", ".join(info)
        if info: info = " (%s)" % info
        return "<%s(%s=%s)%s>" % (self.__class__.__name__, name, value, info)
#---------------------------------------------------
class IsindexControl(ScalarControl):
    """ISINDEX control.
    ISINDEX is the odd-one-out of HTML form controls.  In fact, it isn't really
    part of regular HTML forms at all, and predates it.  You're only allowed
    one ISINDEX per HTML document.  ISINDEX and regular form submission are
    mutually exclusive -- either submit a form, or the ISINDEX.
    Having said this, since ISINDEX controls may appear in forms (which is
    probably bad HTML), ParseFile / ParseResponse will include them in the
    HTMLForm instances it returns.  You can set the ISINDEX's value, as with
    any other control (but note that ISINDEX controls have no name, so you'll
    need to use the type argument of set_value!).  When you submit the form,
    the ISINDEX will not be successful (ie., no data will get returned to the
    server as a result of its presence), unless you click on the ISINDEX
    control, in which case the ISINDEX gets submitted instead of the form:
    form.set_value("my isindex value", type="isindex")
    urllib2.urlopen(form.click(type="isindex"))
    ISINDEX elements outside of FORMs are ignored.  If you want to submit one
    by hand, do it like so:
    url = urlparse.urljoin(page_uri, "?"+urllib.quote_plus("my isindex value"))
    result = urllib2.urlopen(url)
    """
    def __init__(self, type, name, attrs, index=None):
        ScalarControl.__init__(self, type, name, attrs, index)
        # ISINDEX elements carry no value attribute; default to empty string
        if self._value is None:
            self._value = ""
    # an ISINDEX is a text field that is also clickable
    def is_of_kind(self, kind): return kind in ["text", "clickable"]
    def _totally_ordered_pairs(self):
        # Never successful as part of ordinary form submission; data is
        # only returned when the ISINDEX itself is clicked (see _click).
        return []
    def _click(self, form, coord, return_type, request_class=urllib2.Request):
        # Relative URL for ISINDEX submission: instead of "foo=bar+baz",
        # want "bar+baz".
        # This doesn't seem to be specified in HTML 4.01 spec. (ISINDEX is
        # deprecated in 4.01, but it should still say how to submit it).
        # Submission of ISINDEX is explained in the HTML 3.2 spec, though.
        # Replace the query component of the form's action URL with the
        # plus-quoted ISINDEX value, and drop any fragment.
        parts = urlparse.urlparse(form.action)
        rest, (query, frag) = parts[:-2], parts[-2:]
        parts = rest + (urllib.quote_plus(self.value), "")
        url = urlparse.urlunparse(parts)
        # (url, data, headers) triple used for the "request_data" return type
        req_data = url, None, []
        if return_type == "pairs":
            return []
        elif return_type == "request_data":
            return req_data
        else:
            return request_class(url)
    def __str__(self):
        # One-line summary: value plus disabled/readonly flags
        value = self.value
        if value is None: value = "<None>"
        infos = []
        if self.disabled: infos.append("disabled")
        if self.readonly: infos.append("readonly")
        info = ", ".join(infos)
        if info: info = " (%s)" % info
        return "<%s(%s)%s>" % (self.__class__.__name__, value, info)
#---------------------------------------------------
class IgnoreControl(ScalarControl):
    """Control that we're not interested in.
    Covers:
    INPUT/RESET
    BUTTON/RESET
    INPUT/BUTTON
    BUTTON/BUTTON
    These controls are always unsuccessful, in the terminology of HTML 4 (ie.
    they never require any information to be returned to the server).
    BUTTON/BUTTON is used to generate events for script embedded in HTML.
    The value attribute of IgnoreControl is always None.
    """
    def __init__(self, type, name, attrs, index=None):
        ScalarControl.__init__(self, type, name, attrs, index)
        # ignored controls never carry a value
        self._value = None
    def is_of_kind(self, kind):
        # member of no kind at all: never text, list, clickable, ...
        return False
    def __setattr__(self, name, value):
        # Guard the immutable attributes; anything else is stored directly
        # in the instance dict.
        if name == "value":
            raise AttributeError(
                "control '%s' is ignored, hence read-only" % self.name)
        elif name in ("name", "type"):
            raise AttributeError("%s attribute is readonly" % name)
        else:
            self.__dict__[name] = value
#---------------------------------------------------
# ListControls
# helpers and subsidiary classes
class Item:
    """A single list item of a ListControl (OPTION, or one CHECKBOX/RADIO
    INPUT element)."""
    def __init__(self, control, attrs, index=None):
        label = _get_label(attrs)
        state = {
            "name": attrs["value"],
            "_labels": label and [label] or [],
            "attrs": attrs,
            "_control": control,
            "disabled": attrs.has_key("disabled"),
            "_selected": False,
            "id": attrs.get("id"),
            "_index": index,
            }
        # Go through __dict__ directly: our own __setattr__ only accepts
        # assignment to 'selected' and 'disabled'.
        self.__dict__.update(state)
        control.items.append(self)
    def get_labels(self):
        """Return all labels (Label instances) for this item.
        For items that represent radio buttons or checkboxes, if the item was
        surrounded by a <label> tag, that will be the first label; all other
        labels, connected by 'for' and 'id', are in the order that appear in
        the HTML.
        For items that represent select options, if the option had a label
        attribute, that will be the first label.  If the option has contents
        (text within the option tags) and it is not the same as the label
        attribute (if any), that will be a label.  There is nothing in the
        spec to my knowledge that makes an option with an id unable to be the
        target of a label's for attribute, so those are included, if any, for
        the sake of consistency and completeness.
        """
        labels = list(self._labels)
        if self.id:
            labels.extend(self._control._form._id_to_labels.get(self.id, ()))
        return labels
    def __getattr__(self, name):
        # 'selected' is a virtual attribute backed by _selected
        if name == "selected":
            return self._selected
        raise AttributeError(name)
    def __setattr__(self, name, value):
        if name == "selected":
            # selection changes are mediated by the owning control
            self._control._set_selected_state(self, value)
        elif name == "disabled":
            self.__dict__["disabled"] = bool(value)
        else:
            raise AttributeError(name)
    def __str__(self):
        # "*name" if selected, "(name)" if disabled, "(*name)" if both
        text = self.name
        if self.selected:
            text = "*" + text
        if self.disabled:
            text = "(%s)" % text
        return text
    def __repr__(self):
        # include name and id ahead of the raw HTML attributes
        attrs = [("name", self.name), ("id", self.id)]+self.attrs.items()
        return "<%s %s>" % (
            self.__class__.__name__,
            " ".join(["%s=%r" % (k, v) for k, v in attrs])
            )
def disambiguate(items, nr, **kwds):
    """Select one element of *items* using the 0-based index *nr*.

    kwds are the query arguments (name/label/id), used only to build the
    error message.  Raises ItemNotFoundError if no item matches the index
    (or the list is empty), and AmbiguityError if nr is None while more
    than one candidate exists.
    """
    msg = " ".join(["%s=%r" % (key, value) for key, value in kwds.items()])
    if not items:
        raise ItemNotFoundError(msg)
    if nr is None:
        if len(items) > 1:
            raise AmbiguityError(msg)
        nr = 0
    if len(items) <= nr:
        raise ItemNotFoundError(msg)
    return items[nr]
class ListControl(Control):
    """Control representing a sequence of items.
    The value attribute of a ListControl represents the successful list items
    in the control.  The successful list items are those that are selected and
    not disabled.
    ListControl implements both list controls that take a length-1 value
    (single-selection) and those that take length >1 values
    (multiple-selection).
    ListControls accept sequence values only.  Some controls only accept
    sequences of length 0 or 1 (RADIO, and single-selection SELECT).
    In those cases, ItemCountError is raised if len(sequence) > 1.  CHECKBOXes
    and multiple-selection SELECTs (those having the "multiple" HTML attribute)
    accept sequences of any length.
    Note the following mistake:
    control.value = some_value
    assert control.value == some_value    # not necessarily true
    The reason for this is that the value attribute always gives the list items
    in the order they were listed in the HTML.
    ListControl items can also be referred to by their labels instead of names.
    Use the label argument to .get(), and the .set_value_by_label(),
    .get_value_by_label() methods.
    Note that, rather confusingly, though SELECT controls are represented in
    HTML by SELECT elements (which contain OPTION elements, representing
    individual list items), CHECKBOXes and RADIOs are not represented by *any*
    element.  Instead, those controls are represented by a collection of INPUT
    elements.  For example, this is a SELECT control, named "control1":
    <select name="control1">
    <option>foo</option>
    <option value="1">bar</option>
    </select>
    and this is a CHECKBOX control, named "control2":
    <input type="checkbox" name="control2" value="foo" id="cbe1">
    <input type="checkbox" name="control2" value="bar" id="cbe2">
    The id attribute of a CHECKBOX or RADIO ListControl is always that of its
    first element (for example, "cbe1" above).
    Additional read-only public attribute: multiple.
    """
    # ListControls are built up by the parser from their component items by
    # creating one ListControl per item, consolidating them into a single
    # master ListControl held by the HTMLForm:
    # -User calls form.new_control(...)
    # -Form creates Control, and calls control.add_to_form(self).
    # -Control looks for a Control with the same name and type in the form,
    #  and if it finds one, merges itself with that control by calling
    #  control.merge_control(self).  The first Control added to the form, of
    #  a particular name and type, is the only one that survives in the
    #  form.
    # -Form calls control.fixup for all its controls.  ListControls in the
    #  form know they can now safely pick their default values.
    # To create a ListControl without an HTMLForm, use:
    # control.merge_control(new_control)
    # (actually, it's much easier just to use ParseFile)
    _label = None
    def __init__(self, type, name, attrs={}, select_default=False,
                 called_as_base_class=False, index=None):
        """
        select_default: for RADIO and multiple-selection SELECT controls, pick
        the first item as the default if no 'selected' HTML attribute is
        present
        """
        if not called_as_base_class:
            raise NotImplementedError()
        self.__dict__["type"] = type.lower()
        self.__dict__["name"] = name
        self._value = attrs.get("value")
        self.disabled = False
        self.readonly = False
        self.id = attrs.get("id")
        # As Controls are merged in with .merge_control(), self.attrs will
        # refer to each Control in turn -- always the most recently merged
        # control.  Each merged-in Control instance corresponds to a single
        # list item: see ListControl.__doc__.
        self.items = []
        self._form = None
        self._select_default = select_default
        self._clicked = False
    def clear(self):
        self.value = []
    def is_of_kind(self, kind):
        if kind == "list":
            return True
        elif kind == "multilist":
            return bool(self.multiple)
        elif kind == "singlelist":
            return not self.multiple
        else:
            return False
    def get_items(self, name=None, label=None, id=None,
                  exclude_disabled=False):
        """Return matching items by name or label.
        For argument docs, see the docstring for .get()
        """
        if name is not None and not isstringlike(name):
            raise TypeError("item name must be string-like")
        if label is not None and not isstringlike(label):
            raise TypeError("item label must be string-like")
        if id is not None and not isstringlike(id):
            raise TypeError("item id must be string-like")
        items = []  # order is important
        compat = self._form.backwards_compat
        for o in self.items:
            if exclude_disabled and o.disabled:
                continue
            if name is not None and o.name != name:
                continue
            if label is not None:
                for l in o.get_labels():
                    if ((compat and l.text == label) or
                        (not compat and l.text.find(label) > -1)):
                        break
                else:
                    continue
            if id is not None and o.id != id:
                continue
            items.append(o)
        return items
    def get(self, name=None, label=None, id=None, nr=None,
            exclude_disabled=False):
        """Return item by name or label, disambiguating if necessary with nr.
        All arguments must be passed by name, with the exception of 'name',
        which may be used as a positional argument.
        If name is specified, then the item must have the indicated name.
        If label is specified, then the item must have a label whose
        whitespace-compressed, stripped, text substring-matches the indicated
        label string (eg. label="please choose" will match
        "  Do please choose an item ").
        If id is specified, then the item must have the indicated id.
        nr is an optional 0-based index of the items matching the query.
        If nr is the default None value and more than one item is found,
        raises AmbiguityError (unless the HTMLForm instance's backwards_compat
        attribute is true).
        If no item is found, or if items are found but nr is specified and not
        found, raises ItemNotFoundError.
        Optionally excludes disabled items.
        """
        if nr is None and self._form.backwards_compat:
            nr = 0  # :-/
        items = self.get_items(name, label, id, exclude_disabled)
        return disambiguate(items, nr, name=name, label=label, id=id)
    def _get(self, name, by_label=False, nr=None, exclude_disabled=False):
        # strictly for use by deprecated methods
        if by_label:
            name, label = None, name
        else:
            name, label = name, None
        # Pass nr and exclude_disabled by keyword: get()'s third positional
        # parameter is id, so passing them positionally would misroute nr
        # into id and exclude_disabled into nr.
        return self.get(name, label, nr=nr, exclude_disabled=exclude_disabled)
    def toggle(self, name, by_label=False, nr=None):
        """Deprecated: given a name or label and optional disambiguating index
        nr, toggle the matching item's selection.
        Selecting items follows the behavior described in the docstring of the
        'get' method.
        if the item is disabled, or this control is disabled or readonly,
        raise AttributeError.
        """
        deprecation(
            "item = control.get(...); item.selected = not item.selected")
        o = self._get(name, by_label, nr)
        self._set_selected_state(o, not o.selected)
    def set(self, selected, name, by_label=False, nr=None):
        """Deprecated: given a name or label and optional disambiguating index
        nr, set the matching item's selection to the bool value of selected.
        Selecting items follows the behavior described in the docstring of the
        'get' method.
        if the item is disabled, or this control is disabled or readonly,
        raise AttributeError.
        """
        deprecation(
            "control.get(...).selected = <boolean>")
        self._set_selected_state(self._get(name, by_label, nr), selected)
    def _set_selected_state(self, item, action):
        # action:
        # bool False: off
        # bool True: on
        if self.disabled:
            raise AttributeError("control '%s' is disabled" % self.name)
        if self.readonly:
            raise AttributeError("control '%s' is readonly" % self.name)
        # normalize to a genuine bool before storing it in item._selected
        # (this was previously a no-op "==" comparison)
        action = bool(action)
        compat = self._form.backwards_compat
        if not compat and item.disabled:
            raise AttributeError("item is disabled")
        else:
            if compat and item.disabled and action:
                raise AttributeError("item is disabled")
            if self.multiple:
                item.__dict__["_selected"] = action
            else:
                if not action:
                    item.__dict__["_selected"] = False
                else:
                    # single-selection: deselect everything else first
                    for o in self.items:
                        o.__dict__["_selected"] = False
                    item.__dict__["_selected"] = True
    def toggle_single(self, by_label=None):
        """Deprecated: toggle the selection of the single item in this control.
        Raises ItemCountError if the control does not contain only one item.
        by_label argument is ignored, and included only for backwards
        compatibility.
        """
        deprecation(
            "control.items[0].selected = not control.items[0].selected")
        if len(self.items) != 1:
            raise ItemCountError(
                "'%s' is not a single-item control" % self.name)
        item = self.items[0]
        self._set_selected_state(item, not item.selected)
    def set_single(self, selected, by_label=None):
        """Deprecated: set the selection of the single item in this control.
        Raises ItemCountError if the control does not contain only one item.
        by_label argument is ignored, and included only for backwards
        compatibility.
        """
        deprecation(
            "control.items[0].selected = <boolean>")
        if len(self.items) != 1:
            raise ItemCountError(
                "'%s' is not a single-item control" % self.name)
        self._set_selected_state(self.items[0], selected)
    def get_item_disabled(self, name, by_label=False, nr=None):
        """Get disabled state of named list item in a ListControl."""
        deprecation(
            "control.get(...).disabled")
        return self._get(name, by_label, nr).disabled
    def set_item_disabled(self, disabled, name, by_label=False, nr=None):
        """Set disabled state of named list item in a ListControl.
        disabled: boolean disabled state
        """
        deprecation(
            "control.get(...).disabled = <boolean>")
        self._get(name, by_label, nr).disabled = disabled
    def set_all_items_disabled(self, disabled):
        """Set disabled state of all list items in a ListControl.
        disabled: boolean disabled state
        """
        for o in self.items:
            o.disabled = disabled
    def get_item_attrs(self, name, by_label=False, nr=None):
        """Return dictionary of HTML attributes for a single ListControl item.
        The HTML element types that describe list items are: OPTION for SELECT
        controls, INPUT for the rest.  These elements have HTML attributes that
        you may occasionally want to know about -- for example, the "alt" HTML
        attribute gives a text string describing the item (graphical browsers
        usually display this as a tooltip).
        The returned dictionary maps HTML attribute names to values.  The names
        and values are taken from the original HTML.
        """
        deprecation(
            "control.get(...).attrs")
        return self._get(name, by_label, nr).attrs
    def add_to_form(self, form):
        assert self._form is None or form == self._form, (
            "can't add control to more than one form")
        self._form = form
        try:
            control = form.find_control(self.name, self.type)
        except ControlNotFoundError:
            # first control of this name/type: it becomes the master
            Control.add_to_form(self, form)
        else:
            control.merge_control(self)
    def merge_control(self, control):
        assert bool(control.multiple) == bool(self.multiple)
        # usually, isinstance(control, self.__class__)
        self.items.extend(control.items)
    def fixup(self):
        """
        ListControls are built up from component list items (which are also
        ListControls) during parsing.  This method should be called after all
        items have been added.  See ListControl.__doc__ for the reason this is
        required.
        """
        # Need to set default selection where no item was indicated as being
        # selected by the HTML:
        # CHECKBOX:
        #  Nothing should be selected.
        # SELECT/single, SELECT/multiple and RADIO:
        #  RFC 1866 (HTML 2.0): says first item should be selected.
        #  W3C HTML 4.01 Specification: says that client behaviour is
        #   undefined in this case.  For RADIO, exactly one must be selected,
        #   though which one is undefined.
        #  Both Netscape and Microsoft Internet Explorer (IE) choose first
        #   item for SELECT/single.  However, both IE5 and Mozilla (both 1.0
        #   and Firebird 0.6) leave all items unselected for RADIO and
        #   SELECT/multiple.
        # Since both Netscape and IE all choose the first item for
        # SELECT/single, we do the same.  OTOH, both Netscape and IE
        # leave SELECT/multiple with nothing selected, in violation of RFC 1866
        # (but not in violation of the W3C HTML 4 standard); the same is true
        # of RADIO (which *is* in violation of the HTML 4 standard).  We follow
        # RFC 1866 if the _select_default attribute is set, and Netscape and IE
        # otherwise.  RFC 1866 and HTML 4 are always violated insofar as you
        # can deselect all items in a RadioControl.
        for o in self.items:
            # set items' controls to self, now that we've merged
            o.__dict__["_control"] = self
    def __getattr__(self, name):
        if name == "value":
            compat = self._form.backwards_compat
            return [o.name for o in self.items if o.selected and
                    (not o.disabled or compat)]
        else:
            raise AttributeError("%s instance has no attribute '%s'" %
                                 (self.__class__.__name__, name))
    def __setattr__(self, name, value):
        if name == "value":
            if self.disabled:
                raise AttributeError("control '%s' is disabled" % self.name)
            if self.readonly:
                raise AttributeError("control '%s' is readonly" % self.name)
            self._set_value(value)
        elif name in ("name", "type", "multiple"):
            raise AttributeError("%s attribute is readonly" % name)
        else:
            self.__dict__[name] = value
    def _set_value(self, value):
        if value is None or isstringlike(value):
            raise TypeError("ListControl, must set a sequence")
        if not value:
            # empty sequence: deselect everything we're allowed to touch
            compat = self._form.backwards_compat
            for o in self.items:
                if not o.disabled or compat:
                    o.selected = False
        elif self.multiple:
            self._multiple_set_value(value)
        elif len(value) > 1:
            raise ItemCountError(
                "single selection list, must set sequence of "
                "length 0 or 1")
        else:
            self._single_set_value(value)
    def _get_items(self, name, target=1):
        # Return (on, off): the enabled items with the given name, split by
        # current selection state.  Requires at least `target` enabled items.
        all_items = self.get_items(name)
        items = [o for o in all_items if not o.disabled]
        if len(items) < target:
            if len(all_items) < target:
                raise ItemNotFoundError(
                    "insufficient items with name %r" % name)
            else:
                raise AttributeError(
                    "insufficient non-disabled items with name %s" % name)
        on = []
        off = []
        for o in items:
            if o.selected:
                on.append(o)
            else:
                off.append(o)
        return on, off
    def _single_set_value(self, value):
        assert len(value) == 1
        on, off = self._get_items(value[0])
        assert len(on) <= 1
        if not on:
            off[0].selected = True
    def _multiple_set_value(self, value):
        compat = self._form.backwards_compat
        turn_on = []  # transactional-ish
        turn_off = [item for item in self.items if
                    item.selected and (not item.disabled or compat)]
        # count how many times each item name is requested
        names = {}
        for nn in value:
            if nn in names.keys():
                names[nn] += 1
            else:
                names[nn] = 1
        for name, count in names.items():
            on, off = self._get_items(name, count)
            for i in range(count):
                if on:
                    # already selected: keep it that way
                    item = on[0]
                    del on[0]
                    del turn_off[turn_off.index(item)]
                else:
                    item = off[0]
                    del off[0]
                    turn_on.append(item)
        for item in turn_off:
            item.selected = False
        for item in turn_on:
            item.selected = True
    def set_value_by_label(self, value):
        """Set the value of control by item labels.
        value is expected to be an iterable of strings that are substrings of
        the item labels that should be selected.  Before substring matching is
        performed, the original label text is whitespace-compressed
        (consecutive whitespace characters are converted to a single space
        character) and leading and trailing whitespace is stripped.  Ambiguous
        labels are accepted without complaint if the form's backwards_compat is
        True; otherwise, it will not complain as long as all ambiguous labels
        share the same item name (e.g. OPTION value).
        """
        if isstringlike(value):
            raise TypeError(value)
        if not self.multiple and len(value) > 1:
            raise ItemCountError(
                "single selection list, must set sequence of "
                "length 0 or 1")
        items = []
        for nn in value:
            found = self.get_items(label=nn)
            if len(found) > 1:
                if not self._form.backwards_compat:
                    # ambiguous labels are fine as long as item names (e.g.
                    # OPTION values) are same
                    opt_name = found[0].name
                    if [o for o in found[1:] if o.name != opt_name]:
                        raise AmbiguityError(nn)
                else:
                    # OK, we'll guess :-( Assume first available item.
                    found = found[:1]
            for o in found:
                # For the multiple-item case, we could try to be smarter,
                # saving them up and trying to resolve, but that's too much.
                if self._form.backwards_compat or o not in items:
                    items.append(o)
                    break
            else:  # all of them are used
                raise ItemNotFoundError(nn)
        # now we have all the items that should be on
        # let's just turn everything off and then back on.
        self.value = []
        for o in items:
            o.selected = True
    def get_value_by_label(self):
        """Return the value of the control as given by normalized labels."""
        res = []
        compat = self._form.backwards_compat
        for o in self.items:
            if (not o.disabled or compat) and o.selected:
                for l in o.get_labels():
                    if l.text:
                        res.append(l.text)
                        break
                else:
                    res.append(None)
        return res
    def possible_items(self, by_label=False):
        """Deprecated: return the names or labels of all possible items.
        Includes disabled items, which may be misleading for some use cases.
        """
        deprecation(
            "[item.name for item in self.items]")
        if by_label:
            res = []
            for o in self.items:
                for l in o.get_labels():
                    if l.text:
                        res.append(l.text)
                        break
                else:
                    res.append(None)
            return res
        return [o.name for o in self.items]
    def _totally_ordered_pairs(self):
        if self.disabled:
            return []
        else:
            # one pair per successful (selected and enabled) item
            return [(o._index, self.name, o.name) for o in self.items
                    if o.selected and not o.disabled]
    def __str__(self):
        name = self.name
        if name is None: name = "<None>"
        display = [str(o) for o in self.items]
        infos = []
        if self.disabled: infos.append("disabled")
        if self.readonly: infos.append("readonly")
        info = ", ".join(infos)
        if info: info = " (%s)" % info
        return "<%s(%s=[%s])%s>" % (self.__class__.__name__,
                                    name, ", ".join(display), info)
class RadioControl(ListControl):
    """
    Covers:
    INPUT/RADIO
    """
    def __init__(self, type, name, attrs, select_default=False, index=None):
        # a radio button with no explicit value submits "on"
        attrs.setdefault("value", "on")
        ListControl.__init__(self, type, name, attrs, select_default,
                             called_as_base_class=True, index=index)
        self.__dict__["multiple"] = False
        item = Item(self, attrs, index)
        item.__dict__["_selected"] = attrs.has_key("checked")
    def fixup(self):
        ListControl.fixup(self)
        # items that are both selected and enabled
        active = [o for o in self.items if o.selected and not o.disabled]
        if not active:
            if self._select_default:
                # RFC 1866 behaviour: default to the first enabled item
                for o in self.items:
                    if not o.disabled:
                        o.selected = True
                        break
        else:
            # Ensure only one item selected.  Choose the last one,
            # following IE and Firefox.
            for o in active[:-1]:
                o.selected = False
    def get_labels(self):
        # labels belong to the individual items, not the control as a whole
        return []
class CheckboxControl(ListControl):
    """
    Covers:
    INPUT/CHECKBOX
    """
    def __init__(self, type, name, attrs, select_default=False, index=None):
        # a checkbox with no explicit value submits "on"
        attrs.setdefault("value", "on")
        ListControl.__init__(self, type, name, attrs, select_default,
                             called_as_base_class=True, index=index)
        self.__dict__["multiple"] = True
        item = Item(self, attrs, index)
        item.__dict__["_selected"] = attrs.has_key("checked")
    def get_labels(self):
        # labels belong to the individual items, not the control as a whole
        return []
class SelectControl(ListControl):
    """
    Covers:
    SELECT (and OPTION)
    OPTION 'values', in HTML parlance, are Item 'names' in ClientForm parlance.
    SELECT control values and labels are subject to some messy defaulting
    rules.  For example, if the HTML representation of the control is:
    <SELECT name=year>
    <OPTION value=0 label="2002">current year</OPTION>
    <OPTION value=1>2001</OPTION>
    <OPTION>2000</OPTION>
    </SELECT>
    The items, in order, have labels "2002", "2001" and "2000", whereas their
    names (the OPTION values) are "0", "1" and "2000" respectively.  Note that
    the value of the last OPTION in this example defaults to its contents, as
    specified by RFC 1866, as do the labels of the second and third OPTIONs.
    The OPTION labels are sometimes more meaningful than the OPTION values,
    which can make for more maintainable code.
    Additional read-only public attribute: attrs
    The attrs attribute is a dictionary of the original HTML attributes of the
    SELECT element.  Other ListControls do not have this attribute, because in
    other cases the control as a whole does not correspond to any single HTML
    element.  control.get(...).attrs may be used as usual to get at the HTML
    attributes of the HTML elements corresponding to individual list items (for
    SELECT controls, these are OPTION elements).
    Another special case is that the Item.attrs dictionaries have a special key
    "contents" which does not correspond to any real HTML attribute, but rather
    contains the contents of the OPTION element:
    <OPTION>this bit</OPTION>
    """
    # HTML attributes here are treated slightly differently from other list
    # controls:
    # -The SELECT HTML attributes dictionary is stuffed into the OPTION
    #  HTML attributes dictionary under the "__select" key.
    # -The content of each OPTION element is stored under the special
    #  "contents" key of the dictionary.
    # After all this, the dictionary is passed to the SelectControl constructor
    # as the attrs argument, as usual.  However:
    # -The first SelectControl constructed when building up a SELECT control
    #  has a constructor attrs argument containing only the __select key -- so
    #  this SelectControl represents an empty SELECT control.
    # -Subsequent SelectControls have both OPTION HTML-attribute in attrs and
    #  the __select dictionary containing the SELECT HTML-attributes.
    def __init__(self, type, name, attrs, select_default=False, index=None):
        # fish out the SELECT HTML attributes from the OPTION HTML attributes
        # dictionary
        self.attrs = attrs["__select"].copy()
        self.__dict__["_label"] = _get_label(self.attrs)
        self.__dict__["id"] = self.attrs.get("id")
        self.__dict__["multiple"] = self.attrs.has_key("multiple")
        # the majority of the contents, label, and value dance already happened
        contents = attrs.get("contents")
        # work on a copy so the caller's dictionary is left untouched
        attrs = attrs.copy()
        del attrs["__select"]
        ListControl.__init__(self, type, name, self.attrs, select_default,
                             called_as_base_class=True, index=index)
        self.disabled = self.attrs.has_key("disabled")
        self.readonly = self.attrs.has_key("readonly")
        if attrs.has_key("value"):
            # otherwise it is a marker 'select started' token
            o = Item(self, attrs, index)
            o.__dict__["_selected"] = attrs.has_key("selected")
            # add 'label' label and contents label, if different.  If both are
            # provided, the 'label' label is used for display in HTML
            # 4.0-compliant browsers (and any lower spec? not sure) while the
            # contents are used for display in older or less-compliant
            # browsers.  We make label objects for both, if the values are
            # different.
            label = attrs.get("label")
            if label:
                o._labels.append(Label({"__text": label}))
                if contents and contents != label:
                    o._labels.append(Label({"__text": contents}))
            elif contents:
                o._labels.append(Label({"__text": contents}))
    def fixup(self):
        """Pick default selections once all OPTION items have been merged."""
        ListControl.fixup(self)
        # Firefox doesn't exclude disabled items from those considered here
        # (i.e. from 'found', for both branches of the if below).  Note that
        # IE6 doesn't support the disabled attribute on OPTIONs at all.
        found = [o for o in self.items if o.selected]
        if not found:
            if not self.multiple or self._select_default:
                for o in self.items:
                    if not o.disabled:
                        # temporarily clear our own disabled flag so the
                        # assignment below isn't rejected
                        was_disabled = self.disabled
                        self.disabled = False
                        try:
                            o.selected = True
                        finally:
                            o.disabled = was_disabled
                        break
        elif not self.multiple:
            # Ensure only one item selected.  Choose the last one,
            # following IE and Firefox.
            for o in found[:-1]:
                o.selected = False
#---------------------------------------------------
class SubmitControl(ScalarControl):
    """
    Covers:
    INPUT/SUBMIT
    BUTTON/SUBMIT
    """
    def __init__(self, type, name, attrs, index=None):
        ScalarControl.__init__(self, type, name, attrs, index)
        # IE5 defaults SUBMIT value to "Submit Query"; Firebird 0.6 leaves it
        # blank, Konqueror 3.1 defaults to "Submit".  HTML spec. doesn't seem
        # to define this.
        if self.value is None:
            self.value = ""
        self.readonly = True
    def get_labels(self):
        """Return labels: the button text (if any) first, then inherited ones."""
        labels = []
        if self.value:
            labels.append(Label({"__text": self.value}))
        labels.extend(ScalarControl.get_labels(self))
        return labels
    def is_of_kind(self, kind):
        return kind == "clickable"
    def _click(self, form, coord, return_type, request_class=urllib2.Request):
        # mark ourselves clicked only for the duration of request building
        self._clicked = coord
        r = form._switch_click(return_type, request_class)
        self._clicked = False
        return r
    def _totally_ordered_pairs(self):
        # only the clicked submit button is successful
        if not self._clicked:
            return []
        return ScalarControl._totally_ordered_pairs(self)
#---------------------------------------------------
class ImageControl(SubmitControl):
    """
    Covers:
    INPUT/IMAGE
    Coordinates are specified using one of the HTMLForm.click* methods.
    """
    def __init__(self, type, name, attrs, index=None):
        SubmitControl.__init__(self, type, name, attrs, index)
        self.readonly = False
    def _totally_ordered_pairs(self):
        """Return name.x / name.y click coordinates (plus value, if set)."""
        clicked = self._clicked
        if self.disabled or not clicked:
            return []
        name = self.name
        if name is None:
            return []
        pairs = [
            (self._index, "%s.x" % name, str(clicked[0])),
            (self._index+1, "%s.y" % name, str(clicked[1])),
            ]
        value = self._value
        if value:
            pairs.append((self._index+2, name, value))
        return pairs
    get_labels = ScalarControl.get_labels
# aliases, just to make str(control) and str(form) clearer
class PasswordControl(TextControl): pass
class HiddenControl(TextControl): pass
class TextareaControl(TextControl): pass
class SubmitButtonControl(SubmitControl): pass
# convenience predicate: true for SELECT/RADIO/CHECKBOX controls
def is_listcontrol(control): return control.is_of_kind("list")
class HTMLForm:
"""Represents a single HTML <form> ... </form> element.
A form consists of a sequence of controls that usually have names, and
which can take on various values. The values of the various types of
controls represent variously: text, zero-or-one-of-many or many-of-many
choices, and files to be uploaded. Some controls can be clicked on to
submit the form, and clickable controls' values sometimes include the
coordinates of the click.
Forms can be filled in with data to be returned to the server, and then
submitted, using the click method to generate a request object suitable for
passing to urllib2.urlopen (or the click_request_data or click_pairs
methods if you're not using urllib2).
import ClientForm
forms = ClientForm.ParseFile(html, base_uri)
form = forms[0]
form["query"] = "Python"
form.find_control("nr_results").get("lots").selected = True
response = urllib2.urlopen(form.click())
Usually, HTMLForm instances are not created directly. Instead, the
ParseFile or ParseResponse factory functions are used. If you do construct
HTMLForm objects yourself, however, note that an HTMLForm instance is only
properly initialised after the fixup method has been called (ParseFile and
ParseResponse do this for you). See ListControl.__doc__ for the reason
this is required.
Indexing a form (form["control_name"]) returns the named Control's value
attribute. Assignment to a form index (form["control_name"] = something)
is equivalent to assignment to the named Control's value attribute. If you
need to be more specific than just supplying the control's name, use the
set_value and get_value methods.
ListControl values are lists of item names (specifically, the names of the
items that are selected and not disabled, and hence are "successful" -- ie.
cause data to be returned to the server). The list item's name is the
value of the corresponding HTML element's "value" attribute.
Example:
<INPUT type="CHECKBOX" name="cheeses" value="leicester"></INPUT>
<INPUT type="CHECKBOX" name="cheeses" value="cheddar"></INPUT>
defines a CHECKBOX control with name "cheeses" which has two items, named
"leicester" and "cheddar".
Another example:
<SELECT name="more_cheeses">
<OPTION>1</OPTION>
<OPTION value="2" label="CHEDDAR">cheddar</OPTION>
</SELECT>
defines a SELECT control with name "more_cheeses" which has two items,
named "1" and "2" (because the OPTION element's value HTML attribute
defaults to the element contents -- see SelectControl.__doc__ for more on
these defaulting rules).
To select, deselect or otherwise manipulate individual list items, use the
HTMLForm.find_control() and ListControl.get() methods. To set the whole
value, do as for any other control: use indexing or the set_/get_value
methods.
Example:
# select *only* the item named "cheddar"
form["cheeses"] = ["cheddar"]
# select "cheddar", leave other items unaffected
form.find_control("cheeses").get("cheddar").selected = True
Some controls (RADIO and SELECT without the multiple attribute) can only
have zero or one items selected at a time. Some controls (CHECKBOX and
SELECT with the multiple attribute) can have multiple items selected at a
time. To set the whole value of a ListControl, assign a sequence to a form
index:
form["cheeses"] = ["cheddar", "leicester"]
If the ListControl is not multiple-selection, the assigned list must be of
length one.
To check if a control has an item, if an item is selected, or if an item is
successful (selected and not disabled), respectively:
"cheddar" in [item.name for item in form.find_control("cheeses").items]
"cheddar" in [item.name for item in form.find_control("cheeses").items
              if item.selected]
"cheddar" in form["cheeses"] # (or "cheddar" in form.get_value("cheeses"))
Note that some list items may be disabled (see below).
Note the following mistake:
form[control_name] = control_value
assert form[control_name] == control_value # not necessarily true
The reason for this is that form[control_name] always gives the list items
in the order they were listed in the HTML.
List items (hence list values, too) can be referred to in terms of list
item labels rather than list item names using the appropriate label
arguments. Note that each item may have several labels.
The question of default values of OPTION contents, labels and values is
somewhat complicated: see SelectControl.__doc__ and
ListControl.get_item_attrs.__doc__ if you think you need to know.
Controls can be disabled or readonly. In either case, the control's value
cannot be changed until you clear those flags (see example below).
Disabled is the state typically represented by browsers by 'greying out' a
control. Disabled controls are not 'successful' -- they don't cause data
to get returned to the server. Readonly controls usually appear in
browsers as read-only text boxes. Readonly controls are successful. List
items can also be disabled. Attempts to select or deselect disabled items
fail with AttributeError.
If a lot of controls are readonly, it can be useful to do this:
form.set_all_readonly(False)
To clear a control's value attribute, so that it is not successful (until a
value is subsequently set):
form.clear("cheeses")
More examples:
control = form.find_control("cheeses")
control.disabled = False
control.readonly = False
control.get("gruyere").disabled = True
control.items[0].selected = True
See the various Control classes for further documentation. Many methods
take name, type, kind, id, label and nr arguments to specify the control to
be operated on: see HTMLForm.find_control.__doc__.
ControlNotFoundError (subclass of ValueError) is raised if the specified
control can't be found. This includes occasions where a non-ListControl
is found, but the method (set, for example) requires a ListControl.
ItemNotFoundError (subclass of ValueError) is raised if a list item can't
be found. ItemCountError (subclass of ValueError) is raised if an attempt
is made to select more than one item and the control doesn't allow that, or
set/get_single are called and the control contains more than one item.
AttributeError is raised if a control or item is readonly or disabled and
an attempt is made to alter its value.
Security note: Remember that any passwords you store in HTMLForm instances
will be saved to disk in the clear if you pickle them (directly or
indirectly). The simplest solution to this is to avoid pickling HTMLForm
objects. You could also pickle before filling in any password, or just set
the password to "" before pickling.
Public attributes:
action: full (absolute URI) form action
method: "GET" or "POST"
enctype: form transfer encoding MIME type
name: name of form (None if no name was specified)
attrs: dictionary mapping original HTML form attributes to their values
controls: list of Control instances; do not alter this list
(instead, call form.new_control to make a Control and add it to the
form, or control.add_to_form if you already have a Control instance)
Methods for form filling:
-------------------------
Most of the these methods have very similar arguments. See
HTMLForm.find_control.__doc__ for details of the name, type, kind, label
and nr arguments.
def find_control(self,
name=None, type=None, kind=None, id=None, predicate=None,
nr=None, label=None)
get_value(name=None, type=None, kind=None, id=None, nr=None,
by_label=False, # by_label is deprecated
label=None)
set_value(value,
name=None, type=None, kind=None, id=None, nr=None,
by_label=False, # by_label is deprecated
label=None)
clear_all()
clear(name=None, type=None, kind=None, id=None, nr=None, label=None)
set_all_readonly(readonly)
Method applying only to FileControls:
add_file(file_object,
content_type="application/octet-stream", filename=None,
name=None, id=None, nr=None, label=None)
Methods applying only to clickable controls:
click(name=None, type=None, id=None, nr=0, coord=(1,1), label=None)
click_request_data(name=None, type=None, id=None, nr=0, coord=(1,1),
label=None)
click_pairs(name=None, type=None, id=None, nr=0, coord=(1,1), label=None)
"""
type2class = {
"text": TextControl,
"password": PasswordControl,
"hidden": HiddenControl,
"textarea": TextareaControl,
"isindex": IsindexControl,
"file": FileControl,
"button": IgnoreControl,
"buttonbutton": IgnoreControl,
"reset": IgnoreControl,
"resetbutton": IgnoreControl,
"submit": SubmitControl,
"submitbutton": SubmitButtonControl,
"image": ImageControl,
"radio": RadioControl,
"checkbox": CheckboxControl,
"select": SelectControl,
}
#---------------------------------------------------
# Initialisation. Use ParseResponse / ParseFile instead.
def __init__(self, action, method="GET",
enctype="application/x-www-form-urlencoded",
name=None, attrs=None,
request_class=urllib2.Request,
forms=None, labels=None, id_to_labels=None,
backwards_compat=True):
"""
In the usual case, use ParseResponse (or ParseFile) to create new
HTMLForm objects.
action: full (absolute URI) form action
method: "GET" or "POST"
enctype: form transfer encoding MIME type
name: name of form
attrs: dictionary mapping original HTML form attributes to their values
"""
self.action = action
self.method = method
self.enctype = enctype
self.name = name
if attrs is not None:
self.attrs = attrs.copy()
else:
self.attrs = {}
self.controls = []
self._request_class = request_class
# these attributes are used by zope.testbrowser
self._forms = forms # this is a semi-public API!
self._labels = labels # this is a semi-public API!
self._id_to_labels = id_to_labels # this is a semi-public API!
self.backwards_compat = backwards_compat # note __setattr__
def __getattr__(self, name):
if name == "backwards_compat":
return self._backwards_compat
return getattr(HTMLForm, name)
    def __setattr__(self, name, value):
        # yuck
        # Setting .backwards_compat must be propagated to every Label of
        # every item of every list control, so deprecated label behaviour
        # stays consistent form-wide; the value itself is stored under the
        # private name _backwards_compat (see __getattr__).
        if name == "backwards_compat":
            name = "_backwards_compat"
            value = bool(value)
            for cc in self.controls:
                try:
                    items = cc.items
                except AttributeError:
                    # not a list control -- nothing to propagate
                    continue
                else:
                    for ii in items:
                        for ll in ii.get_labels():
                            ll._backwards_compat = value
        self.__dict__[name] = value
def new_control(self, type, name, attrs,
ignore_unknown=False, select_default=False, index=None):
"""Adds a new control to the form.
This is usually called by ParseFile and ParseResponse. Don't call it
youself unless you're building your own Control instances.
Note that controls representing lists of items are built up from
controls holding only a single list item. See ListControl.__doc__ for
further information.
type: type of control (see Control.__doc__ for a list)
attrs: HTML attributes of control
ignore_unknown: if true, use a dummy Control instance for controls of
unknown type; otherwise, use a TextControl
select_default: for RADIO and multiple-selection SELECT controls, pick
the first item as the default if no 'selected' HTML attribute is
present (this defaulting happens when the HTMLForm.fixup method is
called)
index: index of corresponding element in HTML (see
MoreFormTests.test_interspersed_controls for motivation)
"""
type = type.lower()
klass = self.type2class.get(type)
if klass is None:
if ignore_unknown:
klass = IgnoreControl
else:
klass = TextControl
a = attrs.copy()
if issubclass(klass, ListControl):
control = klass(type, name, a, select_default, index)
else:
control = klass(type, name, a, index)
control.add_to_form(self)
def fixup(self):
"""Normalise form after all controls have been added.
This is usually called by ParseFile and ParseResponse. Don't call it
youself unless you're building your own Control instances.
This method should only be called once, after all controls have been
added to the form.
"""
for control in self.controls:
control.fixup()
self.backwards_compat = self._backwards_compat
#---------------------------------------------------
def __str__(self):
header = "%s%s %s %s" % (
(self.name and self.name+" " or ""),
self.method, self.action, self.enctype)
rep = [header]
for control in self.controls:
rep.append(" %s" % str(control))
return "<%s>" % "\n".join(rep)
#---------------------------------------------------
# Form-filling methods.
    def __getitem__(self, name):
        # form["name"] -> value of the named control
        return self.find_control(name).value
    def __contains__(self, name):
        # "name" in form
        # NOTE(review): find_control raises ControlNotFoundError for a
        # missing name rather than returning a falsy value, so membership
        # tests on absent controls raise instead of returning False --
        # confirm whether that is intended.
        return bool(self.find_control(name))
    def __setitem__(self, name, value):
        # form["name"] = value -- AttributeError from readonly/disabled
        # controls is converted to ValueError for callers
        control = self.find_control(name)
        try:
            control.value = value
        except AttributeError, e:
            raise ValueError(str(e))
def get_value(self,
name=None, type=None, kind=None, id=None, nr=None,
by_label=False, # by_label is deprecated
label=None):
"""Return value of control.
If only name and value arguments are supplied, equivalent to
form[name]
"""
if by_label:
deprecation("form.get_value_by_label(...)")
c = self.find_control(name, type, kind, id, label=label, nr=nr)
if by_label:
try:
meth = c.get_value_by_label
except AttributeError:
raise NotImplementedError(
"control '%s' does not yet support by_label" % c.name)
else:
return meth()
else:
return c.value
def set_value(self, value,
name=None, type=None, kind=None, id=None, nr=None,
by_label=False, # by_label is deprecated
label=None):
"""Set value of control.
If only name and value arguments are supplied, equivalent to
form[name] = value
"""
if by_label:
deprecation("form.get_value_by_label(...)")
c = self.find_control(name, type, kind, id, label=label, nr=nr)
if by_label:
try:
meth = c.set_value_by_label
except AttributeError:
raise NotImplementedError(
"control '%s' does not yet support by_label" % c.name)
else:
meth(value)
else:
c.value = value
def get_value_by_label(
self, name=None, type=None, kind=None, id=None, label=None, nr=None):
"""
All arguments should be passed by name.
"""
c = self.find_control(name, type, kind, id, label=label, nr=nr)
return c.get_value_by_label()
def set_value_by_label(
self, value,
name=None, type=None, kind=None, id=None, label=None, nr=None):
"""
All arguments should be passed by name.
"""
c = self.find_control(name, type, kind, id, label=label, nr=nr)
c.set_value_by_label(value)
def set_all_readonly(self, readonly):
for control in self.controls:
control.readonly = bool(readonly)
def clear_all(self):
"""Clear the value attributes of all controls in the form.
See HTMLForm.clear.__doc__.
"""
for control in self.controls:
control.clear()
def clear(self,
name=None, type=None, kind=None, id=None, nr=None, label=None):
"""Clear the value attribute of a control.
As a result, the affected control will not be successful until a value
is subsequently set. AttributeError is raised on readonly controls.
"""
c = self.find_control(name, type, kind, id, label=label, nr=nr)
c.clear()
#---------------------------------------------------
# Form-filling methods applying only to ListControls.
    # The five methods below are deprecated item-manipulation wrappers:
    # prefer operating on the control (find_control) and its items directly.
    def possible_items(self, # deprecated
                       name=None, type=None, kind=None, id=None,
                       nr=None, by_label=False, label=None):
        """Return a list of all values that the specified control can take."""
        c = self._find_list_control(name, type, kind, id, label, nr)
        return c.possible_items(by_label)
    def set(self, selected, item_name, # deprecated
            name=None, type=None, kind=None, id=None, nr=None,
            by_label=False, label=None):
        """Select / deselect named list item.
        selected: boolean selected state
        """
        self._find_list_control(name, type, kind, id, label, nr).set(
            selected, item_name, by_label)
    def toggle(self, item_name, # deprecated
               name=None, type=None, kind=None, id=None, nr=None,
               by_label=False, label=None):
        """Toggle selected state of named list item."""
        self._find_list_control(name, type, kind, id, label, nr).toggle(
            item_name, by_label)
    def set_single(self, selected, # deprecated
                   name=None, type=None, kind=None, id=None,
                   nr=None, by_label=None, label=None):
        """Select / deselect list item in a control having only one item.
        If the control has multiple list items, ItemCountError is raised.
        This is just a convenience method, so you don't need to know the item's
        name -- the item name in these single-item controls is usually
        something meaningless like "1" or "on".
        For example, if a checkbox has a single item named "on", the following
        two calls are equivalent:
        control.toggle("on")
        control.toggle_single()
        """ # by_label ignored and deprecated
        self._find_list_control(
            name, type, kind, id, label, nr).set_single(selected)
    def toggle_single(self, name=None, type=None, kind=None, id=None,
                      nr=None, by_label=None, label=None): # deprecated
        """Toggle selected state of list item in control having only one item.
        The rest is as for HTMLForm.set_single.__doc__.
        """ # by_label ignored and deprecated
        self._find_list_control(name, type, kind, id, label, nr).toggle_single()
#---------------------------------------------------
# Form-filling method applying only to FileControls.
def add_file(self, file_object, content_type=None, filename=None,
name=None, id=None, nr=None, label=None):
"""Add a file to be uploaded.
file_object: file-like object (with read method) from which to read
data to upload
content_type: MIME content type of data to upload
filename: filename to pass to server
If filename is None, no filename is sent to the server.
If content_type is None, the content type is guessed based on the
filename and the data from read from the file object.
XXX
At the moment, guessed content type is always application/octet-stream.
Use sndhdr, imghdr modules. Should also try to guess HTML, XML, and
plain text.
Note the following useful HTML attributes of file upload controls (see
HTML 4.01 spec, section 17):
accept: comma-separated list of content types that the server will
handle correctly; you can use this to filter out non-conforming files
size: XXX IIRC, this is indicative of whether form wants multiple or
single files
maxlength: XXX hint of max content length in bytes?
"""
self.find_control(name, "file", id=id, label=label, nr=nr).add_file(
file_object, content_type, filename)
#---------------------------------------------------
# Form submission methods, applying only to clickable controls.
    def click(self, name=None, type=None, id=None, nr=0, coord=(1,1),
              request_class=urllib2.Request,
              label=None):
        """Return request that would result from clicking on a control.
        The request object is a urllib2.Request instance, which you can pass to
        urllib2.urlopen (or ClientCookie.urlopen).
        Only some control types (INPUT/SUBMIT & BUTTON/SUBMIT buttons and
        IMAGEs) can be clicked.
        Will click on the first clickable control, subject to the name, type
        and nr arguments (as for find_control). If no name, type, id or number
        is specified and there are no clickable controls, a request will be
        returned for the form in its current, un-clicked, state.
        IndexError is raised if any of name, type, id or nr is specified but no
        matching control is found. ValueError is raised if the HTMLForm has an
        enctype attribute that is not recognised.
        You can optionally specify a coordinate to click at, which only makes a
        difference if you clicked on an image.
        """
        # NOTE(review): the request_class parameter is ignored here -- the
        # class captured as self._request_class in __init__ is used instead.
        return self._click(name, type, id, label, nr, coord, "request",
                           self._request_class)
    def click_request_data(self,
                           name=None, type=None, id=None,
                           nr=0, coord=(1,1),
                           request_class=urllib2.Request,
                           label=None):
        """As for click method, but return a tuple (url, data, headers).
        You can use this data to send a request to the server. This is useful
        if you're using httplib or urllib rather than urllib2. Otherwise, use
        the click method.
        # Untested. Have to subclass to add headers, I think -- so use urllib2
        # instead!
        import urllib
        url, data, hdrs = form.click_request_data()
        r = urllib.urlopen(url, data)
        # Untested. I don't know of any reason to use httplib -- you can get
        # just as much control with urllib2.
        import httplib, urlparse
        url, data, hdrs = form.click_request_data()
        tup = urlparse.urlparse(url)
        host, path = tup[1], urlparse.urlunparse((None, None)+tup[2:])
        conn = httplib.HTTPConnection(host)
        if data:
            conn.request("POST", path, data, hdrs)
        else:
            conn.request("GET", path, headers=hdrs)
        r = conn.getresponse()
        """
        # NOTE(review): request_class is ignored, as in click().
        return self._click(name, type, id, label, nr, coord, "request_data",
                           self._request_class)
    def click_pairs(self, name=None, type=None, id=None,
                    nr=0, coord=(1,1),
                    label=None):
        """As for click_request_data, but returns a list of (key, value) pairs.
        You can use this list as an argument to ClientForm.urlencode. This is
        usually only useful if you're using httplib or urllib rather than
        urllib2 or ClientCookie. It may also be useful if you want to manually
        tweak the keys and/or values, but this should not be necessary.
        Otherwise, use the click method.
        Note that this method is only useful for forms of MIME type
        x-www-form-urlencoded. In particular, it does not return the
        information required for file upload. If you need file upload and are
        not using urllib2, use click_request_data.
        Also note that Python 2.0's urllib.urlencode is slightly broken: it
        only accepts a mapping, not a sequence of pairs, as an argument. This
        messes up any ordering in the argument. Use ClientForm.urlencode
        instead.
        """
        return self._click(name, type, id, label, nr, coord, "pairs",
                           self._request_class)
#---------------------------------------------------
    def find_control(self,
                     name=None, type=None, kind=None, id=None,
                     predicate=None, nr=None,
                     label=None):
        """Locate and return some specific control within the form.
        At least one of the name, type, kind, predicate and nr arguments must
        be supplied. If no matching control is found, ControlNotFoundError is
        raised.
        If name is specified, then the control must have the indicated name.
        If type is specified then the control must have the specified type (in
        addition to the types possible for <input> HTML tags: "text",
        "password", "hidden", "submit", "image", "button", "radio", "checkbox",
        "file" we also have "reset", "buttonbutton", "submitbutton",
        "resetbutton", "textarea", "select" and "isindex").
        If kind is specified, then the control must fall into the specified
        group, each of which satisfies a particular interface. The types are
        "text", "list", "multilist", "singlelist", "clickable" and "file".
        If id is specified, then the control must have the indicated id.
        If predicate is specified, then the control must match that function.
        The predicate function is passed the control as its single argument,
        and should return a boolean value indicating whether the control
        matched.
        nr, if supplied, is the sequence number of the control (where 0 is the
        first). Note that control 0 is the first control matching all the
        other arguments (if supplied); it is not necessarily the first control
        in the form. If no nr is supplied, AmbiguityError is raised if
        multiple controls match the other arguments (unless the
        .backwards-compat attribute is true).
        If label is specified, then the control must have this label. Note
        that radio controls and checkboxes never have labels: their items do.
        """
        # Reject a fully-unconstrained query early; detailed per-argument
        # validation happens in _find_control.
        if ((name is None) and (type is None) and (kind is None) and
            (id is None) and (label is None) and (predicate is None) and
            (nr is None)):
            raise ValueError(
                "at least one argument must be supplied to specify control")
        return self._find_control(name, type, kind, id, label, predicate, nr)
#---------------------------------------------------
# Private methods.
def _find_list_control(self,
name=None, type=None, kind=None, id=None,
label=None, nr=None):
if ((name is None) and (type is None) and (kind is None) and
(id is None) and (label is None) and (nr is None)):
raise ValueError(
"at least one argument must be supplied to specify control")
return self._find_control(name, type, kind, id, label,
is_listcontrol, nr)
    def _find_control(self, name, type, kind, id, label, predicate, nr):
        # Validate each optional filter's shape before scanning.
        if (name is not None) and not isstringlike(name):
            raise TypeError("control name must be string-like")
        if (type is not None) and not isstringlike(type):
            raise TypeError("control type must be string-like")
        if (kind is not None) and not isstringlike(kind):
            raise TypeError("control kind must be string-like")
        if (id is not None) and not isstringlike(id):
            raise TypeError("control id must be string-like")
        if (label is not None) and not isstringlike(label):
            raise TypeError("control label must be string-like")
        if (predicate is not None) and not callable(predicate):
            raise TypeError("control predicate must be callable")
        if (nr is not None) and nr < 0:
            # NOTE(review): the message says "positive" but zero is
            # accepted -- nr is really a non-negative index.
            raise ValueError("control number must be a positive integer")
        orig_nr = nr
        found = None
        ambiguous = False
        # In backwards-compatibility mode an unspecified nr defaults to 0:
        # silently take the first match instead of raising AmbiguityError.
        if nr is None and self.backwards_compat:
            nr = 0
        for control in self.controls:
            if name is not None and name != control.name:
                continue
            if type is not None and type != control.type:
                continue
            if kind is not None and not control.is_of_kind(kind):
                continue
            if id is not None and id != control.id:
                continue
            if predicate and not predicate(control):
                continue
            if label:
                # A label filter matches if it is a substring of the text
                # of any of the control's labels.
                for l in control.get_labels():
                    if l.text.find(label) > -1:
                        break
                else:
                    continue
            if nr is not None:
                if nr == 0:
                    return control # early exit: unambiguous due to nr
                nr -= 1
                continue
            # nr is None: remember the first match and keep scanning so a
            # second match can be reported as ambiguous.
            if found:
                ambiguous = True
                break
            found = control
        if found and not ambiguous:
            return found
        # Build a human-readable description of the failed query for the
        # exception message.
        description = []
        if name is not None: description.append("name '%s'" % name)
        if type is not None: description.append("type '%s'" % type)
        if kind is not None: description.append("kind '%s'" % kind)
        if id is not None: description.append("id '%s'" % id)
        if label is not None: description.append("label '%s'" % label)
        if predicate is not None:
            description.append("predicate %s" % predicate)
        if orig_nr: description.append("nr %d" % orig_nr)
        description = ", ".join(description)
        if ambiguous:
            raise AmbiguityError("more than one control matching "+description)
        elif not found:
            raise ControlNotFoundError("no control matching "+description)
        # Unreachable: every path above either returns or raises.
        assert False
def _click(self, name, type, id, label, nr, coord, return_type,
request_class=urllib2.Request):
try:
control = self._find_control(
name, type, "clickable", id, label, None, nr)
except ControlNotFoundError:
if ((name is not None) or (type is not None) or (id is not None) or
(nr != 0)):
raise
# no clickable controls, but no control was explicitly requested,
# so return state without clicking any control
return self._switch_click(return_type, request_class)
else:
return control._click(self, coord, return_type, request_class)
def _pairs(self):
"""Return sequence of (key, value) pairs suitable for urlencoding."""
return [(k, v) for (i, k, v, c_i) in self._pairs_and_controls()]
def _pairs_and_controls(self):
"""Return sequence of (index, key, value, control_index)
of totally ordered pairs suitable for urlencoding.
control_index is the index of the control in self.controls
"""
pairs = []
for control_index in range(len(self.controls)):
control = self.controls[control_index]
for ii, key, val in control._totally_ordered_pairs():
pairs.append((ii, key, val, control_index))
# stable sort by ONLY first item in tuple
pairs.sort()
return pairs
    def _request_data(self):
        """Return a tuple (url, data, headers)."""
        method = self.method.upper()
        #scheme, netloc, path, parameters, query, frag = urlparse.urlparse(self.action)
        parts = urlparse.urlparse(self.action)
        rest, (query, frag) = parts[:-2], parts[-2:]
        if method == "GET":
            if self.enctype != "application/x-www-form-urlencoded":
                raise ValueError(
                    "unknown GET form encoding type '%s'" % self.enctype)
            # Successful pairs travel in the query string; any query or
            # fragment on the action URI is replaced.
            parts = rest + (urlencode(self._pairs()), "")
            uri = urlparse.urlunparse(parts)
            return uri, None, []
        elif method == "POST":
            # Keep the action's query string; the fragment is dropped.
            parts = rest + (query, "")
            uri = urlparse.urlunparse(parts)
            if self.enctype == "application/x-www-form-urlencoded":
                return (uri, urlencode(self._pairs()),
                        [("Content-type", self.enctype)])
            elif self.enctype == "multipart/form-data":
                data = StringIO()
                http_hdrs = []
                mw = MimeWriter(data, http_hdrs)
                f = mw.startmultipartbody("form-data", add_to_http_hdrs=True,
                                          prefix=0)
                # Each control serialises its own pairs; file controls
                # write their file bodies here.
                for ii, k, v, control_index in self._pairs_and_controls():
                    self.controls[control_index]._write_mime_data(mw, k, v)
                mw.lastpart()
                return uri, data.getvalue(), http_hdrs
            else:
                raise ValueError(
                    "unknown POST form encoding type '%s'" % self.enctype)
        else:
            raise ValueError("Unknown method '%s'" % method)
def _switch_click(self, return_type, request_class=urllib2.Request):
# This is called by HTMLForm and clickable Controls to hide switching
# on return_type.
if return_type == "pairs":
return self._pairs()
elif return_type == "request_data":
return self._request_data()
else:
req_data = self._request_data()
req = request_class(req_data[0], req_data[1])
for key, val in req_data[2]:
add_hdr = req.add_header
if key.lower() == "content-type":
try:
add_hdr = req.add_unredirected_header
except AttributeError:
# pre-2.4 and not using ClientCookie
pass
add_hdr(key, val)
return req
| aaronsw/watchdog | vendor/python-clientform/ClientForm-0.2.2.py | Python | agpl-3.0 | 119,500 |
"""
Library example from the EMF wikipedia page:
https://fr.wikipedia.org/wiki/Eclipse_Modeling_Framework#/media/File:EMF_based_meta-model.png
The static metamodel had been produced by hand in this example
"""
import sys
import pyecore.ecore as Ecore
from pyecore.ecore import EObject, EAttribute, EString, EEnum, EReference, \
MetaEClass, EInteger
# EPackage metadata for the 'library' metamodel.
name = 'library'
nsPrefix = 'lib'
nsURI = 'http://emf.wikipedia.org/2011/Library'
# Do not remove
# The EPackage instance representing this module as an Ecore package.
eClass = Ecore.EPackage(name=name, nsURI=nsURI, nsPrefix=nsPrefix)
# NOTE(review): 'Mistery' looks like a misspelling of 'Mystery', but the
# literal name is part of the published metamodel, so renaming it would
# change model compatibility -- left as is.
BookCategory = EEnum('BookCategory', literals=['ScienceFiction',
                                               'Biography',
                                               'Mistery'])
class Book(EObject, metaclass=MetaEClass):
    # MetaEClass turns these EAttribute descriptors into structural
    # features of the generated EClass.
    title = EAttribute(eType=EString)
    pages = EAttribute(eType=EInteger)
    category = EAttribute(eType=BookCategory,
                          default_value=BookCategory.ScienceFiction)
    def __init__(self):
        super().__init__()
class Writer(EObject, metaclass=MetaEClass):
    name = EAttribute(eType=EString)
    # 1..* reference to Book; its opposite end (Book.authors) is wired up
    # below, once both classes exist.
    books = EReference(eType=Book, lower=1, upper=-1)
# The Book<->Writer association is bidirectional: create the 'authors' end
# after both classes are defined, then register it on Book's EClass.
Book.authors = EReference('authors', Writer, lower=1, upper=-1,
                          eOpposite=Writer.books)
Book.eClass.eStructuralFeatures.append(Book.authors)
class Employee(EObject, metaclass=MetaEClass):
    # Simple record-like EClass with two attributes.
    name = EAttribute(eType=EString)
    age = EAttribute(eType=EInteger)
class Library(EObject, metaclass=MetaEClass):
    name = EAttribute(eType=EString)
    address = EAttribute(eType=EString)
    # Containment references: a Library owns its employees, writers and
    # books (deleting the library deletes the contained objects).
    employees = EReference(eType=Employee, upper=-1, containment=True)
    writers = EReference(eType=Writer, upper=-1, containment=True)
    books = EReference(eType=Book, upper=-1, containment=True)
# ==
# Warning, do not remove
# ==
# Module-level plumbing expected by pyecore's static-metamodel machinery.
eURIFragment = Ecore.default_eURIFragment
eModule = sys.modules[__name__]
otherClassifiers = [BookCategory]
# NOTE(review): 'eClassifiers' is not defined anywhere in this module;
# pyecore-generated modules normally declare `eClassifiers = {}` before
# this loop.  As written this raises NameError at import time -- confirm
# whether the dict is expected to be injected from elsewhere.
for classif in otherClassifiers:
    eClassifiers[classif.name] = classif
    classif._container = Book.eClass.ePackage
for classif in eClassifiers.values():
    eClass.eClassifiers.append(classif.eClass)
| pyecore/pyecore | tests/wikilibrary/wikilib.py | Python | bsd-3-clause | 2,126 |
from __future__ import division
def solve(n):
print ' ' * 2 + "/\\"
for i in range(n):
print ' ' * 2 + '||'
print ' /||\\'
print '/:||:\\'
for i in range(n - 1):
print '|:||:|'
print '|/||\\|'
print ' **\n **'
if __name__ == '__main__':
    # Read the single integer from stdin and draw the figure (Python 2).
    n = int(raw_input())
    solve(n)
| m00nlight/hackerrank | algorithm/contests/Indeed-Prime-Challenges/A.py | Python | gpl-2.0 | 287 |
import sys
from newrelic.agent import WSGIApplicationWrapper, wrap_out_function
def _nr_wrapper_Application_wsgi_(application):
    """Wrap the result of gunicorn's Application.wsgi() when appropriate.

    Normally Application.wsgi() returns a WSGI application, but in the
    case of the Tornado worker it can return a Tornado ASYNC application
    object.  Not being a WSGI application object, it cannot be wrapped
    with a WSGI application wrapper: the prototype mismatch would make it
    fail when called.

    Checking here is a bit annoying, but the alternative -- instrumenting
    every worker type separately -- would be more work.
    """
    if 'tornado.web' not in sys.modules:
        return WSGIApplicationWrapper(application)
    try:
        import tornado.web
    except ImportError:
        return WSGIApplicationWrapper(application)
    if isinstance(application, tornado.web.Application):
        # Leave Tornado ASYNC applications untouched.
        return application
    return WSGIApplicationWrapper(application)
def instrument_gunicorn_app_base(module):
    """Instrument gunicorn.app.base by wrapping Application.wsgi()'s result."""
    wrap_out_function(
        module, 'Application.wsgi', _nr_wrapper_Application_wsgi_)
| GbalsaC/bitnamiP | venv/lib/python2.7/site-packages/newrelic-2.46.0.37/newrelic/hooks/adapter_gunicorn.py | Python | agpl-3.0 | 1,184 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# AMJPureBLEKbd documentation build configuration file, created by
# sphinx-quickstart on Tue Nov 22 21:36:49 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
# Sphinx extensions enabled for this project: autodoc (API extraction),
# todo directives, and LaTeX-rendered math images.
extensions = [
    'sphinx.ext.autodoc',
    'sphinx.ext.todo',
    'sphinx.ext.imgmath',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'AMJPureBLEKbd'
copyright = '2016, HanChen'
author = 'HanChen'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = '0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = 'cn'
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#
# today = ''
#
# Else, today_fmt is used as the format for a strftime call.
#
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# These patterns also affect html_static_path and html_extra_path.
exclude_patterns = []

# The reST default role (used for this markup: `text`) to use for all
# documents.
#
# default_role = None

# If true, '()' will be appended to :func: etc. cross-reference text.
#
# add_function_parentheses = True

# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#
# add_module_names = True

# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#
# show_authors = False

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'

# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []

# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False

# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True

# -- Options for HTML output ----------------------------------------------

# The theme to use for HTML and HTML Help pages.  See the documentation for
# a list of builtin themes.
#
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents.
# "<project> v<release> documentation" by default.
#
# html_title = 'AMJPureBLEKbd v0.1'
# A shorter title for the navigation bar. Default is the same as html_title.
#
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#
# html_logo = None
# The name of an image file (relative to this directory) to use as a favicon of
# the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
# (The directory may be empty; Sphinx only warns if it is missing entirely.)
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#
# html_extra_path = []
# If not None, a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
# The empty string is equivalent to '%b %d, %Y'.
#
# html_last_updated_fmt = None
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#
# html_additional_pages = {}
# If false, no module index is generated.
#
# html_domain_indices = True
# If false, no index is generated.
#
# html_use_index = True
# If true, the index is split into individual pages for each letter.
#
# html_split_index = False
# If true, links to the reST sources are added to the pages.
#
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'h', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'r', 'sv', 'tr', 'zh'
#
# html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# 'ja' uses this config value.
# 'zh' user can custom change `jieba` dictionary path.
#
# html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#
# html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder (used as <basename>.hhp etc.).
htmlhelp_basename = 'AMJPureBLEKbddoc'

# -- Options for LaTeX output ---------------------------------------------

# All LaTeX defaults are kept; uncomment a key below to override it.
latex_elements = {
     # The paper size ('letterpaper' or 'a4paper').
     #
     # 'papersize': 'letterpaper',

     # The font size ('10pt', '11pt' or '12pt').
     #
     # 'pointsize': '10pt',

     # Additional stuff for the LaTeX preamble.
     #
     # 'preamble': '',

     # Latex figure (float) alignment
     #
     # 'figure_align': 'htbp',
}

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
latex_documents = [
    (master_doc, 'AMJPureBLEKbd.tex', 'AMJPureBLEKbd Documentation',
     'HanChen', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#
# latex_use_parts = False
# If true, show page references after internal links.
#
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
#
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
#
# latex_appendices = []
# It false, will not define \strong, \code, itleref, \crossref ... but only
# \sphinxstrong, ..., \sphinxtitleref, ... To help avoid clash with user added
# packages.
#
# latex_keep_old_macro_names = True
# If false, no module index is generated.
#
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------

# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
# Section 1 is the conventional section for user commands.
man_pages = [
    (master_doc, 'amjpureblekbd', 'AMJPureBLEKbd Documentation',
     [author], 1)
]
# If true, show URL addresses after external links.
#
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------

# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
# FIX: replace the sphinx-quickstart placeholder description ("One line
# description of project.") which would otherwise ship verbatim in the
# generated Texinfo dir entry.
texinfo_documents = [
    (master_doc, 'AMJPureBLEKbd', 'AMJPureBLEKbd Documentation',
     author, 'AMJPureBLEKbd', 'Documentation for the AMJPureBLEKbd keyboard.',
     'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#
# texinfo_appendices = []
# If false, no module index is generated.
#
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#
# texinfo_no_detailmenu = False
| AMJKeyboard/AMJPureBLEKbd | docs/source/conf.py | Python | bsd-2-clause | 9,869 |
from jirafs import utils
from jirafs.plugin import CommandPlugin
class Command(CommandPlugin):
    """Create a subtask of a given issue."""

    MIN_VERSION = "2.0.0"
    MAX_VERSION = "3.0.0"

    def main(self, folder, args, **kwargs):
        # Default the issue type to "Sub-task" unless overridden on the CLI.
        subtask_type = "Sub-task" if args.issuetype is None else args.issuetype
        folder.jira.create_issue(
            fields={
                "project": {"key": folder.issue.fields.project.key},
                "summary": " ".join(args.summary),
                "issuetype": {"name": subtask_type},
                "parent": {"id": folder.issue.key},
            }
        )

        # Re-fetch the parent issue so the freshly created subtask is
        # reflected in the local folder.
        installed = utils.get_installed_commands()
        installed["fetch"].execute_command(
            [],
            jira=utils.lazy_get_jira(),
            path=folder.path,
            command_name="fetch",
        )

    def add_arguments(self, parser):
        parser.add_argument("summary", nargs="+")
        parser.add_argument("--issuetype", default=None, type=str)
| coddingtonbear/jirafs | jirafs/commands/subtask.py | Python | mit | 1,060 |
#!/bin/python3

import argparse
import sys, os
import subprocess
import shutil
import urllib.request
from zipfile import ZipFile, is_zipfile
import re

parser = argparse.ArgumentParser(description='Set up a Kattis skeleton')
parser.add_argument('name', help='the name of the problem')
args = parser.parse_args()

# Download the sample archive for the problem. For problems without samples
# Kattis answers with an HTTP error page, which curl still saves to
# samples.zip; the is_zipfile() guard below keeps that from crashing ZipFile.
subprocess.check_call([shutil.which("curl"), "-o", "samples.zip", "https://open.kattis.com/problems/" + args.name + "/file/statement/samples.zip"])

# Create the problem layout and copy in the C++ template.
os.mkdir(args.name)
os.mkdir(args.name + "/src")
os.mkdir(args.name + "/samples")
shutil.copy("_skeleton/src/_skeleton.cpp", args.name + "/src/" + args.name + ".cpp")

# FIX: only try to extract samples.zip when it really is a zip archive; a
# saved error page previously raised zipfile.BadZipFile here.
if os.path.isfile("samples.zip") and is_zipfile("samples.zip"):
    with ZipFile("samples.zip") as f:
        # A sample archive contains matching pairs of *.in / *.ans files.
        inre = re.compile(r'[a-zA-Z0-9_\.\-]*\.in', re.IGNORECASE)
        ansre = re.compile(r'[a-zA-Z0-9_\.\-]*\.ans', re.IGNORECASE)
        infiles = []
        ansfiles = []
        for name in f.namelist():
            match = inre.fullmatch(name)
            if match:
                infiles.append(name)
                continue
            match = ansre.fullmatch(name)
            if match:
                ansfiles.append(name)
                continue
            print("ERROR: samples.zip contains the weird file \"", name, "\"", sep='')
            sys.exit(1)
        if len(infiles) != len(ansfiles):
            print("ERROR: Number of input files (", len(infiles), ") is not equal to the number of answer files (", len(ansfiles), ")", sep='')
            sys.exit(1)
        infiles.sort()
        ansfiles.sort()
        sampleid = 0
        for t in zip(infiles, ansfiles):
            sampleid += 1
            # "foo.in" and "foo.ans" must share the stem "foo".
            if t[0][:-3] != t[1][:-4]:
                print("ERROR: Name of input file (", t[0], ") does not match name of answer file (", t[1], ")", sep='')
                sys.exit(1)
            # Extract as <name>/samples/<name>-<k>.{in,ans}; mode 'bx' refuses
            # to overwrite existing files.
            with f.open(t[0]) as infile, open(args.name + "/samples/" + args.name + "-" + str(sampleid) + ".in", 'bx') as outfile:
                shutil.copyfileobj(infile, outfile)
            with f.open(t[1]) as infile, open(args.name + "/samples/" + args.name + "-" + str(sampleid) + ".ans", 'bx') as outfile:
                shutil.copyfileobj(infile, outfile)
os.remove("samples.zip") | danielschemmel/kattis_skeleton | get.py | Python | mit | 1,924 |
#!/usr/bin/env python
# ROS node: watches the Denso teach-pendant status and cancels the running
# arm trajectory whenever the CANCEL or STOP button is newly pressed.

import rospy
import os
import roslib
roslib.load_manifest("denso_pendant_publisher")
roslib.load_manifest("actionlib_msgs")
import denso_pendant_publisher.msg
import std_msgs.msg
import actionlib_msgs.msg

rospy.init_node("moveit_canceler")

g_runnable = True
g_prev_status = None
def pendantCB(msg):
    # Edge-triggered: only react when a button goes from released to pressed
    # between the previous status message and this one.
    global g_runnable, g_prev_status
    if g_prev_status:
        if (not g_prev_status.button_cancel and msg.button_cancel) or (not g_prev_status.button_stop and msg.button_stop): # canceled or stopped
            g_runnable = False
            # here we should send cancel
            # NOTE(review): an empty GoalID cancels all active goals by
            # actionlib convention -- confirm that is the intent here.
            cancel = actionlib_msgs.msg.GoalID()
            cancel.id = ""
            cancel_pub.publish(cancel)
            rospy.loginfo("cancel")
    g_prev_status = msg
sub = rospy.Subscriber("/denso_pendant_publisher/status", denso_pendant_publisher.msg.PendantStatus, pendantCB)
cancel_pub = rospy.Publisher("/arm_controller/follow_joint_trajectory/cancel", actionlib_msgs.msg.GoalID);
# cancel_pub = rospy.Publisher("/move_group/cancel", actionlib_msgs.msg.GoalID);
rospy.spin()
| mikewrock/phd_backup_full | src/wrock/vs060/scripts/moveit_canceler.py | Python | apache-2.0 | 1,094 |
"""Test the Tradfri config flow."""
import pytest
from homeassistant import data_entry_flow
from homeassistant.components.tradfri import config_flow
from tests.async_mock import patch
from tests.common import MockConfigEntry
@pytest.fixture
def mock_auth():
    """Mock authenticate."""
    # Patch the config flow's authenticate helper so each test controls what
    # the "gateway" returns, without any network traffic.
    with patch(
        "homeassistant.components.tradfri.config_flow.authenticate"
    ) as mock_auth:
        yield mock_auth
async def test_user_connection_successful(hass, mock_auth, mock_entry_setup):
    """Test a successful connection."""
    # authenticate() succeeds and returns the data the config entry stores.
    mock_auth.side_effect = lambda hass, host, code: {"host": host, "gateway_id": "bla"}

    flow = await hass.config_entries.flow.async_init(
        "tradfri", context={"source": "user"}
    )

    result = await hass.config_entries.flow.async_configure(
        flow["flow_id"], {"host": "123.123.123.123", "security_code": "abcd"}
    )

    assert len(mock_entry_setup.mock_calls) == 1

    assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
    assert result["result"].data == {
        "host": "123.123.123.123",
        "gateway_id": "bla",
        "import_groups": False,
    }


async def test_user_connection_timeout(hass, mock_auth, mock_entry_setup):
    """Test a connection timeout."""
    # A timeout during authentication re-shows the form with a base error.
    mock_auth.side_effect = config_flow.AuthError("timeout")

    flow = await hass.config_entries.flow.async_init(
        "tradfri", context={"source": "user"}
    )

    result = await hass.config_entries.flow.async_configure(
        flow["flow_id"], {"host": "127.0.0.1", "security_code": "abcd"}
    )

    assert len(mock_entry_setup.mock_calls) == 0

    assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
    assert result["errors"] == {"base": "timeout"}


async def test_user_connection_bad_key(hass, mock_auth, mock_entry_setup):
    """Test a connection with bad key."""
    # A wrong code re-shows the form with the error on the security_code field.
    mock_auth.side_effect = config_flow.AuthError("invalid_security_code")

    flow = await hass.config_entries.flow.async_init(
        "tradfri", context={"source": "user"}
    )

    result = await hass.config_entries.flow.async_configure(
        flow["flow_id"], {"host": "127.0.0.1", "security_code": "abcd"}
    )

    assert len(mock_entry_setup.mock_calls) == 0

    assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
    assert result["errors"] == {"security_code": "invalid_security_code"}
async def test_discovery_connection(hass, mock_auth, mock_entry_setup):
    """Test a connection via discovery."""
    # HomeKit discovery supplies host and unique id; the user only enters the
    # security code.
    mock_auth.side_effect = lambda hass, host, code: {"host": host, "gateway_id": "bla"}

    flow = await hass.config_entries.flow.async_init(
        "tradfri",
        context={"source": "homekit"},
        data={"host": "123.123.123.123", "properties": {"id": "homekit-id"}},
    )

    result = await hass.config_entries.flow.async_configure(
        flow["flow_id"], {"security_code": "abcd"}
    )

    assert len(mock_entry_setup.mock_calls) == 1

    assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
    assert result["result"].unique_id == "homekit-id"
    assert result["result"].data == {
        "host": "123.123.123.123",
        "gateway_id": "bla",
        "import_groups": False,
    }
async def test_import_connection(hass, mock_auth, mock_entry_setup):
    """Test a connection via import."""
    # Import (configuration.yaml) flows also persist the identity/key pair
    # returned by authenticate().
    mock_auth.side_effect = lambda hass, host, code: {
        "host": host,
        "gateway_id": "bla",
        "identity": "mock-iden",
        "key": "mock-key",
    }

    flow = await hass.config_entries.flow.async_init(
        "tradfri",
        context={"source": "import"},
        data={"host": "123.123.123.123", "import_groups": True},
    )

    result = await hass.config_entries.flow.async_configure(
        flow["flow_id"], {"security_code": "abcd"}
    )

    assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
    assert result["result"].data == {
        "host": "123.123.123.123",
        "gateway_id": "bla",
        "identity": "mock-iden",
        "key": "mock-key",
        "import_groups": True,
    }

    assert len(mock_entry_setup.mock_calls) == 1


async def test_import_connection_no_groups(hass, mock_auth, mock_entry_setup):
    """Test a connection via import and no groups allowed."""
    # Same as test_import_connection, but with import_groups disabled in the
    # imported YAML data; the flag must be carried into the entry unchanged.
    mock_auth.side_effect = lambda hass, host, code: {
        "host": host,
        "gateway_id": "bla",
        "identity": "mock-iden",
        "key": "mock-key",
    }

    flow = await hass.config_entries.flow.async_init(
        "tradfri",
        context={"source": "import"},
        data={"host": "123.123.123.123", "import_groups": False},
    )

    result = await hass.config_entries.flow.async_configure(
        flow["flow_id"], {"security_code": "abcd"}
    )

    assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
    assert result["result"].data == {
        "host": "123.123.123.123",
        "gateway_id": "bla",
        "identity": "mock-iden",
        "key": "mock-key",
        "import_groups": False,
    }

    assert len(mock_entry_setup.mock_calls) == 1
async def test_import_connection_legacy(hass, mock_gateway_info, mock_entry_setup):
    """Test a connection via import."""
    # Legacy imports provide a pre-shared key directly, so the flow completes
    # from async_init without the security-code step (note: no async_configure
    # call below); the identity defaults to "homeassistant".
    mock_gateway_info.side_effect = lambda hass, host, identity, key: {
        "host": host,
        "identity": identity,
        "key": key,
        "gateway_id": "mock-gateway",
    }

    result = await hass.config_entries.flow.async_init(
        "tradfri",
        context={"source": "import"},
        data={"host": "123.123.123.123", "key": "mock-key", "import_groups": True},
    )

    assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
    assert result["result"].data == {
        "host": "123.123.123.123",
        "gateway_id": "mock-gateway",
        "identity": "homeassistant",
        "key": "mock-key",
        "import_groups": True,
    }

    assert len(mock_gateway_info.mock_calls) == 1
    assert len(mock_entry_setup.mock_calls) == 1


async def test_import_connection_legacy_no_groups(
    hass, mock_gateway_info, mock_entry_setup
):
    """Test a connection via legacy import and no groups allowed."""
    # Same legacy path with import_groups disabled.
    mock_gateway_info.side_effect = lambda hass, host, identity, key: {
        "host": host,
        "identity": identity,
        "key": key,
        "gateway_id": "mock-gateway",
    }

    result = await hass.config_entries.flow.async_init(
        "tradfri",
        context={"source": "import"},
        data={"host": "123.123.123.123", "key": "mock-key", "import_groups": False},
    )

    assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
    assert result["result"].data == {
        "host": "123.123.123.123",
        "gateway_id": "mock-gateway",
        "identity": "homeassistant",
        "key": "mock-key",
        "import_groups": False,
    }

    assert len(mock_gateway_info.mock_calls) == 1
    assert len(mock_entry_setup.mock_calls) == 1
async def test_discovery_duplicate_aborted(hass):
    """Test a duplicate discovery host aborts and updates existing entry."""
    entry = MockConfigEntry(
        domain="tradfri", data={"host": "some-host"}, unique_id="homekit-id"
    )
    entry.add_to_hass(hass)

    flow = await hass.config_entries.flow.async_init(
        "tradfri",
        context={"source": "homekit"},
        data={"host": "new-host", "properties": {"id": "homekit-id"}},
    )

    assert flow["type"] == data_entry_flow.RESULT_TYPE_ABORT
    assert flow["reason"] == "already_configured"

    # The abort also refreshes the stored host to the newly discovered one.
    assert entry.data["host"] == "new-host"


async def test_import_duplicate_aborted(hass):
    """Test a duplicate import host is ignored."""
    MockConfigEntry(domain="tradfri", data={"host": "some-host"}).add_to_hass(hass)

    flow = await hass.config_entries.flow.async_init(
        "tradfri", context={"source": "import"}, data={"host": "some-host"}
    )

    assert flow["type"] == data_entry_flow.RESULT_TYPE_ABORT
    assert flow["reason"] == "already_configured"


async def test_duplicate_discovery(hass, mock_auth, mock_entry_setup):
    """Test a duplicate discovery in progress is ignored."""
    # A second discovery for the same gateway while the first flow is still
    # open must abort instead of showing a second form.
    result = await hass.config_entries.flow.async_init(
        "tradfri",
        context={"source": "homekit"},
        data={"host": "123.123.123.123", "properties": {"id": "homekit-id"}},
    )
    assert result["type"] == data_entry_flow.RESULT_TYPE_FORM

    result2 = await hass.config_entries.flow.async_init(
        "tradfri",
        context={"source": "homekit"},
        data={"host": "123.123.123.123", "properties": {"id": "homekit-id"}},
    )
    assert result2["type"] == data_entry_flow.RESULT_TYPE_ABORT


async def test_discovery_updates_unique_id(hass):
    """Test a duplicate discovery host aborts and updates existing entry."""
    # An entry created without a unique id gets one backfilled on discovery.
    entry = MockConfigEntry(domain="tradfri", data={"host": "some-host"},)
    entry.add_to_hass(hass)

    flow = await hass.config_entries.flow.async_init(
        "tradfri",
        context={"source": "homekit"},
        data={"host": "some-host", "properties": {"id": "homekit-id"}},
    )

    assert flow["type"] == data_entry_flow.RESULT_TYPE_ABORT
    assert flow["reason"] == "already_configured"

    assert entry.unique_id == "homekit-id"
| nkgilley/home-assistant | tests/components/tradfri/test_config_flow.py | Python | apache-2.0 | 9,174 |
""" Base Storage Class provides the base interface for all storage plug-ins
exists()
These are the methods for manipulating files:
isFile()
getFile()
putFile()
removeFile()
getFileMetadata()
getFileSize()
prestageFile()
getTransportURL()
These are the methods for manipulating directories:
isDirectory()
getDirectory()
putDirectory()
createDirectory()
removeDirectory()
listDirectory()
getDirectoryMetadata()
getDirectorySize()
These are the methods for manipulating the client:
changeDirectory()
getCurrentDirectory()
getName()
getParameters()
getCurrentURL()
These are the methods for getting information about the Storage:
getOccupancy()
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
__RCSID__ = "$Id$"
import errno
import json
import os
import shutil
import tempfile
from DIRAC import S_OK, S_ERROR
from DIRAC.Core.Utilities.Pfn import pfnparse, pfnunparse
from DIRAC.Core.Utilities.ReturnValues import returnSingleResult
from DIRAC.Resources.Storage.Utilities import checkArgumentFormat
class StorageBase(object):
    """
    .. class:: StorageBase

    Base interface for storage plug-ins: concrete plugins override the
    ``implement me!`` methods below, while this class provides parameter
    bookkeeping and URL construction helpers.
    """

    # CS parameters copied into ``protocolParameters`` (missing ones default
    # to "" -- see __updateParameters).
    PROTOCOL_PARAMETERS = ["Protocol", "Host", "Path", "Port", "SpaceToken", "WSUrl"]

    # Options to be prepended in the URL
    # keys are the name of the parameters in the CS
    # values are the name of the options as they appear in the URL
    DYNAMIC_OPTIONS = {}
    def __init__(self, name, parameterDict):
        """Initialize the storage plugin.

        :param str name: name of the storage element
        :param dict parameterDict: protocol parameters (taken from the CS)
        """
        self.name = name
        self.pluginName = ""
        self.protocolParameters = {}

        self.__updateParameters(parameterDict)

        # Keep the list of all parameters passed for constructions
        # Taken from the CS
        # In a further major release, this could be merged together
        # with protocolParameters. There is no reason for it to
        # be so strict about the possible content.
        self._allProtocolParameters = parameterDict

        # Input/Output protocol lists: the CS value wins, then a plugin-level
        # _INPUT_PROTOCOLS/_OUTPUT_PROTOCOLS class attribute, then a default
        # derived from the native protocol.
        if "InputProtocols" in parameterDict:
            self.protocolParameters["InputProtocols"] = parameterDict["InputProtocols"].replace(" ", "").split(",")
        elif hasattr(self, "_INPUT_PROTOCOLS"):
            self.protocolParameters["InputProtocols"] = getattr(self, "_INPUT_PROTOCOLS")
        else:
            self.protocolParameters["InputProtocols"] = [self.protocolParameters["Protocol"], "file"]

        if "OutputProtocols" in parameterDict:
            self.protocolParameters["OutputProtocols"] = parameterDict["OutputProtocols"].replace(" ", "").split(",")
        elif hasattr(self, "_OUTPUT_PROTOCOLS"):
            self.protocolParameters["OutputProtocols"] = getattr(self, "_OUTPUT_PROTOCOLS")
        else:
            self.protocolParameters["OutputProtocols"] = [self.protocolParameters["Protocol"]]

        self.basePath = parameterDict["Path"]
        self.cwd = self.basePath
        self.se = None
        self.isok = True

        # use True for backward compatibility
        self.srmSpecificParse = True
    def setStorageElement(self, se):
        # Attach the owning StorageElement (used e.g. for the VO check in
        # constructURLFromLFN).
        self.se = se

    def setParameters(self, parameterDict):
        """Set standard parameters, method can be overridden in subclasses
        to process specific parameters
        """
        self.__updateParameters(parameterDict)

    def __updateParameters(self, parameterDict):
        """setParameters implementation method: refresh every entry listed in
        PROTOCOL_PARAMETERS, defaulting missing ones to ""."""
        for item in self.PROTOCOL_PARAMETERS:
            self.protocolParameters[item] = parameterDict.get(item, "")

    def getParameters(self):
        """Get the parameters with which the storage was instantiated.

        Returns a *copy* of protocolParameters enriched with the storage and
        plugin names plus the derived URLBase/Endpoint strings.
        """
        parameterDict = dict(self.protocolParameters)
        parameterDict["StorageName"] = self.name
        parameterDict["PluginName"] = self.pluginName
        parameterDict["URLBase"] = self.getURLBase().get("Value", "")
        parameterDict["Endpoint"] = self.getEndpoint().get("Value", "")

        return parameterDict
    # The methods below are abstract stubs: they always return S_ERROR and
    # must be overridden by concrete storage plugins.
    def exists(self, *parms, **kws):
        """Check if the given path exists"""
        return S_ERROR("Storage.exists: implement me!")

    #############################################################
    #
    # These are the methods for file manipulation
    #

    def isFile(self, *parms, **kws):
        """Check if the given path exists and it is a file"""
        return S_ERROR("Storage.isFile: implement me!")

    def getFile(self, *parms, **kws):
        """Get a local copy of the file specified by its path"""
        return S_ERROR("Storage.getFile: implement me!")

    def putFile(self, *parms, **kws):
        """Put a copy of the local file to the current directory on the
        physical storage
        """
        return S_ERROR("Storage.putFile: implement me!")

    def removeFile(self, *parms, **kws):
        """Remove physically the file specified by its path"""
        return S_ERROR("Storage.removeFile: implement me!")

    def getFileMetadata(self, *parms, **kws):
        """Get metadata associated to the file"""
        return S_ERROR("Storage.getFileMetadata: implement me!")

    def getFileSize(self, *parms, **kws):
        """Get the physical size of the given file"""
        return S_ERROR("Storage.getFileSize: implement me!")

    def prestageFile(self, *parms, **kws):
        """Issue prestage request for file"""
        return S_ERROR("Storage.prestageFile: implement me!")

    def prestageFileStatus(self, *parms, **kws):
        """Obtain the status of the prestage request"""
        return S_ERROR("Storage.prestageFileStatus: implement me!")

    def pinFile(self, *parms, **kws):
        """Pin the file on the destination storage element"""
        return S_ERROR("Storage.pinFile: implement me!")

    def releaseFile(self, *parms, **kws):
        """Release the file on the destination storage element"""
        return S_ERROR("Storage.releaseFile: implement me!")
    #############################################################
    #
    # These are the methods for directory manipulation
    # (abstract stubs -- concrete plugins must override them)
    #

    def isDirectory(self, *parms, **kws):
        """Check if the given path exists and it is a directory"""
        return S_ERROR("Storage.isDirectory: implement me!")

    def getDirectory(self, *parms, **kws):
        """Get locally a directory from the physical storage together with all its
        files and subdirectories.
        """
        return S_ERROR("Storage.getDirectory: implement me!")

    def putDirectory(self, *parms, **kws):
        """Put a local directory to the physical storage together with all its
        files and subdirectories.
        """
        return S_ERROR("Storage.putDirectory: implement me!")

    def createDirectory(self, *parms, **kws):
        """Make a new directory on the physical storage"""
        return S_ERROR("Storage.createDirectory: implement me!")

    def removeDirectory(self, *parms, **kws):
        """Remove a directory on the physical storage together with all its files and
        subdirectories.
        """
        return S_ERROR("Storage.removeDirectory: implement me!")

    def listDirectory(self, *parms, **kws):
        """List the supplied path"""
        return S_ERROR("Storage.listDirectory: implement me!")

    def getDirectoryMetadata(self, *parms, **kws):
        """Get the metadata for the directory"""
        return S_ERROR("Storage.getDirectoryMetadata: implement me!")

    def getDirectorySize(self, *parms, **kws):
        """Get the size of the directory on the storage"""
        return S_ERROR("Storage.getDirectorySize: implement me!")
    #############################################################
    #
    # These are the methods for manipulating the client
    #

    def isOK(self):
        # Health flag set at construction time (always True in this base class).
        return self.isok

    def resetCurrentDirectory(self):
        """Reset the working directory to the base dir"""
        self.cwd = self.basePath

    def changeDirectory(self, directory):
        """Change the directory to the supplied directory.

        NOTE(review): a leading "/" does not make the path absolute -- it is
        still appended (producing a double slash), only the anchor differs:
        basePath for "/..." paths, the current cwd otherwise.
        """
        if directory.startswith("/"):
            self.cwd = "%s/%s" % (self.basePath, directory)
        else:
            self.cwd = "%s/%s" % (self.cwd, directory)

    def getCurrentDirectory(self):
        """Get the current directory"""
        return self.cwd
    def getCurrentURL(self, fileName):
        """Obtain the current file URL from the current working directory and the filename

        :param self: self reference
        :param str fileName: path on storage
        """
        urlDict = dict(self.protocolParameters)
        if not fileName.startswith("/"):
            # Relative path is given: anchor it at the current working
            # directory instead of the CS base path.
            urlDict["Path"] = self.cwd
        result = pfnunparse(urlDict, srmSpecific=self.srmSpecificParse)
        if not result["OK"]:
            return result
        cwdUrl = result["Value"]
        fullUrl = "%s%s" % (cwdUrl, fileName)
        return S_OK(fullUrl)

    def getName(self):
        """The name with which the storage was instantiated"""
        return self.name

    def getURLBase(self, withWSUrl=False):
        """This will get the URL base. This is then appended with the LFN in DIRAC convention.

        :param self: self reference
        :param bool withWSUrl: flag to include Web Service part of the url
        :returns: URL
        """
        urlDict = dict(self.protocolParameters)
        if not withWSUrl:
            # Strip the web-service part unless explicitly requested.
            urlDict["WSUrl"] = ""
        return pfnunparse(urlDict, srmSpecific=self.srmSpecificParse)
    def getEndpoint(self):
        """This will get endpoint of the storage. It basically is the same as :py:meth:`getURLBase`
        but without the basePath

        :returns: 'proto://hostname<:port>'
        """
        urlDict = dict(self.protocolParameters)
        # We remove the basePath
        urlDict["Path"] = ""
        return pfnunparse(urlDict, srmSpecific=self.srmSpecificParse)

    def isURL(self, path):
        """Guess if the path looks like a URL.

        A path is taken as a URL when it starts with the SE base path, parses
        with a non-empty protocol, or its parsed path starts with basePath.

        :param self: self reference
        :param string path: input file LFN or URL
        :returns boolean: True if URL, False otherwise
        """
        if self.basePath and path.startswith(self.basePath):
            return S_OK(True)

        result = pfnparse(path, srmSpecific=self.srmSpecificParse)
        if not result["OK"]:
            return result

        if len(result["Value"]["Protocol"]) != 0:
            return S_OK(True)

        if result["Value"]["Path"].startswith(self.basePath):
            return S_OK(True)

        return S_OK(False)
def getTransportURL(self, pathDict, protocols):
"""Get a transport URL for a given URL. For a simple storage plugin
it is just returning input URL if the plugin protocol is one of the
requested protocols
:param dict pathDict: URL obtained from File Catalog or constructed according
to convention
:param protocols: a list of acceptable transport protocols in priority order
:type protocols: `python:list`
"""
res = checkArgumentFormat(pathDict)
if not res["OK"]:
return res
urls = res["Value"]
successful = {}
failed = {}
if protocols and not self.protocolParameters["Protocol"] in protocols:
return S_ERROR(errno.EPROTONOSUPPORT, "No native protocol requested")
for url in urls:
successful[url] = url
resDict = {"Failed": failed, "Successful": successful}
return S_OK(resDict)
    def constructURLFromLFN(self, lfn, withWSUrl=False):
        """Construct URL from the given LFN according to the VO convention for the
        primary protocol of the storage plugin

        :param str lfn: file LFN
        :param boolean withWSUrl: flag to include the web service part into the resulting URL
        :return result: result['Value'] - resulting URL
        """

        # Check the LFN convention:
        # 1. LFN must start with the VO name as the top level directory
        # 2. VO name must not appear as any subdirectory or file name
        lfnSplitList = lfn.split("/")
        voLFN = lfnSplitList[1]
        # TODO comparison to Sandbox below is for backward compatibility, should
        # be removed in the next release
        if voLFN != self.se.vo and voLFN != "SandBox" and voLFN != "Sandbox":
            return S_ERROR("LFN (%s) path must start with VO name (%s)" % (lfn, self.se.vo))

        urlDict = dict(self.protocolParameters)
        # Serialize DYNAMIC_OPTIONS (CS parameter name -> URL option name) into
        # the query-string part of the URL, skipping unset parameters.
        urlDict["Options"] = "&".join(
            "%s=%s" % (optionName, urlDict[paramName])
            for paramName, optionName in self.DYNAMIC_OPTIONS.items()
            if urlDict.get(paramName)
        )

        if not withWSUrl:
            urlDict["WSUrl"] = ""

        urlDict["FileName"] = lfn.lstrip("/")

        return pfnunparse(urlDict, srmSpecific=self.srmSpecificParse)
def updateURL(self, url, withWSUrl=False):
"""Update the URL according to the current SE parameters"""
result = pfnparse(url, srmSpecific=self.srmSpecificParse)
if not result["OK"]:
return result
urlDict = result["Value"]
urlDict["Protocol"] = self.protocolParameters["Protocol"]
urlDict["Host"] = self.protocolParameters["Host"]
urlDict["Port"] = self.protocolParameters["Port"]
urlDict["WSUrl"] = ""
if withWSUrl:
urlDict["WSUrl"] = self.protocolParameters["WSUrl"]
return pfnunparse(urlDict, srmSpecific=self.srmSpecificParse)
def isNativeURL(self, url):
    """Check whether `url` uses this plugin's native protocol.

    :param self: self reference
    :param str url: URL to check
    :return: S_OK(bool) / S_ERROR when the URL cannot be parsed
    """
    parsed = pfnparse(url, srmSpecific=self.srmSpecificParse)
    if not parsed["OK"]:
        return parsed
    isNative = parsed["Value"]["Protocol"] == self.protocolParameters["Protocol"]
    return S_OK(isNative)
@staticmethod
def _addCommonMetadata(metadataDict):
"""To make the output of getFileMetadata uniform throughout the protocols
this returns a minimum set of metadata with default value,
that are then complemented with the protocol specific metadata
:param metadataDict: specific metadata of the protocol
:returns: dictionnary with all the metadata (specific and basic)
"""
commonMetadata = {
"Checksum": "",
"Directory": False,
"File": False,
"Mode": 0o000,
"Size": 0,
"Accessible": True,
}
commonMetadata.update(metadataDict)
return commonMetadata
def _isInputURL(self, url):
    """Check whether the given URL can be taken as input by this plugin.

    :param self: self reference
    :param str url: URL to check
    :return: S_OK(bool) / S_ERROR when the URL cannot be parsed
    """
    parsed = pfnparse(url)
    if not parsed["OK"]:
        return parsed
    protocol = parsed["Value"]["Protocol"]
    # Special case: a plain path (no protocol) is acceptable when 'file'
    # is among the accepted input protocols.
    if not protocol and "file" in self.protocolParameters["InputProtocols"]:
        return S_OK(True)
    return S_OK(protocol == self.protocolParameters["Protocol"])
#############################################################
#
# These are the methods for getting information about the Storage element:
#

def getOccupancy(self, **kwargs):
    """Get the StorageElement occupancy info in MB.

    This generic implementation downloads a json file supposed to contain
    the necessary info.

    :param occupancyLFN: (mandatory named argument) LFN of the json file.
    :returns: S_OK/S_ERROR dictionary. The S_OK value should contain a
        dictionary with Total and Free space in MB
    """
    # Build the URL for the occupancyLFN:
    occupancyLFN = kwargs["occupancyLFN"]
    res = self.constructURLFromLFN(occupancyLFN)
    if not res["OK"]:
        return res
    occupancyURL = res["Value"]
    # Bug fix: create the temporary directory *outside* the try block so
    # that the cleanup in `finally` can never hit a NameError when
    # mkdtemp() itself raises.
    tmpDirName = tempfile.mkdtemp()
    try:
        # download the file locally
        res = returnSingleResult(self.getFile(occupancyURL, localPath=tmpDirName))
        if not res["OK"]:
            return res
        filePath = os.path.join(tmpDirName, os.path.basename(occupancyLFN))
        # Read its json content
        with open(filePath, "r") as occupancyFile:
            return S_OK(json.load(occupancyFile))
    except Exception as e:
        return S_ERROR(repr(e))
    finally:
        # Clean the temporary dir
        shutil.rmtree(tmpDirName)
| ic-hep/DIRAC | src/DIRAC/Resources/Storage/StorageBase.py | Python | gpl-3.0 | 16,613 |
from functools import partial
from typing import Any, Dict, Optional
from django.http import HttpRequest, HttpResponse
from zerver.decorator import api_key_only_webhook_view
from zerver.lib.request import REQ, has_request_variables
from zerver.lib.response import json_success
from zerver.lib.webhooks.common import UnexpectedWebhookEventType, check_send_webhook_message
from zerver.models import UserProfile
# Markdown templates used to render Clubhouse webhook events as Zulip
# messages.  The {name_template} placeholders are filled with one of the
# two entity-name templates below (epics are rendered bold, stories as
# links to the Clubhouse app).
EPIC_NAME_TEMPLATE = "**{name}**"
STORY_NAME_TEMPLATE = "[{name}]({app_url})"
# Comment / description changes.
COMMENT_ADDED_TEMPLATE = "New comment added to the {entity} {name_template}:\n``` quote\n{text}\n```"
NEW_DESC_ADDED_TEMPLATE = "New description added to the {entity} {name_template}:\n``` quote\n{new}\n```"
DESC_CHANGED_TEMPLATE = ("Description for the {entity} {name_template} was changed from:\n"
                         "``` quote\n{old}\n```\nto\n``` quote\n{new}\n```")
DESC_REMOVED_TEMPLATE = "Description for the {entity} {name_template} was removed."
# State / name / archive transitions.
STATE_CHANGED_TEMPLATE = "State of the {entity} {name_template} was changed from **{old}** to **{new}**."
NAME_CHANGED_TEMPLATE = ("The name of the {entity} {name_template} was changed from:\n"
                         "``` quote\n{old}\n```\nto\n``` quote\n{new}\n```")
ARCHIVED_TEMPLATE = "The {entity} {name_template} was {action}."
# Story tasks, epic membership and estimates.
STORY_TASK_TEMPLATE = "Task **{task_description}** was {action} the story {name_template}."
STORY_TASK_COMPLETED_TEMPLATE = "Task **{task_description}** ({name_template}) was completed. :tada:"
STORY_ADDED_REMOVED_EPIC_TEMPLATE = ("The story {story_name_template} was {action} the"
                                     " epic {epic_name_template}.")
STORY_EPIC_CHANGED_TEMPLATE = ("The story {story_name_template} was moved from {old_epic_name_template}"
                               " to {new_epic_name_template}.")
STORY_ESTIMATE_TEMPLATE = "The estimate for the story {story_name_template} was set to {estimate}."
# Attachments, labels, project moves, type/owner changes, deletions.
FILE_ATTACHMENT_TEMPLATE = "A {type} attachment `{file_name}` was added to the story {name_template}."
STORY_LABEL_TEMPLATE = "The label **{label_name}** was added to the story {name_template}."
STORY_UPDATE_PROJECT_TEMPLATE = ("The story {name_template} was moved from"
                                 " the **{old}** project to **{new}**.")
STORY_UPDATE_TYPE_TEMPLATE = ("The type of the story {name_template} was changed"
                              " from **{old_type}** to **{new_type}**.")
DELETE_TEMPLATE = "The {entity_type} **{name}** was deleted."
STORY_UPDATE_OWNER_TEMPLATE = "New owner added to the story {name_template}."
# GitHub integration events.
STORY_GITHUB_PR_TEMPLATE = ("New GitHub PR [#{name}]({url}) opened for story"
                            " {name_template} ({old} -> {new}).")
STORY_GITHUB_BRANCH_TEMPLATE = ("New GitHub branch [{name}]({url})"
                                " associated with story {name_template} ({old} -> {new}).")
def get_action_with_primary_id(payload: Dict[str, Any]) -> Dict[str, Any]:
    """Return the action whose id equals the payload's primary_id.

    Action ids are assumed to be unique within a payload -- TODO confirm
    against the Clubhouse API.  Previously this function hit an accidental
    UnboundLocalError when no action matched; now it raises a clear
    ValueError instead.
    """
    for action in payload["actions"]:
        if payload["primary_id"] == action["id"]:
            return action
    raise ValueError("payload contains no action matching primary_id")
def get_event(payload: Dict[str, Any]) -> Optional[str]:
    """Derive the "<entity>_<action>[_<change>]" event name for a payload.

    Returns None for events that should be ignored outright.
    """
    action = get_action_with_primary_id(payload)
    event = "{}_{}".format(action["entity_type"], action["action"])
    if event in IGNORED_EVENTS:
        return None
    changes = action.get("changes")
    if changes is not None:
        # The first changed field found (checked in this fixed order)
        # refines the event name with a suffix.
        for field, suffix in [
            ("description", "description"),
            ("state", "state"),
            ("workflow_state_id", "state"),
            ("name", "name"),
            ("archived", "archived"),
            ("complete", "complete"),
            ("epic_id", "epic"),
            ("estimate", "estimate"),
            ("file_ids", "attachment"),
            ("label_ids", "label"),
            ("project_id", "project"),
            ("story_type", "type"),
            ("owner_ids", "owner"),
        ]:
            if changes.get(field) is not None:
                event = "{}_{}".format(event, suffix)
                break
    return event
def get_topic_function_based_on_type(payload: Dict[str, Any]) -> Any:
    """Look up the topic-building function for the payload's primary entity type."""
    primary_entity_type = get_action_with_primary_id(payload)["entity_type"]
    return EVENT_TOPIC_FUNCTION_MAPPER.get(primary_entity_type)
def get_delete_body(payload: Dict[str, Any]) -> str:
    """Build the message body announcing a deleted story or epic."""
    primary = get_action_with_primary_id(payload)
    return DELETE_TEMPLATE.format(
        entity_type=primary["entity_type"],
        name=primary["name"],
    )
def get_story_create_body(payload: Dict[str, Any]) -> str:
    """Build the message body announcing a newly created story."""
    action = get_action_with_primary_id(payload)
    epic_id = action.get("epic_id")
    if epic_id is None:
        # Simple case: no epic involved; the action itself carries all fields.
        message = "New story [{name}]({app_url}) of type **{story_type}** was created."
        return message.format(**action)
    # Resolve the epic's display name from the references list.
    format_args = {
        "name": action["name"],
        "app_url": action["app_url"],
    }
    for ref in payload["references"]:
        if ref["id"] == epic_id:
            format_args["epic_name"] = ref["name"]
    message = "New story [{name}]({app_url}) was created and added to the epic **{epic_name}**."
    return message.format(**format_args)
def get_epic_create_body(payload: Dict[str, Any]) -> str:
    """Build the message body announcing a newly created epic."""
    primary = get_action_with_primary_id(payload)
    return "New epic **{name}**({state}) was created.".format(
        name=primary["name"],
        state=primary["state"],
    )
def get_comment_added_body(payload: Dict[str, Any], entity: str) -> str:
    """Build the message body for a new comment on a story or epic."""
    format_args = {"entity": entity}
    for item in payload["actions"]:
        if item["id"] == payload["primary_id"]:
            # The primary action is the comment itself.
            format_args["text"] = item["text"]
        elif item["entity_type"] == entity:
            # A sibling action describes the commented-on story/epic.
            format_args["name_template"] = get_name_template(entity).format(
                name=item["name"],
                app_url=item.get("app_url"),
            )
    return COMMENT_ADDED_TEMPLATE.format(**format_args)
def get_update_description_body(payload: Dict[str, Any], entity: str) -> str:
    """Build the message body for an added/changed/removed description."""
    action = get_action_with_primary_id(payload)
    change = action["changes"]["description"]
    name_template = get_name_template(entity).format(
        name=action["name"],
        app_url=action.get("app_url"),
    )
    # Pick the template based on which sides of the change are non-empty.
    if change["new"] and change["old"]:
        template = DESC_CHANGED_TEMPLATE
    elif change["new"]:
        template = NEW_DESC_ADDED_TEMPLATE
    else:
        template = DESC_REMOVED_TEMPLATE
    return template.format(
        entity=entity,
        new=change["new"],
        old=change["old"],
        name_template=name_template,
    )
def get_epic_update_state_body(payload: Dict[str, Any]) -> str:
    """Build the message body for an epic state change."""
    action = get_action_with_primary_id(payload)
    state_change = action["changes"]["state"]
    return STATE_CHANGED_TEMPLATE.format(
        entity="epic",
        new=state_change["new"],
        old=state_change["old"],
        name_template=EPIC_NAME_TEMPLATE.format(name=action["name"]),
    )
def get_story_update_state_body(payload: Dict[str, Any]) -> str:
    """Build the message body for a story workflow-state change."""
    action = get_action_with_primary_id(payload)
    state_change = action["changes"]["workflow_state_id"]
    # Resolve the old/new workflow-state ids to display names via the
    # references list.
    resolved = {}
    for ref in payload["references"]:
        for side in ("new", "old"):
            if ref["id"] == state_change[side]:
                resolved[side] = ref["name"]
    return STATE_CHANGED_TEMPLATE.format(
        entity="story",
        new=resolved["new"],
        old=resolved["old"],
        name_template=STORY_NAME_TEMPLATE.format(
            name=action["name"],
            app_url=action.get("app_url"),
        ),
    )
def get_update_name_body(payload: Dict[str, Any], entity: str) -> str:
    """Build the message body for a story/epic rename."""
    action = get_action_with_primary_id(payload)
    name_change = action["changes"]["name"]
    return NAME_CHANGED_TEMPLATE.format(
        entity=entity,
        new=name_change["new"],
        old=name_change["old"],
        name_template=get_name_template(entity).format(
            name=action["name"],
            app_url=action.get("app_url"),
        ),
    )
def get_update_archived_body(payload: Dict[str, Any], entity: str) -> str:
    """Build the message body for a story/epic being (un)archived."""
    primary_action = get_action_with_primary_id(payload)
    archived_change = primary_action["changes"]["archived"]
    return ARCHIVED_TEMPLATE.format(
        entity=entity,
        action="archived" if archived_change["new"] else "unarchived",
        name_template=get_name_template(entity).format(
            name=primary_action["name"],
            app_url=primary_action.get("app_url"),
        ),
    )
def get_story_task_body(payload: Dict[str, Any], action: str) -> str:
    """Build the message body for a task added to / removed from a story."""
    task = get_action_with_primary_id(payload)
    format_args = {
        "task_description": task["description"],
        "action": action,
    }
    # The affected story is described by a sibling action.
    for item in payload["actions"]:
        if item["entity_type"] == "story":
            format_args["name_template"] = STORY_NAME_TEMPLATE.format(
                name=item["name"],
                app_url=item["app_url"],
            )
    return STORY_TASK_TEMPLATE.format(**format_args)
def get_story_task_completed_body(payload: Dict[str, Any]) -> Optional[str]:
    """Build the body for a completed task; None when it was marked incomplete."""
    action = get_action_with_primary_id(payload)
    if not action["changes"]["complete"]["new"]:
        return None
    format_args = {"task_description": action["description"]}
    # Resolve the owning story's name/url from the references list.
    story_id = action["story_id"]
    for ref in payload["references"]:
        if ref["id"] == story_id:
            format_args["name_template"] = STORY_NAME_TEMPLATE.format(
                name=ref["name"],
                app_url=ref["app_url"],
            )
    return STORY_TASK_COMPLETED_TEMPLATE.format(**format_args)
def get_story_update_epic_body(payload: Dict[str, Any]) -> str:
    """Build the body for a story added to / removed from / moved between epics."""
    action = get_action_with_primary_id(payload)
    epic_change = action["changes"]["epic_id"]
    new_id = epic_change.get("new")
    old_id = epic_change.get("old")
    format_args = {
        "story_name_template": STORY_NAME_TEMPLATE.format(
            name=action["name"],
            app_url=action["app_url"],
        ),
    }
    # Resolve epic names for whichever of old/new ids are present.
    for ref in payload["references"]:
        if ref["id"] == new_id:
            format_args["new_epic_name_template"] = EPIC_NAME_TEMPLATE.format(
                name=ref["name"])
        if ref["id"] == old_id:
            format_args["old_epic_name_template"] = EPIC_NAME_TEMPLATE.format(
                name=ref["name"])
    if new_id and old_id:
        return STORY_EPIC_CHANGED_TEMPLATE.format(**format_args)
    if new_id:
        format_args["epic_name_template"] = format_args["new_epic_name_template"]
        format_args["action"] = "added to"
    else:
        format_args["epic_name_template"] = format_args["old_epic_name_template"]
        format_args["action"] = "removed from"
    return STORY_ADDED_REMOVED_EPIC_TEMPLATE.format(**format_args)
def get_story_update_estimate_body(payload: Dict[str, Any]) -> str:
    """Build the message body for a story estimate change."""
    action = get_action_with_primary_id(payload)
    new_estimate = action["changes"]["estimate"].get("new")
    # A falsy new value means the story was unestimated.
    estimate = f"{new_estimate} points" if new_estimate else "*Unestimated*"
    return STORY_ESTIMATE_TEMPLATE.format(
        estimate=estimate,
        story_name_template=STORY_NAME_TEMPLATE.format(
            name=action["name"],
            app_url=action["app_url"],
        ),
    )
def get_reference_by_id(payload: Dict[str, Any], ref_id: int) -> Dict[str, Any]:
    """Return the reference entry with the given id, or {} when none matches.

    When several references share an id, the last one wins (matching the
    original scan order).
    """
    matches = [ref for ref in payload['references'] if ref['id'] == ref_id]
    return matches[-1] if matches else {}
def get_story_create_github_entity_body(payload: Dict[str, Any],
                                        entity: str) -> str:
    """Build the body for a GitHub PR/branch being attached to a story.

    Assumes the payload carries, besides the primary GitHub action, a
    sibling story action whose workflow_state_id changed as a result --
    TODO confirm this invariant against real Clubhouse payloads.
    """
    action = get_action_with_primary_id(payload)
    story: Dict[str, Any] = {}
    for a in payload['actions']:
        if (a['entity_type'] == 'story' and
                a['changes'].get('workflow_state_id') is not None):
            story = a
    # NOTE(review): if no such story action exists, `story` stays {} and
    # the lookups below raise KeyError.
    new_state_id = story['changes']['workflow_state_id']['new']
    old_state_id = story['changes']['workflow_state_id']['old']
    # Map workflow-state ids to their display names.
    new_state = get_reference_by_id(payload, new_state_id)['name']
    old_state = get_reference_by_id(payload, old_state_id)['name']
    kwargs = {
        'name_template': STORY_NAME_TEMPLATE.format(**story),
        # Pull requests are identified by number, branches by name.
        'name': action.get('number') if entity == 'pull-request' else action.get('name'),
        'url': action['url'],
        'new': new_state,
        'old': old_state,
    }
    template = STORY_GITHUB_PR_TEMPLATE if entity == 'pull-request' else STORY_GITHUB_BRANCH_TEMPLATE
    return template.format(**kwargs)
def get_story_update_attachment_body(payload: Dict[str, Any]) -> Optional[str]:
    """Build the body for a file attached to a story; None on removal."""
    action = get_action_with_primary_id(payload)
    added_file_ids = action["changes"]["file_ids"].get("adds")
    # If this is a payload for when an attachment is removed, ignore it
    if not added_file_ids:
        return None
    format_args = {
        "name_template": STORY_NAME_TEMPLATE.format(
            name=action["name"],
            app_url=action["app_url"],
        ),
    }
    # Only the first added file is reported.
    first_file_id = added_file_ids[0]
    for ref in payload["references"]:
        if ref["id"] == first_file_id:
            format_args["type"] = ref["entity_type"]
            format_args["file_name"] = ref["name"]
    return FILE_ATTACHMENT_TEMPLATE.format(**format_args)
def get_story_label_body(payload: Dict[str, Any]) -> Optional[str]:
    """Build the body for a label added to a story; None when one was removed."""
    primary = get_action_with_primary_id(payload)
    added_label_ids = primary["changes"]["label_ids"].get("adds")
    # If this is a payload for when a label is removed, ignore it
    if not added_label_ids:
        return None
    label_id = added_label_ids[0]
    # The label's name may appear among the actions or, failing that,
    # among the references.
    label_name = ''
    for item in payload["actions"]:
        if item['id'] == label_id:
            label_name = item.get('name', '')
    if not label_name:
        for ref in payload["references"]:
            if ref["id"] == label_id:
                label_name = ref.get('name', '')
    return STORY_LABEL_TEMPLATE.format(
        label_name=label_name,
        name_template=STORY_NAME_TEMPLATE.format(
            name=primary["name"],
            app_url=primary["app_url"],
        ),
    )
def get_story_update_project_body(payload: Dict[str, Any]) -> str:
    """Build the message body for a story moved between projects."""
    action = get_action_with_primary_id(payload)
    project_change = action["changes"]["project_id"]
    format_args = {
        "name_template": STORY_NAME_TEMPLATE.format(
            name=action["name"],
            app_url=action["app_url"],
        ),
    }
    # Resolve old/new project ids to their display names.
    for ref in payload["references"]:
        if ref["id"] == project_change["new"]:
            format_args["new"] = ref["name"]
        if ref["id"] == project_change["old"]:
            format_args["old"] = ref["name"]
    return STORY_UPDATE_PROJECT_TEMPLATE.format(**format_args)
def get_story_update_type_body(payload: Dict[str, Any]) -> str:
    """Build the message body for a story type change."""
    action = get_action_with_primary_id(payload)
    type_change = action["changes"]["story_type"]
    return STORY_UPDATE_TYPE_TEMPLATE.format(
        new_type=type_change["new"],
        old_type=type_change["old"],
        name_template=STORY_NAME_TEMPLATE.format(
            name=action["name"],
            app_url=action["app_url"],
        ),
    )
def get_story_update_owner_body(payload: Dict[str, Any]) -> str:
    """Build the message body for a new owner added to a story."""
    action = get_action_with_primary_id(payload)
    return STORY_UPDATE_OWNER_TEMPLATE.format(
        name_template=STORY_NAME_TEMPLATE.format(
            name=action["name"],
            app_url=action["app_url"],
        ),
    )
def get_entity_name(payload: Dict[str, Any], entity: Optional[str]=None) -> Optional[str]:
    """Return the display name for the given entity type.

    Falls back from the primary action to the other actions, and then to
    the references list (branches never use their own name).
    """
    primary = get_action_with_primary_id(payload)
    name = primary.get("name")
    if name is None or primary['entity_type'] == 'branch':
        for other in payload["actions"]:
            if other["entity_type"] == entity:
                name = other["name"]
    if name is None:
        for ref in payload["references"]:
            if ref["entity_type"] == entity:
                name = ref["name"]
    return name
def get_name_template(entity: str) -> str:
    """Return the markdown name template for the given entity type."""
    return STORY_NAME_TEMPLATE if entity == "story" else EPIC_NAME_TEMPLATE
# Maps the "<entity>_<action>[_<changed-field>]" event names produced by
# get_event() to the function that renders the message body.
EVENT_BODY_FUNCTION_MAPPER = {
    "story_update_archived": partial(get_update_archived_body, entity='story'),
    "epic_update_archived": partial(get_update_archived_body, entity='epic'),
    "story_create": get_story_create_body,
    "pull-request_create": partial(get_story_create_github_entity_body, entity='pull-request'),
    "branch_create": partial(get_story_create_github_entity_body, entity='branch'),
    "story_delete": get_delete_body,
    "epic_delete": get_delete_body,
    "story-task_create": partial(get_story_task_body, action="added to"),
    "story-task_delete": partial(get_story_task_body, action="removed from"),
    "story-task_update_complete": get_story_task_completed_body,
    "story_update_epic": get_story_update_epic_body,
    "story_update_estimate": get_story_update_estimate_body,
    "story_update_attachment": get_story_update_attachment_body,
    "story_update_label": get_story_label_body,
    "story_update_owner": get_story_update_owner_body,
    "story_update_project": get_story_update_project_body,
    "story_update_type": get_story_update_type_body,
    "epic_create": get_epic_create_body,
    "epic-comment_create": partial(get_comment_added_body, entity='epic'),
    "story-comment_create": partial(get_comment_added_body, entity='story'),
    "epic_update_description": partial(get_update_description_body, entity='epic'),
    "story_update_description": partial(get_update_description_body, entity='story'),
    "epic_update_state": get_epic_update_state_body,
    "story_update_state": get_story_update_state_body,
    "epic_update_name": partial(get_update_name_body, entity='epic'),
    "story_update_name": partial(get_update_name_body, entity='story'),
}

# Maps the primary action's entity type to the function that derives the
# message topic; story-related entities all use the story's name.
EVENT_TOPIC_FUNCTION_MAPPER = {
    "story": partial(get_entity_name, entity='story'),
    "pull-request": partial(get_entity_name, entity='story'),
    "branch": partial(get_entity_name, entity='story'),
    "story-comment": partial(get_entity_name, entity='story'),
    "story-task": partial(get_entity_name, entity='story'),
    "epic": partial(get_entity_name, entity='epic'),
    "epic-comment": partial(get_entity_name, entity='epic'),
}

# Events that should produce no message at all (get_event returns None).
IGNORED_EVENTS = {
    'story-comment_update',
}
@api_key_only_webhook_view('ClubHouse')
@has_request_variables
def api_clubhouse_webhook(
        request: HttpRequest, user_profile: UserProfile,
        payload: Optional[Dict[str, Any]]=REQ(argument_type='body'),
) -> HttpResponse:
    """Webhook endpoint: translate a Clubhouse payload into a Zulip message."""
    # Clubhouse has a tendency to send empty POST requests to
    # third-party endpoints. It is unclear as to which event type
    # such requests correspond to. So, it is best to ignore such
    # requests for now.
    if payload is None:
        return json_success()
    event = get_event(payload)
    # Events on the ignore list yield None and produce no message.
    if event is None:
        return json_success()
    body_func: Any = EVENT_BODY_FUNCTION_MAPPER.get(event)
    topic_func = get_topic_function_based_on_type(payload)
    if body_func is None or topic_func is None:
        raise UnexpectedWebhookEventType('Clubhouse', event)
    topic = topic_func(payload)
    body = body_func(payload)
    # Some body builders return None for payload variants that are
    # deliberately ignored (e.g. label/attachment removals).
    if topic and body:
        check_send_webhook_message(request, user_profile, topic, body)
    return json_success()
| timabbott/zulip | zerver/webhooks/clubhouse/view.py | Python | apache-2.0 | 20,329 |
"""
avocado.utils.partition unittests
:author: Lukas Doktor <ldoktor@redhat.com>
:copyright: 2016 Red Hat, Inc
"""
import os
import shutil
import sys
import tempfile
import time

from flexmock import flexmock, flexmock_teardown

from avocado.utils import partition, process
from avocado.utils import path as utils_path

# Python 2.6's unittest lacks features used below (e.g. skipIf), so fall
# back to the unittest2 backport on that version only.
if sys.version_info[:2] == (2, 6):
    import unittest2 as unittest  # pylint: disable=E0401
else:
    import unittest  # pylint: disable=C0411
def missing_binary(binary):
    """Return True when `binary` cannot be found by utils_path.find_command."""
    try:
        utils_path.find_command(binary)
    except utils_path.CmdNotFoundError:
        return True
    return False
def cannot_sudo(command):
    """Return True when `command` cannot be executed under sudo."""
    try:
        process.run(command, sudo=True)
        # Bug fix: this used to be a bare `False` expression, so the
        # function fell through and returned None on success (only
        # coincidentally falsy).
        return False
    except (process.CmdError, OSError):
        return True
class TestPartition(unittest.TestCase):

    """
    Unit tests for avocado.utils.partition
    """

    @staticmethod
    def _proc_mounts():
        """Return /proc/mounts content, closing the file properly.

        Fixes the previous pattern `open("/proc/mounts").read()`, which
        left the file handle to be collected by the GC.
        """
        with open("/proc/mounts") as mounts_file:
            return mounts_file.read()

    @unittest.skipIf(missing_binary('mkfs.ext2'),
                     "mkfs.ext2 is required for these tests to run.")
    @unittest.skipIf(missing_binary('sudo'),
                     "sudo is required for these tests to run.")
    @unittest.skipIf(cannot_sudo('mount'),
                     'current user must be allowed to run "mount" under sudo')
    @unittest.skipIf(cannot_sudo('mkfs.ext2 -V'),
                     'current user must be allowed to run "mkfs.ext2" under sudo')
    def setUp(self):
        """Create a temp dir holding a mountpoint and a file-backed partition."""
        self.tmpdir = tempfile.mkdtemp(prefix="avocado_" + __name__)
        self.mountpoint = os.path.join(self.tmpdir, "disk")
        os.mkdir(self.mountpoint)
        self.disk = partition.Partition(os.path.join(self.tmpdir, "block"), 1,
                                        self.mountpoint)

    def test_basic(self):
        """ Test the basic workflow """
        self.assertEqual(None, self.disk.get_mountpoint())
        self.disk.mkfs()
        self.disk.mount()
        self.assertIn(self.mountpoint, self._proc_mounts())
        self.assertEqual(self.mountpoint, self.disk.get_mountpoint())
        self.disk.unmount()
        self.assertNotIn(self.mountpoint, self._proc_mounts())

    def test_force_unmount(self):
        """ Test force-unmount feature """
        self.disk.mkfs()
        self.disk.mount()
        self.assertIn(self.mountpoint, self._proc_mounts())
        # Keep the mountpoint busy so a plain unmount would fail.
        proc = process.SubProcess("cd %s; while :; do echo a > a; rm a; done"
                                  % self.mountpoint, shell=True)
        proc.start()
        self.assertTrue(self.disk.unmount())
        self.assertEqual(proc.poll(), -9)  # Process should be killed -9
        self.assertNotIn(self.mountpoint, self._proc_mounts())

    def test_double_mount(self):
        """ Check the attempt for second mount fails """
        self.disk.mkfs()
        self.disk.mount()
        self.assertIn(self.mountpoint, self._proc_mounts())
        self.assertRaises(partition.PartitionError, self.disk.mount)
        self.assertIn(self.mountpoint, self._proc_mounts())

    def test_double_umount(self):
        """ Check double unmount works well """
        self.disk.mkfs()
        self.disk.mount()
        self.assertIn(self.mountpoint, self._proc_mounts())
        self.disk.unmount()
        self.assertNotIn(self.mountpoint, self._proc_mounts())
        # A second unmount must be a harmless no-op.
        self.disk.unmount()
        self.assertNotIn(self.mountpoint, self._proc_mounts())

    def test_format_mounted(self):
        """ Check format on mounted device fails """
        self.disk.mkfs()
        self.disk.mount()
        self.assertIn(self.mountpoint, self._proc_mounts())
        self.assertRaises(partition.PartitionError, self.disk.mkfs)

    def tearDown(self):
        self.disk.unmount()
        shutil.rmtree(self.tmpdir)
class TestMtabLock(unittest.TestCase):

    """
    Unit tests for avocado.utils.partition
    """

    def test_lock(self):
        """ Check double-lock raises exception after 60s (in 0.1s) """
        with partition.MtabLock():
            # speedup the process a bit
            # Mock time.time() to return 1, 2, then 62 so the retry loop
            # believes 60 seconds elapsed without actually sleeping.
            (flexmock(time).should_receive("time").and_return(1)
             .and_return(2).and_return(62))
            # Acquiring the lock a second time while held must fail.
            self.assertRaises(partition.PartitionError,
                              partition.MtabLock().__enter__)
        flexmock_teardown()
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
| adereis/avocado | selftests/unit/test_utils_partition.py | Python | gpl-2.0 | 4,346 |
from raspyrfm_client import RaspyRFMClient

# Example: discover RaspyRFM gateways on the local network and print
# every gateway that responds to the search broadcast.
rfm_client = RaspyRFMClient()
gateways = rfm_client.search()
for gateway in gateways:
    print(gateway)
| markusressel/raspyrfm-client | example_search.py | Python | gpl-3.0 | 151 |
from symengine.utilities import raises
from symengine import (Interval, EmptySet, FiniteSet, I, oo, Eq, Symbol,
linsolve)
from symengine.lib.symengine_wrapper import solve
def test_solve():
    """Exercise solve() on polynomial equations over various domains."""
    x = Symbol("x")
    reals = Interval(-oo, oo)
    # A non-zero constant has no solutions; the zero equation is solved
    # by every real number.
    assert solve(1, x, reals) == EmptySet
    assert solve(0, x, reals) == reals
    assert solve(x + 3, x, reals) == FiniteSet(-3)
    # Restricting the domain can remove all solutions.
    assert solve(x + 3, x, Interval(0, oo)) == EmptySet
    assert solve(x, x, reals) == FiniteSet(0)
    # With no domain given, complex roots are returned.
    assert solve(x**2 + 1, x) == FiniteSet(-I, I)
    assert solve(x**2 - 2*x + 1, x) == FiniteSet(1)
    # (x + 1)**3 = 0 has the single (triple) root -1.
    assert solve(Eq(x**3 + 3*x**2 + 3*x, -1), x, reals) == FiniteSet(-1)
    assert solve(x**3 - x, x) == FiniteSet(0, 1, -1)
def test_linsolve():
    """Exercise linsolve() on small linear systems."""
    x = Symbol("x")
    y = Symbol("y")
    assert linsolve([x - 2], [x]) == (2,)
    # Two independent single-variable equations.
    assert linsolve([x - 2, y - 3], [x, y]) == (2, 3)
    # A genuinely coupled 2x2 system.
    assert linsolve([x + y - 3, x + 2*y - 4], [x, y]) == (2, 1)
| bjodah/symengine.py | symengine/tests/test_solve.py | Python | mit | 930 |
# Seamless DVD Player
# Copyright (C) 2004-2006 Martin Soto <martinsoto@users.sourceforge.net>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
# USA
import gtk
import videowidget
class MainWindow(gtk.Window):
    """The Seamless main window.

    Hosts the toolbar, the video widget and the leave-fullscreen button,
    and forwards keyboard/toolbar actions to the player via the main UI.
    """

    # Bug fix: this attribute was previously misspelled `__slots_` (single
    # trailing underscore), which silently created an unused class
    # attribute instead of declaring slots.
    __slots__ = ('mainUi',
                 'player',
                 'topBox',
                 'video',
                 'leaveFullScreenBox',
                 'fullScreenActive')

    def __init__(self, mainUi):
        super(MainWindow, self).__init__()

        self.mainUi = mainUi
        self.player = mainUi.getPlayer()

        # Give the window a reasonable minimum size.
        self.set_size_request(480, 360)

        # Set the initial dimensions of the window to 75% of the screen.
        (rootWidth, rootHeight) = \
            self.get_root_window().get_geometry()[2:4]
        self.set_default_size(int(rootWidth * 0.75),
                              int(rootHeight * 0.75))

        # Define the toolbar.
        self.mainUi.add_ui_from_string('''
        <ui>
          <toolbar name="toolbar">
            <toolitem action="menu"/>
            <separator/>
            <toolitem action="pause"/>
            <separator/>
            <toolitem action="prevProgram"/>
            <toolitem action="backward10"/>
            <toolitem action="forward10"/>
            <toolitem action="nextProgram"/>
            <separator/>
            <toolitem action="fullScreen"/>
          </toolbar>

          <accelerator action="menu"/>
          <accelerator action="pause"/>
          <accelerator action="prevProgram"/>
          <accelerator action="nextProgram"/>
          <accelerator action="backward10"/>
          <accelerator action="forward10"/>
          <accelerator action="nextAudioStream"/>
          <accelerator action="nextAngle"/>
          <accelerator action="quit"/>
          <accelerator action="debugConsoleAsync"/>
        </ui>
        ''')

        # Add the central AccelGroup to the window.
        accelgroup = mainUi.get_accel_group()
        self.add_accel_group(accelgroup)

        self.set_title(_('Seamless DVD Player'))
        self.set_border_width(0)
        self.set_property('can-focus', True)

        vbox = gtk.VBox()
        vbox.show()
        self.add(vbox)

        # An additional box makes it possible to hide/show all top
        # elements in a single operation.
        self.topBox = gtk.VBox()
        vbox.pack_start(self.topBox, expand=False)

        toolbar = self.mainUi.get_widget('/toolbar')
        toolbar.show()
        self.topBox.pack_start(toolbar, expand=False)

        self.video = videowidget.VideoWidget()
        self.video.show()
        vbox.pack_start(self.video)
        self.video.set_property('can-focus', True)
        self.video.connect('key-press-event', self.videoKeyPress)
        self.video.connect('ready', self.videoReady)
        self.video.grab_focus()
        self.video.setCursorTimeout(None)

        # FIXME: If the video sink doesn't support XOverlay, we have a
        # problem.
        self.video.setOverlay(self.player.getVideoSink())

        # A table container allows us to lay widgets on top of the
        # video display.
        table = gtk.Table(3, 3)
        table.show()
        self.video.add(table)

        # An expansive empty label in the middle position forces
        # widgets in the corners to shrink to their natural sizes.
        expandLabel = gtk.Label()
        expandLabel.show()
        table.attach(expandLabel, left_attach=1, right_attach=2,
                     top_attach=1, bottom_attach=2)

        # The fullscreen cancel button. In order for widgets to be
        # visible on top of the video overlay, they must have a
        # window. For this reason we put the button in an event box.
        self.leaveFullScreenBox = gtk.EventBox()
        table.attach(self.leaveFullScreenBox, left_attach=2, right_attach=3,
                     top_attach=0, bottom_attach=1,
                     xoptions=0, yoptions=0, xpadding=10, ypadding=10)
        leaveFullScreen = gtk.Button(stock=gtk.STOCK_LEAVE_FULLSCREEN)
        leaveFullScreen.show()
        leaveFullScreen.connect('clicked', self._leaveFullScreenClicked)
        self.leaveFullScreenBox.add(leaveFullScreen)

        # No fullscreen by default.
        self.fullScreenActive = False

        self.video.connect('cursor-hidden', self._videoCursorHidden)
        self.video.connect('cursor-shown', self._videoCursorShown)

    #
    # Full Screen Support
    #

    def fullScreen(self, activate):
        """Enter (activate=True) or leave (activate=False) fullscreen mode."""
        self.fullScreenActive = activate

        if activate:
            self.topBox.hide()
            self.video.grab_focus()
            # Auto-hide the cursor after 5s while in fullscreen.
            self.video.setCursorTimeout(5)
            self.fullscreen()
            self.set_keep_above(1)
            self.leaveFullScreenBox.show()
        else:
            self.leaveFullScreenBox.hide()
            self.unfullscreen()
            self.set_keep_above(0)
            self.topBox.show()
            self.video.setCursorTimeout(None)

    def _videoCursorHidden(self, widget):
        # Hide the leave-fullscreen button together with the cursor.
        if self.fullScreenActive:
            self.leaveFullScreenBox.hide()

    def _videoCursorShown(self, widget):
        if self.fullScreenActive:
            self.leaveFullScreenBox.show()

    def _leaveFullScreenClicked(self, widget):
        self.mainUi.fullScreen.set_active(False)

    #
    # Callbacks
    #

    def videoReady(self, widget):
        """Start playback once the video widget is ready."""
        # Start the player.
        self.player.start()

    def videoKeyPress(self, widget, event):
        """Dispatch key presses that cannot be expressed as accelerators."""
        keyName = gtk.gdk.keyval_name(event.keyval)

        # These five actions must be handled here explicitly since
        # their corresponding keys cannot be used in accelerators.
        if keyName == 'Up':
            self.mainUi.up.activate()
        elif keyName == 'Down':
            self.mainUi.down.activate()
        elif event.state == 0 and keyName == 'Left':
            self.mainUi.left.activate()
        elif event.state == 0 and keyName == 'Right':
            self.mainUi.right.activate()
        elif keyName == 'Return':
            self.mainUi.confirm.activate()
        elif keyName == 'Escape':
            self.mainUi.fullScreen.set_active(False)
        else:
            return False

        return True
| MartinSoto/Seamless | src/mainwindow.py | Python | gpl-2.0 | 6,953 |
import unittest
import numpy as np
import networkx as nx
from pele.rates import RateCalculation
from pele.rates._rate_calculations import GraphReduction, kmcgraph_from_rates
np.random.seed(0)


def make_rates_complete(nnodes=10):
    """Build rates for a complete graph: rate(i -> j) = (i + j) / (i + 1)."""
    rates = {}
    for i in range(nnodes):
        for j in range(i + 1, nnodes):
            total = float(i + j)
            rates[(i, j)] = total / (i + 1)
            rates[(j, i)] = total / (j + 1)
    return rates
class _MakeRandomGraph(object):
    """Build a random rate dictionary / KMC graph over `nnodes` nodes.

    Random edges are added until the nodes in `node_set` are connected
    and at least `nedges` directed rates exist.
    """

    def __init__(self, nnodes=10, nedges=20, node_set=None):
        self.nodes = np.array(range(nnodes))
        self.nedges = nedges
        # NOTE(review): set(None) raises TypeError, so node_set is
        # effectively required despite the None default.
        self.node_set = set(node_set)

        self.rates = dict()
        self.graph = nx.Graph()
        self.graph.add_nodes_from(self.nodes)

    def node_set_connected(self):
        """Return True when all of node_set lies in one connected component."""
        # Bug fix: iter(...).next() is Python-2-only syntax; next(iter(...))
        # works on both Python 2.6+ and Python 3.
        u = next(iter(self.node_set))
        cc = set(nx.node_connected_component(self.graph, u))
        return len(cc.intersection(self.node_set)) == len(self.node_set)

    def add_random_edge(self):
        """Add one random edge with random forward/backward rates."""
        np.random.shuffle(self.nodes)
        u, v = self.nodes[:2]
        r1, r2 = np.random.uniform(.5, 1.5, 2)
        self.rates[(u, v)] = r1
        self.rates[(v, u)] = r2
        self.graph.add_edge(u, v)
        return u, v

    def make_rates(self):
        """Fill self.rates until node_set is connected and nedges is reached."""
        nnodes = len(self.nodes)
        if self.node_set is not None:
            # add edges until the nodes in node_set are connected
            while not self.node_set_connected():
                self.add_random_edge()
        # A simple graph holds at most n*(n-1) directed rates.
        nedges = min(self.nedges, nnodes * (nnodes - 1))
        while len(self.rates) < nedges:
            self.add_random_edge()
        return self.rates

    def run(self):
        """Build the rates and return the corresponding KMC graph."""
        self.make_rates()
        return kmcgraph_from_rates(self.rates)
def _three_state_rates():
tmatrix = [ [0., 1., 1.,], [1., 0., 1.,], [1., 1., 0.] ]
rates = dict()
for i in range(3):
for j in range(3):
if i != j:
rates[(i,j)] = tmatrix[i][j]
return rates
def _three_state_graph():
    """Return the kmc graph built from the symmetric 3-state rates."""
    return kmcgraph_from_rates(_three_state_rates())
class TestGraphReduction3(unittest.TestCase):
    """Graph-transformation rate tests on the fully connected 3-state chain.

    With all microscopic rates equal to 1, every renormalized A<->B rate
    must also be 1, whichever pair of states is selected.
    """
    def setUp(self):
        self.rates = _three_state_rates()
        # all rates after graph renormalization should be 1.0
        self.final_rate = 1.0

    def _test_rate(self, i, j):
        """Reduce the graph onto states i and j and verify both directed rates."""
        reduction = GraphReduction(self.rates, [i], [j], debug=True)
        reduction.check_graph()
        reduction.compute_rates()
        rate_ab = reduction.get_rate_AB()
        rate_ba = reduction.get_rate_BA()
        reduction.check_graph()
        # only the two selected states survive, connected in both directions
        self.assertEqual(reduction.graph.number_of_nodes(), 2)
        self.assertEqual(reduction.graph.number_of_edges(), 4)
        self.assertAlmostEqual(rate_ab, self.final_rate, 7)
        self.assertAlmostEqual(rate_ba, self.final_rate, 7)

    def test01(self):
        self._test_rate(0, 1)

    def test12(self):
        self._test_rate(1, 2)

    def test02(self):
        self._test_rate(0, 2)
class TestGraphReductionRandom(unittest.TestCase):
    """Graph-transformation tests on randomly generated rate graphs."""
    def do_check(self, A, B, nnodes=20, nedges=20):
        """Build a random graph connecting A and B, reduce it, and check
        that only the A and B nodes remain afterwards.
        """
        # bug fix: nnodes/nedges were previously ignored -- the call
        # hard-coded 20, 20, so callers could not vary the graph size.
        maker = _MakeRandomGraph(nnodes=nnodes, nedges=nedges, node_set=A + B)
        graph = maker.run()
        reducer = GraphReduction(maker.rates, A, B, debug=False)
        reducer.check_graph()
        reducer.compute_rates()
        rAB = reducer.get_rate_AB()
        rBA = reducer.get_rate_BA()
        reducer.check_graph()
        # after reduction only the source and sink sets survive
        self.assertEqual(reducer.graph.number_of_nodes(), len(A) + len(B))
        if len(A) == 1 and len(B) == 1:
            # a single A/B pair leaves at most 2 directed edges each way
            self.assertLessEqual(reducer.graph.number_of_edges(), 4)

    def test(self):
        A, B = [0], [1]
        self.do_check(A, B)

    def test_setA(self):
        A, B = [0, 1, 2], [3]
        self.do_check(A, B)

    def test_setAB(self):
        A, B = [0, 1, 2], [3, 4, 5, 6]
        self.do_check(A, B)
if __name__ == "__main__":
    # allow running this test module directly
    unittest.main()
| kjs73/pele | pele/rates/tests/test_graph_transformation.py | Python | gpl-3.0 | 3,988 |
# Copyright 2016 Cloudbase Solutions Srl
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_policy import policy
from nova.policies import base
# Base name for every admin-actions policy rule; '%s' is the action name.
POLICY_ROOT = 'os_compute_api:os-admin-actions:%s'
# Policy rules for the os-admin-actions server actions; each rule defaults
# to RULE_ADMIN_API and is scoped to the 'system' context.
admin_actions_policies = [
    policy.DocumentedRuleDefault(
        POLICY_ROOT % 'reset_state',
        base.RULE_ADMIN_API,
        "Reset the state of a given server",
        [
            {
                'method': 'POST',
                'path': '/servers/{server_id}/action (os-resetState)'
            }
        ],
        scope_types=['system']),
    policy.DocumentedRuleDefault(
        POLICY_ROOT % 'inject_network_info',
        base.RULE_ADMIN_API,
        "Inject network information into the server",
        [
            {
                'method': 'POST',
                'path': '/servers/{server_id}/action (injectNetworkInfo)'
            }
        ],
        scope_types=['system']),
    policy.DocumentedRuleDefault(
        POLICY_ROOT % 'reset_network',
        base.RULE_ADMIN_API,
        "Reset networking on a server",
        [
            {
                'method': 'POST',
                'path': '/servers/{server_id}/action (resetNetwork)'
            }
        ],
        scope_types=['system'])
]
def list_rules():
    """Return the admin-actions policy rules for registration with oslo.policy."""
    return admin_actions_policies
| rahulunair/nova | nova/policies/admin_actions.py | Python | apache-2.0 | 1,849 |
#!/bin/python
# -*- coding: utf-8 -*-
# ####################################################################
# gofed-ng - Golang system
# Copyright (C) 2016 Fridolin Pokorny, fpokorny@redhat.com
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
# ####################################################################
import sys
from common.helpers.utils import dict2json
from scenario import Scenario, Flag, SwitchAttr
class Api(Scenario):
    ''' API analysis example

    Runs an API analysis on one of: a local file (--file), a remote project
    at a given commit (--project/--project-commit), or a distro package
    (--package-name + version/release/distro, or fully qualified --package).
    With --store the computed results are persisted via the system services.
    '''
    # local file input; mutually exclusive with project/store options
    file_path = SwitchAttr(["--file", "-f"], str,
                           help="Local file to run API on", excludes=["-p", "--store"])
    proj_commit = SwitchAttr(["--project-commit"], str,
                             help="Commit of the project", requires=["-p"])
    proj_commit_date = SwitchAttr(["--project-commit-date"], str,
                                  help="Commit date (needed when storing results)", requires=["--project-commit"])
    project = SwitchAttr(["--project", "-p"], str,
                         help="Remote project to run API analysis on", requires=["--project-commit"])
    store = Flag(["--store"],
                 help="Save computed results to ApiStorage")
    meta = Flag(["--meta", "-m"],
                help="show meta information in output as well")
    # package input, identified by its name/version/release/distro fields
    package_name = SwitchAttr(["--package-name"], str,
                              help="Package to run API analysis on",
                              requires=["--package-version", "--package-release", "--package-distro"],
                              excludes=["--file", "--project", "--package"])
    pkg_version = SwitchAttr(["--package-version"], str,
                             help="Package version to run API analysis on")
    pkg_release = SwitchAttr(["--package-release"], str,
                             help="Package release to run API analysis on")
    pkg_distro = SwitchAttr(["--package-distro"], str,
                            help="Package distro to run API analysis on")
    pkg_arch = SwitchAttr(["--package-arch"], str,
                          help="Package architecture to run API analysis on; if omitted, source RPM is used")
    package = SwitchAttr(["--package"], str,
                         help="Package name (fully qualified name) to run " +
                              "API analysis on (e.g. flannel-0.5.5-5.fc24.x86_64.rpm)")
    def main(self):
        """Resolve the input file, run the API analysis, optionally store the
        results, and print them as JSON (with meta info when -m is given)."""
        with self.get_system() as system:
            # prepare_file_by_args picks the input from whichever switch was used
            file_id = self.prepare_file_by_args(system)
            if file_id is None:
                raise ValueError("No file specification supplied")
            api = system.async_call.api_analysis(file_id.get_result())
            if self.store and self.project:
                if not self.proj_commit_date:
                    raise ValueError("Commit date required when storing API results of a project")
                system.call.api_store_project(self.project, self.proj_commit,
                                              self.proj_commit_date, api.result, api.meta)
            elif self.store and self.package_name:
                system.call.api_store_package(self.package_name, self.pkg_version, self.pkg_release,
                                              self.pkg_distro, api.result, api.meta)
            elif self.store:
                # when --package
                raise RuntimeError("Store API not supported")
            if self.meta:
                print dict2json(api.get_result_with_meta())
            else:
                print dict2json(api.result)
        return 0
if __name__ == '__main__':
    # Scenarios are run through the gofed-ng runner; direct execution of this
    # module is an error and exits with a non-zero status.
    sys.exit(1)
| gofed/gofed-ng | scenarios/api.py | Python | gpl-3.0 | 4,252 |
#!/usr/bin/env python
"""
@package ion.agents.platform.platform_agent_stream_publisher
@file ion/agents/platform/platform_agent_stream_publisher.py
@author Carlos Rueda
@brief Stream publishing support for platform agents.
"""
__author__ = 'Carlos Rueda'
import logging
import uuid
from coverage_model.parameter import ParameterDictionary
import numpy
from pyon.public import log
from pyon.core.bootstrap import get_obj_registry
from pyon.core.object import IonObjectDeserializer
from pyon.ion.stream import StreamPublisher
from ion.services.dm.utility.granule.record_dictionary import RecordDictionaryTool
from interface.objects import StreamRoute
class PlatformAgentStreamPublisher(object):
    """
    Stream publishing support for platform agents.

    Builds one StreamPublisher per stream configured in the agent's CFG
    ('stream_config') and publishes granules assembled from driver
    attribute-value events, tagging each granule with a connection id
    and a per-stream, monotonically increasing connection index.
    """
    def __init__(self, agent):
        # Capture the owning agent and the pieces of its state used here.
        self._agent = agent
        self._platform_id = agent._platform_id
        self.resource_id = agent.resource_id
        self._pp = agent._pp
        self.CFG = agent.CFG
        # Dictionaries used for data publishing.
        self._data_streams = {}
        self._param_dicts = {}
        self._stream_defs = {}
        self._data_publishers = {}
        self._connection_ID = None
        self._connection_index = {}
        # Set of parameter names received in event notification but not
        # configured. Allows to log corresponding warning only once.
        self._unconfigured_params = set()
        stream_info = self.CFG.get('stream_config', None)
        if stream_info is None:
            # should not happen: PlatformAgent._validate_configuration validates this.
            log.error("%r: No stream_config given in CFG", self._platform_id)
            return
        for stream_name, stream_config in stream_info.iteritems():
            self._construct_stream_and_publisher(stream_name, stream_config)
        log.debug("%r: PlatformAgentStreamPublisher complete", self._platform_id)
    def _construct_stream_and_publisher(self, stream_name, stream_config):
        """Deserialize the stream definition for one configured stream and
        create its parameter dictionary, stream route and publisher,
        storing them in the per-stream lookup dictionaries."""
        if log.isEnabledFor(logging.TRACE):  # pragma: no cover
            log.trace("%r: _construct_stream_and_publisher: "
                      "stream_name:%r, stream_config:\n%s",
                      self._platform_id, stream_name,
                      self._pp.pformat(stream_config))
        decoder = IonObjectDeserializer(obj_registry=get_obj_registry())
        if 'stream_def_dict' not in stream_config:
            # should not happen: PlatformAgent._validate_configuration validates this.
            log.error("'stream_def_dict' key not in configuration for stream %r" % stream_name)
            return
        stream_def_dict = stream_config['stream_def_dict']
        stream_def_dict['type_'] = 'StreamDefinition'
        stream_def_obj = decoder.deserialize(stream_def_dict)
        self._stream_defs[stream_name] = stream_def_obj
        routing_key = stream_config['routing_key']
        stream_id = stream_config['stream_id']
        exchange_point = stream_config['exchange_point']
        parameter_dictionary = stream_def_dict['parameter_dictionary']
        log.debug("%r: got parameter_dictionary from stream_def_dict", self._platform_id)
        self._data_streams[stream_name] = stream_id
        self._param_dicts[stream_name] = ParameterDictionary.load(parameter_dictionary)
        stream_route = StreamRoute(exchange_point=exchange_point, routing_key=routing_key)
        publisher = self._create_publisher(stream_id, stream_route)
        self._data_publishers[stream_name] = publisher
        log.debug("%r: created publisher for stream_name=%r", self._platform_id, stream_name)
    def _create_publisher(self, stream_id, stream_route):
        """Return a StreamPublisher for the given stream id/route, bound to
        the owning agent process."""
        publisher = StreamPublisher(process=self._agent,
                                    stream_id=stream_id,
                                    stream_route=stream_route)
        return publisher
    def reset_connection(self):
        """Start a new connection: generate a fresh connection UUID and
        reset every stream's connection index to zero."""
        self._connection_ID = uuid.uuid4()
        self._connection_index = {stream_name : 0 for
                                  stream_name in self._data_streams.keys()}
        log.debug("%r: reset_connection: connection_id=%s, connection_index=%s",
                  self._platform_id, self._connection_ID.hex, self._connection_index)
    def handle_attribute_value_event(self, driver_event):
        """Publish the attribute values carried by a driver event onto the
        corresponding stream; events for unconfigured streams are only warned
        about and otherwise ignored."""
        if log.isEnabledFor(logging.TRACE):  # pragma: no cover
            # show driver_event as retrieved (driver_event.vals_dict might be large)
            log.trace("%r: driver_event = %s", self._platform_id, driver_event)
            log.trace("%r: vals_dict:\n%s",
                      self._platform_id, self._pp.pformat(driver_event.vals_dict))
        elif log.isEnabledFor(logging.DEBUG):  # pragma: no cover
            log.debug("%r: driver_event = %s", self._platform_id, driver_event.brief())
        stream_name = driver_event.stream_name
        publisher = self._data_publishers.get(stream_name, None)
        if not publisher:
            log.warn('%r: no publisher configured for stream_name=%r. '
                     'Configured streams are: %s',
                     self._platform_id, stream_name, self._data_publishers.keys())
            return
        param_dict = self._param_dicts[stream_name]
        stream_def = self._stream_defs[stream_name]
        # a plain string here is a stream definition id; otherwise it is the
        # deserialized StreamDefinition object created at construction time
        if isinstance(stream_def, str):
            rdt = RecordDictionaryTool(param_dictionary=param_dict.dump(),
                                       stream_definition_id=stream_def)
        else:
            rdt = RecordDictionaryTool(stream_definition=stream_def)
        self._publish_granule_with_multiple_params(publisher, driver_event,
                                                   param_dict, rdt)
    def _publish_granule_with_multiple_params(self, publisher, driver_event,
                                              param_dict, rdt):
        """Fill the record dictionary with the (value, timestamp) series of
        every recognized parameter in the event, then publish one granule.
        Parameters not present in the rdt are warned about once and skipped."""
        stream_name = driver_event.stream_name
        pub_params = {}
        selected_timestamps = None
        for param_name, param_value in driver_event.vals_dict.iteritems():
            param_name = param_name.lower()
            if not param_name in rdt:
                if param_name not in self._unconfigured_params:
                    # an unrecognized attribute for this platform:
                    self._unconfigured_params.add(param_name)
                    log.warn('%r: got attribute value event for unconfigured parameter %r in stream %r'
                             ' rdt.keys=%s',
                             self._platform_id, param_name, stream_name, list(rdt.iterkeys()))
                continue
            # separate values and timestamps:
            vals, timestamps = zip(*param_value)
            self._agent._dispatch_value_alerts(stream_name, param_name, vals)
            # Use fill_value in context to replace any None values:
            param_ctx = param_dict.get_context(param_name)
            if param_ctx:
                fill_value = param_ctx.fill_value
                log.debug("%r: param_name=%r fill_value=%s",
                          self._platform_id, param_name, fill_value)
                # do the replacement:
                vals = [fill_value if val is None else val for val in vals]
                if log.isEnabledFor(logging.TRACE):  # pragma: no cover
                    log.trace("%r: vals array after replacing None with fill_value:\n%s",
                              self._platform_id, self._pp.pformat(vals))
            else:
                log.warn("%r: unexpected: parameter context not found for %r",
                         self._platform_id, param_name)
            # Set values in rdt:
            rdt[param_name] = numpy.array(vals)
            pub_params[param_name] = vals
            # NOTE(review): only the timestamps of the last processed parameter
            # are used for the granule -- presumably all parameters in one
            # event share the same timestamps; TODO confirm with the driver.
            selected_timestamps = timestamps
        if selected_timestamps is None:
            # that is, all param_name's were unrecognized; just return:
            return
        self._publish_granule(stream_name, publisher, param_dict, rdt,
                              pub_params, selected_timestamps)
    def _publish_granule(self, stream_name, publisher, param_dict, rdt,
                         pub_params, timestamps):
        """Attach timestamp info to the rdt (when a temporal parameter is
        defined), build the granule tagged with the current connection id and
        per-stream index, publish it, and advance the index on success."""
        # Set timestamp info in rdt:
        if param_dict.temporal_parameter_name is not None:
            temp_param_name = param_dict.temporal_parameter_name
            rdt[temp_param_name] = numpy.array(timestamps)
            #@TODO: Ensure that the preferred_timestamp field is correct
            rdt['preferred_timestamp'] = numpy.array(['internal_timestamp'] * len(timestamps))
            if log.isEnabledFor(logging.DEBUG):  # pragma: no cover
                log.debug('Preferred timestamp is unresolved, using "internal_timestamp"')
        else:
            log.warn("%r: Not including timestamp info in granule: "
                     "temporal_parameter_name not defined in parameter dictionary",
                     self._platform_id)
        g = rdt.to_granule(data_producer_id=self.resource_id,
                           connection_id=self._connection_ID.hex,
                           connection_index=str(self._connection_index[stream_name]))
        try:
            publisher.publish(g)
            if log.isEnabledFor(logging.TRACE):  # pragma: no cover
                log.trace("%r: Platform agent published data granule on stream %r: "
                          "%s timestamps: %s",
                          self._platform_id, stream_name,
                          self._pp.pformat(pub_params), self._pp.pformat(timestamps))
            elif log.isEnabledFor(logging.DEBUG):  # pragma: no cover
                summary_params = {attr_id: "(%d vals)" % len(vals)
                                  for attr_id, vals in pub_params.iteritems()}
                summary_timestamps = "(%d vals)" % len(timestamps)
                log.debug("%r: Platform agent published data granule on stream %r: "
                          "%s timestamps: %s",
                          self._platform_id, stream_name,
                          summary_params, summary_timestamps)
            log.debug("%r: granule published with connection_id=%s, connection_index=%i",
                      self._platform_id,
                      self._connection_ID.hex,
                      self._connection_index[stream_name])
            # only count granules that were actually published
            self._connection_index[stream_name] += 1
        except Exception:
            log.exception("%r: Platform agent could not publish data on stream %s.",
                          self._platform_id, stream_name)
    def reset(self):
        """Forget which unconfigured parameters have already been warned about."""
        self._unconfigured_params.clear()
| ooici/coi-services | ion/agents/platform/platform_agent_stream_publisher.py | Python | bsd-2-clause | 10,637 |
# Copyright 2011 Denali Systems, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
from lxml import etree
import webob
from cinder.api.v1 import snapshots
from cinder import db
from cinder import exception
from cinder import flags
from cinder.openstack.common import log as logging
from cinder import test
from cinder.tests.api import fakes
from cinder.tests.api.v1 import stubs
from cinder import volume
FLAGS = flags.FLAGS
LOG = logging.getLogger(__name__)
# snapshot id the stubs below recognize as existing
UUID = '00000000-0000-0000-0000-000000000001'
# well-formed id the stubs treat as nonexistent
INVALID_UUID = '00000000-0000-0000-0000-000000000002'
def _get_default_snapshot_param():
    """Return a fresh dict describing the canonical fake snapshot used by the stubs."""
    return {
        'id': UUID,
        'volume_id': 12,
        'status': 'available',
        'volume_size': 100,
        'created_at': None,
        'display_name': 'Default name',
        'display_description': 'Default description',
    }
def stub_snapshot_create(self, context, volume_id, name, description):
    """Stand-in for the volume API's create_snapshot: return the default
    fake snapshot updated with the caller-supplied fields."""
    snapshot = _get_default_snapshot_param()
    snapshot.update(volume_id=volume_id,
                    display_name=name,
                    display_description=description)
    return snapshot
def stub_snapshot_delete(self, context, snapshot):
    """Stand-in for delete_snapshot: succeed only for the known UUID."""
    if snapshot['id'] == UUID:
        return
    raise exception.NotFound
def stub_snapshot_get(self, context, snapshot_id):
    """Stand-in for get_snapshot: return the default fake snapshot for the
    known UUID, raise NotFound for any other id."""
    if snapshot_id == UUID:
        return _get_default_snapshot_param()
    raise exception.NotFound
def stub_snapshot_get_all(self, context, search_opts=None):
    """Stand-in for get_all_snapshots: a single-element list holding the
    default fake snapshot (search_opts are ignored)."""
    return [_get_default_snapshot_param()]
class SnapshotApiTest(test.TestCase):
    """Tests for the v1 snapshots API controller: create, update, delete,
    show, detail and the various index filters, using the stubbed db and
    volume-API layers above."""
    def setUp(self):
        super(SnapshotApiTest, self).setUp()
        self.controller = snapshots.SnapshotsController()
        self.stubs.Set(db, 'snapshot_get_all_by_project',
                       stubs.stub_snapshot_get_all_by_project)
        self.stubs.Set(db, 'snapshot_get_all',
                       stubs.stub_snapshot_get_all)
    def test_snapshot_create(self):
        self.stubs.Set(volume.api.API, "create_snapshot", stub_snapshot_create)
        self.stubs.Set(volume.api.API, 'get', stubs.stub_volume_get)
        snapshot = {"volume_id": '12',
                    "force": False,
                    "display_name": "Snapshot Test Name",
                    "display_description": "Snapshot Test Desc"}
        body = dict(snapshot=snapshot)
        req = fakes.HTTPRequest.blank('/v1/snapshots')
        resp_dict = self.controller.create(req, body)
        self.assertTrue('snapshot' in resp_dict)
        self.assertEqual(resp_dict['snapshot']['display_name'],
                         snapshot['display_name'])
        self.assertEqual(resp_dict['snapshot']['display_description'],
                         snapshot['display_description'])
    def test_snapshot_create_force(self):
        self.stubs.Set(volume.api.API,
                       "create_snapshot_force",
                       stub_snapshot_create)
        self.stubs.Set(volume.api.API, 'get', stubs.stub_volume_get)
        snapshot = {"volume_id": '12',
                    "force": True,
                    "display_name": "Snapshot Test Name",
                    "display_description": "Snapshot Test Desc"}
        body = dict(snapshot=snapshot)
        req = fakes.HTTPRequest.blank('/v1/snapshots')
        resp_dict = self.controller.create(req, body)
        self.assertTrue('snapshot' in resp_dict)
        self.assertEqual(resp_dict['snapshot']['display_name'],
                         snapshot['display_name'])
        self.assertEqual(resp_dict['snapshot']['display_description'],
                         snapshot['display_description'])
        # a non-boolean 'force' value must be rejected
        snapshot = {"volume_id": "12",
                    "force": "**&&^^%%$$##@@",
                    "display_name": "Snapshot Test Name",
                    "display_description": "Snapshot Test Desc"}
        body = dict(snapshot=snapshot)
        req = fakes.HTTPRequest.blank('/v1/snapshots')
        self.assertRaises(exception.InvalidParameterValue,
                          self.controller.create,
                          req,
                          body)
    def test_snapshot_update(self):
        self.stubs.Set(volume.api.API, "get_snapshot", stub_snapshot_get)
        self.stubs.Set(volume.api.API, "update_snapshot",
                       stubs.stub_snapshot_update)
        updates = {"display_name": "Updated Test Name", }
        body = {"snapshot": updates}
        req = fakes.HTTPRequest.blank('/v1/snapshots/%s' % UUID)
        res_dict = self.controller.update(req, UUID, body)
        expected = {'snapshot': {
            'id': UUID,
            'volume_id': 12,
            'status': 'available',
            'size': 100,
            'created_at': None,
            'display_name': 'Updated Test Name',
            'display_description': 'Default description',
        }}
        self.assertEquals(expected, res_dict)
    def test_snapshot_update_missing_body(self):
        body = {}
        req = fakes.HTTPRequest.blank('/v1/snapshots/%s' % UUID)
        self.assertRaises(webob.exc.HTTPUnprocessableEntity,
                          self.controller.update, req, UUID, body)
    def test_snapshot_update_invalid_body(self):
        body = {'display_name': 'missing top level snapshot key'}
        req = fakes.HTTPRequest.blank('/v1/snapshots/%s' % UUID)
        self.assertRaises(webob.exc.HTTPUnprocessableEntity,
                          self.controller.update, req, UUID, body)
    def test_snapshot_update_not_found(self):
        self.stubs.Set(volume.api.API, "get_snapshot", stub_snapshot_get)
        updates = {
            "display_name": "Updated Test Name",
        }
        body = {"snapshot": updates}
        req = fakes.HTTPRequest.blank('/v1/snapshots/not-the-uuid')
        self.assertRaises(webob.exc.HTTPNotFound, self.controller.update, req,
                          'not-the-uuid', body)
    def test_snapshot_delete(self):
        self.stubs.Set(volume.api.API, "get_snapshot", stub_snapshot_get)
        self.stubs.Set(volume.api.API, "delete_snapshot", stub_snapshot_delete)
        snapshot_id = UUID
        req = fakes.HTTPRequest.blank('/v1/snapshots/%s' % snapshot_id)
        resp = self.controller.delete(req, snapshot_id)
        self.assertEqual(resp.status_int, 202)
    def test_snapshot_delete_invalid_id(self):
        self.stubs.Set(volume.api.API, "delete_snapshot", stub_snapshot_delete)
        snapshot_id = INVALID_UUID
        req = fakes.HTTPRequest.blank('/v1/snapshots/%s' % snapshot_id)
        self.assertRaises(webob.exc.HTTPNotFound,
                          self.controller.delete,
                          req,
                          snapshot_id)
    def test_snapshot_show(self):
        self.stubs.Set(volume.api.API, "get_snapshot", stub_snapshot_get)
        req = fakes.HTTPRequest.blank('/v1/snapshots/%s' % UUID)
        resp_dict = self.controller.show(req, UUID)
        self.assertTrue('snapshot' in resp_dict)
        self.assertEqual(resp_dict['snapshot']['id'], UUID)
    def test_snapshot_show_invalid_id(self):
        snapshot_id = INVALID_UUID
        req = fakes.HTTPRequest.blank('/v1/snapshots/%s' % snapshot_id)
        self.assertRaises(webob.exc.HTTPNotFound,
                          self.controller.show,
                          req,
                          snapshot_id)
    def test_snapshot_detail(self):
        self.stubs.Set(volume.api.API,
                       "get_all_snapshots",
                       stub_snapshot_get_all)
        req = fakes.HTTPRequest.blank('/v1/snapshots/detail')
        resp_dict = self.controller.detail(req)
        self.assertTrue('snapshots' in resp_dict)
        resp_snapshots = resp_dict['snapshots']
        self.assertEqual(len(resp_snapshots), 1)
        resp_snapshot = resp_snapshots.pop()
        self.assertEqual(resp_snapshot['id'], UUID)
    def test_snapshot_list_by_status(self):
        def stub_snapshot_get_all_by_project(context, project_id):
            return [
                stubs.stub_snapshot(1, display_name='backup1',
                                    status='available'),
                stubs.stub_snapshot(2, display_name='backup2',
                                    status='available'),
                stubs.stub_snapshot(3, display_name='backup3',
                                    status='creating'),
            ]
        self.stubs.Set(db, 'snapshot_get_all_by_project',
                       stub_snapshot_get_all_by_project)
        # no status filter
        req = fakes.HTTPRequest.blank('/v1/snapshots')
        resp = self.controller.index(req)
        self.assertEqual(len(resp['snapshots']), 3)
        # single match
        req = fakes.HTTPRequest.blank('/v1/snapshots?status=creating')
        resp = self.controller.index(req)
        self.assertEqual(len(resp['snapshots']), 1)
        self.assertEqual(resp['snapshots'][0]['status'], 'creating')
        # multiple match
        req = fakes.HTTPRequest.blank('/v1/snapshots?status=available')
        resp = self.controller.index(req)
        self.assertEqual(len(resp['snapshots']), 2)
        for snapshot in resp['snapshots']:
            self.assertEquals(snapshot['status'], 'available')
        # no match
        req = fakes.HTTPRequest.blank('/v1/snapshots?status=error')
        resp = self.controller.index(req)
        self.assertEqual(len(resp['snapshots']), 0)
    def test_snapshot_list_by_volume(self):
        def stub_snapshot_get_all_by_project(context, project_id):
            return [
                stubs.stub_snapshot(1, volume_id='vol1', status='creating'),
                stubs.stub_snapshot(2, volume_id='vol1', status='available'),
                stubs.stub_snapshot(3, volume_id='vol2', status='available'),
            ]
        self.stubs.Set(db, 'snapshot_get_all_by_project',
                       stub_snapshot_get_all_by_project)
        # single match
        req = fakes.HTTPRequest.blank('/v1/snapshots?volume_id=vol2')
        resp = self.controller.index(req)
        self.assertEqual(len(resp['snapshots']), 1)
        self.assertEqual(resp['snapshots'][0]['volume_id'], 'vol2')
        # multiple match
        req = fakes.HTTPRequest.blank('/v1/snapshots?volume_id=vol1')
        resp = self.controller.index(req)
        self.assertEqual(len(resp['snapshots']), 2)
        for snapshot in resp['snapshots']:
            self.assertEqual(snapshot['volume_id'], 'vol1')
        # multiple filters
        req = fakes.HTTPRequest.blank('/v1/snapshots?volume_id=vol1'
                                      '&status=available')
        resp = self.controller.index(req)
        self.assertEqual(len(resp['snapshots']), 1)
        self.assertEqual(resp['snapshots'][0]['volume_id'], 'vol1')
        self.assertEqual(resp['snapshots'][0]['status'], 'available')
    def test_snapshot_list_by_name(self):
        def stub_snapshot_get_all_by_project(context, project_id):
            return [
                stubs.stub_snapshot(1, display_name='backup1'),
                stubs.stub_snapshot(2, display_name='backup2'),
                stubs.stub_snapshot(3, display_name='backup3'),
            ]
        self.stubs.Set(db, 'snapshot_get_all_by_project',
                       stub_snapshot_get_all_by_project)
        # no display_name filter
        req = fakes.HTTPRequest.blank('/v1/snapshots')
        resp = self.controller.index(req)
        self.assertEqual(len(resp['snapshots']), 3)
        # filter by one name
        req = fakes.HTTPRequest.blank('/v1/snapshots?display_name=backup2')
        resp = self.controller.index(req)
        self.assertEqual(len(resp['snapshots']), 1)
        self.assertEquals(resp['snapshots'][0]['display_name'], 'backup2')
        # filter no match
        req = fakes.HTTPRequest.blank('/v1/snapshots?display_name=backup4')
        resp = self.controller.index(req)
        self.assertEqual(len(resp['snapshots']), 0)
    def test_admin_list_snapshots_limited_to_project(self):
        req = fakes.HTTPRequest.blank('/v1/fake/snapshots',
                                      use_admin_context=True)
        res = self.controller.index(req)
        self.assertTrue('snapshots' in res)
        self.assertEqual(1, len(res['snapshots']))
    def test_admin_list_snapshots_all_tenants(self):
        req = fakes.HTTPRequest.blank('/v1/fake/snapshots?all_tenants=1',
                                      use_admin_context=True)
        res = self.controller.index(req)
        self.assertTrue('snapshots' in res)
        self.assertEqual(3, len(res['snapshots']))
    def test_all_tenants_non_admin_gets_all_tenants(self):
        # non-admin requests ignore all_tenants and stay project-scoped
        req = fakes.HTTPRequest.blank('/v1/fake/snapshots?all_tenants=1')
        res = self.controller.index(req)
        self.assertTrue('snapshots' in res)
        self.assertEqual(1, len(res['snapshots']))
    def test_non_admin_get_by_project(self):
        req = fakes.HTTPRequest.blank('/v1/fake/snapshots')
        res = self.controller.index(req)
        self.assertTrue('snapshots' in res)
        self.assertEqual(1, len(res['snapshots']))
class SnapshotSerializerTest(test.TestCase):
    """Tests for the XML serialization of single snapshots and snapshot lists."""
    def _verify_snapshot(self, snap, tree):
        # every serialized attribute must round-trip as its str() form
        self.assertEqual(tree.tag, 'snapshot')
        for attr in ('id', 'status', 'size', 'created_at',
                     'display_name', 'display_description', 'volume_id'):
            self.assertEqual(str(snap[attr]), tree.get(attr))
    def test_snapshot_show_create_serializer(self):
        serializer = snapshots.SnapshotTemplate()
        raw_snapshot = dict(
            id='snap_id',
            status='snap_status',
            size=1024,
            created_at=datetime.datetime.now(),
            display_name='snap_name',
            display_description='snap_desc',
            volume_id='vol_id', )
        text = serializer.serialize(dict(snapshot=raw_snapshot))
        print text
        tree = etree.fromstring(text)
        self._verify_snapshot(raw_snapshot, tree)
    def test_snapshot_index_detail_serializer(self):
        serializer = snapshots.SnapshotsTemplate()
        raw_snapshots = [dict(id='snap1_id',
                              status='snap1_status',
                              size=1024,
                              created_at=datetime.datetime.now(),
                              display_name='snap1_name',
                              display_description='snap1_desc',
                              volume_id='vol1_id', ),
                         dict(id='snap2_id',
                              status='snap2_status',
                              size=1024,
                              created_at=datetime.datetime.now(),
                              display_name='snap2_name',
                              display_description='snap2_desc',
                              volume_id='vol2_id', )]
        text = serializer.serialize(dict(snapshots=raw_snapshots))
        print text
        tree = etree.fromstring(text)
        self.assertEqual('snapshots', tree.tag)
        self.assertEqual(len(raw_snapshots), len(tree))
        for idx, child in enumerate(tree):
            self._verify_snapshot(raw_snapshots[idx], child)
class SnapshotsUnprocessableEntityTestCase(test.TestCase):
    """
    Tests of places we throw 422 Unprocessable Entity from
    """
    def setUp(self):
        super(SnapshotsUnprocessableEntityTestCase, self).setUp()
        self.controller = snapshots.SnapshotsController()
    def _unprocessable_snapshot_create(self, body):
        # helper: POST the given body and expect HTTP 422
        req = fakes.HTTPRequest.blank('/v2/fake/snapshots')
        req.method = 'POST'
        self.assertRaises(webob.exc.HTTPUnprocessableEntity,
                          self.controller.create, req, body)
    def test_create_no_body(self):
        self._unprocessable_snapshot_create(body=None)
    def test_create_missing_snapshot(self):
        self._unprocessable_snapshot_create(body={'foo': {'a': 'b'}}) if False else None
        body = {'foo': {'a': 'b'}}
        self._unprocessable_snapshot_create(body=body)
    def test_create_malformed_entity(self):
        body = {'snapshot': 'string'}
        self._unprocessable_snapshot_create(body=body)
| citrix-openstack-build/cinder | cinder/tests/api/v1/test_snapshots.py | Python | apache-2.0 | 16,658 |
# -*- coding: utf-8 -*-
"""
Copyright (C) 2011 Dariusz Suchojad <dsuch at zato.io>
Licensed under LGPLv3, see LICENSE.txt for terms and conditions.
"""
from __future__ import absolute_import, division, print_function, unicode_literals
# pika
from pika.spec import FRAME_MAX_SIZE, PORT
# Django
from django import forms
# Zato
from zato.common.util import make_repr
class CreateForm(forms.Form):
    """Web-admin form for creating an AMQP connection definition."""
    name = forms.CharField(widget=forms.TextInput(attrs={'style':'width:100%'}))
    host = forms.CharField(widget=forms.TextInput(attrs={'style':'width:50%'}))
    # port and frame_max default to pika's standard AMQP values
    port = forms.CharField(initial=PORT, widget=forms.TextInput(attrs={'style':'width:20%'}))
    vhost = forms.CharField(initial='/', widget=forms.TextInput(attrs={'style':'width:50%'}))
    username = forms.CharField(widget=forms.TextInput(attrs={'style':'width:50%'}))
    frame_max = forms.CharField(initial=FRAME_MAX_SIZE, widget=forms.TextInput(attrs={'style':'width:20%'}))
    # 0 disables AMQP heartbeats
    heartbeat = forms.CharField(initial=0, widget=forms.TextInput(attrs={'style':'width:10%'}))
    def __repr__(self):
        # make_repr builds a debug representation from the form's attributes
        return make_repr(self)
class EditForm(CreateForm):
    """Edit form -- same fields and defaults as CreateForm."""
    pass
| alirizakeles/zato | code/zato-web-admin/src/zato/admin/web/forms/definition/amqp.py | Python | gpl-3.0 | 1,135 |
'''
Created on Jun 19, 2016
@author: riccardo
'''
import os
import sys
import glob
from click.testing import CliRunner
from gfzreport.cli import main as gfzreport_main
from gfzreport.sphinxbuild import get_logfilename as get_build_logfilename
from gfzreport.templates.utils import get_logfilename as get_template_logfilename
# from gfzreport.templates.network.__init__ import main as network_reportgen_main
# from gfzreport.sphinxbuild.__init__ import main as sphinxbuild_main, get_logfilename
import shutil
from contextlib import contextmanager
from obspy.core.inventory.inventory import read_inventory
from mock import patch, Mock
from _io import BytesIO
# from cStringIO import StringIO
from matplotlib.image import imread
from urllib2 import URLError
from shutil import rmtree
from gfzreport.templates.network.core import otherstations_df, geofonstations_df
# tests fix for #4 and #2
def test_otherstations_df():
    '''Tests for fix #2 and fix #4'''
    # just test that it works with no exception (before, AttributeError was
    # raised)
    _ = otherstations_df(geofonstations_df('5M', 2015), None)
# global paths defined once
# directory holding the test fixtures used by the mocked url reads
DATADIR = os.path.join(os.path.abspath(os.path.dirname(__file__)), "testdata")
# base CLI argument lists: "gfzreport template n ..." and "gfzreport build ..."
TEMPLATE_NETWORK = ["template", "n"]
BUILD = ['build']
@contextmanager
def invoke(*args):
    '''creates a click runner,
    converts args to parsed_args (basically, converting all paths argument to be relative to DATADIR)
    invokes with runner.invoke(parsed_args) in an isolated file system (i.e. in a
    tmp directory `current_dir`) that will be deleted (rmtree) at the end
    `current_dir` is where the output is written and does NOT need to be specified in *args.
    Called result the click result object, this function needs to be invoked in a with
    statement:
    ```
    with invoke(*args) as _:
        result, current_dir, parsed_args = _
        ... test your stuff
    ```
    '''
    argz = list(args)
    for i, a in enumerate(args):
        # rebase the values of the file options onto the test data directory
        if a == '-i' or a == '--inst_uptimes' or a == '-p' or a == '--noise_pdf':
            argz[i+1] = os.path.join(DATADIR, args[i+1])
    runner = CliRunner()
    with runner.isolated_filesystem():
        # the isolated tmp cwd becomes the template's output directory
        argz.extend(['-o', os.getcwd()])
        yield runner.invoke(gfzreport_main, TEMPLATE_NETWORK + argz, catch_exceptions=False), os.getcwd(), argz
@patch('gfzreport.templates.network.core.iterdcurl', side_effect=lambda *a, **v: _getdatacenters(*a, **v))
@patch('gfzreport.templates.network.core.utils.urllib2.urlopen')
def test_netgen_configonly_flag(mock_urlopen, mock_get_dcs):
    """Generate a report, delete/modify parts of it, then re-run with -c
    (config-only) and check that only the config files are regenerated."""
    # set args, with wildcards
    # mock urllib returns our testdata files
    setupurlread(mock_urlopen)
    # set args:
    # NOTE(review): '-s' '2014' is implicit string concatenation yielding
    # '-s2014' (click still parses the attached short-option value) --
    # TODO confirm this is intended and not a missing comma.
    args = ['-n', 'ZE', '-s' '2014', "--noprompt", "-i", "inst_uptimes/*", "-p", "noise_pdf/*"]
    with invoke(*args) as _:
        result, outpath, args = _
        outpath_ = os.path.join(outpath, "ZE_2014")
        conf_files_dir = os.path.join(outpath_, 'conf_files')
        data_dir = os.path.join(outpath_, 'data')
        confiles_subdirs = sorted(os.listdir(conf_files_dir))
        logfile = os.path.join(outpath_, get_template_logfilename())
        assert os.path.isfile(logfile)
        #assert logfile content has something:
        with open(logfile) as opn:
            logcontent = opn.read()
        assert len(logcontent) > 0
        # store modification time
        logmtime = os.stat(logfile).st_mtime
        shutil.rmtree(conf_files_dir)
        shutil.rmtree(data_dir)
        assert not os.path.isdir(conf_files_dir)
        assert not os.path.isdir(data_dir)
        confile_path = os.path.join(outpath_, 'conf.py')
        # modify conf.py and assert later that we overwrote it:
        with open(confile_path) as opn_:
            text = "dummyvar='ert'" + opn_.read()
        with open(confile_path, 'w') as opn_:
            opn_.write(text)
        with open(confile_path) as opn_:
            assert "dummyvar='ert'" in opn_.read()
        rstreport_path = os.path.join(outpath_, 'report.rst')
        # modify report.rst and assert later that we did NOT overwrite it:
        with open(rstreport_path) as opn_:
            text = opn_.read() + "\na simple text__"
        with open(rstreport_path, 'w') as opn_:
            opn_.write(text)
        with open(rstreport_path) as opn_:
            assert "\na simple text__" in opn_.read()
        # needs to run it inside the with statement otherwise the dest dir is removed
        runner = CliRunner()
        args += ['-c']
        result = runner.invoke(gfzreport_main, TEMPLATE_NETWORK + args, catch_exceptions=False)
        confiles_subdirs2 = sorted(os.listdir(conf_files_dir))
        # conf_files_dir was deleted, assert it has again the proper subdirs:
        assert confiles_subdirs == confiles_subdirs2
        # confile_path was modified, assert it has been overwritten:
        with open(confile_path) as opn_:
            assert "dummyvar='ert'" not in opn_.read()
        # rstreport_path was modified, assert it was NOT overwritten:
        with open(rstreport_path) as opn_:
            assert "\na simple text__" in opn_.read()
        # data dir has not been newly created in update config mode:
        assert not os.path.isdir(data_dir)
        # assert we did not modify logfile in -c mode:
        assert logmtime == os.stat(logfile).st_mtime
def _getdatacenters(*a, **v):
    """Yield the datacenter urls as the eida routing service would return
    them, read from the datacenters.txt fixture in the testdata folder."""
    fixture = os.path.join(DATADIR, "datacenters.txt")
    with open(fixture, 'rb') as fh:
        content = fh.read()
    for line in content.splitlines():
        # keep only lines that are http urls
        if line[:7] == 'http://':
            yield line
def setupurlread(mock_urlopen, geofon_retval=None, others_retval=None, doicit_retval=None):
    '''sets up urlopen.urlread mock
    :param geofon_retval: the string returned from urlopen.read when querying geofon network
        If None, defaults to "ZE.network.xml" content (file defined in testdata dir)
        If Exception, then it will be raised
    :param others_retval: the string returned from urlopen.read when querying NON geofon stations
        within the geofon network boundaries
        If None, defaults to "other_stations.xml" content (file defined in testdata dir)
        If Exception, then it will be raised
    :param doicit_retval: the string returne from urlopen.read when querying for a DOI citation
        defaults is a string formatted as a doi citation according to the input doi url
        If Exception, then it will be raised
    '''
    # NOTE(review): fixture files are opened in text mode and wrapped in
    # BytesIO (fine on Python 2 str; would need 'rb' on Python 3 -- confirm).
    def sideeffect(url_, timeout=None):
        # accept both a urllib2.Request object and a plain url string
        try:
            url = url_.get_full_url()
        except AttributeError:
            url = url_
        if "geofon" in url:
            if isinstance(geofon_retval, Exception):
                raise geofon_retval  # pylint: disable=raising-bad-type
            if geofon_retval is None:
                with open(os.path.join(DATADIR, "ZE.network.xml")) as opn:
                    return BytesIO(opn.read())
            else:
                return BytesIO(geofon_retval)
        elif 'doi.org' in url:
            if isinstance(doicit_retval, Exception):
                raise doicit_retval  # pylint: disable=raising-bad-type
            if doicit_retval is None:
                # synthesize a doi citation string from the requested url
                return BytesIO("Marc Smith (2002): A Paper. %s" % url.encode('utf8'))
            return BytesIO(doicit_retval)
        else:
            # any other url: non-geofon stations within the network bounds
            if isinstance(others_retval, Exception):
                raise others_retval  # pylint: disable=raising-bad-type
            if others_retval is None:
                with open(os.path.join(DATADIR, "other_stations.xml")) as opn:
                    return BytesIO(opn.read())
            else:
                return BytesIO(others_retval)
    mock_urlopen.side_effect = sideeffect
@patch('gfzreport.templates.network.core.iterdcurl', side_effect=lambda *a, **v: _getdatacenters(*a, **v))
@patch('gfzreport.templates.network.core.utils.urllib2.urlopen')
def test_netgen_ok_sphinxbuild_err(mock_urlopen, mock_get_dcs):
    """Generate a network report from mocked FDSN responses, then run every
    sphinx build flavour: html must succeed (it uses no arcgis images),
    latex/pdf must fail while the mocked urlopen returns non-image data,
    and succeed once urlopen raises (the map falls back to coastal lines).
    Finally check that :errorsastext: no makes the pdf build fail."""
    # mock urllib returns our testdata files
    setupurlread(mock_urlopen)
    args = ['-n', 'ZE', '-s', '2014', "--noprompt", "-i", "inst_uptimes/*", "-p", "noise_pdf/sta1*"]
    with invoke(*args) as _:
        result, outpath, args = _
        assert result.exit_code == 0
        assert "ZE_2014" in os.listdir(outpath)
        with open(os.path.join(outpath, "ZE_2014", "report.rst")) as opn:
            rst = opn.read()
        # assert we have the network stations (just few):
        assert 'MS02 -23.34335 43.89449 s #980000' in rst
        assert 'AM01 -21.07725 48.23924 s #FF0000 L4-3D' in rst
        # assert we have the non-network stations:
        # Note that testing if we filtered out some stations baseed on the network boundary box
        # is hard as we should run a real test
        assert 'OUT_RANGE_LON_85 -22.47355 85.0 o #FFFFFF' in rst
        assert 'IN_RANGE -22.47355 45.56681 o #FFFFFF' in rst
        assert 'OUT_RANGE_LON_75 -22.47355 75.0 o #FFFFFF' in rst
        assert 'OUT_RANGE_LON_85 -22.47355 85.0 o #FFFFFF' in rst
        # assert we do have default margins:
        m = 0.5  # default margin when missing
        assert (':map_mapmargins: %sdeg, %sdeg, %sdeg, %sdeg') % (m, m, m, m) in rst
        # assert we copied the right files. For noise_pdf, everything except sta2.*
        assert all(not 'sta2' in filename for filename in
                   os.listdir(os.path.join(outpath, 'ZE_2014', 'data', 'noise_pdf')))
        # for inst_uptimes, everything (2 files):
        assert sorted(os.listdir(os.path.join(outpath, 'ZE_2014', 'data', 'inst_uptimes'))) == \
            ['ok1.png', 'ok2.jpg', 'x1.png', 'x2.png']
        # Now try to run sphinx build:
        # NOW, IMPORTANT: for some weird reason when querying for the map (arcgisimage)
        # the urllib called from within arcgisimage is our mock (??!!)
        # Our mock returns inventory objects, so
        # this will cause to return an inventory object instead of an image,
        # so we should get an error from every build EXCEPT FOR html,
        # as html does NOT USE arcgis images!!
        # while the dir is already open, test that we cannot override it:
        runner = CliRunner()
        result = runner.invoke(gfzreport_main, TEMPLATE_NETWORK + args, catch_exceptions=False)
        assert result.exit_code == 1
        assert " already exists" in result.output
        runner = CliRunner()
        # args_ = [os.path.join(outpath, "ZE_2014"),
        #          os.path.join(outpath, "build"), "-b", ""]
        for buildtype, expected_ext in [("", '.tex'), ("latex", '.tex'), ("pdf", '.pdf'),
                                        ("html", '.html')]:
            # empty/latex/pdf all build into the 'latex' out dir
            btype = 'latex' if buildtype != 'html' else buildtype
            outdir = os.path.join(os.path.join(outpath, "build"), btype)
            if buildtype in ('latex', 'pdf'):
                # previous iterations already created the latex dir
                assert os.path.isdir(outdir)
            else:
                assert not os.path.isdir(outdir)
            indir = os.path.join(outpath, "ZE_2014")
            args_ = [indir, outdir]
            if buildtype:
                args_.extend(['-b', buildtype])
            result = runner.invoke(gfzreport_main, BUILD + args_, catch_exceptions=False)
            if '.html' == expected_ext:
                # html does not use arcgis images, so no error should be raised:
                assert os.path.isfile(os.path.join(outdir, 'report%s' % expected_ext))
                assert result.exit_code == 0
            else:
                # in the other cases no report:
                assert not os.path.isfile(os.path.join(outdir, 'report%s' % expected_ext))
                with open(os.path.join(outdir, get_build_logfilename())) as fopen:
                    logcontent = fopen.read()
                assert "ValueError: invalid PNG header" in logcontent
                assert result.exit_code == 2
        # Now re-set our mock library to return an exception (the mock_url
        # is intended to distinguish if 'geofon' is in the url or not, provide
        # an exception for both cases to be sure)
        # Our map module will handle silently the error by returning a map
        # with coastal lines drawn
        setupurlread(mock_urlopen, URLError('wat?'), URLError('wat?'))
        # and re-run:
        runner = CliRunner()
        # Set expected ret values as list, although the value is just one, for the cases
        # if we have more than one ret_val possible (some bug when running py.test from
        # the terminal)
        for buildtype, expected_ext, exp_exitcode in [("", '.tex', [0]),
                                                      ("latex", '.tex', [0]),
                                                      ("pdf", '.pdf', [0]),
                                                      ("html", '.html', [0]),
                                                      ]:
            btype = 'latex' if buildtype != 'html' else buildtype
            outdir = os.path.join(os.path.join(outpath, "build"), btype)
            assert os.path.isdir(outdir)  # we already created it above
            indir = os.path.join(outpath, "ZE_2014")
            args_ = [indir, outdir]
            if buildtype:
                args_.extend(['-b', buildtype])
            result = runner.invoke(gfzreport_main, BUILD + args_, catch_exceptions=False)
            assert os.path.isdir(outdir)
            assert os.path.isfile(os.path.join(outdir, 'report%s' % expected_ext))
            # assert "ValueError: invalid PNG header" in result.output
            # if result.exit_code == 2:
            #     with open(os.path.join(outdir, get_logfilename())) as fopen:
            #         logcontent = fopen.read()
            #     print("\n\n\nWTF")
            #     print(logcontent)
            #     print("\n\nWTF")
            assert result.exit_code in exp_exitcode
            if buildtype == 'pdf':
                # if we are running pdf, test a particular thing:
                # replace ":errorsastext: yes" in gridfigure directive with ":errorsastext: no"
                # what does it means? that for grid figures we create images also on errors
                # (file not found). Now, the current grid figure for the current network and
                # start_after
                # has a lot of stations, thus a lot of pdfs images. Pdflatex breaks
                # and does not create the pdf if there are more than 100 includegraphics errors
                # (the 100 is hard-coded in latex and cannot be changed)
                # Test this case
                reporttext = os.path.join(indir, 'report.rst')
                with open(reporttext, 'r') as opn_:
                    content = opn_.read()
                content = content.replace(":errorsastext: yes", ":errorsastext: no")
                with open(reporttext, 'w') as opn_:
                    opn_.write(content)
                rmtree(outdir)
                result = runner.invoke(gfzreport_main, BUILD + args_, catch_exceptions=False)
                assert result.exit_code == 2
                assert not os.path.isfile(os.path.join(outdir, 'report%s' % expected_ext))
    # check if we deleted the temp dir:
    assert not os.path.isdir(outpath)
@patch('gfzreport.templates.network.core.iterdcurl', side_effect=lambda *a, **v: _getdatacenters(*a, **v))
@patch('gfzreport.templates.network.core.utils.urllib2.urlopen')
def test_netgen_ok_sphinxbuild_ok(mock_urlopen, mock_get_dcs):
    """Generate a network report with only the ok* fixtures copied, then
    run every sphinx build flavour with urlopen raising URLError: all
    builds must succeed (the map silently falls back to coastal lines)."""
    # mock urllib returns our testdata files
    setupurlread(mock_urlopen)
    args = ['-n', 'ZE', '-s', '2014', "--noprompt", "-i", "inst_uptimes/ok*", "-p",
            "noise_pdf/ok*.png"]
    with invoke(*args) as _:
        result, outpath, args = _
        assert result.exit_code == 0
        assert "ZE_2014" in os.listdir(outpath)
        with open(os.path.join(outpath, "ZE_2014", "report.rst")) as opn:
            rst = opn.read()
        # assert we copied the right files. For noise_pdf, everything except sta2.*
        assert sorted(os.listdir(os.path.join(outpath, 'ZE_2014', 'data', 'noise_pdf'))) \
            == ['ok1_HHE.png', 'ok1_HHN.png', 'ok1_HHZ.png']
        # for inst_uptimes, everything (2 files):
        assert sorted(os.listdir(os.path.join(outpath, 'ZE_2014', 'data', 'inst_uptimes'))) \
            == ['ok1.png', 'ok2.jpg']
        # assert "Aborted: No files copied" in result.output
        # assert result.exit_code != 0
        # Now try to run sphinx build:
        # Now re-set our mock library to return an exception (the mock_url
        # is intended to distinguish if 'geofon' is in the url or not, provide
        # an exception for both cases to be sure)
        # Our map module will handle silently the error by returning a map
        # with coastal lines drawn
        setupurlread(mock_urlopen, URLError('wat?'), URLError('wat?'))
        # while the dir is already open, test that we cannot override it:
        runner = CliRunner()
        result = runner.invoke(gfzreport_main, TEMPLATE_NETWORK + args, catch_exceptions=False)
        assert result.exit_code == 1
        assert " already exists" in result.output
        runner = CliRunner()
        #with runner.isolated_filesystem():
        args_ = [os.path.join(outpath, "ZE_2014"),
                 os.path.join(outpath, "build"), "-b", ""]
        # NOTE(review): args_ is rebound to [indir, outdir] inside the loop,
        # so from the second iteration on args_[1] is the PREVIOUS outdir and
        # the new outdir nests inside it (build/x/y/...). The asserts still
        # hold, but this looks unintended -- compare the equivalent loop in
        # test_netgen_ok_sphinxbuild_err. Also, dict iteration order is
        # arbitrary here, and iteritems is Python 2 only.
        for buildtype, expected_ext in {"": '.tex', "latex": '.tex', "pdf": '.pdf',
                                        "html": '.html'}.iteritems():
            btype = 'latex' if not buildtype else buildtype
            outdir = os.path.join(args_[1], btype)
            indir = os.path.join(outpath, "ZE_2014")
            args_ = [indir, outdir]
            if buildtype:
                args_.extend(['-b', buildtype])
            if buildtype != 'latex':
                # if buildtype is latex, we already executed a build with no buyild arg
                # which defaults to latex, so the dir exists
                assert not os.path.isdir(outdir)
            result = runner.invoke(gfzreport_main, BUILD + args_, catch_exceptions=False)
            assert os.path.isdir(outdir)
            assert os.path.isfile(os.path.join(outdir, 'report%s' % expected_ext))
    # check if we deleted the temp dir:
    assert not os.path.isdir(outpath)
@patch('gfzreport.templates.network.core.iterdcurl', side_effect=lambda *a, **v: _getdatacenters(*a, **v))
@patch('gfzreport.templates.network.core.utils.urllib2.urlopen')
def test_netgen_errors(mock_urlopen, mock_get_dcs):
    """Edge cases and error paths of the network-template generation:
    --help, empty/raising FDSN responses, directory vs. single-file -i/-p
    values, repeated -p flags, the -m (move) option and -a (margins)."""
    # test that help works (without raising)
    args = ["--help"]
    with invoke(*args) as _:
        result, outpath, args = _
        assert result.exit_code == 0
        assert os.listdir(outpath) == []
    setupurlread(mock_urlopen)
    # first test some edge cases, e.g. responses are empty:
    setupurlread(mock_urlopen, '')
    args = ['-n', 'ZE', '-s', '2014', "--noprompt", "-i", "inst_uptimes/*", "-p", "noise_pdf/sta1*"]
    with invoke(*args) as _:
        result, outpath, args = _
        assert result.exit_code == 1
        assert 'error while fetching network stations' in result.output
    # first test some edge cases, e.g. responses for other stations_df (for the map) are empty:
    setupurlread(mock_urlopen, None, '')
    args = ['-n', 'ZE', '-s', '2014', "--noprompt", "-i", "inst_uptimes/*", "-p", "noise_pdf/sta1*"]
    with invoke(*args) as _:
        result, outpath, args = _
        assert result.exit_code == 0
        # FIXME: we print all errors for all stations, should we? or just an error at the end?
        assert 'Warning: error fetching inventory' in result.output
    # FIXME: we should test a case where we issue an error when retrieving other stations,
    # as before it was:
    # setupurlread(mock_urlopen, None, '')
    # with invoke(*args) as _:
    #     result, outpath, args = _
    #     assert result.exit_code == 1
    #     # FIXME: we print all errors for all stations, should we? or just an error at the end?
    #     assert 'error while fetching other stations within network stations boundaries' in result.output
    # responses raise:
    setupurlread(mock_urlopen, Exception('wat?'))
    args = ['-n', 'ZE', '-s', '2014', "--noprompt", "-i", "inst_uptimes/*", "-p", "noise_pdf/sta1*"]
    with invoke(*args) as _:
        result, outpath, args = _
        assert result.exit_code == 1
        assert 'error while fetching network stations' in result.output
    # set args,one a directory, the other one file
    # mock urllib returns our testdata files
    setupurlread(mock_urlopen)
    args = ['-n', 'ZE', '-s', '2014', "--noprompt", "-i", "inst_uptimes", "-p", "noise_pdf/sta1_HHE.png"]
    with invoke(*args) as _:
        result, outpath, args = _
        # assert we copied the right files. For noise_pdf, everything except sta2.*
        assert [filename for filename in
                os.listdir(os.path.join(outpath, 'ZE_2014', 'data', 'noise_pdf'))] == ['sta1_HHE.png']
        # for inst_uptimes, everything (2 files):
        assert sorted(os.listdir(os.path.join(outpath, 'ZE_2014', 'data', 'inst_uptimes'))) == \
            ['ok1.png', 'ok2.jpg', 'x1.png', 'x2.png']
    # The same as above but with more than one arg:
    setupurlread(mock_urlopen)
    args = ['-n', 'ZE', '-s', '2014', "--noprompt", "-i", "inst_uptimes", "-p", "noise_pdf/sta1_HHE.png",
            "-p", "noise_pdf/sta2*.png"]
    with invoke(*args) as _:
        result, outpath, args = _
        # assert we copied the right files. For noise_pdf, everything except sta2.*
        assert sorted([filename for filename in
                       os.listdir(os.path.join(outpath, 'ZE_2014', 'data', 'noise_pdf'))]) == \
            ['sta1_HHE.png', 'sta2_HHE.png']
        # for inst_uptimes, everything (2 files):
        assert sorted(os.listdir(os.path.join(outpath, 'ZE_2014', 'data', 'inst_uptimes'))) == \
            ['ok1.png', 'ok2.jpg', 'x1.png', 'x2.png']
    # assert "Aborted: No files copied" in result.output
    # assert result.exit_code != 0
    # test the mv option:
    # create two temp files:
    tmpfiles = [os.path.join(DATADIR, "noise_pdf", "xxx.png"),
                os.path.join(DATADIR, "inst_uptimes", "XXX.png")]
    for file_ in tmpfiles:
        open(file_, 'a').close()
    try:
        args = ['-n', 'ZE', '-s', '2014', "--noprompt", "-i", "inst_uptimes/XXX*", "-p", "noise_pdf/xxx*", "-m"]
        with invoke(*args) as _:
            result, outpath, args = _
            assert not os.path.isfile(os.path.join(DATADIR, "noise_pdf", "xxx.png"))
            # NOTE(review): the file created above is "XXX.png" but this
            # checks lowercase "xxx.png": on a case-sensitive filesystem the
            # assertion is vacuously true -- confirm intent.
            assert not os.path.isfile(os.path.join(DATADIR, "inst_uptimes", "xxx.png"))
            assert os.listdir(os.path.join(outpath, 'ZE_2014', 'data', 'noise_pdf')) == ['xxx.png']
            assert os.listdir(os.path.join(outpath, 'ZE_2014', 'data', 'inst_uptimes')) == ['XXX.png']
    except Exception as exc:
        # best-effort cleanup of the temp files before re-raising
        for file_ in tmpfiles:
            try:
                if os.path.isfile(file_):
                    os.remove(file_)
            except:
                pass
        # NOTE(review): 'raise exc' loses the original traceback on Python 2;
        # a bare 'raise' would preserve it.
        raise exc
    #
    # setupurlread(mock_urlopen)
    # args = ['ZE', '2014', "--noprompt", "-i", "inst_uptimes/xxx*", "-n", "noise_pdf/xxx*"]
    # with invoke(*args) as _:
    #     result, outpath, args = _
    #     assert result.exit_code == 0
    # now change margins. Expected margins are top, right, bottom, left
    # (see parse_margins in gfzreport.sphinxbuild.core.map.__init__.py):
    # keys of the dict (given margins as command line) are:
    #
    # "top,left,bottom,right (4 values), "
    # "top,left_and_right, bottom (3 values), "
    # "top_and_bottom, left_and_right (2 values), "
    # "or a single value that will be applied to all directions. "
    # (see main in network package)
    for margins, expected_rst_val in {'0.125': ':map_mapmargins: 0.125deg, 0.125deg, 0.125deg, 0.125deg',
                                      '0.3 0.4 0.1': ':map_mapmargins: 0.3deg, 0.4deg, 0.1deg, 0.4deg',
                                      '0.3 0.4': ':map_mapmargins: 0.3deg, 0.4deg, 0.3deg, 0.4deg'}.iteritems():
        args = ['-n', 'ZE', '-s', '2014', "--noprompt", "-i", "inst_uptimes/*", "-p", "noise_pdf/sta1*",
                '-a', margins]
        with invoke(*args) as _:
            result, outpath, args = _
            assert result.exit_code == 0
            with open(os.path.join(outpath, "ZE_2014", "report.rst")) as opn:
                rst = opn.read()
            # assert the expected margins were written to the rst:
            assert expected_rst_val in rst
| rizac/gfzreport | tests/test_templates_network.py | Python | gpl-3.0 | 24,752 |
from sqlalchemy import Column, Integer, String, Sequence, ForeignKey, Enum, Float
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import relationship
from . import Base
from .utils import ModelMixin
class Source(Base, ModelMixin):
    """Provenance of imported records (an external database/dataset);
    referenced by Species, Compound, Target and Activity."""
    __tablename__ = 'source'
    # presumably consumed by ModelMixin's repr -- confirm
    __repr_props__ = ['id', 'name']
    # internal id
    id = Column(Integer, Sequence('source_id_seq'), primary_key=True, unique=True, nullable=False)
    # describe source: short unique name plus free-text description
    name = Column(String(50), unique=True, nullable=False)
    description = Column(String(250))
class Species(Base, ModelMixin):
    """An organism that targets belong to, keyed by an external integer id."""
    __tablename__ = 'species'
    __repr_props__ = ['id', 'external_id', 'name']
    # internal id
    id = Column(Integer, Sequence('species_id_seq'), primary_key=True, unique=True, nullable=False)
    # record origin: external id plus the Source it was imported from
    external_id = Column(Integer, unique=True, nullable=False, index=True)
    source_id = Column(Integer, ForeignKey('source.id'), nullable=False)
    source = relationship('Source')
    # species name (explicitly non-unique)
    name = Column(String(150), unique=False, nullable=False)
class Compound(Base, ModelMixin):
    """A chemical compound, identified externally and carrying its SMILES."""
    __tablename__ = 'compound'
    __repr_props__ = ['id', 'external_id']
    # internal id
    id = Column(Integer, Sequence('compound_id_seq'), primary_key=True, unique=True, nullable=False)
    # record origin: external id plus the Source it was imported from
    external_id = Column(String, unique=True, nullable=False, index=True)
    source_id = Column(Integer, ForeignKey('source.id'), nullable=False)
    source = relationship('Source')
    # chemical structure as a SMILES string
    smiles = Column(String(750), nullable=False)
class Target(Base, ModelMixin):
    """A biological target, tied to a Species and an import Source."""
    __tablename__ = 'target'
    __repr_props__ = ['id', 'external_id']
    # internal id
    id = Column(Integer, Sequence('target_id_seq'), primary_key=True, unique=True, nullable=False)
    # record origin: external id plus the Source it was imported from
    external_id = Column(String, unique=True, nullable=False, index=True)
    source_id = Column(Integer, ForeignKey('source.id'), nullable=False)
    source = relationship('Source')
    # define species (one-to-many: Species.targets backref)
    species_id = Column(Integer, ForeignKey('species.id'), nullable=False)
    species = relationship('Species', backref='targets')
    # define target sequence (unbounded text; presumably an amino-acid
    # sequence -- confirm)
    sequence = Column(String)
# database-level enumerations used by the Activity columns below
ASSAYS = Enum('ADMET', 'Binding', 'Functional', 'Property', 'Unassigned',
              name='assay_type')
ACTIVITIES = Enum('Kd', 'AC50', 'Potency', 'XC50', 'IC50', 'Ki', 'EC50',
                  name='activity_type')
RELATIONS = Enum('=', '>', '<', '<=', '>=', name='relation')
class Activity(Base, ModelMixin):
    """A single measured activity value linking a Compound to a Target
    (e.g. IC50 <= 10.0 from a Binding assay)."""
    __tablename__ = 'activity'
    # NOTE(review): __repr_props__ looks redundant here since __repr__ is
    # overridden below -- confirm against ModelMixin.
    __repr_props__ = ['id', 'compound', 'relation', 'value']
    # internal id
    id = Column(Integer, Sequence('activity_id_seq'), primary_key=True, unique=True, nullable=False)
    # record origin (external_id is NOT unique: several measurements may
    # share one external record)
    external_id = Column(String, nullable=False)
    source_id = Column(Integer, ForeignKey('source.id'), nullable=False)
    source = relationship('Source')  # many to one, no map back
    # define the activity: qualifier (=, <, ...), value, assay/activity types
    relation = Column(RELATIONS, nullable=False)
    value = Column(Float, nullable=False)
    assay_type = Column(ASSAYS, nullable=False)
    activity_type = Column(ACTIVITIES, nullable=False)
    confidence_score = Column(Integer, index=True)
    # Link to target (Target.activities backref)
    target_id = Column(Integer, ForeignKey('target.id'), nullable=False)
    target = relationship('Target', backref='activities')
    # Link to compound (Compound.activities backref)
    compound_id = Column(Integer, ForeignKey('compound.id'), nullable=False)
    compound = relationship('Compound', backref='activities')

    def __repr__(self):
        # includes the related compound/target external ids, which may
        # trigger lazy loads on detached instances
        return '<Activity(id=\'{id}\' compound=\'{compound}\' '\
               'target=\'{target}\' relation=\'{relation}{value}\')>'\
               .format(id=self.id,
                       relation=self.relation,
                       target=self.target.external_id,
                       compound=self.compound.external_id,
                       value=self.value)
| richlewis42/qsardb | qsardb/models/models.py | Python | mit | 3,946 |
# setup.py
# Build script for the 'ptexample' C extension; the extra include dirs are
# there so the compiler can find the sample module's header (see inline note).
# NOTE(review): distutils is deprecated (removed in Python 3.12); setuptools
# offers drop-in setup()/Extension replacements -- consider migrating.
from distutils.core import setup, Extension

setup(name="ptexample",
      ext_modules=[
        Extension("ptexample",
                  ["ptexample.c"],
                  include_dirs = ['..','.'],  # May need pysample.h directory
                  )
        ]
      )
| tuanavu/python-cookbook-3rd | src/15/defining_and_exporting_c_apis_from_extension_modules/ptsetup.py | Python | mit | 277 |
# Django (pre-3.2) app-config hook: use AURConfig as this app's AppConfig.
default_app_config = 'django_aur.apps.AURConfig'
| eyolfson/site-eyl | django_aur/__init__.py | Python | gpl-3.0 | 49 |
# For arbitrary-precision (bignum) arithmetic, Python is still the best fit.
from math import sqrt
from random import randint

# i_sqrt() dispatch thresholds: below 2**62 a single float sqrt plus a +/-1
# correction suffices; up to 2**120 one Newton refinement is added; beyond
# that a full integer Newton iteration is used. ('medim' sic -- the name is
# referenced elsewhere, so it is kept.)
isqrt_threshold_small=2**62
isqrt_threshold_medim=2**120
def i_sqrt_small(n):
    """Integer square root for small n (intended for n <= 2**62):
    float sqrt estimate corrected by at most one in either direction."""
    root = int(sqrt(float(n)))
    low = root * root
    high = low + 2 * root + 1  # == (root + 1) ** 2
    if low > n:
        return root - 1
    if high <= n:
        return root + 1
    return root
def i_sqrt_medium(n):
    """Integer square root for medium n (intended for n <= 2**120).

    One Newton refinement of the float estimate, then a +/-1 correction.
    Fix: use floor division (//) so the Newton step stays exact integer
    arithmetic on Python 3 (true division would return a lossy float for
    n >> 2**53); identical to the old integer '/' under Python 2.
    """
    z = int(sqrt(float(n)))
    z = (z + n // z) >> 1  # Newton step: z <- (z + n/z) / 2
    q = z * z
    qa = q + z + z + 1  # == (z + 1) ** 2
    if q > n:
        return z - 1
    elif qa <= n:
        return z + 1
    else:
        return z
def i_sqrt_large(n):
    """Integer square root for very large n (intended for n > 2**120,
    dispatched from i_sqrt).

    Builds an initial guess from the float sqrt of the top ~50 bits, then
    runs an integer Newton iteration until the bracket [zx, nv] is at most
    2 wide, and finally checks the three remaining candidates exactly.

    Fixes: the C-style '--bl' was a double negation (a no-op in Python) --
    it now actually decrements bl so the shift amount is even, as intended;
    floor division (//) replaces '/' (Python-3 safe, identical on Python 2).
    """
    bl = n.bit_length() - 50
    if bl & 1:
        bl -= 1  # make the shift even so (bl >> 1) halves it exactly
    zx = int(sqrt(float(n >> bl))) << (bl >> 1)  # initial guess
    nv = n // zx
    while True:
        zx = (zx + nv) >> 1  # Newton step
        nv = n // zx
        if nv < zx:
            nv, zx = zx, nv  # keep zx <= nv
        if nv - zx <= 2:
            break
    # the true root is in {zx, zx+1, zx+2}; test the three candidates
    zx = zx + 1
    t = zx * zx
    t2 = t + 1 + ((zx + 1) << 1)  # == (zx + 1) ** 2
    if t2 <= n:
        return zx + 1
    if t <= n:
        return zx
    return zx - 1
def i_sqrt(n):
    """Integer square root of n, dispatching on magnitude to the
    small/medium/large implementations."""
    if n > isqrt_threshold_medim:
        return i_sqrt_large(n)
    if n > isqrt_threshold_small:
        return i_sqrt_medium(n)
    return i_sqrt_small(n)
"""
# Test integer sqrt routines
a=map(eval,raw_input().split(" "))
for i in a:
print i,i_sqrt(i)
# print a
"""
def BinaryDecompose(Gp):
    """Return (d, t) such that Gp == d << t with d odd (Gp > 0)."""
    lowest_bit = Gp & -Gp
    t = lowest_bit.bit_length() - 1
    return (Gp >> t, t)
def Miller(a, n):
    """One strong-probable-prime (Miller-Rabin) round for odd n with base a.

    Returns 1 if n is a strong probable prime to base a, 0 if a witnesses
    that n is composite.

    Fix: xrange -> range (Python-3 safe; Gt is tiny so the Python 2 list
    is harmless).
    """
    Gp, Gt = BinaryDecompose(n - 1)  # n - 1 == Gp * 2**Gt with Gp odd
    a = pow(a, Gp, n)
    if a == 1 or a == n - 1:
        return 1
    # square up to Gt-1 more times: a == n-1 passes, a == 1 fails
    for _ in range(Gt - 1):
        a = a * a % n
        if a == 1:
            return 0
        if a == n - 1:
            return 1
    if a != n - 1:
        return 0
    return 1
def Miller_Rabin_BPSW(n):
    """Quick probable-prime check: handle 2 and even numbers directly,
    then a single base-2 Miller round."""
    if n == 2:
        return 1
    if not n & 1:
        return 0
    return Miller(2, n)
def Linear_Sieve(n):
    """Linear (Euler) sieve up to n.

    Returns (a, pr): a[i] == 0 iff i is prime (a[0] and a[1] are flagged),
    pr is the sorted list of primes <= n.

    Fixes: n == 0 no longer raises IndexError on a[1]; xrange -> range
    (Python-3 safe); flat list multiplication instead of a comprehension.
    """
    a = [0] * (n + 1)
    pr = []
    a[0] = 1
    if n >= 1:
        a[1] = 1
    for i in range(2, n + 1):
        if not a[i]:
            pr.append(i)
        for j in pr:
            if i * j > n:
                break
            a[i * j] = 1
            if i % j == 0:
                break  # keep the sieve linear: each composite crossed once
    return (a, pr)
# module-level cache: composite flags and primes up to 10**4, used for the
# small-number fast paths in Miller_Rabin and BPSW
sim_prr,sim_primes=Linear_Sieve(10000)
def Miller_Rabin(n, _base=(2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59, 61)):
    """Miller-Rabin primality test: sieve lookup for small n, trial division
    by a few small primes, then one Miller round per base in _base.

    Fixes: xrange -> range (Python-3 safe); the default base list is now an
    immutable tuple (mutable-default-argument trap); callers passing their
    own iterable are unaffected.
    """
    if n < len(sim_prr):
        return not sim_prr[n]
    # quick trial division by the first few sieved primes
    for i in range(min(len(sim_primes), 20)):
        if not (n % sim_primes[i]):
            return False
    for b in _base:
        if not Miller(b, n):
            return False
    return True
def Range_Sieve(L, R):
    """Segmented sieve over the interval [L, R] (assumes 2 <= L <= R).

    Returns siv of length R-L+1 with siv[k] == 1 iff L+k is composite.

    Fixes: 'len' no longer shadows the builtin; xrange -> range (Python-3
    safe); marking now starts at max(p*p, first multiple of p >= L), so a
    prime p <= sqrt(R) lying inside [L, R] is no longer wrongly flagged as
    composite (behavior is unchanged whenever L > sqrt(R)).
    """
    limit = i_sqrt_small(R)
    _, pr = Linear_Sieve(limit)
    size = R - L + 1
    siv = [0] * size
    for p in pr:
        # (-L) % p gives the offset to the first multiple of p that is >= L
        start = max(p * p, L + (-L) % p)
        for j in range(start, R + 1, p):
            siv[j - L] = 1
    return siv
def Sieve_To_Prime(siv, off):
    """Convert a 0/1 sieve (0 == prime) whose first entry represents the
    value `off` into the list of primes it encodes."""
    return [off + idx for idx, flag in enumerate(siv) if not flag]
def Trial_Division(n, prs=()):
    """Factor n by trial division with the primes in prs.

    Returns a list of (prime, exponent) pairs. If prs is empty, primes up
    to sqrt(n) are generated with Linear_Sieve. Any cofactor left after
    exhausting prs is appended as (cofactor, 1).

    Fixes: the fully-factored case no longer appends a bogus (1, 1) pair
    (the old 'if n:' was truthy for n == 1); floor division (//) is used
    explicitly (Python-3 safe); the default argument is an immutable tuple
    (mutable-default trap) -- callers passing a list are unaffected.
    """
    if not prs:
        _, prs = Linear_Sieve(i_sqrt_small(n))
    fact = []
    for p in prs:
        if n % p == 0:
            exponent = 0
            while n % p == 0:
                n //= p
                exponent += 1
            fact.append((p, exponent))
        if n == 1:
            break
    if n > 1:
        fact.append((n, 1))
    return fact
def gcd_euclid(a, b):
    """Greatest common divisor via the iterative Euclidean algorithm."""
    while b:
        a, b = b, a % b
    return a
def gcd_bin_rec(a, b):
    """Binary (Stein's) gcd of two non-negative integers, iteratively:
    strip common factors of two, then repeatedly subtract-and-halve."""
    if a == 0:
        return b
    if b == 0:
        return a
    shift = 0
    while not ((a | b) & 1):  # both even: remember a shared factor of two
        a >>= 1
        b >>= 1
        shift += 1
    while not (a & 1):  # make a odd
        a >>= 1
    while b:
        while not (b & 1):
            b >>= 1
        if a > b:
            a, b = b, a
        b -= a  # both odd here, so b - a is even
    return a << shift
def gcd_binary(a, b):
    """Sign-insensitive gcd: delegate to the binary algorithm on |a|, |b|."""
    return gcd_bin_rec(abs(a), abs(b))
def exgcd(a, b):
    """Extended Euclid: return (g, x, y) with g == gcd(a, b) and
    a*x + b*y == g.

    Fix: floor division (//) is used explicitly (Python-3 safe; identical
    to the old integer '/' under Python 2).
    """
    if not b:
        return (a, 1, 0)
    g, x, y = exgcd(b, a % b)
    return (g, y, x - (a // b) * y)
def Modular_Inverse(a, n):
    """Multiplicative inverse of a modulo n via extended Euclid;
    returns -1 when gcd(a, n) != 1 (no inverse exists)."""
    g, x, _ = exgcd(a, n)
    return x % n if g == 1 else -1
def Jacobi_2(b):
    """Jacobi symbol (2/b) for odd b: +1 when b = +/-1 (mod 8), else -1."""
    return 1 if (b & 7) in (1, 7) else -1
def Recurse_Jacobi(a, b):
    # Jacobi symbol (a/b) for odd b > 0, assuming gcd(a, b) == 1
    # (callers go through Jacobi(), which checks the gcd first).
    a = a % b
    if b == 1:
        return 1
    mul = 1
    pg = Jacobi_2(b)  # each factored-out 2 contributes a (2/b) factor
    while not (a & 1):
        a >>= 1
        mul = mul * pg
    if a == 1:
        return mul
    t = Recurse_Jacobi(b, a)  # quadratic-reciprocity flip
    # the flip changes sign only when both a and b are 3 (mod 4)
    if (a & 3) == 1 or (b & 3) == 1:
        return mul * t
    return -mul * t
def Jacobi(a, b):
    """Jacobi symbol (a/b): 0 when gcd(a, b) != 1, otherwise +/-1
    computed by the recursive helper."""
    if gcd_binary(a, b) != 1:
        return 0
    return Recurse_Jacobi(a, b)
def Lucas_Delta(n, D):
    """delta(n) = n - (D/n): the sequence index used by the Lucas test."""
    return n - Jacobi(D, n)
def Lucas_Double(n, STATE, P, Q, D):
    """Index-doubling step for Lucas sequences mod n.

    STATE is (U_k, V_k, Q^k); returns (U_2k, V_2k, Q^2k). P and D are
    unused here but kept so all step functions share one signature.
    """
    u, v, qk = STATE
    doubled_u = u * v % n
    doubled_v = (v * v - 2 * qk) % n
    return (doubled_u, doubled_v, qk * qk % n)
def ModularDiv2(a, n):
    """Exact halving of a modulo n (n odd, not checked)."""
    if a & 1:
        # a odd: a + n is even because n is odd
        return ((a + n) >> 1) % n
    return (a >> 1) % n
def Lucas_Nextst(n, STATE, P, Q, D):
    """Index+1 step: (U_k, V_k, Q^k) -> (U_{k+1}, V_{k+1}, Q^{k+1}) mod n."""
    u, v, qk = STATE
    next_u = ModularDiv2(P * u + v, n)
    next_v = ModularDiv2(D * u + P * v, n)
    return (next_u, next_v, qk * Q % n)
def Lucas_Eth(n, P, Q, D, e):
    # Compute (U_e, V_e, Q^e) mod n by binary recursion on the index e.
    # e>0, not checked
    if e == 1:
        return (1, P, Q)  # base case: (U_1, V_1, Q^1)
    else:
        T = Lucas_Eth(n, P, Q, D, e >> 1)
        T = Lucas_Double(n, T, P, Q, D)
        if e & 1:
            T = Lucas_Nextst(n, T, P, Q, D)
        return T
def Lucas(n, P, Q, D):
    """Strong Lucas probable-prime test for odd n.

    Preconditions (not checked): D == P*P - 4*Q and gcd(n, 2*D) == 1.
    With delta(n) == d * 2**r (d odd), returns True when U_d == 0 or some
    V_{d*2^i} == 0 for 0 <= i < r; False otherwise.

    Fixes: xrange -> range (Python-3 safe); removed the unused
    'ST = (u, v, q)' assignment.
    """
    d, r = BinaryDecompose(Lucas_Delta(n, D))
    # usually we let Q = 1 and (D/n) = -1 (the strong-Lucas setup of BPSW)
    u, v, q = Lucas_Eth(n, P, Q, D, d)
    if u == 0:
        return True
    for _ in range(r):
        if v == 0:
            return True
        u, v, q = Lucas_Double(n, (u, v, q), P, Q, D)
    return False
def PerfectSquare(n):
    """True iff n is a perfect square."""
    root = i_sqrt(n)
    return root * root == n
def BPSW(n):
    """Baillie-PSW probable-prime test (no composite is known to pass it).

    Pipeline: sieve lookup for small n; trial division by a few small
    primes; a base-2 Miller round; perfect-square rejection (a square can
    never satisfy (D/n) == -1); then a strong Lucas test with D chosen
    from -7, 9, -11, 13, ... until (D/n) == -1, P = 1, Q = (1 - D)/4.

    Fixes: xrange -> range and floor division (//) for Python-3 safety
    (identical behavior on Python 2; every D in the sequence is 1 mod 4,
    so 1 - D is exactly divisible by 4).
    """
    if n < len(sim_prr):
        return not sim_prr[n]
    for i in range(min(len(sim_primes), 20)):
        if not (n % sim_primes[i]):
            return False
    if not Miller(2, n):
        return False
    if PerfectSquare(n):
        return False
    P, D, d = 1, -7, -2
    while Jacobi(D, n) != -1:
        D = -(D + d)
        d = -d
    Q = (1 - D) // 4
    if not Lucas(n, P, Q, D):
        return False
    return True
def getNonResidue(P):
    """Random quadratic non-residue modulo the prime P (rejection sampling;
    about half of all residues qualify, so this terminates quickly)."""
    while True:
        candidate = randint(0, P - 1)
        if Jacobi(candidate, P) == -1:
            return candidate
def TonelliShanks(n, P):
    """Square root of n modulo the odd prime P via Tonelli-Shanks.

    Preconditions (not checked): P prime and (n/P) == 1. Returns R with
    R*R % P == n % P, or -1 on failure.

    Fix: floor division (//) in the exponent (Q + 1)//2 so it stays an int
    under Python 3 (Q is odd, so the division is exact).
    """
    Q, S = BinaryDecompose(P - 1)  # P - 1 == Q * 2**S with Q odd
    if S == 1:
        # P == 3 (mod 4): direct formula
        return pow(n, (P + 1) >> 2, P)
    z = getNonResidue(P)
    c = pow(z, Q, P)
    R, t, M = pow(n, (Q + 1) // 2, P), pow(n, Q, P), S
    while M > 1:
        if t == 1:
            return R
        # find the least i < M with t^(2^i) == 1
        i, tt = 1, t * t % P
        while i < M:
            if tt == 1:
                break
            i = i + 1
            tt = tt * tt % P
        b = pow(c, 1 << (M - i - 1), P)
        R, c = R * b % P, b * b % P
        t = t * c % P
        M = i
    if t == 1:
        return R
    return -1
"""
def Main_QuadResidue(n,P):
if Jacobi(n,P)!=1:
print "No residue"
return
a=TonelliShanks(n,P)
print "%d^2(%d)=%d(mod %d)"%(a,a*a%P,n,P)
# helper function
"""
# (OmegaSq/P)=-1
def Fpsq_add(STa, STb, P, OmegaSq):
    """Componentwise addition in F_p[omega] (omega^2 = OmegaSq); elements
    are (x, y) pairs representing x + y*omega. OmegaSq is unused here but
    kept for a uniform operation signature."""
    (x1, y1), (x2, y2) = STa, STb
    return ((x1 + x2) % P, (y1 + y2) % P)
def Fpsq_mul(STa, STb, P, OmegaSq):
    """Product (x1 + y1*omega)(x2 + y2*omega) in F_p[omega], where
    omega^2 = OmegaSq."""
    x1, y1 = STa
    x2, y2 = STb
    scalar_part = (x1 * x2 + y1 * y2 * OmegaSq) % P
    omega_part = (x1 * y2 + y1 * x2) % P
    return (scalar_part, omega_part)
def Fpsq_pow(STa, e, P, OmegaSq):
    """STa ** e in F_p[omega] by right-to-left binary exponentiation."""
    acc = (1, 0)  # multiplicative identity
    base = STa
    while e:
        if e & 1:
            acc = Fpsq_mul(acc, base, P, OmegaSq)
        base = Fpsq_mul(base, base, P, OmegaSq)
        e >>= 1
    return acc
def Cipolla(n, P):
    """Square root of n modulo the odd prime P via Cipolla's algorithm.

    Preconditions (not checked): P prime and (n/P) == 1. Picks a random a
    with a*a - n a non-residue, then computes (a + omega)^((P+1)/2) in
    F_p[omega] with omega^2 = a*a - n; the result lies in F_p and squares
    to n.

    Fix: floor division (//) in the exponent (P + 1)//2 (exact since P is
    odd; Python-3 safe, identical on Python 2).
    """
    a = randint(0, P - 1)
    while Jacobi(a * a - n, P) != -1:
        a = randint(0, P - 1)
    omega_sq = (a * a - n) % P
    root, _ = Fpsq_pow((a, 1), (P + 1) // 2, P, omega_sq)
    return root
| iamstupid/toybi | 信仰的崩塌.py | Python | unlicense | 6,123 |
"""
kombu.mixins
============
Useful mixin classes.
"""
from __future__ import absolute_import
from __future__ import with_statement
import socket
from contextlib import contextmanager
from functools import partial
from itertools import count
from .common import ignore_errors
from .messaging import Consumer
from .log import get_logger
from .utils import cached_property, nested
from .utils.encoding import safe_repr
from .utils.limits import TokenBucket
__all__ = ['ConsumerMixin']
logger = get_logger(__name__)
debug, info, warn, error = logger.debug, logger.info, logger.warn, logger.error
class ConsumerMixin(object):
"""Convenience mixin for implementing consumer threads.
It can be used outside of threads, with threads, or greenthreads
(eventlet/gevent) too.
The basic class would need a :attr:`connection` attribute
which must be a :class:`~kombu.Connection` instance,
and define a :meth:`get_consumers` method that returns a list
of :class:`kombu.Consumer` instances to use.
Supporting multiple consumers is important so that multiple
channels can be used for different QoS requirements.
**Example**:
.. code-block:: python
class Worker(ConsumerMixin):
task_queue = Queue('tasks', Exchange('tasks'), 'tasks'))
def __init__(self, connection):
self.connection = None
def get_consumers(self, Consumer, channel):
return [Consumer(queues=[self.task_queue],
callback=[self.on_task])]
def on_task(self, body, message):
print('Got task: %r' % (body, ))
message.ack()
**Additional handler methods**:
* :meth:`extra_context`
Optional extra context manager that will be entered
after the connection and consumers have been set up.
Takes arguments ``(connection, channel)``.
* :meth:`on_connection_error`
Handler called if the connection is lost/ or
is unavailable.
Takes arguments ``(exc, interval)``, where interval
is the time in seconds when the connection will be retried.
The default handler will log the exception.
* :meth:`on_connection_revived`
Handler called when the connection is re-established
after connection failure.
Takes no arguments.
* :meth:`on_consume_ready`
Handler called when the consumer is ready to accept
messages.
Takes arguments ``(connection, channel, consumers)``.
Also keyword arguments to ``consume`` are forwarded
to this handler.
* :meth:`on_consume_end`
Handler called after the consumers are cancelled.
Takes arguments ``(connection, channel)``.
* :meth:`on_iteration`
Handler called for every iteration while draining
events.
Takes no arguments.
* :meth:`on_decode_error`
Handler called if a consumer was unable to decode
the body of a message.
Takes arguments ``(message, exc)`` where message is the
original message object.
The default handler will log the error and
acknowledge the message, so if you override make
sure to call super, or perform these steps yourself.
"""
#: maximum number of retries trying to re-establish the connection,
#: if the connection is lost/unavailable.
connect_max_retries = None
#: When this is set to true the consumer should stop consuming
#: and return, so that it can be joined if it is the implementation
#: of a thread.
should_stop = False
def get_consumers(self, Consumer, channel):
raise NotImplementedError('Subclass responsibility')
def on_connection_revived(self):
pass
def on_consume_ready(self, connection, channel, consumers, **kwargs):
pass
def on_consume_end(self, connection, channel):
pass
def on_iteration(self):
pass
def on_decode_error(self, message, exc):
error("Can't decode message body: %r (type:%r encoding:%r raw:%r')",
exc, message.content_type, message.content_encoding,
safe_repr(message.body))
message.ack()
def on_connection_error(self, exc, interval):
warn('Broker connection error: %r. '
'Trying again in %s seconds.', exc, interval)
@contextmanager
def extra_context(self, connection, channel):
yield
def run(self):
while not self.should_stop:
try:
if self.restart_limit.can_consume(1):
for _ in self.consume(limit=None):
pass
except self.connection.connection_errors:
warn('Connection to broker lost. '
'Trying to re-establish the connection...')
def consume(self, limit=None, timeout=None, safety_interval=1, **kwargs):
elapsed = 0
with self.Consumer() as (connection, channel, consumers):
with self.extra_context(connection, channel):
self.on_consume_ready(connection, channel, consumers, **kwargs)
for i in limit and xrange(limit) or count():
if self.should_stop:
break
self.on_iteration()
try:
connection.drain_events(timeout=safety_interval)
except socket.timeout:
elapsed += safety_interval
if timeout and elapsed >= timeout:
raise socket.timeout()
except socket.error:
if not self.should_stop:
raise
else:
yield
elapsed = 0
debug('consume exiting')
def maybe_conn_error(self, fun):
    """Use :func:`kombu.common.ignore_errors` instead.

    Deprecated thin wrapper kept for backwards compatibility.
    """
    return ignore_errors(self, fun)
@contextmanager
def establish_connection(self):
    """Context manager yielding a connected clone of the connection.

    Retries connecting up to :attr:`connect_max_retries` times,
    reporting each failure through :meth:`on_connection_error`.
    """
    with self.connection.clone() as conn:
        conn.ensure_connection(self.on_connection_error,
                               self.connect_max_retries)
        yield conn
@contextmanager
def Consumer(self):
    """Context manager setting up connection, channel and consumers.

    Yields a ``(connection, channel, consumers)`` tuple; on exit the
    consumers are cancelled and :meth:`on_consume_end` is fired.
    """
    with self.establish_connection() as conn:
        self.on_connection_revived()
        info('Connected to %s', conn.as_uri())
        channel = conn.default_channel
        # Partially apply the Consumer class so get_consumers() only
        # has to supply queues/callbacks.
        cls = partial(Consumer, channel,
                      on_decode_error=self.on_decode_error)
        with self._consume_from(*self.get_consumers(cls, channel)) as c:
            yield conn, channel, c
            debug('Consumers cancelled')
        self.on_consume_end(conn, channel)
    debug('Connection closed')
def _consume_from(self, *consumers):
    """Return a context manager entering all *consumers* at once."""
    return nested(*consumers)
@cached_property
def restart_limit(self):
    """Token bucket throttling consume-loop restarts."""
    # The AttributeError that can be caught from amqplib poses problems
    # for the too-often-restarts protection in
    # Connection.ensure_connection, so restarts are throttled here.
    # NOTE(review): TokenBucket(1) -- presumably a fill rate of one
    # token per second; confirm TokenBucket's constructor semantics.
    return TokenBucket(1)
@cached_property
def connection_errors(self):
    """Connection-level error classes of the current transport."""
    return self.connection.connection_errors

@cached_property
def channel_errors(self):
    """Channel-level error classes of the current transport."""
    return self.connection.channel_errors
| mozilla/firefox-flicks | vendor-local/lib/python/kombu/mixins.py | Python | bsd-3-clause | 7,526 |
#!/usr/bin/env python
"""
This program is to collect some AWS stuff
"""
import boto3
import threading
from local_helpers.config import accounts_db
from local_helpers import assume_role, get_session, misc
from local_helpers import ec2_helper
if __name__ == "__main__":
    """main"""
    """bucket to hold results"""
    output_bucket = []
    """out of accounts_db, get list of available regions from first account"""
    regions = get_session.regions()
    if regions:
        """generate output header"""
        output_bucket.append(ec2_helper.sg_rule_sets_header())
        """go through each account and traverse each region"""
        """------------------------------------------------"""
        multi_thread = []
        """get tuple: account -> dict:, temp role creds -> dict:"""
        for account in accounts_db.accounts:
            """get temp creds from trusting roles"""
            role = assume_role.new_role(account)
            if role:
                for region in regions.get('Regions'):
                    """make an ec2 type client connection"""
                    ec2 = get_session.connect(
                        role,
                        'ec2',
                        region_name=region.get('RegionName'))
                    """
                    call ec2.describe_instances() in multithreading mode
                    for better performance. otherwise performance sucks.
                    """
                    # One worker thread per (account, region); each worker
                    # writes its rows into the shared output_bucket.
                    # NOTE(review): list.append is atomic in CPython, but
                    # confirm ec2_helper.sg_rule_sets only appends.
                    thread_call = threading.Thread(
                        target=ec2_helper.sg_rule_sets,
                        args=(ec2, account, region, output_bucket))
                    multi_thread.append(thread_call)
                    thread_call.start()
        """wait for all threads to finish"""
        for t in multi_thread:
            t.join()
        """output or export results"""
        if output_bucket:
            misc.output_lines(output_bucket)
| ndoit/awsdit | aws_audit/sec_sgrules_all_regions.py | Python | mit | 1,924 |
#!/usr/bin/env python
#coding:utf-8
import sys
import os
import glob
import platform
import re
import random
import string
# Banner for the interactive run.
print('''
--------------------------------
TeamViewer ID Changer for MAC OS
--------------------------------
''')

# This tool only patches the macOS TeamViewer bundle.
if platform.system() != 'Darwin':
    print('This script can be run only on MAC OS.')
    sys.exit();

# Root is required to delete global prefs and patch /Applications.
if os.geteuid() != 0:
    print('This script must be run form root.')
    sys.exit();

# Python 2 only (has_key): recover the invoking user from sudo so the
# per-user preferences directory can be located.
if os.environ.has_key('SUDO_USER'):
    USERNAME = os.environ['SUDO_USER']
    if USERNAME == 'root':
        print('Can not find user name. Run this script via sudo from regular user')
        sys.exit();
else:
    print('Can not find user name. Run this script via sudo from regular user')
    sys.exit();

# Preference locations searched for TeamViewer config files.
HOMEDIRLIB = '/Users/' + USERNAME + '/library/preferences/'
GLOBALLIB = '/library/preferences/'
CONFIGS = []
# Find config files
def listdir_fullpath(d):
    """Return every entry of directory *d* joined with *d* itself."""
    entries = os.listdir(d)
    return [os.path.join(d, entry) for entry in entries]
# Collect TeamViewer preference files from the user's prefs dir.
# NOTE(review): 'teamviewer'.lower() is redundant -- the literal is
# already lowercase.
for file in listdir_fullpath(HOMEDIRLIB):
    if 'teamviewer'.lower() in file.lower():
        CONFIGS.append(file)

if not CONFIGS:
    print ('''
There is no TemViewer configs found.
Maybe you have deleted it manualy or never run TeamViewer after installation.
Nothing to delete.
''')
# Delete config files
else:
    print("Configs found:\n")
    for file in CONFIGS:
        print file
    print('''
This files will be DELETED permanently.
All TeamViewer settings will be lost
''')
    raw_input("Press Enter to continue or CTR+C to abort...")
    for file in CONFIGS:
        try:
            os.remove(file)
        except:
            # NOTE(review): bare except -- any failure is reported as a
            # permissions problem, which is a guess.
            print("Cannot delete config files. Permission denied?")
            sys.exit();
    print("Done.")

# The binaries that embed the platform/serial identifiers to patch.
TMBINARYES = [
    '/Applications/TeamViewer.app/Contents/MacOS/TeamViewer',
    '/Applications/TeamViewer.app/Contents/MacOS/TeamViewer_Service',
    '/Applications/TeamViewer.app/Contents/Helpers/TeamViewer_Desktop',
]

# Abort unless every expected binary is present.
for file in TMBINARYES:
    if os.path.exists(file):
        pass
    else:
        print("File not found: " + file)
        print ("Install TeamViewer correctly")
        sys.exit();
# Patch files
def idpatch(fpath, platf, serial):
    """Rewrite the platform/serial markers inside the binary at *fpath*.

    Replaces the ``IOPlatformExpert``+6-chars device string with *platf*
    and the 8-character alphanumeric serial embedded between
    ``IOPlatformSerialNumber`` and ``UUID`` with *serial*.
    Python 2 only: str regexes are applied directly to binary data.
    """
    # NOTE(review): the read handle is never closed, and the file is
    # re-opened for writing while the first handle may still be open --
    # works on CPython via refcounting, but is sloppy.
    file = open(fpath, 'r+b')
    binary = file.read()
    PlatformPattern = "IOPlatformExpert.{6}"
    SerialPattern = "IOPlatformSerialNumber%s%s%sUUID"
    binary = re.sub(PlatformPattern, platf, binary)
    binary = re.sub(SerialPattern % (chr(0), "[0-9a-zA-Z]{8,8}", chr(0)), SerialPattern % (chr(0), serial, chr(0)), binary)
    file = open(fpath, 'wb').write(binary)
    return True
def random_generator(size=8, chars=string.ascii_uppercase + string.digits):
    """Return a random string of *size* characters drawn from *chars*."""
    picks = [random.choice(chars) for _ in range(size)]
    return ''.join(picks)
# Generate the replacement identifiers.
RANDOMSERIAL = random_generator()
RANDOMPLATFORM = "IOPlatformExpert" + random_generator(6)

# Patch every TeamViewer binary; abort on the first failure.
for file in TMBINARYES:
    try:
        idpatch(file, RANDOMPLATFORM, RANDOMSERIAL)
    except:
        # NOTE(review): bare except -- any error is reported as a
        # version mismatch, which is a guess.
        print "Error: can not patch file " + file
        print "Wrong version?"
        sys.exit();

print "PlatformDevice: " + RANDOMPLATFORM
print "PlatformSerial: " + RANDOMSERIAL
print('''
ID changed sucessfully.
!!! Restart computer before using TeamViewer !!!!
''')
| cheenwe/cheenwe.github.io | _posts/sh/python/teamview.py | Python | mit | 3,173 |
#!/usr/bin/env python
from __future__ import print_function
import argparse
import math
import os
import os.path
import re
import subprocess
import sys
# Runs the benchmarks.
#
# It runs several benchmarks across several languages. For each
# benchmark/language pair, it runs a number of trials. Each trial is one run of
# a single benchmark script. It spawns a process and runs the script. The
# script itself is expected to output some result which this script validates
# to ensure the benchmark is running correctly. Then the benchmark prints an
# elapsed time. The benchmark is expected to do the timing itself and only time
# the interesting code under test.
#
# This script then runs several trials and takes the best score. (It does
# multiple trials to account for random variance in running time coming from
# OS, CPU rate-limiting, etc.) It takes the best time on the assumption that
# that represents the language's ideal performance and any variance coming from
# the OS will just slow it down.
#
# After running a series of trials the benchmark runner will compare all of the
# language's performance for a given benchmark. It compares by running time
# and score, which is just the inverse running time.
#
# For Wren benchmarks, it can also compare against a "baseline". That's a
# recorded result of a previous run of the Wren benchmarks. This is useful --
# critical, actually -- for seeing how Wren performance changes. Generating a
# set of baselines before a change to the VM and then comparing those to the
# performance after a change is how we track improvements and regressions.
#
# To generate a baseline file, run this script with "--generate-baseline".
# Repo layout: WREN_DIR is two directory levels above this script; the
# wren binary lives in <WREN_DIR>/bin, benchmark scripts in
# <WREN_DIR>/test/benchmark.
WREN_DIR = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
WREN_BIN = os.path.join(WREN_DIR, 'bin')
BENCHMARK_DIR = os.path.join(WREN_DIR, 'test', 'benchmark')

# How many times to run a given benchmark.
NUM_TRIALS = 10
# Registry of benchmarks as [name, expected-output regex, baseline score].
BENCHMARKS = []


def BENCHMARK(name, pattern):
    """Register a benchmark.

    The compiled regex validates the benchmark's output and captures
    the trailing "elapsed: <seconds>" value; the third slot holds the
    baseline score (filled in later, None until then).
    """
    expected = re.compile(pattern + "\n" + r"elapsed: (\d+\.\d+)", re.MULTILINE)
    BENCHMARKS.append([name, expected, None])
# Registered benchmarks: each expected-output pattern doubles as a
# correctness check on the benchmark run.
BENCHMARK("binary_trees", """stretch tree of depth 13 check: -1
8192 trees of depth 4 check: -8192
2048 trees of depth 6 check: -2048
512 trees of depth 8 check: -512
128 trees of depth 10 check: -128
32 trees of depth 12 check: -32
long lived tree of depth 12 check: -1""")
BENCHMARK("binary_trees_gc", """stretch tree of depth 13 check: -1
8192 trees of depth 4 check: -8192
2048 trees of depth 6 check: -2048
512 trees of depth 8 check: -512
128 trees of depth 10 check: -128
32 trees of depth 12 check: -32
long lived tree of depth 12 check: -1""")
BENCHMARK("delta_blue", "14065400")
BENCHMARK("fib", r"""317811
317811
317811
317811
317811""")
BENCHMARK("fibers", r"""4999950000""")
BENCHMARK("for", r"""499999500000""")
BENCHMARK("method_call", r"""true
false""")
BENCHMARK("map_numeric", r"""500000500000""")
BENCHMARK("map_string", r"""12799920000""")
BENCHMARK("string_equals", r"""3000000""")

# (display name, interpreter argv prefix, source file extension)
LANGUAGES = [
    ("wren", [os.path.join(WREN_BIN, 'wren')], ".wren"),
    ("lua", ["lua"], ".lua"),
    ("luajit (-joff)", ["luajit", "-joff"], ".lua"),
    ("python", ["python"], ".py"),
    ("python3", ["python3"], ".py"),
    ("ruby", ["ruby"], ".rb")
]

# benchmark name -> {language name -> result dict}
results = {}

# ANSI color escapes (disabled on Windows consoles).
if sys.platform == 'win32':
    GREEN = NORMAL = RED = YELLOW = ''
else:
    GREEN = '\033[32m'
    NORMAL = '\033[0m'
    RED = '\033[31m'
    YELLOW = '\033[33m'
def green(text):
    """Return *text* wrapped in the green ANSI escape (no-op on Windows)."""
    return ''.join((GREEN, text, NORMAL))


def red(text):
    """Return *text* wrapped in the red ANSI escape (no-op on Windows)."""
    return ''.join((RED, text, NORMAL))


def yellow(text):
    """Return *text* wrapped in the yellow ANSI escape (no-op on Windows)."""
    return ''.join((YELLOW, text, NORMAL))
def get_score(time):
    """Convert a running time into a score (scaled inverse time).

    Faster runs yield bigger numbers; the factor of 1000 only puts the
    result into a readable range.
    """
    return 1000.0 / time
def standard_deviation(times):
    """Return the population standard deviation of *times*."""
    mean = sum(times) / len(times)
    # Sum of squared deviations from the mean.
    squared_diffs = sum((t - mean) ** 2 for t in times)
    return math.sqrt(squared_diffs / len(times))
def run_trial(benchmark, language):
    """Runs one benchmark one time for one language.

    Returns the elapsed time parsed from the script's output, or None
    when the interpreter is missing or the output fails validation.
    """
    args = []
    args.extend(language[1])
    args.append(os.path.join(BENCHMARK_DIR, benchmark[0] + language[2]))
    try:
        out = subprocess.check_output(args, universal_newlines=True)
    except OSError:
        print('Interpreter was not found')
        return None
    # The regex validates the benchmark output and captures the
    # trailing "elapsed: <seconds>" value.
    match = benchmark[1].match(out)
    if match:
        return float(match.group(1))
    else:
        print("Incorrect output:")
        print(out)
        return None
def run_benchmark_language(benchmark, language, benchmark_result):
    """
    Runs one benchmark for a number of trials for one language.

    Adds the result to benchmark_result, which is a map of language names
    to results.  Returns the best score, or None on failure/skip.
    """
    name = "{0} - {1}".format(benchmark[0], language[0])
    print("{0:30s}".format(name), end=' ')
    # Skip silently when this language has no implementation file.
    if not os.path.exists(os.path.join(
            BENCHMARK_DIR, benchmark[0] + language[2])):
        print("No implementation for this language")
        return
    times = []
    for i in range(0, NUM_TRIALS):
        sys.stdout.flush()
        time = run_trial(benchmark, language)
        # NOTE(review): a (theoretical) elapsed time of 0.0 would also
        # be treated as a failed trial by this truthiness test.
        if not time:
            return
        times.append(time)
        sys.stdout.write(".")
    # Best-of-N: the minimum time is taken as the language's ideal run.
    best = min(times)
    score = get_score(best)
    comparison = ""
    if language[0] == "wren":
        # Compare against the recorded baseline score, if any.
        if benchmark[2] != None:
            ratio = 100 * score / benchmark[2]
            comparison = "{:6.2f}% relative to baseline".format(ratio)
            if ratio > 105:
                comparison = green(comparison)
            if ratio < 95:
                comparison = red(comparison)
        else:
            comparison = "no baseline"
    else:
        # Hack: assumes wren gets run first.
        wren_score = benchmark_result["wren"]["score"]
        ratio = 100.0 * wren_score / score
        comparison = "{:6.2f}%".format(ratio)
        if ratio > 105:
            comparison = green(comparison)
        if ratio < 95:
            comparison = red(comparison)
    print(" {:4.2f}s {:4.4f} {:s}".format(
        best,
        standard_deviation(times),
        comparison))
    benchmark_result[language[0]] = {
        "desc": name,
        "times": times,
        "score": score
    }
    return score
def run_benchmark(benchmark, languages, graph):
    """Runs one benchmark for the given languages (or all of them)."""
    benchmark_result = {}
    results[benchmark[0]] = benchmark_result
    num_languages = 0
    for language in LANGUAGES:
        # An empty/None `languages` means: run every language.
        if not languages or language[0] in languages:
            num_languages += 1
            run_benchmark_language(benchmark, language, benchmark_result)
    # Only draw the comparison graph when there is something to compare.
    if num_languages > 1 and graph:
        graph_results(benchmark_result)
def graph_results(benchmark_result):
    """Print an ASCII scatter of trial scores, scaled to the best score.

    Each column is a score bucket; repeated hits in the same bucket
    escalate the marker: '-' -> 'o' -> 'O' -> '0'.
    """
    print()
    INCREMENT = {
        '-': 'o',
        'o': 'O',
        'O': '0',
        '0': '0'
    }
    # Scale everything by the highest score.
    highest = 0
    for language, result in benchmark_result.items():
        score = get_score(min(result["times"]))
        if score > highest: highest = score
    print("{0:30s}0 {1:66.0f}".format("", highest))
    for language, result in benchmark_result.items():
        line = ["-"] * 68
        for time in result["times"]:
            # Map each trial's score onto one of 68 columns.
            index = int(get_score(time) / highest * 67)
            line[index] = INCREMENT[line[index]]
        print("{0:30s}{1}".format(result["desc"], "".join(line)))
    print()
def read_baseline():
    """Load baseline scores ("name,score" lines) into BENCHMARKS[i][2].

    Silently does nothing when no baseline file has been generated yet.
    """
    baseline_file = os.path.join(BENCHMARK_DIR, "baseline.txt")
    if os.path.exists(baseline_file):
        with open(baseline_file) as f:
            for line in f.readlines():
                name, best = line.split(",")
                for benchmark in BENCHMARKS:
                    if benchmark[0] == name:
                        benchmark[2] = float(best)
def generate_baseline():
    """Run every benchmark for wren and record best scores to baseline.txt."""
    print("generating baseline")
    baseline_text = ""
    for benchmark in BENCHMARKS:
        # LANGUAGES[0] is wren.
        best = run_benchmark_language(benchmark, LANGUAGES[0], {})
        baseline_text += ("{},{}\n".format(benchmark[0], best))
    # Write them to a file.
    baseline_file = os.path.join(BENCHMARK_DIR, "baseline.txt")
    with open(baseline_file, 'w') as out:
        out.write(baseline_text)
def print_html():
    '''Print the results of four selected benchmarks as an HTML chart.'''
    def print_benchmark(benchmark, name):
        # Emit one <table> of horizontal bars for a single benchmark.
        print('<h3>{}</h3>'.format(name))
        print('<table class="chart">')
        # Scale everything by the highest time.
        highest = 0
        for language, result in results[benchmark].items():
            time = min(result["times"])
            if time > highest: highest = time
        # Fastest language (highest score) first.
        languages = sorted(results[benchmark].keys(),
            key=lambda lang: results[benchmark][lang]["score"], reverse=True)
        for language in languages:
            result = results[benchmark][language]
            time = float(min(result["times"]))
            ratio = int(100 * time / highest)
            css_class = "chart-bar"
            if language == "wren":
                css_class += " wren"
            print(' <tr>')
            print(' <th>{}</th><td><div class="{}" style="width: {}%;">{:4.2f}s </div></td>'.format(
                language, css_class, ratio, time))
            print(' </tr>')
        print('</table>')
    print_benchmark("method_call", "Method Call")
    print_benchmark("delta_blue", "DeltaBlue")
    print_benchmark("binary_trees", "Binary Trees")
    print_benchmark("fib", "Recursive Fibonacci")
def main():
    """Parse CLI arguments and run the selected benchmarks."""
    parser = argparse.ArgumentParser(description="Run the benchmarks")
    parser.add_argument("benchmark", nargs='?',
                        default="all",
                        help="The benchmark to run")
    parser.add_argument("--generate-baseline",
                        action="store_true",
                        help="Generate a baseline file")
    parser.add_argument("--graph",
                        action="store_true",
                        help="Display graph results.")
    parser.add_argument("-l", "--language",
                        action="append",
                        help="Which language(s) to run benchmarks for")
    parser.add_argument("--output-html",
                        action="store_true",
                        help="Output the results chart as HTML")
    args = parser.parse_args()
    if args.generate_baseline:
        generate_baseline()
        return
    # Load recorded wren baselines, if present, for comparison.
    read_baseline()
    # Run the benchmarks.
    for benchmark in BENCHMARKS:
        if benchmark[0] == args.benchmark or args.benchmark == "all":
            run_benchmark(benchmark, args.language, args.graph)
    if args.output_html:
        print_html()


# NOTE(review): no `if __name__ == "__main__":` guard -- importing this
# module runs the benchmarks immediately.
main()
| Rohansi/wren | util/benchmark.py | Python | mit | 10,277 |
from matplotlib import pyplot as plt
import numpy as np
from types import FunctionType
class HashableDict(dict):
    """A dict that can be used as a hash key.

    The hash is computed over the tuple of sorted (key, value) pairs.
    Based on http://code.activestate.com/recipes/414283-frozen-dictionaries/
    """

    def __hash__(self):
        pairs = sorted(self.items())
        return hash(tuple(pairs))
def imagesc(data, dest=None, grayscale=True, vmin=None, vmax=None):
    """Display *data* as a matrix image, MATLAB ``imagesc``-style.

    :param data: 2-D array-like to display.
    :param dest: optional matplotlib Axes to draw into; a new figure is
        created when omitted.
    :param grayscale: use the gray colormap (otherwise matplotlib default).
    :param vmin: lower color-scale limit passed through to ``matshow``.
    :param vmax: upper color-scale limit passed through to ``matshow``.
    :returns: the image object returned by ``matshow``.
    """
    plt.ion()  # interactive mode, so show() does not block
    cmap = plt.cm.gray if grayscale else None
    if dest is None:
        fig = plt.figure(figsize=(7,4))
        show = plt.matshow(data, cmap=cmap, fignum=fig.number, vmin=vmin, vmax=vmax)
        # Hide tick marks/labels on both axes.
        plt.axes().get_xaxis().set_visible(False)
        plt.axes().get_yaxis().set_visible(False)
    else:
        show = dest.matshow(data, cmap=cmap, vmin=vmin, vmax=vmax)
        dest.axes.get_xaxis().set_visible(False)
        dest.axes.get_yaxis().set_visible(False)
    plt.show()
    return show
def isunique(lst):
    """True when *lst* contains exactly one distinct value."""
    distinct = set(lst)
    return len(distinct) == 1
def isfunction(x):
    """True when *x* is a plain Python function (types.FunctionType)."""
    return isinstance(x, FunctionType)
def sigmoid(x):
    """Logistic sigmoid 1/(1 + e^-x), elementwise on numpy arrays."""
    negated_exp = np.exp(-x)
    return 1.0 / (1.0 + negated_exp)
def deriv_sigmoid(x):
    """Derivative of the logistic sigmoid, computed as s * (1 - s)."""
    activation = sigmoid(x)
    return activation * (1. - activation)
def tanh(x):
    """Hyperbolic tangent, delegating to numpy (elementwise on arrays)."""
    return np.tanh(x)
def deriv_tanh(x):
    """Derivative of tanh: 1 - tanh(x)**2, elementwise."""
    t = tanh(x)
    return 1. - np.power(t, 2)
def sumsq(x):
    """Row-wise sum of squares of a 2-D array (summed over axis 1)."""
    squared = np.power(x, 2)
    return np.sum(squared, axis=1)
def vec_to_arr(x):
    """Reshape vector *x* into a 1-by-N row array."""
    row_shape = (1, x.size)
    return x.reshape(row_shape)
def between(x, lower=0, upper=1):
    """True when lower < x < upper (exclusive on both ends)."""
    return lower < x < upper
| gregdetre/evendeeper | rbm/utils/utils.py | Python | mit | 1,332 |
#!/usr/bin/python
#
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This example gets all networks that you have access to with the current login
credentials.
A networkCode should be left out for this request."""
__author__ = ('Nicholas Chen',
'Joseph DiLallo')
# Import appropriate modules from the client library.
from googleads import dfp
def main(client):
    """Print every DFP network accessible with *client*'s credentials."""
    # Initialize appropriate service.
    network_service = client.GetService('NetworkService', version='v201411')
    # Get all networks that you have access to with the current login credentials.
    networks = network_service.getAllNetworks()
    # Display results.
    for network in networks:
        print ('Network with network code \'%s\' and display name \'%s\' was found.'
               % (network['networkCode'], network['displayName']))
    # Python 2 print statement.
    print '\nNumber of results found: %s' % len(networks)
if __name__ == '__main__':
    # Initialize client object.
    # NOTE(review): LoadFromStorage presumably reads the googleads
    # config from its default location -- confirm against googleads docs.
    dfp_client = dfp.DfpClient.LoadFromStorage()
    main(dfp_client)
| coxmediagroup/googleads-python-lib | examples/dfp/v201411/network_service/get_all_networks.py | Python | apache-2.0 | 1,545 |
# -*- coding: utf-8 -*-
#
# This file is part of Django ipinfodb released under the MIT license.
# See the LICENSE for more information.
from django.db import models
class IpCountryManager(models.Manager):
    """Manager adding IP-range lookups for IpCountry rows."""

    def find_ip(self, ip):
        """Return the row whose range start is closest at or below *ip*.

        *ip* is a dotted-quad string; it is converted to an unsigned
        32-bit integer before being compared with ip_start.
        """
        import socket
        import struct
        packed = socket.inet_aton(ip)
        as_int = struct.unpack('!L', packed)[0]
        candidates = self.filter(ip_start__lte=as_int).order_by('-ip_start')
        return candidates[0]
class IpCountry(models.Model):
    """IP range to country mapping row.

    ip_start is the range's first address stored as an unsigned 32-bit
    integer (see IpCountryManager.find_ip), used as the primary key;
    ip_cidr is the human-readable address form.
    """
    ip_start = models.BigIntegerField(primary_key=True)
    # NOTE(review): IPAddressField is deprecated in modern Django in
    # favour of GenericIPAddressField -- left unchanged here.
    ip_cidr = models.IPAddressField()
    country_code = models.CharField(max_length=3)
    objects = IpCountryManager()

    def __unicode__(self):
        # Python 2 string representation, e.g. u'1.2.3.0 (US)'.
        return u'%s (%s)' % (self.ip_cidr, self.country_code)
| pombredanne/django-ipinfodb | django_ipinfodb/models.py | Python | mit | 734 |
from inspect import getcallargs
from decorator import decorator
from covenant.util import toggled_decorator
from covenant.exceptions import (PreconditionViolationError,
PostconditionViolationError)
@toggled_decorator
@decorator
def constrain(func, *args, **kwargs):
    """Enforce constraints on a function defined by its annotations.

    Each annotation should be a callable that takes a single parameter
    and returns a True or False value.  Argument annotations are checked
    before the call (raising PreconditionViolationError on failure) and
    the 'return' annotation is checked afterwards (raising
    PostconditionViolationError).
    """
    # Map every positional/keyword argument onto its parameter name so
    # the matching annotation can be looked up.
    callargs = getcallargs(func, *args, **kwargs)
    for arg, arg_value in callargs.items():
        if arg in func.__annotations__:
            try:
                result = func.__annotations__[arg](arg_value)
            except Exception as e:
                # A predicate that *raises* also counts as a violation.
                raise PreconditionViolationError("{0}: {1}".format(arg_value, e))
            if not result:
                raise PreconditionViolationError(arg_value)
    value = func(*args, **kwargs)
    if "return" in func.__annotations__:
        try:
            result = func.__annotations__["return"](value)
        except Exception as e:
            raise PostconditionViolationError(e)
        if not result:
            raise PostconditionViolationError()
    return value

__all__ = ["constrain"]
| kisielk/covenant | covenant/annotations.py | Python | mit | 1,255 |
#!/usr/bin/env python3
"""
Compute the number of IBD pair at each position.
@Author: wavefancy@gmail.com
Usage:
IBDPairByPos.py
IBDPairByPos.py -h | --help | -v | --version | -f | --format
Notes:
1. Read results from stdin(output from beagle3 fibd), and output results to stdout.
2. See example by -f.
Options:
-h --help Show this screen.
-v --version Show version.
-f --format Show input/output file format example.
"""
import sys
from docopt import docopt
from signal import signal, SIGPIPE, SIG_DFL
signal(SIGPIPE, SIG_DFL)
def ShowFormat():
    '''Print input/output format examples for this tool.'''
    example = '''
#output from beagle3 fibd
------------------------
FGGR111 FGGR114 1 5 9.21E-16
FGGR152 FGGR1351 3 5 6.96E-13
FGGR111 FGGR114 4 6 1.79E-19
#output:
------------------------
1 1
2 1
3 2
4 3
5 1
'''
    print(example)
if __name__ == '__main__':
    args = docopt(__doc__, version='1.0')
    #print(args)
    if(args['--format']):
        ShowFormat()
        sys.exit(-1)

    # Coverage counter: position -> number of IBD pairs spanning it.
    covMap = {} #{pos->cov}
    for line in sys.stdin:
        line = line.strip()
        if line:
            ss = line.split()
            try:
                # Columns 3 and 4 are the segment start/end. Coverage is
                # counted on [start, end) -- the end coordinate excluded,
                # which matches the documented example output.
                start = int(ss[2])
                end = int(ss[3])
                for x in range(start, end):
                    if x not in covMap:
                        covMap[x] = 1
                    else:
                        covMap[x] = covMap[x] + 1
            except ValueError:
                sys.stderr.write('Warning: Parse value error at line: %s\n'%(line))
    # Output positions in ascending order, tab-separated.
    out = sorted(covMap.items(), key=lambda x: x[0])
    for k,v in out:
        sys.stdout.write('%d\t%d\n'%(k,v))
    sys.stdout.flush()
    sys.stdout.close()
    sys.stderr.flush()
    sys.stderr.close()
| wavefancy/BIDMC-PYTHON | Exome/beagle/IBDPairByPos/IBDPairByPos.py | Python | mit | 1,899 |
"""
Copyright (c) 2012 Philip Schliehauf (uniphil@gmail.com) and the
Queen's University Applied Sustainability Centre
This project is hosted on github; for up-to-date code and contacts:
https://github.com/Queens-Applied-Sustainability/PyRTM
This file is part of PyRTM.
PyRTM is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
PyRTM is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with PyRTM. If not, see <http://www.gnu.org/licenses/>.
"""
# import unittest
# import shutil
# import time
# from datetime import datetime
# from .. import cache
# class TestVarsToFile(unittest.TestCase):
# def assertClean(self, inp, res):
# clean = cache.vars_to_file(inp)
# self.assertEqual(clean, res)
# def testOneChar(self):
# self.assertClean(['a'], 'a')
# def testOneString(self):
# self.assertClean(['hello'], 'hello')
# def testOtherType(self):
# self.assertClean([1], '1')
# def testStringJoin(self):
# self.assertClean(['a', 'b'], 'a-b')
# def testCharReplace(self):
# some_illegals = ' !@#$%^&*()+=<>?;"\'[]{}~`'
# for illegal in some_illegals:
# dirty = illegal.join(['a', 'b'])
# self.assertClean([dirty], 'a.b')
# def testGeneratorIn(self):
# self.assertClean((str(i) for i in xrange(2)), '0-1')
# class TestGet(unittest.TestCase):
# def setUp(self):
# self.expensive_fn = lambda c: 1
# self.config = {
# 'description': 'test',
# 'longitude': -75.3,
# 'latitude': 44.22,
# 'time': datetime(2012, 1, 1, 0, 0, 0)
# }
# self.cachedconfig = {
# 'description': 'cachedtest',
# 'longitude': -75.3,
# 'latitude': 44.22,
# 'time': datetime(2012, 1, 1, 0, 0, 0)
# }
# cache.get(self.expensive_fn, self.cachedconfig)
# def testFunc(self):
# result = cache.get(self.expensive_fn, self.config)
# self.assertEqual(result, (1, False))
# def testCached(self):
# result = cache.get(self.expensive_fn, self.cachedconfig)
# self.assertEqual(result, (1, True))
# def tearDown(self):
# shutil.rmtree(cache.CACHE_DIR)
# if __name__ == '__main__':
# unittest.main()
| Queens-Applied-Sustainability/PyRTM | rtm/test/test_cache.py | Python | gpl-3.0 | 2,536 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.